[automerger skipped] [RESTRICT AUTOMERGE] C2AllocatorIon:protect mMappings using mutex am: ac5f5cade2 am: 3b8914d681 -s ours am: 21f7fa7ba4 -s ours am: c284e722b9 -s ours am: 86cfdb6d3f -s ours am: d66b5159a6 -s ours

am skip reason: subject contains skip directive

Original change: https://googleplex-android-review.googlesource.com/c/platform/frameworks/av/+/16622626

Change-Id: I8c5500ffc89cc6af5660c7fad75c4db6358cda58
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..a7614d2
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,13 @@
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Left
+TabWidth: 4
+UseTab: Never
diff --git a/Android.bp b/Android.bp
index 60f0ff1..ee609e1 100644
--- a/Android.bp
+++ b/Android.bp
@@ -57,7 +57,7 @@
             min_sdk_version: "29",
             apex_available: [
                 "//apex_available:platform",
-                "com.android.bluetooth.updatable",
+                "com.android.bluetooth",
                 "com.android.media",
                 "com.android.media.swcodec",
             ],
@@ -86,7 +86,7 @@
     min_sdk_version: "29",
     apex_available: [
         "//apex_available:platform",
-        "com.android.bluetooth.updatable",
+        "com.android.bluetooth",
         "com.android.media",
         "com.android.media.swcodec",
     ],
diff --git a/OWNERS b/OWNERS
index 0be1196..40c65e7 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,7 +1,6 @@
 # Bug component: 1344
 elaurent@google.com
 etalvala@google.com
-hkuang@google.com
 lajos@google.com
 
 # go/android-fwk-media-solutions for info on areas of ownership.
diff --git a/apex/OWNERS b/apex/OWNERS
index 5587f5f..54802d4 100644
--- a/apex/OWNERS
+++ b/apex/OWNERS
@@ -1,6 +1,7 @@
-chz@google.com
-dwkang@google.com
+essick@google.com
 jiyong@google.com
 lajos@google.com
-marcone@google.com
-wjia@google.com
+nchalko@google.com
+
+include platform/packages/modules/common:/MODULES_OWNERS
+
diff --git a/apex/mediaswcodec.32rc b/apex/mediaswcodec.32rc
new file mode 100644
index 0000000..79aef36
--- /dev/null
+++ b/apex/mediaswcodec.32rc
@@ -0,0 +1,6 @@
+service media.swcodec /apex/com.android.media.swcodec/bin/mediaswcodec
+    class main
+    user mediacodec
+    group camera drmrpc mediadrm
+    ioprio rt 4
+    task_profiles ProcessCapacityHigh
diff --git a/apex/mediatranscoding.32rc b/apex/mediatranscoding.32rc
new file mode 100644
index 0000000..5169462
--- /dev/null
+++ b/apex/mediatranscoding.32rc
@@ -0,0 +1,12 @@
+# The media.transcoding service is defined in the com.android.media apex, which goes
+# back to API 29, but we only want it started on API 31+ devices. So we declare it as
+# "disabled" and start it explicitly on boot.
+service media.transcoding /apex/com.android.media/bin/mediatranscoding
+    class main
+    user media
+    group media
+    ioprio rt 4
+    # Restrict to little cores only with system-background cpuset.
+    task_profiles ServiceCapacityLow
+    interface aidl media.transcoding
+    disabled
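
For context, a minimal, hypothetical sketch of how platform code could start this deliberately
disabled service on API 31+ devices. Setting the "ctl.start" property is the usual way to ask init
to start a named service; the function and call site below are illustrative and not part of this
change.

#include <android-base/properties.h>

// Illustrative only: ask init to start the "disabled" media.transcoding
// service once the device is known to run API 31 or newer.
void maybeStartMediaTranscoding() {
    if (android::base::GetIntProperty("ro.build.version.sdk", 0) >= 31) {
        // Writing "ctl.start" tells init to start the named service.
        android::base::SetProperty("ctl.start", "media.transcoding");
    }
}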
diff --git a/camera/Android.bp b/camera/Android.bp
index 6878c20..e44202b 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -43,6 +43,10 @@
     ],
 }
 
+cc_library_headers {
+    name: "camera_headers",
+    export_include_dirs: ["include"],
+}
 cc_library_shared {
     name: "libcamera_client",
 
@@ -109,6 +113,30 @@
 
 }
 
+cc_library_host_static {
+    name: "libcamera_client_host",
+
+    srcs: [
+        "CameraMetadata.cpp",
+        "VendorTagDescriptor.cpp",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libcamera_metadata",
+    ],
+
+    include_dirs: [
+        "system/media/private/camera/include",
+        "frameworks/native/include/media/openmax",
+    ],
+
+    export_include_dirs: [
+        "include",
+        "include/camera"
+    ],
+}
+
 // AIDL interface between camera clients and the camera service.
 filegroup {
     name: "libcamera_client_aidl",
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 03439fd..24c9108 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -68,6 +68,9 @@
         unavailablePhysicalIds16.push_back(String16(id8));
     }
     res = parcel->writeString16Vector(unavailablePhysicalIds16);
+    if (res != OK) return res;
+
+    res = parcel->writeString16(String16(clientPackage));
     return res;
 }
 
@@ -86,6 +89,12 @@
     for (auto& id16 : unavailablePhysicalIds16) {
         unavailablePhysicalIds.push_back(String8(id16));
     }
+
+    String16 tempClientPackage;
+    res = parcel->readString16(&tempClientPackage);
+    if (res != OK) return res;
+    clientPackage = String8(tempClientPackage);
+
     return res;
 }
 
diff --git a/camera/CameraSessionStats.cpp b/camera/CameraSessionStats.cpp
index 28e037f..bc83ec1 100644
--- a/camera/CameraSessionStats.cpp
+++ b/camera/CameraSessionStats.cpp
@@ -112,6 +112,12 @@
         return err;
     }
 
+    int dynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+    if ((err = parcel->readInt32(&dynamicRangeProfile)) != OK) {
+        ALOGE("%s: Failed to read dynamic range profile type from parcel", __FUNCTION__);
+        return err;
+    }
+
     mWidth = width;
     mHeight = height;
     mFormat = format;
@@ -125,6 +131,7 @@
     mHistogramType = histogramType;
     mHistogramBins = std::move(histogramBins);
     mHistogramCounts = std::move(histogramCounts);
+    mDynamicRangeProfile = dynamicRangeProfile;
 
     return OK;
 }
@@ -202,6 +209,11 @@
         return err;
     }
 
+    if ((err = parcel->writeInt32(mDynamicRangeProfile)) != OK) {
+        ALOGE("%s: Failed to write dynamic range profile type", __FUNCTION__);
+        return err;
+    }
+
     return OK;
 }
 
diff --git a/camera/OWNERS b/camera/OWNERS
index d6b95da..385c163 100644
--- a/camera/OWNERS
+++ b/camera/OWNERS
@@ -1,8 +1,7 @@
-epeev@google.com
+# Bug component: 41727
 etalvala@google.com
+arakesh@google.com
+epeev@google.com
 jchowdhary@google.com
 shuzhenwang@google.com
-yinchiayeh@google.com
-# backup owner
-cychen@google.com
-zhijunhe@google.com
+ruchamk@google.com
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 78a77d4..1e748c7 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -173,6 +173,13 @@
 
     void setTorchMode(String cameraId, boolean enabled, IBinder clientBinder);
 
+    // Change the brightness level of the flash unit associated with cameraId to strengthLevel.
+    // If the torch is in the OFF state and strengthLevel > 0, the torch will also be turned ON.
+    void turnOnTorchWithStrengthLevel(String cameraId, int strengthLevel, IBinder clientBinder);
+
+    // Get the brightness level of the flash unit associated with cameraId.
+    int getTorchStrengthLevel(String cameraId);
+
     /**
      * Notify the camera service of a system event.  Should only be called from system_server.
      *
@@ -180,6 +187,8 @@
      */
     const int EVENT_NONE = 0;
     const int EVENT_USER_SWITCHED = 1; // The argument is the set of new foreground user IDs.
+    const int EVENT_USB_DEVICE_ATTACHED = 2; // The arguments are the deviceId and vendorId
+    const int EVENT_USB_DEVICE_DETACHED = 3; // The arguments are the deviceId and vendorId
     oneway void notifySystemEvent(int eventId, in int[] args);
 
     /**
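
A rough usage sketch for the two new methods, assuming the standard C++ proxy that the AIDL
compiler generates for this interface (String maps to String16 and the int return value becomes an
out parameter); everything other than the ICameraService methods is a placeholder, and clamping to
the maximum strength level is omitted.

#include <android/hardware/ICameraService.h>

using android::IBinder;
using android::sp;
using android::String16;
using android::binder::Status;
using android::hardware::ICameraService;

// Query the current torch strength and bump it by one level; raising the
// strength also turns the torch ON if it was OFF, per the comment above.
static Status bumpTorchStrength(const sp<ICameraService>& service,
                                const String16& cameraId,
                                const sp<IBinder>& clientBinder) {
    int32_t level = 0;
    Status status = service->getTorchStrengthLevel(cameraId, &level);
    if (!status.isOk()) return status;
    return service->turnOnTorchWithStrengthLevel(cameraId, level + 1, clientBinder);
}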
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
index c54813c..5f17f5b 100644
--- a/camera/aidl/android/hardware/ICameraServiceListener.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -83,6 +83,8 @@
 
     oneway void onTorchStatusChanged(int status, String cameraId);
 
+    oneway void onTorchStrengthLevelChanged(String cameraId, int newTorchStrength);
+
     /**
      * Notify registered clients about camera access priority changes.
      * Clients which were previously unable to open a certain camera device
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
index bbb0289..f5d0120 100644
--- a/camera/aidl/android/hardware/ICameraServiceProxy.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -37,8 +37,11 @@
     oneway void notifyCameraState(in CameraSessionStats cameraSessionStats);
 
     /**
-     * Reports whether the top activity needs a rotate and crop override.
+     * Returns the necessary rotate and crop override for the top activity which
+     * will be one of ({@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_NONE},
+     * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_90},
+     * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_180},
+     * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_270}).
      */
-    boolean isRotateAndCropOverrideNeeded(String packageName, int sensorOrientation,
-            int lensFacing);
+    int getRotateAndCropOverride(String packageName, int lensFacing, int userId);
 }
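
For reference, a hedged sketch of consuming the new return value from native code, assuming the
standard C++ AIDL proxy for ICameraServiceProxy and the ANDROID_SCALER_ROTATE_AND_CROP_* constants
from <system/camera_metadata.h>; the helper name is made up.

#include <android/hardware/ICameraServiceProxy.h>
#include <system/camera_metadata.h>

using android::String16;
using android::sp;
using android::binder::Status;
using android::hardware::ICameraServiceProxy;

// Ask the proxy which rotate-and-crop override to apply for a client package.
static int32_t queryRotateAndCropOverride(const sp<ICameraServiceProxy>& proxy,
                                          const String16& packageName,
                                          int32_t lensFacing, int32_t userId) {
    int32_t rotateAndCrop = ANDROID_SCALER_ROTATE_AND_CROP_NONE;
    Status status = proxy->getRotateAndCropOverride(packageName, lensFacing,
                                                    userId, &rotateAndCrop);
    return status.isOk() ? rotateAndCrop : ANDROID_SCALER_ROTATE_AND_CROP_NONE;
}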
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 2bccd87..15c9dc9 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -23,6 +23,7 @@
 #include <camera/camera2/OutputConfiguration.h>
 #include <binder/Parcel.h>
 #include <gui/view/Surface.h>
+#include <system/camera_metadata.h>
 #include <utils/String8.h>
 
 namespace android {
@@ -76,6 +77,10 @@
     return mSensorPixelModesUsed;
 }
 
+int OutputConfiguration::getDynamicRangeProfile() const {
+    return mDynamicRangeProfile;
+}
+
 OutputConfiguration::OutputConfiguration() :
         mRotation(INVALID_ROTATION),
         mSurfaceSetID(INVALID_SET_ID),
@@ -84,7 +89,8 @@
         mHeight(0),
         mIsDeferred(false),
         mIsShared(false),
-        mIsMultiResolution(false) {
+        mIsMultiResolution(false),
+        mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
 }
 
 OutputConfiguration::OutputConfiguration(const android::Parcel& parcel) :
@@ -165,6 +171,12 @@
         ALOGE("%s: Failed to read sensor pixel mode(s) from parcel", __FUNCTION__);
         return err;
     }
+    int dynamicProfile;
+    if ((err = parcel->readInt32(&dynamicProfile)) != OK) {
+        ALOGE("%s: Failed to read surface dynamic range profile flag from parcel", __FUNCTION__);
+        return err;
+    }
+
     mRotation = rotation;
     mSurfaceSetID = setID;
     mSurfaceType = surfaceType;
@@ -181,6 +193,7 @@
     }
 
     mSensorPixelModesUsed = std::move(sensorPixelModesUsed);
+    mDynamicRangeProfile = dynamicProfile;
 
     ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d,"
           " physicalCameraId = %s, isMultiResolution = %d", __FUNCTION__, mRotation,
@@ -199,6 +212,7 @@
     mIsShared = isShared;
     mPhysicalCameraId = physicalId;
     mIsMultiResolution = false;
+    mDynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
 }
 
 OutputConfiguration::OutputConfiguration(
@@ -207,7 +221,8 @@
     int width, int height, bool isShared)
   : mGbps(gbps), mRotation(rotation), mSurfaceSetID(surfaceSetID), mSurfaceType(surfaceType),
     mWidth(width), mHeight(height), mIsDeferred(false), mIsShared(isShared),
-    mPhysicalCameraId(physicalCameraId), mIsMultiResolution(false) { }
+    mPhysicalCameraId(physicalCameraId), mIsMultiResolution(false),
+    mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) { }
 
 status_t OutputConfiguration::writeToParcel(android::Parcel* parcel) const {
 
@@ -254,6 +269,9 @@
     err = parcel->writeParcelableVector(mSensorPixelModesUsed);
     if (err != OK) return err;
 
+    err = parcel->writeInt32(mDynamicRangeProfile);
+    if (err != OK) return err;
+
     return OK;
 }
 
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index 8ca8920..6d884cb 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -46,6 +46,7 @@
         "android.hardware.camera.device@1.0",
         "android.hardware.camera.device@3.2",
         "android.hardware.camera.device@3.4",
+        "android.hardware.camera.device@3.8",
     ],
     compile_multilib: "first",
     cflags: [
diff --git a/camera/include/camera/CameraBase.h b/camera/include/camera/CameraBase.h
index e156994..8e53968 100644
--- a/camera/include/camera/CameraBase.h
+++ b/camera/include/camera/CameraBase.h
@@ -85,11 +85,17 @@
      */
     std::vector<String8> unavailablePhysicalIds;
 
+    /**
+     * Client package name if camera is open, otherwise not applicable
+     */
+    String8 clientPackage;
+
     virtual status_t writeToParcel(android::Parcel* parcel) const;
     virtual status_t readFromParcel(const android::Parcel* parcel);
 
-    CameraStatus(String8 id, int32_t s, const std::vector<String8>& unavailSubIds) :
-            cameraId(id), status(s), unavailablePhysicalIds(unavailSubIds) {}
+    CameraStatus(String8 id, int32_t s, const std::vector<String8>& unavailSubIds,
+            const String8& clientPkg) : cameraId(id), status(s),
+            unavailablePhysicalIds(unavailSubIds), clientPackage(clientPkg) {}
     CameraStatus() : status(ICameraServiceListener::STATUS_PRESENT) {}
 };
 
diff --git a/camera/include/camera/CameraSessionStats.h b/camera/include/camera/CameraSessionStats.h
index c398aca..1209a20 100644
--- a/camera/include/camera/CameraSessionStats.h
+++ b/camera/include/camera/CameraSessionStats.h
@@ -19,6 +19,8 @@
 
 #include <binder/Parcelable.h>
 
+#include <camera/CameraMetadata.h>
+
 namespace android {
 namespace hardware {
 
@@ -60,16 +62,21 @@
     // size(mHistogramBins) + 1 = size(mHistogramCounts)
     std::vector<int64_t> mHistogramCounts;
 
+    // Dynamic range profile
+    int mDynamicRangeProfile;
+
     CameraStreamStats() :
             mWidth(0), mHeight(0), mFormat(0), mDataSpace(0), mUsage(0),
             mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
-            mMaxHalBuffers(0), mMaxAppBuffers(0), mHistogramType(HISTOGRAM_TYPE_UNKNOWN) {}
+            mMaxHalBuffers(0), mMaxAppBuffers(0), mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
+            mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {}
     CameraStreamStats(int width, int height, int format, int dataSpace, int64_t usage,
-            int maxHalBuffers, int maxAppBuffers)
+            int maxHalBuffers, int maxAppBuffers, int dynamicRangeProfile)
             : mWidth(width), mHeight(height), mFormat(format), mDataSpace(dataSpace),
               mUsage(usage), mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
               mMaxHalBuffers(maxHalBuffers), mMaxAppBuffers(maxAppBuffers),
-              mHistogramType(HISTOGRAM_TYPE_UNKNOWN) {}
+              mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
+              mDynamicRangeProfile(dynamicRangeProfile) {}
 
     virtual status_t readFromParcel(const android::Parcel* parcel) override;
     virtual status_t writeToParcel(android::Parcel* parcel) const override;
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index f80ed3a..1631903 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -44,6 +44,7 @@
     int                        getSurfaceType() const;
     int                        getWidth() const;
     int                        getHeight() const;
+    int                        getDynamicRangeProfile() const;
     bool                       isDeferred() const;
     bool                       isShared() const;
     String16                   getPhysicalCameraId() const;
@@ -89,7 +90,8 @@
                 gbpsEqual(other) &&
                 mPhysicalCameraId == other.mPhysicalCameraId &&
                 mIsMultiResolution == other.mIsMultiResolution &&
-                sensorPixelModesUsedEqual(other));
+                sensorPixelModesUsedEqual(other) &&
+                mDynamicRangeProfile == other.mDynamicRangeProfile);
     }
     bool operator != (const OutputConfiguration& other) const {
         return !(*this == other);
@@ -126,6 +128,9 @@
         if (!sensorPixelModesUsedEqual(other)) {
             return sensorPixelModesUsedLessThan(other);
         }
+        if (mDynamicRangeProfile != other.mDynamicRangeProfile) {
+            return mDynamicRangeProfile < other.mDynamicRangeProfile;
+        }
         return gbpsLessThan(other);
     }
 
@@ -150,6 +155,7 @@
     String16                   mPhysicalCameraId;
     bool                       mIsMultiResolution;
     std::vector<int32_t>       mSensorPixelModesUsed;
+    int                        mDynamicRangeProfile;
 };
 } // namespace params
 } // namespace camera2
diff --git a/camera/ndk/NdkCameraCaptureSession.cpp b/camera/ndk/NdkCameraCaptureSession.cpp
index 1ac8482..9c98778 100644
--- a/camera/ndk/NdkCameraCaptureSession.cpp
+++ b/camera/ndk/NdkCameraCaptureSession.cpp
@@ -29,6 +29,7 @@
 #include "impl/ACameraCaptureSession.h"
 
 #include "impl/ACameraCaptureSession.inc"
+#include "NdkCameraCaptureSession.inc"
 
 using namespace android;
 
@@ -72,22 +73,16 @@
         int numRequests, ACaptureRequest** requests,
         /*optional*/int* captureSequenceId) {
     ATRACE_CALL();
-    if (session == nullptr || requests == nullptr || numRequests < 1) {
-        ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
-                __FUNCTION__, session, numRequests, requests);
-        return ACAMERA_ERROR_INVALID_PARAMETER;
-    }
+    return captureTemplate(session, cbs, numRequests, requests, captureSequenceId);
+}
 
-    if (session->isClosed()) {
-        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
-        if (captureSequenceId != nullptr) {
-            *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
-        }
-        return ACAMERA_ERROR_SESSION_CLOSED;
-    }
-
-    return session->capture(
-            cbs, numRequests, requests, captureSequenceId);
+EXPORT
+camera_status_t ACameraCaptureSession_captureV2(
+        ACameraCaptureSession* session, /*optional*/ACameraCaptureSession_captureCallbacksV2* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    ATRACE_CALL();
+    return captureTemplate(session, cbs, numRequests, requests, captureSequenceId);
 }
 
 EXPORT
@@ -97,22 +92,26 @@
         int numRequests, ACaptureRequest** requests,
         /*optional*/int* captureSequenceId) {
     ATRACE_CALL();
-    if (session == nullptr || requests == nullptr || numRequests < 1) {
-        ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
-                __FUNCTION__, session, numRequests, requests);
-        return ACAMERA_ERROR_INVALID_PARAMETER;
-    }
+    return captureTemplate(session, lcbs, numRequests, requests, captureSequenceId);
+}
 
-    if (session->isClosed()) {
-        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
-        if (captureSequenceId) {
-            *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
-        }
-        return ACAMERA_ERROR_SESSION_CLOSED;
-    }
+EXPORT
+camera_status_t ACameraCaptureSession_logicalCamera_captureV2(
+        ACameraCaptureSession* session,
+        /*optional*/ACameraCaptureSession_logicalCamera_captureCallbacksV2* lcbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    ATRACE_CALL();
+    return captureTemplate(session, lcbs, numRequests, requests, captureSequenceId);
+}
 
-    return session->capture(
-            lcbs, numRequests, requests, captureSequenceId);
+EXPORT
+camera_status_t ACameraCaptureSession_setRepeatingRequestV2(
+        ACameraCaptureSession* session, /*optional*/ACameraCaptureSession_captureCallbacksV2* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    ATRACE_CALL();
+    return setRepeatingRequestTemplate(session, cbs, numRequests, requests, captureSequenceId);
 }
 
 EXPORT
@@ -121,23 +120,10 @@
         int numRequests, ACaptureRequest** requests,
         /*optional*/int* captureSequenceId) {
     ATRACE_CALL();
-    if (session == nullptr || requests == nullptr || numRequests < 1) {
-        ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
-                __FUNCTION__, session, numRequests, requests);
-        return ACAMERA_ERROR_INVALID_PARAMETER;
-    }
-
-    if (session->isClosed()) {
-        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
-        if (captureSequenceId) {
-            *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
-        }
-        return ACAMERA_ERROR_SESSION_CLOSED;
-    }
-
-    return session->setRepeatingRequest(cbs, numRequests, requests, captureSequenceId);
+    return setRepeatingRequestTemplate(session, cbs, numRequests, requests, captureSequenceId);
 }
 
+
 EXPORT
 camera_status_t ACameraCaptureSession_logicalCamera_setRepeatingRequest(
         ACameraCaptureSession* session,
@@ -145,21 +131,18 @@
         int numRequests, ACaptureRequest** requests,
         /*optional*/int* captureSequenceId) {
     ATRACE_CALL();
-    if (session == nullptr || requests == nullptr || numRequests < 1) {
-        ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
-                __FUNCTION__, session, numRequests, requests);
-        return ACAMERA_ERROR_INVALID_PARAMETER;
-    }
+    return setRepeatingRequestTemplate(session, lcbs, numRequests, requests, captureSequenceId);
+}
 
-    if (session->isClosed()) {
-        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
-        if (captureSequenceId) {
-            *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
-        }
-        return ACAMERA_ERROR_SESSION_CLOSED;
-    }
 
-    return session->setRepeatingRequest(lcbs, numRequests, requests, captureSequenceId);
+EXPORT
+camera_status_t ACameraCaptureSession_logicalCamera_setRepeatingRequestV2(
+        ACameraCaptureSession* session,
+        /*optional*/ACameraCaptureSession_logicalCamera_captureCallbacksV2* lcbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    ATRACE_CALL();
+    return setRepeatingRequestTemplate(session, lcbs, numRequests, requests, captureSequenceId);
 }
 
 EXPORT
diff --git a/camera/ndk/NdkCameraCaptureSession.inc b/camera/ndk/NdkCameraCaptureSession.inc
new file mode 100644
index 0000000..258e20d
--- /dev/null
+++ b/camera/ndk/NdkCameraCaptureSession.inc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "impl/ACameraCaptureSession.h"
+
+#include <camera/NdkCameraCaptureSession.h>
+
+using namespace android;
+
+template <class CallbackType>
+camera_status_t captureTemplate(
+        ACameraCaptureSession* session,
+        /*optional*/CallbackType* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    ATRACE_CALL();
+    if (session == nullptr || requests == nullptr || numRequests < 1) {
+        ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
+                __FUNCTION__, session, numRequests, requests);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (session->isClosed()) {
+        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+        if (captureSequenceId) {
+            *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+        }
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+
+    return session->capture(
+            cbs, numRequests, requests, captureSequenceId);
+}
+
+template <class CallbackType>
+camera_status_t setRepeatingRequestTemplate(
+        ACameraCaptureSession* session,
+        /*optional*/CallbackType* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    ATRACE_CALL();
+    if (session == nullptr || requests == nullptr || numRequests < 1) {
+        ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
+                __FUNCTION__, session, numRequests, requests);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (session->isClosed()) {
+        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+        if (captureSequenceId) {
+            *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+        }
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+
+    return session->setRepeatingRequest(cbs, numRequests, requests, captureSequenceId);
+}
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index dd652c7..7997768 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -26,8 +26,6 @@
 #include "ACaptureRequest.h"
 #include "ACameraCaptureSession.h"
 
-#include "ACameraCaptureSession.inc"
-
 ACameraDevice::~ACameraDevice() {
     mDevice->stopLooperAndDisconnect();
 }
@@ -913,6 +911,7 @@
         case kWhatOnError:
         case kWhatSessionStateCb:
         case kWhatCaptureStart:
+        case kWhatCaptureStart2:
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
@@ -985,6 +984,7 @@
         }
         case kWhatSessionStateCb:
         case kWhatCaptureStart:
+        case kWhatCaptureStart2:
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
@@ -1004,6 +1004,7 @@
             sp<CaptureRequest> requestSp = nullptr;
             switch (msg->what()) {
                 case kWhatCaptureStart:
+                case kWhatCaptureStart2:
                 case kWhatCaptureResult:
                 case kWhatLogicalCaptureResult:
                 case kWhatCaptureFail:
@@ -1055,6 +1056,35 @@
                     freeACaptureRequest(request);
                     break;
                 }
+                case kWhatCaptureStart2:
+                {
+                    ACameraCaptureSession_captureCallback_startV2 onStart2;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onStart2);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture startV2 callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onStart2 == nullptr) {
+                        return;
+                    }
+                    int64_t timestamp;
+                    found = msg->findInt64(kTimeStampKey, &timestamp);
+                    if (!found) {
+                        ALOGE("%s: Cannot find timestamp!", __FUNCTION__);
+                        return;
+                    }
+                    int64_t frameNumber;
+                    found = msg->findInt64(kFrameNumberKey, &frameNumber);
+                    if (!found) {
+                        ALOGE("%s: Cannot find frame number!", __FUNCTION__);
+                        return;
+                    }
+
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp, mId);
+                    (*onStart2)(context, session.get(), request, timestamp, frameNumber);
+                    freeACaptureRequest(request);
+                    break;
+                }
                 case kWhatCaptureResult:
                 {
                     ACameraCaptureSession_captureCallback_result onResult;
@@ -1285,7 +1315,8 @@
         ACameraCaptureSession_captureCallbacks* cbs) :
         mSession(session), mRequests(requests),
         mIsRepeating(isRepeating),
-        mIsLogicalCameraCallback(false) {
+        mIsLogicalCameraCallback(false),
+        mIs2Callback(false) {
     initCaptureCallbacks(cbs);
 
     if (cbs != nullptr) {
@@ -1301,7 +1332,8 @@
         ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs) :
         mSession(session), mRequests(requests),
         mIsRepeating(isRepeating),
-        mIsLogicalCameraCallback(true) {
+        mIsLogicalCameraCallback(true),
+        mIs2Callback(false) {
     initCaptureCallbacks(lcbs);
 
     if (lcbs != nullptr) {
@@ -1310,6 +1342,40 @@
     }
 }
 
+CameraDevice::CallbackHolder::CallbackHolder(
+        sp<ACameraCaptureSession>          session,
+        const Vector<sp<CaptureRequest> >& requests,
+        bool                               isRepeating,
+        ACameraCaptureSession_captureCallbacksV2* cbs) :
+        mSession(session), mRequests(requests),
+        mIsRepeating(isRepeating),
+        mIsLogicalCameraCallback(false),
+        mIs2Callback(true) {
+    initCaptureCallbacksV2(cbs);
+
+    if (cbs != nullptr) {
+        mOnCaptureCompleted = cbs->onCaptureCompleted;
+        mOnCaptureFailed = cbs->onCaptureFailed;
+    }
+}
+
+CameraDevice::CallbackHolder::CallbackHolder(
+        sp<ACameraCaptureSession>          session,
+        const Vector<sp<CaptureRequest> >& requests,
+        bool                               isRepeating,
+        ACameraCaptureSession_logicalCamera_captureCallbacksV2* lcbs) :
+        mSession(session), mRequests(requests),
+        mIsRepeating(isRepeating),
+        mIsLogicalCameraCallback(true),
+        mIs2Callback(true) {
+    initCaptureCallbacksV2(lcbs);
+
+    if (lcbs != nullptr) {
+        mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
+        mOnLogicalCameraCaptureFailed = lcbs->onLogicalCameraCaptureFailed;
+    }
+}
+
 void
 CameraDevice::checkRepeatingSequenceCompleteLocked(
     const int sequenceId, const int64_t lastFrameNumber) {
@@ -1536,7 +1602,6 @@
         const CaptureResultExtras& resultExtras,
         int64_t timestamp) {
     binder::Status ret = binder::Status::ok();
-
     sp<CameraDevice> dev = mDevice.promote();
     if (dev == nullptr) {
         return ret; // device has been closed
@@ -1551,11 +1616,14 @@
 
     int sequenceId = resultExtras.requestId;
     int32_t burstId = resultExtras.burstId;
+    int64_t frameNumber = resultExtras.frameNumber;
 
     auto it = dev->mSequenceCallbackMap.find(sequenceId);
     if (it != dev->mSequenceCallbackMap.end()) {
         CallbackHolder cbh = (*it).second;
+        bool v2Callback = cbh.mIs2Callback;
         ACameraCaptureSession_captureCallback_start onStart = cbh.mOnCaptureStarted;
+        ACameraCaptureSession_captureCallback_startV2 onStart2 = cbh.mOnCaptureStarted2;
         sp<ACameraCaptureSession> session = cbh.mSession;
         if ((size_t) burstId >= cbh.mRequests.size()) {
             ALOGE("%s: Error: request index %d out of bound (size %zu)",
@@ -1563,12 +1631,19 @@
             dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
         }
         sp<CaptureRequest> request = cbh.mRequests[burstId];
-        sp<AMessage> msg = new AMessage(kWhatCaptureStart, dev->mHandler);
+        sp<AMessage> msg = nullptr;
+        if (v2Callback) {
+            msg = new AMessage(kWhatCaptureStart2, dev->mHandler);
+            msg->setPointer(kCallbackFpKey, (void*) onStart2);
+        } else {
+            msg = new AMessage(kWhatCaptureStart, dev->mHandler);
+            msg->setPointer(kCallbackFpKey, (void *)onStart);
+        }
         msg->setPointer(kContextKey, cbh.mContext);
         msg->setObject(kSessionSpKey, session);
-        msg->setPointer(kCallbackFpKey, (void*) onStart);
         msg->setObject(kCaptureRequestKey, request);
         msg->setInt64(kTimeStampKey, timestamp);
+        msg->setInt64(kFrameNumberKey, frameNumber);
         dev->postSessionMsgAndCleanup(msg);
     }
     return ret;
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 344d964..17988fe 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -215,6 +215,7 @@
         kWhatSessionStateCb,   // onReady, onActive
         // Capture callbacks
         kWhatCaptureStart,     // onCaptureStarted
+        kWhatCaptureStart2,    // onCaptureStarted with frame number (V2 callbacks)
         kWhatCaptureResult,    // onCaptureProgressed, onCaptureCompleted
         kWhatLogicalCaptureResult, // onLogicalCameraCaptureCompleted
         kWhatCaptureFail,      // onCaptureFailed
@@ -294,11 +295,18 @@
                        const Vector<sp<CaptureRequest> >& requests,
                        bool                               isRepeating,
                        ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs);
-
-        template <class T>
-        void initCaptureCallbacks(T* cbs) {
+        CallbackHolder(sp<ACameraCaptureSession>          session,
+                       const Vector<sp<CaptureRequest> >& requests,
+                       bool                               isRepeating,
+                       ACameraCaptureSession_captureCallbacksV2* cbs);
+        CallbackHolder(sp<ACameraCaptureSession>          session,
+                       const Vector<sp<CaptureRequest> >& requests,
+                       bool                               isRepeating,
+                       ACameraCaptureSession_logicalCamera_captureCallbacksV2* lcbs);
+        void clearCallbacks() {
             mContext = nullptr;
             mOnCaptureStarted = nullptr;
+            mOnCaptureStarted2 = nullptr;
             mOnCaptureProgressed = nullptr;
             mOnCaptureCompleted = nullptr;
             mOnLogicalCameraCaptureCompleted = nullptr;
@@ -307,6 +315,24 @@
             mOnCaptureSequenceCompleted = nullptr;
             mOnCaptureSequenceAborted = nullptr;
             mOnCaptureBufferLost = nullptr;
+        }
+
+        template <class T>
+        void initCaptureCallbacksV2(T* cbs) {
+            clearCallbacks();
+            if (cbs != nullptr) {
+                mContext = cbs->context;
+                mOnCaptureStarted2 = cbs->onCaptureStarted;
+                mOnCaptureProgressed = cbs->onCaptureProgressed;
+                mOnCaptureSequenceCompleted = cbs->onCaptureSequenceCompleted;
+                mOnCaptureSequenceAborted = cbs->onCaptureSequenceAborted;
+                mOnCaptureBufferLost = cbs->onCaptureBufferLost;
+            }
+        }
+
+        template <class T>
+        void initCaptureCallbacks(T* cbs) {
+            clearCallbacks();
             if (cbs != nullptr) {
                 mContext = cbs->context;
                 mOnCaptureStarted = cbs->onCaptureStarted;
@@ -320,9 +346,11 @@
         Vector<sp<CaptureRequest> > mRequests;
         const bool                  mIsRepeating;
         const bool                  mIsLogicalCameraCallback;
+        const bool                  mIs2Callback;
 
         void*                       mContext;
         ACameraCaptureSession_captureCallback_start mOnCaptureStarted;
+        ACameraCaptureSession_captureCallback_startV2 mOnCaptureStarted2;
         ACameraCaptureSession_captureCallback_result mOnCaptureProgressed;
         ACameraCaptureSession_captureCallback_result mOnCaptureCompleted;
         ACameraCaptureSession_logicalCamera_captureCallback_result mOnLogicalCameraCaptureCompleted;
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 95ef2b2..5892f1a 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -189,8 +189,12 @@
     sp<CameraManagerGlobal> cm = mCameraManager.promote();
     if (cm != nullptr) {
         AutoMutex lock(cm->mLock);
+        std::vector<String8> cameraIdList;
         for (auto& pair : cm->mDeviceStatusMap) {
-            const String8 &cameraId = pair.first;
+            cameraIdList.push_back(pair.first);
+        }
+
+        for (String8 cameraId : cameraIdList) {
             cm->onStatusChangedLocked(
                     CameraServiceListener::STATUS_NOT_PRESENT, cameraId);
         }
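
Presumably the new indirection through cameraIdList is needed because onStatusChangedLocked can
erase entries from mDeviceStatusMap, which would invalidate the iterator driving the original
range-for loop. A standalone sketch of that hazard and of the copy-the-keys pattern (hypothetical
types, not the ACameraManager code):

#include <map>
#include <string>
#include <vector>

void markAllNotPresent(std::map<std::string, int>& statusMap) {
    // Erasing while range-iterating the map invalidates the current iterator:
    // for (auto& pair : statusMap) statusMap.erase(pair.first);  // undefined behavior

    // Safe pattern: snapshot the keys first, then mutate the map freely.
    std::vector<std::string> ids;
    for (const auto& pair : statusMap) ids.push_back(pair.first);
    for (const auto& id : ids) statusMap.erase(id);
}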
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index ccbfaa9..d53d809 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -95,6 +95,9 @@
         virtual binder::Status onTorchStatusChanged(int32_t, const String16&) {
             return binder::Status::ok();
         }
+        virtual binder::Status onTorchStrengthLevelChanged(const String16&, int32_t) {
+            return binder::Status::ok();
+        }
 
         virtual binder::Status onCameraAccessPrioritiesChanged();
         virtual binder::Status onCameraOpened(const String16&, const String16&) {
@@ -139,6 +142,8 @@
             return !(*this == other);
         }
         bool operator < (const Callback& other) const {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wordered-compare-function-pointers"
             if (*this == other) return false;
             if (mContext != other.mContext) return mContext < other.mContext;
             if (mPhysicalCamAvailable != other.mPhysicalCamAvailable) {
@@ -152,6 +157,7 @@
             }
             if (mAvailable != other.mAvailable) return mAvailable < other.mAvailable;
             return mUnavailable < other.mUnavailable;
+#pragma GCC diagnostic pop
         }
         bool operator > (const Callback& other) const {
             return (*this != other && !(*this < other));
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index dab2fef..05124c0 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -182,7 +182,7 @@
         int64_t format = entry.data.i64[i + STREAM_FORMAT_OFFSET];
         int64_t width = entry.data.i64[i + STREAM_WIDTH_OFFSET];
         int64_t height = entry.data.i64[i + STREAM_HEIGHT_OFFSET];
-        int64_t duration = entry.data.i32[i + STREAM_DURATION_OFFSET];
+        int64_t duration = entry.data.i64[i + STREAM_DURATION_OFFSET];
 
         // Leave the unfiltered format in so apps depending on previous wrong
         // filter behavior continue to work
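
The fix above (indexing entry.data.i64 instead of entry.data.i32) matters because the entry stores
64-bit values; reading them through the 32-bit view halves the element stride and returns the
wrong word. A standalone illustration of the failure mode with made-up values:

#include <cstdint>
#include <cstdio>

int main() {
    int64_t durations[2] = {33333333, 16666666};  // e.g. two frame durations in ns
    // Viewing the same buffer as int32_t halves the stride, so index 1 lands
    // inside the first 64-bit value instead of on the second one.
    const int32_t* wrongView = reinterpret_cast<const int32_t*>(durations);
    std::printf("i64[1] = %lld, i32[1] = %d\n",
                static_cast<long long>(durations[1]), wrongView[1]);
    return 0;
}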
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 2b7f040..b0fd00c 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -811,6 +811,184 @@
         int numRequests, ACaptureRequest** requests,
         /*optional*/int* captureSequenceId) __INTRODUCED_IN(29);
 
+/**
+ * The definition of camera capture start callback. The same as
+ * {@link ACameraCaptureSession_captureCallbacks#onCaptureStarted}, except that
+ * it has the frame number of the capture as well.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request that is starting. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address differs from what the
+ *                application sent, but the content will match. This request will be freed by the
+ *                framework immediately after this callback returns.
+ * @param timestamp The timestamp when the capture is started. This timestamp will match
+ *                  {@link ACAMERA_SENSOR_TIMESTAMP} of the {@link ACameraMetadata} in
+ *                  {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted} callback.
+ * @param frameNumber The frame number of the capture that started.
+ */
+typedef void (*ACameraCaptureSession_captureCallback_startV2)(
+        void* context, ACameraCaptureSession* session,
+        const ACaptureRequest* request, int64_t timestamp, int64_t frameNumber);
+/**
+ * This has the same functionality as ACameraCaptureSession_captureCallbacks,
+ * with the exception that a captureCallback_startV2 callback is
+ * used, instead of captureCallback_start, to support retrieving the frame number.
+ */
+typedef struct ACameraCaptureSession_captureCallbacksV2 {
+    /**
+     * Same as ACameraCaptureSession_captureCallbacks
+     */
+    void*                                               context;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureStarted},
+     * except that it has the frame number of the capture added in the parameter
+     * list.
+     */
+    ACameraCaptureSession_captureCallback_startV2         onCaptureStarted;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureProgressed}.
+     */
+    ACameraCaptureSession_captureCallback_result        onCaptureProgressed;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted}.
+     */
+    ACameraCaptureSession_captureCallback_result        onCaptureCompleted;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureFailed}.
+     */
+    ACameraCaptureSession_captureCallback_failed        onCaptureFailed;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceCompleted}.
+     */
+    ACameraCaptureSession_captureCallback_sequenceEnd   onCaptureSequenceCompleted;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceAborted}.
+     */
+    ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureBufferLost}.
+     */
+    ACameraCaptureSession_captureCallback_bufferLost    onCaptureBufferLost;
+
+
+} ACameraCaptureSession_captureCallbacksV2;
+
+/**
+ * This has the same functionality as ACameraCaptureSession_logicalCamera_captureCallbacks,
+ * with the exception that a captureCallback_startV2 callback is
+ * used, instead of captureCallback_start, to support retrieving the frame number.
+ */
+typedef struct ACameraCaptureSession_logicalCamera_captureCallbacksV2 {
+    /**
+     * Same as ACameraCaptureSession_captureCallbacks
+     */
+    void*                                               context;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureStarted},
+     * except that it has the frame number of the capture added in the parameter
+     * list.
+     */
+    ACameraCaptureSession_captureCallback_startV2         onCaptureStarted;
+
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureProgressed}.
+     */
+    ACameraCaptureSession_captureCallback_result        onCaptureProgressed;
+
+    /**
+     * Same as
+     * {@link ACameraCaptureSession_logicalCamera_captureCallbacks#onLogicalCameraCaptureCompleted}.
+     */
+    ACameraCaptureSession_logicalCamera_captureCallback_result onLogicalCameraCaptureCompleted;
+
+    /**
+     * This callback is called instead of {@link onLogicalCameraCaptureCompleted} when the
+     * camera device failed to produce a capture result for the
+     * request.
+     *
+     * <p>Other requests are unaffected, and some or all image buffers from
+     * the capture may have been pushed to their respective output
+     * streams.</p>
+     *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the
+     * application has submitted, but the contents of the ACaptureRequest will match what the
+     * application submitted.</p>
+     *
+     * @see ALogicalCameraCaptureFailure
+     */
+    ACameraCaptureSession_logicalCamera_captureCallback_failed onLogicalCameraCaptureFailed;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceCompleted}.
+     */
+    ACameraCaptureSession_captureCallback_sequenceEnd   onCaptureSequenceCompleted;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceAborted}.
+     */
+    ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
+
+    /**
+     * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureBufferLost}.
+     */
+    ACameraCaptureSession_captureCallback_bufferLost    onCaptureBufferLost;
+
+} ACameraCaptureSession_logicalCamera_captureCallbacksV2;
+
+/**
+ * This has the same functionality as ACameraCaptureSession_capture, with added
+ * support for v2 of camera callbacks, where the onCaptureStarted callback
+ * adds the frame number to its parameter list.
+ */
+camera_status_t ACameraCaptureSession_captureV2(
+        ACameraCaptureSession* session,
+        /*optional*/ACameraCaptureSession_captureCallbacksV2* callbacks,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) __INTRODUCED_IN(33);
+
+/**
+ * This has the same functionality as ACameraCaptureSession_setRepeatingRequest, with added
+ * support for v2 of camera callbacks, where the onCaptureStarted
+ * callback adds the frame number to its parameter list.
+ */
+camera_status_t ACameraCaptureSession_setRepeatingRequestV2(
+        ACameraCaptureSession* session,
+        /*optional*/ACameraCaptureSession_captureCallbacksV2* callbacks,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) __INTRODUCED_IN(33);
+
+/**
+ * This has the same functionality as ACameraCaptureSession_logicalCamera_capture, with added
+ * support for v2 of logical multi-camera callbacks, where the onCaptureStarted callback
+ * adds the frame number to its parameter list.
+ */
+camera_status_t ACameraCaptureSession_logicalCamera_captureV2(
+        ACameraCaptureSession* session,
+        /*optional*/ACameraCaptureSession_logicalCamera_captureCallbacksV2* callbacks,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) __INTRODUCED_IN(33);
+
+/**
+ * This has the same functionality as ACameraCaptureSession_logicalCamera_setRepeatingRequest,
+ * with added support for v2 of logical multi-camera callbacks, where the onCaptureStarted
+ * callback adds the frame number to its parameter list.
+ */
+camera_status_t ACameraCaptureSession_logicalCamera_setRepeatingRequestV2(
+        ACameraCaptureSession* session,
+        /*optional*/ACameraCaptureSession_logicalCamera_captureCallbacksV2* callbacks,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) __INTRODUCED_IN(33);
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_CAPTURE_SESSION_H */
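
To make the new V2 surface concrete, a minimal usage sketch of the declarations above (request
construction and error handling elided; everything except the NDK symbols is a placeholder):

#include <camera/NdkCameraCaptureSession.h>
#include <camera/NdkCaptureRequest.h>

// Receives the capture-start notification together with the new frame number.
static void onCaptureStartedV2(void* /*context*/, ACameraCaptureSession* /*session*/,
                               const ACaptureRequest* /*request*/,
                               int64_t timestamp, int64_t frameNumber) {
    (void)timestamp;    // matches ACAMERA_SENSOR_TIMESTAMP of the completed result
    (void)frameNumber;  // the field this V2 callback adds
}

static camera_status_t submitOneRequest(ACameraCaptureSession* session,
                                        ACaptureRequest* request) {
    ACameraCaptureSession_captureCallbacksV2 cbs = {};  // unused callbacks stay null
    cbs.onCaptureStarted = onCaptureStartedV2;
    int sequenceId = 0;
    return ACameraCaptureSession_captureV2(session, &cbs, /*numRequests*/ 1,
                                           &request, &sequenceId);
}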
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 86781e5..bd281c8 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1090,6 +1090,15 @@
      * (ACAMERA_LENS_OPTICAL_STABILIZATION_MODE), turning both modes on may
      * produce undesirable interaction, so it is recommended not to enable
      * both at the same time.</p>
+     * <p>If video stabilization is set to "PREVIEW_STABILIZATION",
+     * ACAMERA_LENS_OPTICAL_STABILIZATION_MODE is overridden. The camera sub-system may choose
+     * to turn on hardware-based image stabilization in addition to software-based stabilization
+     * if it deems it appropriate.
+     * This key may be a part of the available session keys, which camera clients may
+     * query via
+     * {@link ACameraManager_getCameraCharacteristics }.
+     * If this is the case, changing this key over the lifetime of a capture session may
+     * cause delays / glitches.</p>
      *
      * @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
      * @see ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
@@ -2144,6 +2153,51 @@
      */
     ACAMERA_FLASH_INFO_AVAILABLE =                              // byte (acamera_metadata_enum_android_flash_info_available_t)
             ACAMERA_FLASH_INFO_START,
+    /**
+     * <p>Maximum flashlight brightness level.</p>
+     *
+     * <p>Type: int32</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>If this value is greater than 1, then the device supports controlling the
+     * flashlight brightness level via
+     * {android.hardware.camera2.CameraManager#turnOnTorchWithStrengthLevel}.
+     * If this value is equal to 1, flashlight brightness control is not supported.
+     * The value for this key will be null for devices with no flash unit.</p>
+     */
+    ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL =                 // int32
+            ACAMERA_FLASH_INFO_START + 2,
+    /**
+     * <p>Default flashlight brightness level to be set via
+     * {android.hardware.camera2.CameraManager#turnOnTorchWithStrengthLevel}.</p>
+     *
+     * <p>Type: int32</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>If a flash unit is available, this will be greater than or equal to 1 and less
+     * than or equal to <code>ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL</code>.</p>
+     * <p>Setting flashlight brightness above the default level
+     * (i.e. <code>ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL</code>) may make the device more
+     * likely to reach thermal throttling conditions and slow down, or drain the
+     * battery quicker than normal. To minimize such issues, it is recommended to
+     * start the flashlight at this default brightness until a user explicitly requests
+     * a brighter level.
+     * Note that the value for this key will be null for devices with no flash unit.
+     * The default level should always be &gt; 0.</p>
+     *
+     * @see ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL
+     * @see ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL
+     */
+    ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL =                 // int32
+            ACAMERA_FLASH_INFO_START + 3,
     ACAMERA_FLASH_INFO_END,
 
     /**
@@ -2526,12 +2580,18 @@
      * <p>If a camera device supports both OIS and digital image stabilization
      * (ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE), turning both modes on may produce undesirable
      * interaction, so it is recommended not to enable both at the same time.</p>
+     * <p>If ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE is set to "PREVIEW_STABILIZATION",
+     * ACAMERA_LENS_OPTICAL_STABILIZATION_MODE is overridden. The camera sub-system may choose
+     * to turn on hardware-based image stabilization in addition to software-based stabilization
+     * if it deems it appropriate. This key's value in the capture result will reflect which
+     * OIS mode was chosen.</p>
      * <p>Not all devices will support OIS; see
      * ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION for
      * available controls.</p>
      *
      * @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
      * @see ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION
+     * @see ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
      */
     ACAMERA_LENS_OPTICAL_STABILIZATION_MODE =                   // byte (acamera_metadata_enum_android_lens_optical_stabilization_mode_t)
             ACAMERA_LENS_START + 4,
@@ -3403,6 +3463,25 @@
      */
     ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS =    // int32[n]
             ACAMERA_REQUEST_START + 17,
+    /**
+     * <p>A map of all available 10-bit dynamic range profiles along with their
+     * capture request constraints.</p>
+     *
+     * <p>Type: int32[n*2] (acamera_metadata_enum_android_request_available_dynamic_range_profiles_map_t)</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>Devices supporting the 10-bit output capability
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT">CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT</a>
+     * must list their supported dynamic range profiles. In case the camera is not able to
+     * support every possible profile combination within a single capture request, then the
+     * constraints must be listed here as well.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP =      // int32[n*2] (acamera_metadata_enum_android_request_available_dynamic_range_profiles_map_t)
+            ACAMERA_REQUEST_START + 19,
     ACAMERA_REQUEST_END,
 
     /**
@@ -3600,7 +3679,8 @@
      * YUV_420_888    | all output sizes available for JPEG, up to the maximum video size | LIMITED        |
      * IMPLEMENTATION_DEFINED | same as YUV_420_888                  | Any            |</p>
      * <p>For applications targeting SDK version 31 or newer, if the mobile device declares to be
-     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">media performance class</a> S,
+     * media performance class 12 or higher by setting
+     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
      * the primary camera devices (first rear/front camera in the camera ID list) will not
      * support JPEG sizes smaller than 1080p. If the application configures a JPEG stream
      * smaller than 1080p, the camera device will round up the JPEG image size to at least
@@ -3618,9 +3698,11 @@
      * YUV_420_888    | all output sizes available for FULL hardware level, up to the maximum video size | LIMITED        |
      * IMPLEMENTATION_DEFINED | same as YUV_420_888                  | Any            |</p>
      * <p>For applications targeting SDK version 31 or newer, if the mobile device doesn't declare
-     * to be media performance class S, or if the camera device isn't a primary rear/front
-     * camera, the minimum required output stream configurations are the same as for applications
-     * targeting SDK version older than 31.</p>
+     * to be media performance class 12 or better by setting
+     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
+     * or if the camera device isn't a primary rear/front camera, the minimum required output
+     * stream configurations are the same as for applications targeting SDK version older than
+     * 31.</p>
      * <p>Refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES for additional
      * mandatory stream configurations on a per-capability basis.</p>
      * <p>Exception on 176x144 (QCIF) resolution: camera devices usually have a fixed capability for
@@ -4578,6 +4660,25 @@
      *
      * <p>Also defines the direction of rolling shutter readout, which is from top to bottom in
      * the sensor's coordinate system.</p>
+     * <p>Starting with Android API level 32, camera clients that query the orientation via
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#get">CameraCharacteristics#get</a> on foldable devices which
+     * include logical cameras can receive a value that can dynamically change depending on the
+     * device/fold state.
+     * Clients are advised to not cache or store the orientation value of such logical sensors.
+     * In case repeated queries to CameraCharacteristics are not preferred, then clients can
+     * also access the entire mapping from device state to sensor orientation in
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/params/DeviceStateSensorOrientationMap.html">DeviceStateSensorOrientationMap</a>.
+     * Do note that a dynamically changing sensor orientation value in camera characteristics
+     * will not be the best way to establish the orientation per frame. Clients that want to
+     * know the sensor orientation of a particular captured frame should query the
+     * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID from the corresponding capture result and
+     * check the respective physical camera orientation.</p>
+     * <p>Native camera clients must query ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS for the mapping
+     * between device state and camera sensor orientation. Dynamic updates to the sensor
+     * orientation are not supported in this code path.</p>
+     *
+     * @see ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS
+     * @see ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID
      */
     ACAMERA_SENSOR_ORIENTATION =                                // int32
             ACAMERA_SENSOR_START + 14,
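
The per-frame flow described above spans two queries rather than one API call. Below is a minimal sketch of that flow, assuming the active physical camera ID from a capture result can be passed back to ACameraManager_getCameraCharacteristics; the helper name and error handling are illustrative and not part of this change.

    #include <camera/NdkCameraManager.h>
    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Sketch: resolve the orientation of the physical sensor that produced one frame.
    static int32_t orientationForFrame(ACameraManager* mgr, const ACameraMetadata* result) {
        ACameraMetadata_const_entry entry{};
        if (ACameraMetadata_getConstEntry(result,
                ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry) != ACAMERA_OK) {
            return -1;  // not a logical multi-camera result
        }
        const char* physicalId = reinterpret_cast<const char*>(entry.data.u8);
        ACameraMetadata* chars = nullptr;
        if (ACameraManager_getCameraCharacteristics(mgr, physicalId, &chars) != ACAMERA_OK) {
            return -1;  // assumption: physical camera IDs are accepted by this call
        }
        int32_t orientation = -1;
        if (ACameraMetadata_getConstEntry(chars, ACAMERA_SENSOR_ORIENTATION, &entry) == ACAMERA_OK) {
            orientation = entry.data.i32[0];
        }
        ACameraMetadata_free(chars);
        return orientation;
    }
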
@@ -6284,6 +6385,21 @@
      */
     ACAMERA_INFO_VERSION =                                      // byte
             ACAMERA_INFO_START + 1,
+    /**
+     *
+     * <p>Type: int64[2*n]</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>HAL must populate the array with
+     * (hardware::camera::provider::V2_5::DeviceState, sensorOrientation) pairs for each
+     * supported device state bitwise combination.</p>
+     */
+    ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS =                    // int64[2*n]
+            ACAMERA_INFO_START + 3,
     ACAMERA_INFO_END,
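
Because the entry is documented as flat (deviceState, sensorOrientation) pairs, a client-side lookup reduces to walking the int64 array two elements at a time. A minimal sketch follows; the helper name is an assumption.

    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Sketch: find the sensor orientation advertised for a given device state.
    static int64_t orientationForDeviceState(const ACameraMetadata* chars, int64_t deviceState) {
        ACameraMetadata_const_entry entry{};
        if (ACameraMetadata_getConstEntry(chars,
                ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS, &entry) != ACAMERA_OK) {
            return -1;  // tag not advertised by this camera
        }
        for (uint32_t i = 0; i + 1 < entry.count; i += 2) {
            if (entry.data.i64[i] == deviceState) {
                return entry.data.i64[i + 1];  // orientation in degrees
            }
        }
        return -1;
    }
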
 
     /**
@@ -7935,6 +8051,17 @@
      */
     ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_ON                      = 1,
 
+    /**
+     * <p>Preview stabilization, where the preview in addition to all other non-RAW streams are
+     * stabilized with the same quality of stabilization, is enabled. This mode aims to give
+     * clients a 'what you see is what you get' effect. In this mode, the FoV reduction will
+     * be a maximum of 20% both horizontally and vertically
+     * (10% from left, right, top, bottom) for the given zoom ratio / crop region.
+     * The resultant FoV will also be the same across all processed streams
+     * (that have the same aspect ratio).</p>
+     */
+    ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION   = 2,
+
 } acamera_metadata_enum_android_control_video_stabilization_mode_t;
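
Enabling the new mode from native code is a single metadata write on the capture request. A minimal sketch follows, assuming the mode has already been checked against ACAMERA_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES.

    #include <camera/NdkCaptureRequest.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Sketch: request preview stabilization on an existing capture request.
    static camera_status_t enablePreviewStabilization(ACaptureRequest* request) {
        const uint8_t mode = ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION;
        return ACaptureRequest_setEntry_u8(
                request, ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE, 1, &mode);
    }
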
 
 // ACAMERA_CONTROL_AE_STATE
@@ -9017,6 +9144,97 @@
 
 } acamera_metadata_enum_android_request_available_capabilities_t;
 
+// ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP
+typedef enum acamera_metadata_enum_acamera_request_available_dynamic_range_profiles_map {
+    /**
+     * <p>8-bit SDR profile which is the default for all non 10-bit output capable devices.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD    = 0x1,
+
+    /**
+     * <p>10-bit pixel samples encoded using the Hybrid log-gamma transfer function.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10       = 0x2,
+
+    /**
+     * <p>10-bit pixel samples encoded using the SMPTE ST 2084 transfer function.
+     * This profile utilizes internal static metadata to increase the quality
+     * of the capture.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10       = 0x4,
+
+    /**
+     * <p>10-bit pixel samples encoded using the SMPTE ST 2084 transfer function.
+     * In contrast to HDR10, this profile uses internal per-frame metadata
+     * to further enhance the quality of the capture.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS  = 0x8,
+
+    /**
+     * <p>This is a camera mode for Dolby Vision capture optimized for a more scene
+     * accurate capture. This would typically differ from what a specific device
+     * might want to tune for a consumer optimized Dolby Vision general capture.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF
+                                                                      = 0x10,
+
+    /**
+     * <p>This is the power optimized mode for 10-bit Dolby Vision HDR Reference Mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF_PO
+                                                                      = 0x20,
+
+    /**
+     * <p>This is the camera mode for the default Dolby Vision capture mode for the
+     * specific device. This would be tuned by each specific device for consumer
+     * pleasing results that resonate with their particular audience. We expect
+     * that each specific device would have a different look for their default
+     * Dolby Vision capture.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM
+                                                                      = 0x40,
+
+    /**
+     * <p>This is the power optimized mode for 10-bit Dolby Vision HDR device specific
+     * capture mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM_PO
+                                                                      = 0x80,
+
+    /**
+     * <p>This is the 8-bit version of the Dolby Vision reference capture mode optimized
+     * for scene accuracy.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_REF
+                                                                      = 0x100,
+
+    /**
+     * <p>This is the power optimized mode for 8-bit Dolby Vision HDR Reference Mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_REF_PO
+                                                                      = 0x200,
+
+    /**
+     * <p>This is the 8-bit version of device specific tuned and optimized Dolby Vision
+     * capture mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_OEM
+                                                                      = 0x400,
+
+    /**
+     * <p>This is the power optimized mode for 8-bit Dolby Vision HDR device specific
+     * capture mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_OEM_PO
+                                                                      = 0x800,
+
+    /**
+     *
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_MAX         = 0x1000,
+
+} acamera_metadata_enum_android_request_available_dynamic_range_profiles_map_t;
+
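
The profile constants above are powers of two, so a set of supported profiles can be carried and tested as a bitmask. A minimal sketch follows; how the bitmask is derived from the camera characteristics is outside the snippet.

    #include <camera/NdkCameraMetadataTags.h>

    // Sketch: test a profile bitmask for 10-bit HLG support.
    static bool supportsHlg10(int64_t supportedProfileMask) {
        return (supportedProfileMask &
                ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10) != 0;
    }
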
 
 // ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
 typedef enum acamera_metadata_enum_acamera_scaler_available_stream_configurations {
@@ -9108,6 +9326,20 @@
                                                                       = 0x7,
 
     /**
+     * <p>If supported, the recommended 10-bit output stream configurations must include
+     * a subset of the advertised <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YCBCR_P010">ImageFormat#YCBCR_P010</a> and
+     * <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#PRIVATE">ImageFormat#PRIVATE</a> outputs that are optimized for power
+     * and performance when registered along with a supported 10-bit dynamic range profile.
+     * See android.hardware.camera2.params.OutputConfiguration#setDynamicRangeProfile for
+     * details.</p>
+     */
+    ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_10BIT_OUTPUT
+                                                                      = 0x8,
+
+    ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PUBLIC_END_3_8
+                                                                      = 0x9,
+
+    /**
      * <p>Vendor defined use cases. These depend on the vendor implementation.</p>
      */
     ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VENDOR_START
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index 2b630db..b3977ff 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -2,11 +2,15 @@
   global:
     ACameraCaptureSession_abortCaptures;
     ACameraCaptureSession_capture;
+    ACameraCaptureSession_captureV2; # introduced=33
     ACameraCaptureSession_logicalCamera_capture; # introduced=29
+    ACameraCaptureSession_logicalCamera_captureV2; # introduced=33
     ACameraCaptureSession_close;
     ACameraCaptureSession_getDevice;
     ACameraCaptureSession_setRepeatingRequest;
+    ACameraCaptureSession_setRepeatingRequestV2; # introduced=33
     ACameraCaptureSession_logicalCamera_setRepeatingRequest; # introduced=29
+    ACameraCaptureSession_logicalCamera_setRepeatingRequestV2; # introduced=33
     ACameraCaptureSession_stopRepeating;
     ACameraCaptureSession_updateSharedOutput; # introduced=28
     ACameraDevice_close;
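
The V2 entry points added above differ from their V1 counterparts only in the capture-started callback, which now also delivers the frame number. A minimal usage sketch follows, mirroring the callback struct layout exercised by AImageReaderVendorTest.cpp later in this change; session and request setup are omitted.

    #include <camera/NdkCameraCaptureSession.h>

    // Sketch: the V2 started callback receives the frame number directly.
    static void onStartedV2(void* /*ctx*/, ACameraCaptureSession* /*session*/,
                            const ACaptureRequest* /*request*/, int64_t /*timestampNs*/,
                            int64_t /*frameNumber*/) {}

    static camera_status_t captureOnceV2(ACameraCaptureSession* session, ACaptureRequest* request) {
        ACameraCaptureSession_captureCallbacksV2 cbs = {
            nullptr,      // context
            onStartedV2,  // onCaptureStarted (V2 signature)
            nullptr,      // onCaptureProgressed
            nullptr,      // onCaptureCompleted
            nullptr,      // onCaptureFailed
            nullptr,      // onCaptureSequenceCompleted
            nullptr,      // onCaptureSequenceAborted
            nullptr,      // onCaptureBufferLost
        };
        int seqId = 0;
        return ACameraCaptureSession_captureV2(session, &cbs, 1, &request, &seqId);
    }
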
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index 9f63099..4cc1292 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -29,8 +29,6 @@
 #include "ACaptureRequest.h"
 #include "utils.h"
 
-#include "ACameraCaptureSession.inc"
-
 #define CHECK_TRANSACTION_AND_RET(remoteRet, status, callName) \
     if (!remoteRet.isOk()) { \
         ALOGE("%s: Transaction error during %s call %s", __FUNCTION__, callName, \
@@ -910,6 +908,7 @@
         case kWhatOnError:
         case kWhatSessionStateCb:
         case kWhatCaptureStart:
+        case kWhatCaptureStart2:
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
@@ -982,6 +981,7 @@
         }
         case kWhatSessionStateCb:
         case kWhatCaptureStart:
+        case kWhatCaptureStart2:
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
@@ -1002,6 +1002,7 @@
             const char *id_cstr = mId.c_str();
             switch (msg->what()) {
                 case kWhatCaptureStart:
+                case kWhatCaptureStart2:
                 case kWhatCaptureResult:
                 case kWhatLogicalCaptureResult:
                 case kWhatCaptureFail:
@@ -1053,6 +1054,35 @@
                     freeACaptureRequest(request);
                     break;
                 }
+                case kWhatCaptureStart2:
+                {
+                    ACameraCaptureSession_captureCallback_startV2 onStart2;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onStart2);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture startV2 callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onStart2 == nullptr) {
+                        return;
+                    }
+                    int64_t timestamp;
+                    found = msg->findInt64(kTimeStampKey, &timestamp);
+                    if (!found) {
+                        ALOGE("%s: Cannot find timestamp!", __FUNCTION__);
+                        return;
+                    }
+                    int64_t frameNumber;
+                    found = msg->findInt64(kFrameNumberKey, &frameNumber);
+                    if (!found) {
+                        ALOGE("%s: Cannot find frame number!", __FUNCTION__);
+                        return;
+                    }
+
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp, id_cstr);
+                    (*onStart2)(context, session.get(), request, timestamp, frameNumber);
+                    freeACaptureRequest(request);
+                    break;
+                }
                 case kWhatCaptureResult:
                 {
                     ACameraCaptureSession_captureCallback_result onResult;
@@ -1281,6 +1311,7 @@
         ACameraCaptureSession_captureCallbacks* cbs) :
         mSession(session), mRequests(requests),
         mIsRepeating(isRepeating),
+        mIs2Callback(false),
         mIsLogicalCameraCallback(false) {
     initCaptureCallbacks(cbs);
 
@@ -1297,6 +1328,7 @@
         ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs) :
         mSession(session), mRequests(requests),
         mIsRepeating(isRepeating),
+        mIs2Callback(false),
         mIsLogicalCameraCallback(true) {
     initCaptureCallbacks(lcbs);
 
@@ -1306,6 +1338,40 @@
     }
 }
 
+CameraDevice::CallbackHolder::CallbackHolder(
+        sp<ACameraCaptureSession>          session,
+        const Vector<sp<CaptureRequest> >& requests,
+        bool                               isRepeating,
+        ACameraCaptureSession_captureCallbacksV2* cbs) :
+        mSession(session), mRequests(requests),
+        mIsRepeating(isRepeating),
+        mIs2Callback(true),
+        mIsLogicalCameraCallback(false) {
+    initCaptureCallbacksV2(cbs);
+
+    if (cbs != nullptr) {
+        mOnCaptureCompleted = cbs->onCaptureCompleted;
+        mOnCaptureFailed = cbs->onCaptureFailed;
+    }
+}
+
+CameraDevice::CallbackHolder::CallbackHolder(
+        sp<ACameraCaptureSession>          session,
+        const Vector<sp<CaptureRequest> >& requests,
+        bool                               isRepeating,
+        ACameraCaptureSession_logicalCamera_captureCallbacksV2* lcbs) :
+        mSession(session), mRequests(requests),
+        mIsRepeating(isRepeating),
+        mIs2Callback(true),
+        mIsLogicalCameraCallback(true) {
+    initCaptureCallbacksV2(lcbs);
+
+    if (lcbs != nullptr) {
+        mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
+        mOnLogicalCameraCaptureFailed = lcbs->onLogicalCameraCaptureFailed;
+    }
+}
+
 void
 CameraDevice::checkRepeatingSequenceCompleteLocked(
     const int sequenceId, const int64_t lastFrameNumber) {
@@ -1542,11 +1608,14 @@
 
     int32_t sequenceId = resultExtras.requestId;
     int32_t burstId = resultExtras.burstId;
+    int64_t frameNumber = resultExtras.frameNumber;
 
     auto it = dev->mSequenceCallbackMap.find(sequenceId);
     if (it != dev->mSequenceCallbackMap.end()) {
         CallbackHolder cbh = (*it).second;
         ACameraCaptureSession_captureCallback_start onStart = cbh.mOnCaptureStarted;
+        ACameraCaptureSession_captureCallback_startV2 onStart2 = cbh.mOnCaptureStarted2;
+        bool v2Callback = cbh.mIs2Callback;
         sp<ACameraCaptureSession> session = cbh.mSession;
         if ((size_t) burstId >= cbh.mRequests.size()) {
             ALOGE("%s: Error: request index %d out of bound (size %zu)",
@@ -1554,12 +1623,19 @@
             dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
         }
         sp<CaptureRequest> request = cbh.mRequests[burstId];
-        sp<AMessage> msg = new AMessage(kWhatCaptureStart, dev->mHandler);
+        sp<AMessage> msg = nullptr;
+        if (v2Callback) {
+            msg = new AMessage(kWhatCaptureStart2, dev->mHandler);
+            msg->setPointer(kCallbackFpKey, (void*) onStart2);
+        } else {
+            msg = new AMessage(kWhatCaptureStart, dev->mHandler);
+            msg->setPointer(kCallbackFpKey, (void*) onStart);
+        }
         msg->setPointer(kContextKey, cbh.mContext);
         msg->setObject(kSessionSpKey, session);
-        msg->setPointer(kCallbackFpKey, (void*) onStart);
         msg->setObject(kCaptureRequestKey, request);
         msg->setInt64(kTimeStampKey, timestamp);
+        msg->setInt64(kFrameNumberKey, frameNumber);
         dev->postSessionMsgAndCleanup(msg);
     }
     return ret;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 0b6c7c8..c306206 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -245,6 +245,7 @@
         kWhatSessionStateCb,   // onReady, onActive
         // Capture callbacks
         kWhatCaptureStart,     // onCaptureStarted
+        kWhatCaptureStart2,     // onCaptureStarted2
         kWhatCaptureResult,    // onCaptureProgressed, onCaptureCompleted
         kWhatLogicalCaptureResult, // onLogicalCameraCaptureCompleted
         kWhatCaptureFail,      // onCaptureFailed
@@ -309,11 +310,18 @@
                        const Vector<sp<CaptureRequest>>&  requests,
                        bool                               isRepeating,
                        ACameraCaptureSession_logicalCamera_captureCallbacks* lcbs);
-
-        template <class T>
-        void initCaptureCallbacks(T* cbs) {
+        CallbackHolder(sp<ACameraCaptureSession>          session,
+                       const Vector<sp<CaptureRequest> >& requests,
+                       bool                               isRepeating,
+                       ACameraCaptureSession_captureCallbacksV2* cbs);
+        CallbackHolder(sp<ACameraCaptureSession>          session,
+                       const Vector<sp<CaptureRequest> >& requests,
+                       bool                               isRepeating,
+                       ACameraCaptureSession_logicalCamera_captureCallbacksV2* lcbs);
+        void clearCallbacks() {
             mContext = nullptr;
             mOnCaptureStarted = nullptr;
+            mOnCaptureStarted2 = nullptr;
             mOnCaptureProgressed = nullptr;
             mOnCaptureCompleted = nullptr;
             mOnLogicalCameraCaptureCompleted = nullptr;
@@ -322,6 +330,24 @@
             mOnCaptureSequenceCompleted = nullptr;
             mOnCaptureSequenceAborted = nullptr;
             mOnCaptureBufferLost = nullptr;
+        }
+
+        template <class T>
+        void initCaptureCallbacksV2(T* cbs) {
+            clearCallbacks();
+            if (cbs != nullptr) {
+                mContext = cbs->context;
+                mOnCaptureStarted2 = cbs->onCaptureStarted;
+                mOnCaptureProgressed = cbs->onCaptureProgressed;
+                mOnCaptureSequenceCompleted = cbs->onCaptureSequenceCompleted;
+                mOnCaptureSequenceAborted = cbs->onCaptureSequenceAborted;
+                mOnCaptureBufferLost = cbs->onCaptureBufferLost;
+            }
+        }
+
+        template <class T>
+        void initCaptureCallbacks(T* cbs) {
+            clearCallbacks();
             if (cbs != nullptr) {
                 mContext = cbs->context;
                 mOnCaptureStarted = cbs->onCaptureStarted;
@@ -335,10 +361,12 @@
         sp<ACameraCaptureSession>   mSession;
         Vector<sp<CaptureRequest>>  mRequests;
         const bool                  mIsRepeating;
+        const bool                  mIs2Callback;
         const bool                  mIsLogicalCameraCallback;
 
         void*                       mContext;
         ACameraCaptureSession_captureCallback_start mOnCaptureStarted;
+        ACameraCaptureSession_captureCallback_startV2 mOnCaptureStarted2;
         ACameraCaptureSession_captureCallback_result mOnCaptureProgressed;
         ACameraCaptureSession_captureCallback_result mOnCaptureCompleted;
         ACameraCaptureSession_logicalCamera_captureCallback_result mOnLogicalCameraCaptureCompleted;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraManager.h b/camera/ndk/ndk_vendor/impl/ACameraManager.h
index 8359bb1..4663529 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraManager.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraManager.h
@@ -136,6 +136,8 @@
             return !(*this == other);
         }
         bool operator < (const Callback& other) const {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wordered-compare-function-pointers"
             if (*this == other) return false;
             if (mContext != other.mContext) return mContext < other.mContext;
             if (mAvailable != other.mAvailable) return mAvailable < other.mAvailable;
@@ -146,6 +148,7 @@
             if (mPhysicalCamUnavailable != other.mPhysicalCamUnavailable)
                     return mPhysicalCamUnavailable < other.mPhysicalCamUnavailable;
             return mUnavailable < other.mUnavailable;
+#pragma GCC diagnostic pop
         }
         bool operator > (const Callback& other) const {
             return (*this != other && !(*this < other));
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index ba14c5c..63cdb76 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -236,6 +236,11 @@
         return ACameraCaptureSession_capture(mSession, &mCaptureCallbacks, 1, &mStillRequest,
                                              &seqId);
     }
+    int takePicture2() {
+        int seqId;
+        return ACameraCaptureSession_captureV2(mSession, &mCaptureCallbacksV2, 1,
+                &mStillRequest, &seqId);
+    }
 
     int takeLogicalCameraPicture() {
         int seqId;
@@ -243,15 +248,31 @@
                 1, &mStillRequest, &seqId);
     }
 
+    int takeLogicalCameraPicture2() {
+        int seqId;
+        return ACameraCaptureSession_logicalCamera_captureV2(mSession,
+                &mLogicalCaptureCallbacksV2, 1, &mStillRequest, &seqId);
+    }
+
     bool checkCallbacks(int pictureCount) {
         std::lock_guard<std::mutex> lock(mMutex);
         if (mCompletedCaptureCallbackCount != pictureCount) {
-            ALOGE("Completed capture callaback count not as expected. expected %d actual %d",
+            ALOGE("Completed capture callback count not as expected. expected %d actual %d",
                   pictureCount, mCompletedCaptureCallbackCount);
             return false;
         }
         return true;
     }
+    bool checkCallbacksV2(int pictureCount) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        if (mCaptureStartedCallbackCount != pictureCount) {
+            ALOGE("Capture started callback count not as expected. expected %d actual %d",
+                  pictureCount, mCaptureStartedCallbackCount);
+            return false;
+        }
+        return true;
+    }
+
 
    private:
     ACameraDevice_StateCallbacks mDeviceCb{this, nullptr, nullptr};
@@ -276,6 +297,7 @@
     const char* mCameraId;
     ACameraManager* mCameraManager;
     int mCompletedCaptureCallbackCount = 0;
+    int mCaptureStartedCallbackCount = 0;
     std::mutex mMutex;
     ACameraCaptureSession_captureCallbacks mCaptureCallbacks = {
         // TODO: Add tests for other callbacks
@@ -293,8 +315,25 @@
         nullptr, // onCaptureSequenceAborted
         nullptr, // onCaptureBufferLost
     };
+    ACameraCaptureSession_captureCallbacksV2 mCaptureCallbacksV2 = {
+        this, // context
+        [](void* ctx, ACameraCaptureSession*, const ACaptureRequest*, int64_t,
+              int64_t frameNumber) {
+            CameraHelper *ch = static_cast<CameraHelper *>(ctx);
+            ASSERT_TRUE(frameNumber >= 0);
+            std::lock_guard<std::mutex> lock(ch->mMutex);
+            ch->mCaptureStartedCallbackCount++;
+        },
+        nullptr, // onCaptureProgressed
+        nullptr, // onCaptureCompleted
+        nullptr, // onCaptureFailed
+        nullptr, // onCaptureSequenceCompleted
+        nullptr, // onCaptureSequenceAborted
+        nullptr, // onCaptureBufferLost
+    };
 
     std::vector<std::string> mPhysicalCameraIds;
+
     ACameraCaptureSession_logicalCamera_captureCallbacks mLogicalCaptureCallbacks = {
         // TODO: Add tests for other callbacks
         this, // context
@@ -336,6 +375,23 @@
         nullptr, // onCaptureSequenceAborted
         nullptr, // onCaptureBufferLost
     };
+    ACameraCaptureSession_logicalCamera_captureCallbacksV2 mLogicalCaptureCallbacksV2 = {
+        this, // context
+        [](void* ctx, ACameraCaptureSession*, const ACaptureRequest*, int64_t,
+                int64_t frameNumber) {
+            CameraHelper *ch = static_cast<CameraHelper *>(ctx);
+            ASSERT_TRUE(frameNumber >= 0);
+            std::lock_guard<std::mutex> lock(ch->mMutex);
+            ch->mCaptureStartedCallbackCount++;
+        },
+        nullptr, // onCaptureProgressed
+        nullptr, // onLogicalCaptureCompleted
+        nullptr, // onLogicalCaptureFailed
+        nullptr, // onCaptureSequenceCompleted
+        nullptr, // onCaptureSequenceAborted
+        nullptr, // onCaptureBufferLost
+    };
+
 };
 
 class ImageReaderTestCase {
@@ -570,7 +626,7 @@
     }
 
     bool takePictures(const char* id, uint64_t readerUsage, int readerMaxImages,
-            bool readerAsync, int pictureCount) {
+            bool readerAsync, int pictureCount, bool v2 = false) {
         int ret = 0;
 
         ImageReaderTestCase testCase(
@@ -600,7 +656,11 @@
         }
 
         for (int i = 0; i < pictureCount; i++) {
-            ret = cameraHelper.takePicture();
+            if (v2) {
+                ret = cameraHelper.takePicture2();
+            } else {
+                ret = cameraHelper.takePicture();
+            }
             if (ret < 0) {
                 ALOGE("Unable to take picture");
                 return false;
@@ -617,7 +677,8 @@
             }
         }
         return testCase.getAcquiredImageCount() == pictureCount &&
-                cameraHelper.checkCallbacks(pictureCount);
+               (v2 ? cameraHelper.checkCallbacksV2(pictureCount)
+                   : cameraHelper.checkCallbacks(pictureCount));
     }
 
     bool testTakePicturesNative(const char* id) {
@@ -626,12 +687,14 @@
             for (auto& readerMaxImages : {1, 4, 8}) {
                 for (auto& readerAsync : {true, false}) {
                     for (auto& pictureCount : {1, 4, 8}) {
-                        if (!takePictures(id, readerUsage, readerMaxImages,
-                                readerAsync, pictureCount)) {
-                            ALOGE("Test takePictures failed for test case usage=%" PRIu64
-                                  ", maxImages=%d, async=%d, pictureCount=%d",
-                                  readerUsage, readerMaxImages, readerAsync, pictureCount);
-                            return false;
+                        for (auto& v2 : {true, false}) {
+                            if (!takePictures(id, readerUsage, readerMaxImages,
+                                    readerAsync, pictureCount, v2)) {
+                                ALOGE("Test takePictures failed for test case usage=%" PRIu64
+                                      ", maxImages=%d, async=%d, pictureCount=%d",
+                                      readerUsage, readerMaxImages, readerAsync, pictureCount);
+                                return false;
+                            }
                         }
                     }
                 }
@@ -725,7 +788,7 @@
         return;
     }
 
-    void testLogicalCameraPhysicalStream(bool usePhysicalSettings) {
+    void testLogicalCameraPhysicalStream(bool usePhysicalSettings, bool v2) {
         const char* cameraId = nullptr;
         ACameraMetadata* staticMetadata = nullptr;
         std::vector<const char*> physicalCameraIds;
@@ -772,7 +835,12 @@
         }
 
         for (int i = 0; i < pictureCount; i++) {
-            ret = cameraHelper.takeLogicalCameraPicture();
+            if (v2) {
+                ret = cameraHelper.takeLogicalCameraPicture2();
+            } else {
+                ret = cameraHelper.takeLogicalCameraPicture();
+            }
             ASSERT_EQ(ret, 0);
         }
 
@@ -793,8 +861,11 @@
             ALOGI("Testing window %p", testCase->getNativeWindow());
             ASSERT_EQ(testCase->getAcquiredImageCount(), pictureCount);
         }
-
-        ASSERT_TRUE(cameraHelper.checkCallbacks(pictureCount));
+        if (v2) {
+            ASSERT_TRUE(cameraHelper.checkCallbacksV2(pictureCount));
+        } else {
+            ASSERT_TRUE(cameraHelper.checkCallbacks(pictureCount));
+        }
 
         ACameraMetadata_free(staticMetadata);
     }
@@ -834,8 +905,10 @@
 }
 
 TEST_F(AImageReaderVendorTest, LogicalCameraPhysicalStream) {
-    testLogicalCameraPhysicalStream(false/*usePhysicalSettings*/);
-    testLogicalCameraPhysicalStream(true/*usePhysicalSettings*/);
+    for (auto& v2 : {true, false}) {
+        testLogicalCameraPhysicalStream(false/*usePhysicalSettings*/, v2);
+        testLogicalCameraPhysicalStream(true/*usePhysicalSettings*/, v2);
+    }
 }
 
 }  // namespace
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 9f2f430..17ea512 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -96,6 +96,12 @@
         return binder::Status::ok();
     };
 
+    virtual binder::Status onTorchStrengthLevelChanged(const String16& /*cameraId*/,
+            int32_t /*torchStrength*/) {
+        // No op
+        return binder::Status::ok();
+    }
+
     virtual binder::Status onCameraAccessPrioritiesChanged() {
         // No op
         return binder::Status::ok();
diff --git a/cmds/OWNERS b/cmds/OWNERS
index 0d32aac..a48c37a 100644
--- a/cmds/OWNERS
+++ b/cmds/OWNERS
@@ -1,3 +1,3 @@
 elaurent@google.com
+essick@google.com
 lajos@google.com
-marcone@google.com
diff --git a/cmds/screenrecord/Android.bp b/cmds/screenrecord/Android.bp
index 359a835..d0b3ce0 100644
--- a/cmds/screenrecord/Android.bp
+++ b/cmds/screenrecord/Android.bp
@@ -55,12 +55,6 @@
         "libGLESv2",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/av/media/libstagefright/include",
-        "frameworks/native/include/media/openmax",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index e6e3473..2e0b678 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -701,7 +701,7 @@
         printf("Display is %dx%d @%.2ffps (orientation=%s), layerStack=%u\n",
                 layerStackSpaceRect.getWidth(), layerStackSpaceRect.getHeight(),
                 displayMode.refreshRate, toCString(displayState.orientation),
-                displayState.layerStack);
+                displayState.layerStack.id);
         fflush(stdout);
     }
 
@@ -1067,7 +1067,7 @@
 
     std::optional<PhysicalDisplayId> displayId = SurfaceComposerClient::getInternalDisplayId();
     if (!displayId) {
-        fprintf(stderr, "Failed to get token for internal display\n");
+        fprintf(stderr, "Failed to get ID for internal display\n");
         return 1;
     }
 
@@ -1168,17 +1168,14 @@
             }
             break;
         case 'd':
-            gPhysicalDisplayId = PhysicalDisplayId(atoll(optarg));
-            if (gPhysicalDisplayId.value == 0) {
-                fprintf(stderr, "Please specify a valid physical display id\n");
-                return 2;
-            } else if (SurfaceComposerClient::
-                    getPhysicalDisplayToken(gPhysicalDisplayId) == nullptr) {
-                fprintf(stderr, "Invalid physical display id: %s\n",
-                        to_string(gPhysicalDisplayId).c_str());
-                return 2;
+            if (const auto id = android::DisplayId::fromValue<PhysicalDisplayId>(atoll(optarg));
+                id && SurfaceComposerClient::getPhysicalDisplayToken(*id)) {
+                gPhysicalDisplayId = *id;
+                break;
             }
-            break;
+
+            fprintf(stderr, "Invalid physical display ID\n");
+            return 2;
         default:
             if (ic != '?') {
                 fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/cmds/stagefright/Android.bp b/cmds/stagefright/Android.bp
new file mode 100644
index 0000000..e1fe07e
--- /dev/null
+++ b/cmds/stagefright/Android.bp
@@ -0,0 +1,276 @@
+package {
+    default_applicable_licenses: ["frameworks_av_cmds_stagefright_license"],
+}
+
+// Added automatically by a large-scale-change
+// See: http://go/android-license-faq
+license {
+    name: "frameworks_av_cmds_stagefright_license",
+    visibility: [":__subpackages__"],
+    license_kinds: [
+        "SPDX-license-identifier-Apache-2.0",
+    ],
+    license_text: [
+        "NOTICE",
+    ],
+}
+
+cc_binary {
+    name: "stagefright",
+
+    srcs: [
+        "AudioPlayer.cpp",
+        "stagefright.cpp",
+        "jpeg.cpp",
+        "SineSource.cpp",
+    ],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "libmedia",
+        "libmedia_codeclist",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libjpeg",
+        "libui",
+        "libgui",
+        "libcutils",
+        "liblog",
+        "libhidlbase",
+        "libdatasource",
+        "libaudioclient",
+        "android.hardware.media.omx@1.0",
+        "framework-permission-aidl-cpp",
+    ],
+
+    static_libs: ["framework-permission-aidl-cpp"],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+
+    system_ext_specific: true,
+}
+
+cc_binary {
+    name: "record",
+
+    srcs: [
+        "AudioPlayer.cpp",
+        "SineSource.cpp",
+        "record.cpp",
+    ],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+        "camera_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "libmedia",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libdatasource",
+        "libaudioclient",
+        "framework-permission-aidl-cpp",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "recordvideo",
+
+    srcs: [
+        "AudioPlayer.cpp",
+        "recordvideo.cpp",
+    ],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "libmedia",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libaudioclient",
+        "framework-permission-aidl-cpp",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "audioloop",
+
+    srcs: [
+        "AudioPlayer.cpp",
+        "SineSource.cpp",
+        "audioloop.cpp",
+    ],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "libmedia",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libaudioclient",
+        "framework-permission-aidl-cpp",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "stream",
+
+    srcs: ["stream.cpp"],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libui",
+        "libgui",
+        "libstagefright_foundation",
+        "libmedia",
+        "libcutils",
+        "libdatasource",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "codec",
+
+    srcs: [
+        "codec.cpp",
+        "SimplePlayer.cpp",
+    ],
+
+    header_libs: [
+        "libmediadrm_headers",
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libmedia",
+        "libmedia_omx",
+        "libaudioclient",
+        "libui",
+        "libgui",
+        "libcutils",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "mediafilter",
+
+    srcs: [
+        "filters/argbtorgba.rscript",
+        "filters/nightvision.rscript",
+        "filters/saturation.rscript",
+        "mediafilter.cpp",
+    ],
+
+    header_libs: [
+        "libmediadrm_headers",
+        "libmediametrics_headers",
+        "libstagefright_headers",
+        "rs-headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libmedia_omx",
+        "libui",
+        "libgui",
+        "libRScpp",
+    ],
+
+    static_libs: ["libstagefright_mediafilter"],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+
+    sanitize: {
+        cfi: true,
+    },
+}
+
+cc_binary {
+    name: "muxer",
+
+    srcs: ["muxer.cpp"],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libcutils",
+        "libc",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
deleted file mode 100644
index 803c4a4..0000000
--- a/cmds/stagefright/Android.mk
+++ /dev/null
@@ -1,276 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=       \
-        AudioPlayer.cpp \
-        stagefright.cpp \
-        jpeg.cpp        \
-        SineSource.cpp
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright libmedia libmedia_codeclist libutils libbinder \
-        libstagefright_foundation libjpeg libui libgui libcutils liblog \
-        libhidlbase libdatasource libaudioclient \
-        android.hardware.media.omx@1.0 \
-        framework-permission-aidl-cpp
-
-LOCAL_STATIC_LIBRARIES := framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/av/media/libstagefright/include \
-        frameworks/native/include/media/openmax \
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SYSTEM_EXT_MODULE:= true
-LOCAL_MODULE:= stagefright
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=         \
-        AudioPlayer.cpp \
-        SineSource.cpp    \
-        record.cpp
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright libmedia liblog libutils libbinder \
-        libstagefright_foundation libdatasource libaudioclient \
-        framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/camera/include \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax \
-        frameworks/native/include/media/hardware
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= record
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=         \
-        AudioPlayer.cpp \
-        recordvideo.cpp
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright libmedia liblog libutils libbinder \
-        libstagefright_foundation libaudioclient
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax \
-        frameworks/native/include/media/hardware \
-        framework-permission-aidl-cpp
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= recordvideo
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=         \
-        AudioPlayer.cpp \
-        SineSource.cpp    \
-        audioloop.cpp
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright libmedia liblog libutils libbinder \
-        libstagefright_foundation libaudioclient \
-        framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= audioloop
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=         \
-        stream.cpp    \
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright liblog libutils libbinder libui libgui \
-        libstagefright_foundation libmedia libcutils libdatasource
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= stream
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=               \
-        codec.cpp               \
-        SimplePlayer.cpp        \
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediadrm_headers \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright liblog libutils libbinder libstagefright_foundation \
-        libmedia libmedia_omx libaudioclient libui libgui libcutils
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= codec
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
-        filters/argbtorgba.rscript \
-        filters/nightvision.rscript \
-        filters/saturation.rscript \
-        mediafilter.cpp \
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediadrm_headers \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright \
-        liblog \
-        libutils \
-        libbinder \
-        libstagefright_foundation \
-        libmedia_omx \
-        libui \
-        libgui \
-        libRScpp \
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax \
-        frameworks/rs/cpp \
-        frameworks/rs \
-
-intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
-LOCAL_C_INCLUDES += $(intermediates)
-
-LOCAL_STATIC_LIBRARIES:= \
-        libstagefright_mediafilter
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= mediafilter
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-LOCAL_SANITIZE := cfi
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=               \
-        muxer.cpp            \
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright liblog libutils libbinder libstagefright_foundation \
-        libcutils libc
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= muxer
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
diff --git a/cmds/stagefright/AudioPlayer.cpp b/cmds/stagefright/AudioPlayer.cpp
index 55427ca..a63bde6 100644
--- a/cmds/stagefright/AudioPlayer.cpp
+++ b/cmds/stagefright/AudioPlayer.cpp
@@ -249,7 +249,8 @@
 
         mAudioTrack = new AudioTrack(
                 AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
-                0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this,
+                0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE,
+                wp<IAudioTrackCallback>::fromExisting(this),
                 0 /*notificationFrames*/);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
@@ -397,10 +398,6 @@
     mStartPosUs = 0;
 }
 
-// static
-void AudioPlayer::AudioCallback(int event, void *user, void *info) {
-    static_cast<AudioPlayer *>(user)->AudioCallback(event, info);
-}
 
 bool AudioPlayer::reachedEOS(status_t *finalStatus) {
     *finalStatus = OK;
@@ -455,20 +452,12 @@
     return 0;
 }
 
-void AudioPlayer::AudioCallback(int event, void *info) {
-    switch (event) {
-    case AudioTrack::EVENT_MORE_DATA:
-        {
-        AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-        size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
-        buffer->size = numBytesWritten;
-        }
-        break;
+size_t AudioPlayer::onMoreData(const AudioTrack::Buffer& buffer) {
+    return fillBuffer(buffer.raw, buffer.size);
+}
 
-    case AudioTrack::EVENT_STREAM_END:
-        mReachedEOS = true;
-        break;
-    }
+void AudioPlayer::onStreamEnd() {
+    mReachedEOS = true;
 }
 
 size_t AudioPlayer::fillBuffer(void *data, size_t size) {
diff --git a/cmds/stagefright/AudioPlayer.h b/cmds/stagefright/AudioPlayer.h
index 43550ea..608f54b 100644
--- a/cmds/stagefright/AudioPlayer.h
+++ b/cmds/stagefright/AudioPlayer.h
@@ -19,6 +19,7 @@
 #define AUDIO_PLAYER_H_
 
 #include <media/AudioResamplerPublic.h>
+#include <media/AudioTrack.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/MediaBuffer.h>
@@ -26,10 +27,9 @@
 
 namespace android {
 
-class AudioTrack;
 struct AwesomePlayer;
 
-class AudioPlayer {
+class AudioPlayer : AudioTrack::IAudioTrackCallback {
 public:
     enum {
         REACHED_EOS,
@@ -66,6 +66,9 @@
     status_t getPlaybackRate(AudioPlaybackRate *rate /* nonnull */);
 
 private:
+    friend sp<AudioPlayer>;
+    size_t onMoreData(const AudioTrack::Buffer& buffer) override;
+    void onStreamEnd() override;
     sp<MediaSource> mSource;
     sp<AudioTrack> mAudioTrack;
 
@@ -99,9 +102,6 @@
     int64_t mStartPosUs;
     const uint32_t mCreateFlags;
 
-    static void AudioCallback(int event, void *user, void *info);
-    void AudioCallback(int event, void *info);
-
     static size_t AudioSinkCallback(
             MediaPlayerBase::AudioSink *audioSink,
             void *data, size_t size, void *me,
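
The AudioPlayer changes above replace the static (event, user, info) callback with the AudioTrack::IAudioTrackCallback interface. A minimal sketch of the same pattern for a hypothetical renderer class follows; only the AudioTrack APIs visible in this patch are assumed.

    #include <cstring>
    #include <media/AudioTrack.h>

    namespace android {

    // Sketch: derive from the callback interface, override only the events needed,
    // then pass wp<IAudioTrackCallback>::fromExisting(this) to the AudioTrack
    // constructor, as AudioPlayer.cpp now does.
    class SilenceRenderer : public AudioTrack::IAudioTrackCallback {
      public:
        size_t onMoreData(const AudioTrack::Buffer& buffer) override {
            memset(buffer.raw, 0, buffer.size);  // fill the request with silence
            return buffer.size;                  // bytes produced
        }
        void onStreamEnd() override { mReachedEOS = true; }

      private:
        friend sp<SilenceRenderer>;  // let sp<> manage the object, as in AudioPlayer
        bool mReachedEOS = false;
    };

    }  // namespace android
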
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 4b41ff8..83f8fe9 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -166,9 +166,9 @@
         sp<MediaSource> decoder = SimpleDecodingSource::Create(encoder);
 
         if (playToSpeaker) {
-            AudioPlayer player(NULL);
-            player.setSource(decoder);
-            player.start();
+            sp<AudioPlayer> player = sp<AudioPlayer>::make(nullptr);
+            player->setSource(decoder);
+            player->start();
             sleep(duration);
 
 ALOGI("Line: %d", __LINE__);
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 098c278..5743ad6 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -32,7 +32,6 @@
 #include <media/stagefright/SimpleDecodingSource.h>
 #include <media/MediaPlayerInterface.h>
 
-#include "AudioPlayer.h"
 
 using namespace android;
 
@@ -274,17 +273,6 @@
     const int32_t kNumChannels = 2;
     sp<MediaSource> audioSource = new SineSource(kSampleRate, kNumChannels);
 
-#if 0
-    sp<MediaPlayerBase::AudioSink> audioSink;
-    AudioPlayer *player = new AudioPlayer(audioSink);
-    player->setSource(audioSource);
-    player->start();
-
-    sleep(10);
-
-    player->stop();
-#endif
-
     sp<AMessage> encMeta = new AMessage;
     encMeta->setString("mime",
             0 ? MEDIA_MIMETYPE_AUDIO_AMR_WB : MEDIA_MIMETYPE_AUDIO_AAC);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index c430f05..ec16bc2 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -220,7 +220,7 @@
     }
 
     if (gPlaybackAudio) {
-        AudioPlayer *player = new AudioPlayer(NULL);
+        sp<AudioPlayer> player = sp<AudioPlayer>::make(nullptr);
         player->setSource(rawSource);
         rawSource.clear();
 
@@ -235,9 +235,6 @@
             fprintf(stderr, "unable to start playback err=%d (0x%08x)\n", err, err);
         }
 
-        delete player;
-        player = NULL;
-
         return;
     } else if (gReproduceBug >= 3 && gReproduceBug <= 5) {
         int64_t durationUs;
diff --git a/drm/drmserver/drmserver.rc b/drm/drmserver/drmserver.rc
index eb176c1..0319ff9 100644
--- a/drm/drmserver/drmserver.rc
+++ b/drm/drmserver/drmserver.rc
@@ -3,7 +3,7 @@
     class main
     user drm
     group drm system inet drmrpc readproc
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
 
 on property:drm.service.enabled=true
     start drm
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 0ffe626..6e1e10b 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -28,7 +28,11 @@
         "DrmSessionManager.cpp",
         "SharedLibrary.cpp",
         "DrmHal.cpp",
+        "DrmHalHidl.cpp",
+        "DrmHalAidl.cpp",
         "CryptoHal.cpp",
+        "CryptoHalHidl.cpp",
+        "CryptoHalAidl.cpp",
         "DrmUtils.cpp",
     ],
 
@@ -63,10 +67,12 @@
         "android.hardware.drm@1.4",
         "libhidlallocatorutils",
         "libhidlbase",
+        "android.hardware.drm-V1-ndk",
     ],
 
     static_libs: [
-        "resourcemanager_aidl_interface-ndk_platform",
+        "resourcemanager_aidl_interface-ndk",
+        "libaidlcommonsupport",
     ],
 
     export_shared_lib_headers: [
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index e0db1c4..f95d527 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -16,389 +16,100 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "CryptoHal"
-#include <utils/Log.h>
-
-#include <android/hardware/drm/1.0/types.h>
-#include <android/hidl/manager/1.2/IServiceManager.h>
-#include <hidl/ServiceManagement.h>
-#include <hidlmemory/FrameworkUtils.h>
-#include <media/hardware/CryptoAPI.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaErrors.h>
 #include <mediadrm/CryptoHal.h>
+#include <mediadrm/CryptoHalHidl.h>
+#include <mediadrm/CryptoHalAidl.h>
 #include <mediadrm/DrmUtils.h>
 
-using drm::V1_0::BufferType;
-using drm::V1_0::DestinationBuffer;
-using drm::V1_0::ICryptoFactory;
-using drm::V1_0::ICryptoPlugin;
-using drm::V1_0::Mode;
-using drm::V1_0::Pattern;
-using drm::V1_0::SharedBuffer;
-using drm::V1_0::Status;
-using drm::V1_0::SubSample;
-
-using ::android::DrmUtils::toStatusT;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_handle;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::HidlMemory;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
-typedef drm::V1_2::Status Status_V1_2;
-
 namespace android {
 
-static hidl_vec<uint8_t> toHidlVec(const Vector<uint8_t> &vector) {
-    hidl_vec<uint8_t> vec;
-    vec.setToExternal(const_cast<uint8_t *>(vector.array()), vector.size());
-    return vec;
+CryptoHal::CryptoHal() {
+    mCryptoHalAidl = sp<CryptoHalAidl>::make();
+    mCryptoHalHidl = sp<CryptoHalHidl>::make();
 }
 
-static hidl_vec<uint8_t> toHidlVec(const void *ptr, size_t size) {
-    hidl_vec<uint8_t> vec;
-    vec.resize(size);
-    memcpy(vec.data(), ptr, size);
-    return vec;
-}
-
-static hidl_array<uint8_t, 16> toHidlArray16(const uint8_t *ptr) {
-    if (!ptr) {
-        return hidl_array<uint8_t, 16>();
-    }
-    return hidl_array<uint8_t, 16>(ptr);
-}
-
-
-static String8 toString8(hidl_string hString) {
-    return String8(hString.c_str());
-}
-
-
-CryptoHal::CryptoHal()
-    : mFactories(makeCryptoFactories()),
-      mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT),
-      mHeapSeqNum(0) {
-}
-
-CryptoHal::~CryptoHal() {
-}
-
-Vector<sp<ICryptoFactory>> CryptoHal::makeCryptoFactories() {
-    Vector<sp<ICryptoFactory>> factories;
-
-    auto manager = hardware::defaultServiceManager1_2();
-    if (manager != NULL) {
-        manager->listManifestByInterface(drm::V1_0::ICryptoFactory::descriptor,
-                [&factories](const hidl_vec<hidl_string> &registered) {
-                    for (const auto &instance : registered) {
-                        auto factory = drm::V1_0::ICryptoFactory::getService(instance);
-                        if (factory != NULL) {
-                            ALOGD("found drm@1.0 ICryptoFactory %s", instance.c_str());
-                            factories.push_back(factory);
-                        }
-                    }
-                }
-            );
-        manager->listManifestByInterface(drm::V1_1::ICryptoFactory::descriptor,
-                [&factories](const hidl_vec<hidl_string> &registered) {
-                    for (const auto &instance : registered) {
-                        auto factory = drm::V1_1::ICryptoFactory::getService(instance);
-                        if (factory != NULL) {
-                            ALOGD("found drm@1.1 ICryptoFactory %s", instance.c_str());
-                            factories.push_back(factory);
-                        }
-                    }
-                }
-            );
-    }
-
-    if (factories.size() == 0) {
-        // must be in passthrough mode, load the default passthrough service
-        auto passthrough = ICryptoFactory::getService();
-        if (passthrough != NULL) {
-            ALOGI("makeCryptoFactories: using default passthrough crypto instance");
-            factories.push_back(passthrough);
-        } else {
-            ALOGE("Failed to find any crypto factories");
-        }
-    }
-    return factories;
-}
-
-sp<ICryptoPlugin> CryptoHal::makeCryptoPlugin(const sp<ICryptoFactory>& factory,
-        const uint8_t uuid[16], const void *initData, size_t initDataSize) {
-
-    sp<ICryptoPlugin> plugin;
-    Return<void> hResult = factory->createPlugin(toHidlArray16(uuid),
-            toHidlVec(initData, initDataSize),
-            [&](Status status, const sp<ICryptoPlugin>& hPlugin) {
-                if (status != Status::OK) {
-                    ALOGE("Failed to make crypto plugin");
-                    return;
-                }
-                plugin = hPlugin;
-            }
-        );
-    if (!hResult.isOk()) {
-        mInitCheck = DEAD_OBJECT;
-    }
-    return plugin;
-}
-
+CryptoHal::~CryptoHal() {}
 
 status_t CryptoHal::initCheck() const {
-    return mInitCheck;
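+    // OK if either backend initialized; NO_INIT if either is still waiting for
+    // createPlugin(); otherwise surface the HIDL backend's error code.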
+    if (mCryptoHalAidl->initCheck() == OK || mCryptoHalHidl->initCheck() == OK) return OK;
+    if (mCryptoHalAidl->initCheck() == NO_INIT || mCryptoHalHidl->initCheck() == NO_INIT)
+        return NO_INIT;
+    return mCryptoHalHidl->initCheck();
 }
 
-
 bool CryptoHal::isCryptoSchemeSupported(const uint8_t uuid[16]) {
-    Mutex::Autolock autoLock(mLock);
-
-    for (size_t i = 0; i < mFactories.size(); i++) {
-        if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
-            return true;
-        }
-    }
-    return false;
+    return mCryptoHalAidl->isCryptoSchemeSupported(uuid) ||
+           mCryptoHalHidl->isCryptoSchemeSupported(uuid);
 }
 
-status_t CryptoHal::createPlugin(const uint8_t uuid[16], const void *data,
-        size_t size) {
-    Mutex::Autolock autoLock(mLock);
-
-    for (size_t i = 0; i < mFactories.size(); i++) {
-        if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
-            mPlugin = makeCryptoPlugin(mFactories[i], uuid, data, size);
-            if (mPlugin != NULL) {
-                mPluginV1_2 = drm::V1_2::ICryptoPlugin::castFrom(mPlugin);
-            }
-        }
-    }
-
-    if (mInitCheck == NO_INIT) {
-        mInitCheck = mPlugin == NULL ? ERROR_UNSUPPORTED : OK;
-    }
-
-    return mInitCheck;
+status_t CryptoHal::createPlugin(const uint8_t uuid[16], const void* data, size_t size) {
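+    // Try the AIDL backend first; fall back to HIDL only when AIDL cannot create the plugin.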
+    if (mCryptoHalAidl->createPlugin(uuid, data, size) != OK)
+        return mCryptoHalHidl->createPlugin(uuid, data, size);
+    return OK;
 }
 
 status_t CryptoHal::destroyPlugin() {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    mPlugin.clear();
-    mPluginV1_2.clear();
-    return OK;
+    // This requires plugin to be created.
+    if (mCryptoHalAidl->initCheck() == OK) return mCryptoHalAidl->destroyPlugin();
+    return mCryptoHalHidl->destroyPlugin();
 }
 
-bool CryptoHal::requiresSecureDecoderComponent(const char *mime) const {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return false;
-    }
-
-    Return<bool> hResult = mPlugin->requiresSecureDecoderComponent(hidl_string(mime));
-    if (!hResult.isOk()) {
-        return false;
-    }
-    return hResult;
-}
-
-
-/**
- * If the heap base isn't set, get the heap base from the HidlMemory
- * and send it to the HAL so it can map a remote heap of the same
- * size.  Once the heap base is established, shared memory buffers
- * are sent by providing an offset into the heap and a buffer size.
- */
-int32_t CryptoHal::setHeapBase(const sp<HidlMemory>& heap) {
-    if (heap == NULL || mHeapSeqNum < 0) {
-        ALOGE("setHeapBase(): heap %p mHeapSeqNum %d", heap.get(), mHeapSeqNum);
-        return -1;
-    }
-
-    Mutex::Autolock autoLock(mLock);
-
-    int32_t seqNum = mHeapSeqNum++;
-    uint32_t bufferId = static_cast<uint32_t>(seqNum);
-    mHeapSizes.add(seqNum, heap->size());
-    Return<void> hResult = mPlugin->setSharedBufferBase(*heap, bufferId);
-    ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
-    return seqNum;
-}
-
-void CryptoHal::clearHeapBase(int32_t seqNum) {
-    Mutex::Autolock autoLock(mLock);
-
-    /*
-     * Clear the remote shared memory mapping by setting the shared
-     * buffer base to a null hidl_memory.
-     *
-     * TODO: Add a releaseSharedBuffer method in a future DRM HAL
-     * API version to make this explicit.
-     */
-    ssize_t index = mHeapSizes.indexOfKey(seqNum);
-    if (index >= 0) {
-        if (mPlugin != NULL) {
-            uint32_t bufferId = static_cast<uint32_t>(seqNum);
-            Return<void> hResult = mPlugin->setSharedBufferBase(hidl_memory(), bufferId);
-            ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
-        }
-        mHeapSizes.removeItem(seqNum);
-    }
-}
-
-status_t CryptoHal::checkSharedBuffer(const ::SharedBuffer &buffer) {
-    int32_t seqNum = static_cast<int32_t>(buffer.bufferId);
-    // memory must be in one of the heaps that have been set
-    if (mHeapSizes.indexOfKey(seqNum) < 0) {
-        return UNKNOWN_ERROR;
-    }
-
-    // memory must be within the address space of the heap
-    size_t heapSize = mHeapSizes.valueFor(seqNum);
-    if (heapSize < buffer.offset + buffer.size ||
-            SIZE_MAX - buffer.offset < buffer.size) {
-        android_errorWriteLog(0x534e4554, "76221123");
-        return UNKNOWN_ERROR;
-    }
-
-    return OK;
-}
-
-ssize_t CryptoHal::decrypt(const uint8_t keyId[16], const uint8_t iv[16],
-        CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
-        const ::SharedBuffer &hSource, size_t offset,
-        const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
-        const ::DestinationBuffer &hDestination, AString *errorDetailMsg) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    Mode hMode;
-    switch(mode) {
-    case CryptoPlugin::kMode_Unencrypted:
-        hMode = Mode::UNENCRYPTED ;
-        break;
-    case CryptoPlugin::kMode_AES_CTR:
-        hMode = Mode::AES_CTR;
-        break;
-    case CryptoPlugin::kMode_AES_WV:
-        hMode = Mode::AES_CBC_CTS;
-        break;
-    case CryptoPlugin::kMode_AES_CBC:
-        hMode = Mode::AES_CBC;
-        break;
-    default:
-        return UNKNOWN_ERROR;
-    }
-
-    Pattern hPattern;
-    hPattern.encryptBlocks = pattern.mEncryptBlocks;
-    hPattern.skipBlocks = pattern.mSkipBlocks;
-
-    std::vector<SubSample> stdSubSamples;
-    for (size_t i = 0; i < numSubSamples; i++) {
-        SubSample subSample;
-        subSample.numBytesOfClearData = subSamples[i].mNumBytesOfClearData;
-        subSample.numBytesOfEncryptedData = subSamples[i].mNumBytesOfEncryptedData;
-        stdSubSamples.push_back(subSample);
-    }
-    auto hSubSamples = hidl_vec<SubSample>(stdSubSamples);
-
-    bool secure;
-    if (hDestination.type == BufferType::SHARED_MEMORY) {
-        status_t status = checkSharedBuffer(hDestination.nonsecureMemory);
-        if (status != OK) {
-            return status;
-        }
-        secure = false;
-    } else if (hDestination.type == BufferType::NATIVE_HANDLE) {
-        secure = true;
-    } else {
-        android_errorWriteLog(0x534e4554, "70526702");
-        return UNKNOWN_ERROR;
-    }
-
-    status_t status = checkSharedBuffer(hSource);
-    if (status != OK) {
-        return status;
-    }
-
-    status_t err = UNKNOWN_ERROR;
-    uint32_t bytesWritten = 0;
-
-    Return<void> hResult;
-
-    mLock.unlock();
-    if (mPluginV1_2 != NULL) {
-        hResult = mPluginV1_2->decrypt_1_2(secure, toHidlArray16(keyId), toHidlArray16(iv),
-                hMode, hPattern, hSubSamples, hSource, offset, hDestination,
-                [&](Status_V1_2 status, uint32_t hBytesWritten, hidl_string hDetailedError) {
-                    if (status == Status_V1_2::OK) {
-                        bytesWritten = hBytesWritten;
-                        *errorDetailMsg = toString8(hDetailedError);
-                    }
-                    err = toStatusT(status);
-                }
-            );
-    } else {
-        hResult = mPlugin->decrypt(secure, toHidlArray16(keyId), toHidlArray16(iv),
-                hMode, hPattern, hSubSamples, hSource, offset, hDestination,
-                [&](Status status, uint32_t hBytesWritten, hidl_string hDetailedError) {
-                    if (status == Status::OK) {
-                        bytesWritten = hBytesWritten;
-                        *errorDetailMsg = toString8(hDetailedError);
-                    }
-                    err = toStatusT(status);
-                }
-            );
-    }
-
-    err = hResult.isOk() ? err : DEAD_OBJECT;
-    if (err == OK) {
-        return bytesWritten;
-    }
-    return err;
+bool CryptoHal::requiresSecureDecoderComponent(const char* mime) const {
+    // This requires plugin to be created.
+    if (mCryptoHalAidl->initCheck() == OK)
+        return mCryptoHalAidl->requiresSecureDecoderComponent(mime);
+    return mCryptoHalHidl->requiresSecureDecoderComponent(mime);
 }
 
 void CryptoHal::notifyResolution(uint32_t width, uint32_t height) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
+    // This requires plugin to be created.
+    if (mCryptoHalAidl->initCheck() == OK) {
+        mCryptoHalAidl->notifyResolution(width, height);
         return;
     }
 
-    auto hResult = mPlugin->notifyResolution(width, height);
-    ALOGE_IF(!hResult.isOk(), "notifyResolution txn failed %s", hResult.description().c_str());
+    mCryptoHalHidl->notifyResolution(width, height);
 }
 
-status_t CryptoHal::setMediaDrmSession(const Vector<uint8_t> &sessionId) {
-    Mutex::Autolock autoLock(mLock);
+status_t CryptoHal::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
+    // This requires plugin to be created.
+    if (mCryptoHalAidl->initCheck() == OK) return mCryptoHalAidl->setMediaDrmSession(sessionId);
+    return mCryptoHalHidl->setMediaDrmSession(sessionId);
+}
 
-    if (mInitCheck != OK) {
-        return mInitCheck;
+ssize_t CryptoHal::decrypt(const uint8_t key[16], const uint8_t iv[16], CryptoPlugin::Mode mode,
+                           const CryptoPlugin::Pattern& pattern, const ::SharedBuffer& source,
+                           size_t offset, const CryptoPlugin::SubSample* subSamples,
+                           size_t numSubSamples, const ::DestinationBuffer& destination,
+                           AString* errorDetailMsg) {
+    // This requires plugin to be created.
+    if (mCryptoHalAidl->initCheck() == OK)
+        return mCryptoHalAidl->decrypt(key, iv, mode, pattern, source, offset, subSamples,
+                                       numSubSamples, destination, errorDetailMsg);
+    return mCryptoHalHidl->decrypt(key, iv, mode, pattern, source, offset, subSamples,
+                                   numSubSamples, destination, errorDetailMsg);
+}
+
+int32_t CryptoHal::setHeap(const sp<HidlMemory>& heap) {
+    // This requires plugin to be created.
+    if (mCryptoHalAidl->initCheck() == OK) return mCryptoHalAidl->setHeap(heap);
+    return mCryptoHalHidl->setHeap(heap);
+}
+
+void CryptoHal::unsetHeap(int32_t seqNum) {
+    // This requires plugin to be created.
+    if (mCryptoHalAidl->initCheck() == OK) {
+        mCryptoHalAidl->unsetHeap(seqNum);
+        return;
     }
 
-    auto err = mPlugin->setMediaDrmSession(toHidlVec(sessionId));
-    return err.isOk() ? toStatusT(err) : DEAD_OBJECT;
+    mCryptoHalHidl->unsetHeap(seqNum);
 }
 
-status_t CryptoHal::getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const {
-    Mutex::Autolock autoLock(mLock);
-    return DrmUtils::GetLogMessages<drm::V1_4::ICryptoPlugin>(mPlugin, logs);
+status_t CryptoHal::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+    // This requires plugin to be created.
+    if (mCryptoHalAidl->initCheck() == OK) return mCryptoHalAidl->getLogMessages(logs);
+    return mCryptoHalHidl->getLogMessages(logs);
 }
-}  // namespace android
+
+}  // namespace android
\ No newline at end of file
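
The rewritten CryptoHal above no longer talks to a HAL directly: every call is routed to the AIDL backend first, with the HIDL backend kept as the fallback. A minimal sketch of that dispatch pattern, assuming a simplified Backend interface and Status enum (both hypothetical stand-ins, not types from this change):

    #include <memory>
    #include <utility>

    // Hypothetical stand-ins for status_t and the shared Crypto/Drm HAL surface.
    enum class Status { OK, NO_INIT, ERROR_UNSUPPORTED, DEAD_OBJECT };

    struct Backend {
        virtual ~Backend() = default;
        virtual Status initCheck() const = 0;
        virtual Status createPlugin() = 0;
    };

    // Mirrors CryptoHal's policy: prefer AIDL, keep HIDL as the fallback.
    class Router {
      public:
        Router(std::unique_ptr<Backend> aidl, std::unique_ptr<Backend> hidl)
            : mAidl(std::move(aidl)), mHidl(std::move(hidl)) {}

        Status createPlugin() {
            // Only consult the fallback when the preferred backend fails.
            if (mAidl->createPlugin() != Status::OK) return mHidl->createPlugin();
            return Status::OK;
        }

        Status initCheck() const {
            // Healthy if either backend is healthy; still NO_INIT while neither
            // has been set up; otherwise surface the fallback backend's error.
            if (mAidl->initCheck() == Status::OK || mHidl->initCheck() == Status::OK)
                return Status::OK;
            if (mAidl->initCheck() == Status::NO_INIT || mHidl->initCheck() == Status::NO_INIT)
                return Status::NO_INIT;
            return mHidl->initCheck();
        }

      private:
        std::unique_ptr<Backend> mAidl;
        std::unique_ptr<Backend> mHidl;
    };

Plugin-scoped calls (decrypt, setHeap, notifyResolution, and so on) apply the same test, using the AIDL backend only when its initCheck() reports OK, which happens once createPlugin() has succeeded on that side.
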
diff --git a/drm/libmediadrm/CryptoHalAidl.cpp b/drm/libmediadrm/CryptoHalAidl.cpp
new file mode 100644
index 0000000..a688728
--- /dev/null
+++ b/drm/libmediadrm/CryptoHalAidl.cpp
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CryptoHalAidl"
+
+#include <aidlcommonsupport/NativeHandle.h>
+#include <android/binder_auto_utils.h>
+#include <android/binder_manager.h>
+#include <media/hardware/CryptoAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <mediadrm/CryptoHalAidl.h>
+#include <mediadrm/DrmUtils.h>
+
+using ::aidl::android::hardware::drm::BufferType;
+using ::aidl::android::hardware::drm::DecryptResult;
+using DestinationBufferAidl = ::aidl::android::hardware::drm::DestinationBuffer;
+using ::aidl::android::hardware::drm::Mode;
+using ::aidl::android::hardware::drm::Pattern;
+using SharedBufferAidl = ::aidl::android::hardware::drm::SharedBuffer;
+using ::aidl::android::hardware::drm::Status;
+using ::aidl::android::hardware::drm::SubSample;
+using ::aidl::android::hardware::drm::Uuid;
+
+using ::aidl::android::hardware::common::Ashmem;
+
+using ::android::sp;
+using ::android::DrmUtils::toStatusTAidl;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::HidlMemory;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+// -------Hidl interface related-----------------
+// TODO: replace before removing hidl interface
+
+using BufferTypeHidl = ::android::hardware::drm::V1_0::BufferType;
+using SharedBufferHidl = ::android::hardware::drm::V1_0::SharedBuffer;
+using DestinationBufferHidl = ::android::hardware::drm::V1_0::DestinationBuffer;
+
+// -------Hidl interface related end-------------
+
+namespace android {
+
+static Uuid toAidlUuid(const uint8_t* uuid) {
+    Uuid uuidAidl;
+    uuidAidl.uuid = std::vector<uint8_t>(uuid, uuid + 16);
+    return uuidAidl;
+}
+
+template <typename Byte = uint8_t>
+static std::vector<Byte> toStdVec(const Vector<uint8_t>& vector) {
+    auto v = reinterpret_cast<const Byte*>(vector.array());
+    std::vector<Byte> vec(v, v + vector.size());
+    return vec;
+}
+
+// -------Hidl interface related-----------------
+// TODO: replace before removing hidl interface
+status_t CryptoHalAidl::checkSharedBuffer(const SharedBufferHidl& buffer) {
+    int32_t seqNum = static_cast<int32_t>(buffer.bufferId);
+    // memory must be in one of the heaps that have been set
+    if (mHeapSizes.indexOfKey(seqNum) < 0) {
+        return UNKNOWN_ERROR;
+    }
+
+    // memory must be within the address space of the heap
+    size_t heapSize = mHeapSizes.valueFor(seqNum);
+    if (heapSize < buffer.offset + buffer.size || SIZE_MAX - buffer.offset < buffer.size) {
+        android_errorWriteLog(0x534e4554, "76221123");
+        return UNKNOWN_ERROR;
+    }
+
+    return OK;
+}
+
+static SharedBufferAidl hidlSharedBufferToAidlSharedBuffer(const SharedBufferHidl& buffer) {
+    SharedBufferAidl aidlsb;
+    aidlsb.bufferId = buffer.bufferId;
+    aidlsb.offset = buffer.offset;
+    aidlsb.size = buffer.size;
+    return aidlsb;
+}
+
+static DestinationBufferAidl hidlDestinationBufferToAidlDestinationBuffer(
+        const DestinationBufferHidl& buffer) {
+    DestinationBufferAidl aidldb;
+    // Skip the negative-value check on the conversion since the enum has only two elements.
+    aidldb.type = static_cast<BufferType>((int32_t)buffer.type);
+    aidldb.nonsecureMemory = hidlSharedBufferToAidlSharedBuffer(buffer.nonsecureMemory);
+    aidldb.secureMemory = ::android::makeToAidl(buffer.secureMemory.getNativeHandle());
+    return aidldb;
+}
+
+static hidl_vec<uint8_t> toHidlVec(const void* ptr, size_t size) {
+    hidl_vec<uint8_t> vec;
+    vec.resize(size);
+    memcpy(vec.data(), ptr, size);
+    return vec;
+}
+
+static const Vector<uint8_t> toVector(const std::vector<uint8_t>& vec) {
+    Vector<uint8_t> vector;
+    vector.appendArray(vec.data(), vec.size());
+    return *const_cast<const Vector<uint8_t>*>(&vector);
+}
+
+static String8 toString8(const std::string& string) {
+    return String8(string.c_str());
+}
+
+// -------Hidl interface related end--------------
+
+CryptoHalAidl::CryptoHalAidl()
+    : mFactories(makeCryptoFactories()),
+      mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT),
+      mHeapSeqNum(0) {}
+
+CryptoHalAidl::~CryptoHalAidl() {}
+
+std::vector<std::shared_ptr<ICryptoFactoryAidl>> CryptoHalAidl::makeCryptoFactories() {
+    std::vector<std::shared_ptr<ICryptoFactoryAidl>> factories;
+    AServiceManager_forEachDeclaredInstance(
+            ICryptoFactoryAidl::descriptor, static_cast<void*>(&factories),
+            [](const char* instance, void* context) {
+                auto fullName = std::string(ICryptoFactoryAidl::descriptor) + "/" + std::string(instance);
+                auto factory = ICryptoFactoryAidl::fromBinder(
+                        ::ndk::SpAIBinder(AServiceManager_getService(fullName.c_str())));
+                if (factory == nullptr) {
+                    ALOGE("ICryptoFactoryAidl not found. Instance name:[%s]", fullName.c_str());
+                    return;
+                }
+
+                ALOGI("found ICryptoFactoryAidl. Instance name:[%s]", fullName.c_str());
+                static_cast<std::vector<std::shared_ptr<ICryptoFactoryAidl>>*>(context)
+                        ->emplace_back(factory);
+            });
+
+    return factories;
+}
+
+status_t CryptoHalAidl::initCheck() const {
+    return mInitCheck;
+}
+
+bool CryptoHalAidl::isCryptoSchemeSupported(const uint8_t uuid[16]) {
+    Mutex::Autolock autoLock(mLock);
+
+    bool isSupported = false;
+    Uuid uuidAidl = toAidlUuid(uuid);
+    for (size_t i = 0; i < mFactories.size(); i++) {
+        if (mFactories[i]->isCryptoSchemeSupported(uuidAidl, &isSupported).isOk()) {
+            if (isSupported) break;
+        }
+    }
+    return isSupported;
+}
+
+status_t CryptoHalAidl::createPlugin(const uint8_t uuid[16], const void* data, size_t size) {
+    Mutex::Autolock autoLock(mLock);
+
+    bool isSupported = false;
+    Uuid uuidAidl = toAidlUuid(uuid);
+    std::vector<uint8_t> dataAidl = toStdVec(toVector(toHidlVec(data, size)));
+    for (size_t i = 0; i < mFactories.size(); i++) {
+        if (mFactories[i]->isCryptoSchemeSupported(uuidAidl, &isSupported).isOk() && isSupported) {
+            mPlugin = makeCryptoPlugin(mFactories[i], uuidAidl, dataAidl);
+            // Reserve place for future plugins with new versions
+
+            break;
+        }
+    }
+
+    if (mInitCheck == NO_INIT) {
+        mInitCheck = mPlugin == NULL ? ERROR_UNSUPPORTED : OK;
+    }
+
+    return mInitCheck;
+}
+
+std::shared_ptr<ICryptoPluginAidl> CryptoHalAidl::makeCryptoPlugin(
+        const std::shared_ptr<ICryptoFactoryAidl>& factory, const Uuid& uuidAidl,
+        const std::vector<uint8_t> initData) {
+    std::shared_ptr<ICryptoPluginAidl> pluginAidl;
+    if (factory->createPlugin(uuidAidl, initData, &pluginAidl).isOk()) {
+        ALOGI("Create ICryptoPluginAidl. UUID:[%s]", uuidAidl.toString().c_str());
+    } else {
+        mInitCheck = DEAD_OBJECT;
+        ALOGE("Failed to create ICryptoPluginAidl. UUID:[%s]", uuidAidl.toString().c_str());
+    }
+
+    return pluginAidl;
+}
+
+status_t CryptoHalAidl::destroyPlugin() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    mPlugin.reset();
+    return OK;
+}
+
+bool CryptoHalAidl::requiresSecureDecoderComponent(const char* mime) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return false;
+    }
+
+    std::string mimeStr = std::string(mime);
+    bool result;
+    if (!mPlugin->requiresSecureDecoderComponent(mimeStr, &result).isOk()) {
+        ALOGE("requiresSecureDecoderComponent failed. mime:[%s]", mime);
+        return false;
+    }
+
+    return result;
+}
+
+void CryptoHalAidl::notifyResolution(uint32_t width, uint32_t height) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    // Check negative width and height after type conversion
+    // Log error and return if any is negative
+    if ((int32_t)width < 0 || (int32_t)height < 0) {
+        ALOGE("Negative width: %d or height %d in notifyResolution", width, height);
+        return;
+    }
+
+    ::ndk::ScopedAStatus status = mPlugin->notifyResolution(width, height);
+    if (!status.isOk()) {
+        ALOGE("notifyResolution txn failed status code: %d", status.getServiceSpecificError());
+    }
+}
+
+status_t CryptoHalAidl::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    auto err = mPlugin->setMediaDrmSession(toStdVec(sessionId));
+    return err.isOk() ? toStatusTAidl(err.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+ssize_t CryptoHalAidl::decrypt(const uint8_t keyId[16], const uint8_t iv[16],
+                               CryptoPlugin::Mode mode, const CryptoPlugin::Pattern& pattern,
+                               const SharedBufferHidl& hSource, size_t offset,
+                               const CryptoPlugin::SubSample* subSamples, size_t numSubSamples,
+                               const DestinationBufferHidl& hDestination, AString* errorDetailMsg) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    Mode aMode;
+    switch (mode) {
+        case CryptoPlugin::kMode_Unencrypted:
+            aMode = Mode::UNENCRYPTED;
+            break;
+        case CryptoPlugin::kMode_AES_CTR:
+            aMode = Mode::AES_CTR;
+            break;
+        case CryptoPlugin::kMode_AES_WV:
+            aMode = Mode::AES_CBC_CTS;
+            break;
+        case CryptoPlugin::kMode_AES_CBC:
+            aMode = Mode::AES_CBC;
+            break;
+        default:
+            return UNKNOWN_ERROR;
+    }
+
+    Pattern aPattern;
+    aPattern.encryptBlocks = pattern.mEncryptBlocks;
+    aPattern.skipBlocks = pattern.mSkipBlocks;
+
+    std::vector<SubSample> stdSubSamples;
+    for (size_t i = 0; i < numSubSamples; i++) {
+        SubSample subSample;
+        subSample.numBytesOfClearData = subSamples[i].mNumBytesOfClearData;
+        subSample.numBytesOfEncryptedData = subSamples[i].mNumBytesOfEncryptedData;
+        stdSubSamples.push_back(subSample);
+    }
+
+    bool secure;
+    if (hDestination.type == BufferTypeHidl::SHARED_MEMORY) {
+        status_t status = checkSharedBuffer(hDestination.nonsecureMemory);
+        if (status != OK) {
+            return status;
+        }
+        secure = false;
+    } else if (hDestination.type == BufferTypeHidl::NATIVE_HANDLE) {
+        secure = true;
+    } else {
+        android_errorWriteLog(0x534e4554, "70526702");
+        return UNKNOWN_ERROR;
+    }
+
+    status_t status = checkSharedBuffer(hSource);
+    if (status != OK) {
+        return status;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+    mLock.unlock();
+
+    std::vector<uint8_t> keyIdAidl = std::vector<uint8_t>(keyId, keyId + 16);
+    std::vector<uint8_t> ivAidl = std::vector<uint8_t>(iv, iv + 16);
+    DecryptResult result;
+    err = mPlugin->decrypt(secure, keyIdAidl, ivAidl, aMode, aPattern, stdSubSamples,
+                           hidlSharedBufferToAidlSharedBuffer(hSource), offset,
+                           hidlDestinationBufferToAidlDestinationBuffer(hDestination), &result)
+                          .isOk()
+                  ? OK
+                  : DEAD_OBJECT;
+
+    *errorDetailMsg = toString8(result.detailedError);
+    if (err != OK) {
+        ALOGE("Failed on decrypt, error message:%s, bytes written:%d", result.detailedError.c_str(),
+              result.bytesWritten);
+        return err;
+    }
+
+    return result.bytesWritten;
+}
+
+int32_t CryptoHalAidl::setHeap(const sp<HidlMemory>& heap) {
+    if (heap == NULL || mHeapSeqNum < 0) {
+        ALOGE("setHeap(): heap %p mHeapSeqNum %d", heap.get(), mHeapSeqNum);
+        return -1;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+
+    int32_t seqNum = mHeapSeqNum++;
+    uint32_t bufferId = static_cast<uint32_t>(seqNum);
+    mHeapSizes.add(seqNum, heap->size());
+
+    Ashmem memAidl;
+    memAidl.fd.set(heap->handle()->data[0]);
+    memAidl.size = heap->size();
+
+    ALOGE_IF(!mPlugin->setSharedBufferBase(memAidl, bufferId).isOk(),
+             "setSharedBufferBase(): remote call failed");
+    return seqNum;
+}
+
+void CryptoHalAidl::unsetHeap(int32_t seqNum) {
+    Mutex::Autolock autoLock(mLock);
+
+    /*
+     * Clear the remote shared memory mapping by setting the shared
+     * buffer base to a null hidl_memory.
+     *
+     * TODO: Add a releaseSharedBuffer method in a future DRM HAL
+     * API version to make this explicit.
+     */
+    ssize_t index = mHeapSizes.indexOfKey(seqNum);
+    if (index >= 0) {
+        if (mPlugin != NULL) {
+            uint32_t bufferId = static_cast<uint32_t>(seqNum);
+            Ashmem memAidl;
+            memAidl.fd.set(-1);
+            memAidl.size = 0;
+            ALOGE_IF(!mPlugin->setSharedBufferBase(memAidl, bufferId).isOk(),
+                     "setSharedBufferBase(): remote call failed");
+        }
+        mHeapSizes.removeItem(seqNum);
+    }
+}
+
+status_t CryptoHalAidl::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+    Mutex::Autolock autoLock(mLock);
+    // Need to convert log messages
+
+    return DrmUtils::GetLogMessagesAidl<ICryptoPluginAidl>(mPlugin, logs);
+}
+}  // namespace android
\ No newline at end of file
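
CryptoHalAidl above discovers its factories from the AIDL instances declared in the device manifest: AServiceManager_forEachDeclaredInstance enumerates the declared instance names for a descriptor, and each name is resolved to a binder via AServiceManager_getService. A condensed sketch of that enumeration pattern, with IFooFactory standing in for an AIDL-generated factory interface such as ICryptoFactoryAidl (the interface name is a placeholder; the two NDK calls are the ones used above):

    #include <android/binder_auto_utils.h>
    #include <android/binder_manager.h>
    #include <memory>
    #include <string>
    #include <vector>

    std::vector<std::shared_ptr<IFooFactory>> enumerateDeclaredFactories() {
        std::vector<std::shared_ptr<IFooFactory>> out;
        AServiceManager_forEachDeclaredInstance(
                IFooFactory::descriptor, static_cast<void*>(&out),
                [](const char* instance, void* context) {
                    // Declared names are bare instance names; the service manager
                    // expects the fully qualified "interface/instance" form.
                    auto name = std::string(IFooFactory::descriptor) + "/" + instance;
                    auto binder = ::ndk::SpAIBinder(AServiceManager_getService(name.c_str()));
                    if (auto factory = IFooFactory::fromBinder(binder)) {
                        static_cast<std::vector<std::shared_ptr<IFooFactory>>*>(context)
                                ->push_back(factory);
                    }
                });
        return out;
    }

Unlike the HIDL path in the next file, there is no passthrough fallback here: when nothing is declared, the factory list stays empty and mInitCheck is set to ERROR_UNSUPPORTED.
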
diff --git a/drm/libmediadrm/CryptoHalHidl.cpp b/drm/libmediadrm/CryptoHalHidl.cpp
new file mode 100644
index 0000000..cbb6ddf
--- /dev/null
+++ b/drm/libmediadrm/CryptoHalHidl.cpp
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CryptoHalHidl"
+#include <utils/Log.h>
+
+#include <android/hardware/drm/1.0/types.h>
+#include <android/hidl/manager/1.2/IServiceManager.h>
+#include <hidl/ServiceManagement.h>
+#include <hidlmemory/FrameworkUtils.h>
+#include <media/hardware/CryptoAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <mediadrm/CryptoHalHidl.h>
+#include <mediadrm/DrmUtils.h>
+
+using drm::V1_0::BufferType;
+using drm::V1_0::DestinationBuffer;
+using drm::V1_0::ICryptoFactory;
+using drm::V1_0::ICryptoPlugin;
+using drm::V1_0::Mode;
+using drm::V1_0::Pattern;
+using drm::V1_0::SharedBuffer;
+using drm::V1_0::Status;
+using drm::V1_0::SubSample;
+
+using ::android::sp;
+using ::android::DrmUtils::toStatusT;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::HidlMemory;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+typedef drm::V1_2::Status Status_V1_2;
+
+namespace android {
+
+static hidl_vec<uint8_t> toHidlVec(const Vector<uint8_t>& vector) {
+    hidl_vec<uint8_t> vec;
+    vec.setToExternal(const_cast<uint8_t*>(vector.array()), vector.size());
+    return vec;
+}
+
+static hidl_vec<uint8_t> toHidlVec(const void* ptr, size_t size) {
+    hidl_vec<uint8_t> vec;
+    vec.resize(size);
+    memcpy(vec.data(), ptr, size);
+    return vec;
+}
+
+static hidl_array<uint8_t, 16> toHidlArray16(const uint8_t* ptr) {
+    if (!ptr) {
+        return hidl_array<uint8_t, 16>();
+    }
+    return hidl_array<uint8_t, 16>(ptr);
+}
+
+static String8 toString8(hidl_string hString) {
+    return String8(hString.c_str());
+}
+
+CryptoHalHidl::CryptoHalHidl()
+    : mFactories(makeCryptoFactories()),
+      mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT),
+      mHeapSeqNum(0) {}
+
+CryptoHalHidl::~CryptoHalHidl() {}
+
+Vector<sp<ICryptoFactory>> CryptoHalHidl::makeCryptoFactories() {
+    Vector<sp<ICryptoFactory>> factories;
+
+    auto manager = hardware::defaultServiceManager1_2();
+    if (manager != NULL) {
+        manager->listManifestByInterface(
+                drm::V1_0::ICryptoFactory::descriptor,
+                [&factories](const hidl_vec<hidl_string>& registered) {
+                    for (const auto& instance : registered) {
+                        auto factory = drm::V1_0::ICryptoFactory::getService(instance);
+                        if (factory != NULL) {
+                            ALOGD("found drm@1.0 ICryptoFactory %s", instance.c_str());
+                            factories.push_back(factory);
+                        }
+                    }
+                });
+        manager->listManifestByInterface(
+                drm::V1_1::ICryptoFactory::descriptor,
+                [&factories](const hidl_vec<hidl_string>& registered) {
+                    for (const auto& instance : registered) {
+                        auto factory = drm::V1_1::ICryptoFactory::getService(instance);
+                        if (factory != NULL) {
+                            ALOGD("found drm@1.1 ICryptoFactory %s", instance.c_str());
+                            factories.push_back(factory);
+                        }
+                    }
+                });
+    }
+
+    if (factories.size() == 0) {
+        // must be in passthrough mode, load the default passthrough service
+        auto passthrough = ICryptoFactory::getService();
+        if (passthrough != NULL) {
+            ALOGI("makeCryptoFactories: using default passthrough crypto instance");
+            factories.push_back(passthrough);
+        } else {
+            ALOGE("Failed to find any crypto factories");
+        }
+    }
+    return factories;
+}
+
+sp<ICryptoPlugin> CryptoHalHidl::makeCryptoPlugin(const sp<ICryptoFactory>& factory,
+                                                  const uint8_t uuid[16], const void* initData,
+                                                  size_t initDataSize) {
+    sp<ICryptoPlugin> plugin;
+    Return<void> hResult =
+            factory->createPlugin(toHidlArray16(uuid), toHidlVec(initData, initDataSize),
+                                  [&](Status status, const sp<ICryptoPlugin>& hPlugin) {
+                                      if (status != Status::OK) {
+                                          ALOGE("Failed to make crypto plugin");
+                                          return;
+                                      }
+                                      plugin = hPlugin;
+                                  });
+    if (!hResult.isOk()) {
+        mInitCheck = DEAD_OBJECT;
+    }
+    return plugin;
+}
+
+status_t CryptoHalHidl::initCheck() const {
+    return mInitCheck;
+}
+
+bool CryptoHalHidl::isCryptoSchemeSupported(const uint8_t uuid[16]) {
+    Mutex::Autolock autoLock(mLock);
+
+    for (size_t i = 0; i < mFactories.size(); i++) {
+        if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+status_t CryptoHalHidl::createPlugin(const uint8_t uuid[16], const void* data, size_t size) {
+    Mutex::Autolock autoLock(mLock);
+
+    for (size_t i = 0; i < mFactories.size(); i++) {
+        if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
+            mPlugin = makeCryptoPlugin(mFactories[i], uuid, data, size);
+            if (mPlugin != NULL) {
+                mPluginV1_2 = drm::V1_2::ICryptoPlugin::castFrom(mPlugin);
+            }
+        }
+    }
+
+    if (mInitCheck == NO_INIT) {
+        mInitCheck = mPlugin == NULL ? ERROR_UNSUPPORTED : OK;
+    }
+
+    return mInitCheck;
+}
+
+status_t CryptoHalHidl::destroyPlugin() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    mPlugin.clear();
+    mPluginV1_2.clear();
+    return OK;
+}
+
+bool CryptoHalHidl::requiresSecureDecoderComponent(const char* mime) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return false;
+    }
+
+    Return<bool> hResult = mPlugin->requiresSecureDecoderComponent(hidl_string(mime));
+    if (!hResult.isOk()) {
+        return false;
+    }
+    return hResult;
+}
+
+/**
+ * If the heap base isn't set, get the heap base from the HidlMemory
+ * and send it to the HAL so it can map a remote heap of the same
+ * size.  Once the heap base is established, shared memory buffers
+ * are sent by providing an offset into the heap and a buffer size.
+ */
+int32_t CryptoHalHidl::setHeapBase(const sp<HidlMemory>& heap) {
+    if (heap == NULL || mHeapSeqNum < 0) {
+        ALOGE("setHeapBase(): heap %p mHeapSeqNum %d", heap.get(), mHeapSeqNum);
+        return -1;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+
+    int32_t seqNum = mHeapSeqNum++;
+    uint32_t bufferId = static_cast<uint32_t>(seqNum);
+    mHeapSizes.add(seqNum, heap->size());
+    Return<void> hResult = mPlugin->setSharedBufferBase(*heap, bufferId);
+    ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
+    return seqNum;
+}
+
+void CryptoHalHidl::clearHeapBase(int32_t seqNum) {
+    Mutex::Autolock autoLock(mLock);
+
+    /*
+     * Clear the remote shared memory mapping by setting the shared
+     * buffer base to a null hidl_memory.
+     *
+     * TODO: Add a releaseSharedBuffer method in a future DRM HAL
+     * API version to make this explicit.
+     */
+    ssize_t index = mHeapSizes.indexOfKey(seqNum);
+    if (index >= 0) {
+        if (mPlugin != NULL) {
+            uint32_t bufferId = static_cast<uint32_t>(seqNum);
+            Return<void> hResult = mPlugin->setSharedBufferBase(hidl_memory(), bufferId);
+            ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
+        }
+        mHeapSizes.removeItem(seqNum);
+    }
+}
+
+status_t CryptoHalHidl::checkSharedBuffer(const ::SharedBuffer& buffer) {
+    int32_t seqNum = static_cast<int32_t>(buffer.bufferId);
+    // memory must be in one of the heaps that have been set
+    if (mHeapSizes.indexOfKey(seqNum) < 0) {
+        return UNKNOWN_ERROR;
+    }
+
+    // memory must be within the address space of the heap
+    size_t heapSize = mHeapSizes.valueFor(seqNum);
+    if (heapSize < buffer.offset + buffer.size || SIZE_MAX - buffer.offset < buffer.size) {
+        android_errorWriteLog(0x534e4554, "76221123");
+        return UNKNOWN_ERROR;
+    }
+
+    return OK;
+}
+
+ssize_t CryptoHalHidl::decrypt(const uint8_t keyId[16], const uint8_t iv[16],
+                               CryptoPlugin::Mode mode, const CryptoPlugin::Pattern& pattern,
+                               const drm::V1_0::SharedBuffer& hSource, size_t offset,
+                               const CryptoPlugin::SubSample* subSamples, size_t numSubSamples,
+                               const drm::V1_0::DestinationBuffer& hDestination,
+                               AString* errorDetailMsg) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    Mode hMode;
+    switch (mode) {
+        case CryptoPlugin::kMode_Unencrypted:
+            hMode = Mode::UNENCRYPTED;
+            break;
+        case CryptoPlugin::kMode_AES_CTR:
+            hMode = Mode::AES_CTR;
+            break;
+        case CryptoPlugin::kMode_AES_WV:
+            hMode = Mode::AES_CBC_CTS;
+            break;
+        case CryptoPlugin::kMode_AES_CBC:
+            hMode = Mode::AES_CBC;
+            break;
+        default:
+            return UNKNOWN_ERROR;
+    }
+
+    Pattern hPattern;
+    hPattern.encryptBlocks = pattern.mEncryptBlocks;
+    hPattern.skipBlocks = pattern.mSkipBlocks;
+
+    std::vector<SubSample> stdSubSamples;
+    for (size_t i = 0; i < numSubSamples; i++) {
+        SubSample subSample;
+        subSample.numBytesOfClearData = subSamples[i].mNumBytesOfClearData;
+        subSample.numBytesOfEncryptedData = subSamples[i].mNumBytesOfEncryptedData;
+        stdSubSamples.push_back(subSample);
+    }
+    auto hSubSamples = hidl_vec<SubSample>(stdSubSamples);
+
+    bool secure;
+    if (hDestination.type == BufferType::SHARED_MEMORY) {
+        status_t status = checkSharedBuffer(hDestination.nonsecureMemory);
+        if (status != OK) {
+            return status;
+        }
+        secure = false;
+    } else if (hDestination.type == BufferType::NATIVE_HANDLE) {
+        secure = true;
+    } else {
+        android_errorWriteLog(0x534e4554, "70526702");
+        return UNKNOWN_ERROR;
+    }
+
+    status_t status = checkSharedBuffer(hSource);
+    if (status != OK) {
+        return status;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+    uint32_t bytesWritten = 0;
+
+    Return<void> hResult;
+
+    mLock.unlock();
+    if (mPluginV1_2 != NULL) {
+        hResult = mPluginV1_2->decrypt_1_2(
+                secure, toHidlArray16(keyId), toHidlArray16(iv), hMode, hPattern, hSubSamples,
+                hSource, offset, hDestination,
+                [&](Status_V1_2 status, uint32_t hBytesWritten, hidl_string hDetailedError) {
+                    if (status == Status_V1_2::OK) {
+                        bytesWritten = hBytesWritten;
+                        *errorDetailMsg = toString8(hDetailedError);
+                    }
+                    err = toStatusT(status);
+                });
+    } else {
+        hResult = mPlugin->decrypt(
+                secure, toHidlArray16(keyId), toHidlArray16(iv), hMode, hPattern, hSubSamples,
+                hSource, offset, hDestination,
+                [&](Status status, uint32_t hBytesWritten, hidl_string hDetailedError) {
+                    if (status == Status::OK) {
+                        bytesWritten = hBytesWritten;
+                        *errorDetailMsg = toString8(hDetailedError);
+                    }
+                    err = toStatusT(status);
+                });
+    }
+
+    err = hResult.isOk() ? err : DEAD_OBJECT;
+    if (err == OK) {
+        return bytesWritten;
+    }
+    return err;
+}
+
+void CryptoHalHidl::notifyResolution(uint32_t width, uint32_t height) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    auto hResult = mPlugin->notifyResolution(width, height);
+    ALOGE_IF(!hResult.isOk(), "notifyResolution txn failed %s", hResult.description().c_str());
+}
+
+status_t CryptoHalHidl::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    auto err = mPlugin->setMediaDrmSession(toHidlVec(sessionId));
+    return err.isOk() ? toStatusT(err) : DEAD_OBJECT;
+}
+
+status_t CryptoHalHidl::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+    Mutex::Autolock autoLock(mLock);
+    return DrmUtils::GetLogMessages<drm::V1_4::ICryptoPlugin>(mPlugin, logs);
+}
+}  // namespace android
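
Both checkSharedBuffer() implementations (the AIDL copy earlier and the HIDL copy above) accept a shared buffer only when its (offset, size) range fits inside a previously registered heap and offset + size cannot wrap around. A compact restatement of that bounds check, assuming plain size_t inputs:

    #include <cstddef>
    #include <cstdint>

    // True only when [offset, offset + size) lies inside a heap of heapSize bytes.
    bool bufferWithinHeap(size_t heapSize, size_t offset, size_t size) {
        if (SIZE_MAX - offset < size) return false;  // offset + size would overflow
        return offset + size <= heapSize;            // range fits inside the heap
    }

This is the same condition the patch writes as heapSize < buffer.offset + buffer.size || SIZE_MAX - buffer.offset < buffer.size, where the second clause catches the case in which the addition in the first clause would wrap.
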
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 40d1e0c..fe8b9f6 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -17,1557 +17,273 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "DrmHal"
 
-#include <aidl/android/media/BnResourceManagerClient.h>
-#include <android/binder_manager.h>
-#include <android/hardware/drm/1.2/types.h>
-#include <android/hidl/manager/1.2/IServiceManager.h>
-#include <hidl/ServiceManagement.h>
-#include <media/EventMetric.h>
-#include <media/MediaMetrics.h>
-#include <media/PluginMetricsReporting.h>
-#include <media/drm/DrmAPI.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/foundation/base64.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaErrors.h>
 #include <mediadrm/DrmHal.h>
-#include <mediadrm/DrmSessionClientInterface.h>
-#include <mediadrm/DrmSessionManager.h>
-#include <mediadrm/IDrmMetricsConsumer.h>
+#include <mediadrm/DrmHalAidl.h>
+#include <mediadrm/DrmHalHidl.h>
 #include <mediadrm/DrmUtils.h>
-#include <utils/Log.h>
-
-#include <iomanip>
-#include <vector>
-
-using drm::V1_0::KeyedVector;
-using drm::V1_0::KeyRequestType;
-using drm::V1_0::KeyType;
-using drm::V1_0::KeyValue;
-using drm::V1_0::SecureStop;
-using drm::V1_0::SecureStopId;
-using drm::V1_0::Status;
-using drm::V1_1::HdcpLevel;
-using drm::V1_1::SecureStopRelease;
-using drm::V1_1::SecurityLevel;
-using drm::V1_2::KeySetId;
-using drm::V1_2::KeyStatusType;
-using ::android::DrmUtils::toStatusT;
-using ::android::hardware::drm::V1_1::DrmMetricGroup;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::os::PersistableBundle;
-using ::android::sp;
-
-typedef drm::V1_1::KeyRequestType KeyRequestType_V1_1;
-typedef drm::V1_2::Status Status_V1_2;
-typedef drm::V1_2::HdcpLevel HdcpLevel_V1_2;
-
-namespace {
-
-// This constant corresponds to the PROPERTY_DEVICE_UNIQUE_ID constant
-// in the MediaDrm API.
-constexpr char kPropertyDeviceUniqueId[] = "deviceUniqueId";
-constexpr char kEqualsSign[] = "=";
-
-template<typename T>
-std::string toBase64StringNoPad(const T* data, size_t size) {
-    // Note that the base 64 conversion only works with arrays of single-byte
-    // values. If the source is empty or is not an array of single-byte values,
-    // return empty string.
-    if (size == 0 || sizeof(data[0]) != 1) {
-      return "";
-    }
-
-    android::AString outputString;
-    encodeBase64(data, size, &outputString);
-    // Remove trailing equals padding if it exists.
-    while (outputString.size() > 0 && outputString.endsWith(kEqualsSign)) {
-        outputString.erase(outputString.size() - 1, 1);
-    }
-
-    return std::string(outputString.c_str(), outputString.size());
-}
-
-}  // anonymous namespace
 
 namespace android {
 
-#define INIT_CHECK() {if (mInitCheck != OK) return mInitCheck;}
-
-static const Vector<uint8_t> toVector(const hidl_vec<uint8_t> &vec) {
-    Vector<uint8_t> vector;
-    vector.appendArray(vec.data(), vec.size());
-    return *const_cast<const Vector<uint8_t> *>(&vector);
+DrmHal::DrmHal() {
+    mDrmHalHidl = sp<DrmHalHidl>::make();
+    mDrmHalAidl = ndk::SharedRefBase::make<DrmHalAidl>();
 }
 
-static hidl_vec<uint8_t> toHidlVec(const Vector<uint8_t> &vector) {
-    hidl_vec<uint8_t> vec;
-    vec.setToExternal(const_cast<uint8_t *>(vector.array()), vector.size());
-    return vec;
-}
-
-static String8 toString8(const hidl_string &string) {
-    return String8(string.c_str());
-}
-
-static hidl_string toHidlString(const String8& string) {
-    return hidl_string(string.string());
-}
-
-static DrmPlugin::SecurityLevel toSecurityLevel(SecurityLevel level) {
-    switch(level) {
-    case SecurityLevel::SW_SECURE_CRYPTO:
-        return DrmPlugin::kSecurityLevelSwSecureCrypto;
-    case SecurityLevel::SW_SECURE_DECODE:
-        return DrmPlugin::kSecurityLevelSwSecureDecode;
-    case SecurityLevel::HW_SECURE_CRYPTO:
-        return DrmPlugin::kSecurityLevelHwSecureCrypto;
-    case SecurityLevel::HW_SECURE_DECODE:
-        return DrmPlugin::kSecurityLevelHwSecureDecode;
-    case SecurityLevel::HW_SECURE_ALL:
-        return DrmPlugin::kSecurityLevelHwSecureAll;
-    default:
-        return DrmPlugin::kSecurityLevelUnknown;
-    }
-}
-
-static SecurityLevel toHidlSecurityLevel(DrmPlugin::SecurityLevel level) {
-    switch(level) {
-    case DrmPlugin::kSecurityLevelSwSecureCrypto:
-        return SecurityLevel::SW_SECURE_CRYPTO;
-    case DrmPlugin::kSecurityLevelSwSecureDecode:
-        return SecurityLevel::SW_SECURE_DECODE;
-    case DrmPlugin::kSecurityLevelHwSecureCrypto:
-        return SecurityLevel::HW_SECURE_CRYPTO;
-    case DrmPlugin::kSecurityLevelHwSecureDecode:
-        return SecurityLevel::HW_SECURE_DECODE;
-    case DrmPlugin::kSecurityLevelHwSecureAll:
-        return SecurityLevel::HW_SECURE_ALL;
-    default:
-        return SecurityLevel::UNKNOWN;
-    }
-}
-
-static DrmPlugin::OfflineLicenseState toOfflineLicenseState(
-        OfflineLicenseState licenseState) {
-    switch(licenseState) {
-    case OfflineLicenseState::USABLE:
-        return DrmPlugin::kOfflineLicenseStateUsable;
-    case OfflineLicenseState::INACTIVE:
-        return DrmPlugin::kOfflineLicenseStateReleased;
-    default:
-        return DrmPlugin::kOfflineLicenseStateUnknown;
-    }
-}
-
-static DrmPlugin::HdcpLevel toHdcpLevel(HdcpLevel_V1_2 level) {
-    switch(level) {
-    case HdcpLevel_V1_2::HDCP_NONE:
-        return DrmPlugin::kHdcpNone;
-    case HdcpLevel_V1_2::HDCP_V1:
-        return DrmPlugin::kHdcpV1;
-    case HdcpLevel_V1_2::HDCP_V2:
-        return DrmPlugin::kHdcpV2;
-    case HdcpLevel_V1_2::HDCP_V2_1:
-        return DrmPlugin::kHdcpV2_1;
-    case HdcpLevel_V1_2::HDCP_V2_2:
-        return DrmPlugin::kHdcpV2_2;
-    case HdcpLevel_V1_2::HDCP_V2_3:
-        return DrmPlugin::kHdcpV2_3;
-    case HdcpLevel_V1_2::HDCP_NO_OUTPUT:
-        return DrmPlugin::kHdcpNoOutput;
-    default:
-        return DrmPlugin::kHdcpLevelUnknown;
-    }
-}
-static ::KeyedVector toHidlKeyedVector(const KeyedVector<String8, String8>&
-        keyedVector) {
-    std::vector<KeyValue> stdKeyedVector;
-    for (size_t i = 0; i < keyedVector.size(); i++) {
-        KeyValue keyValue;
-        keyValue.key = toHidlString(keyedVector.keyAt(i));
-        keyValue.value = toHidlString(keyedVector.valueAt(i));
-        stdKeyedVector.push_back(keyValue);
-    }
-    return ::KeyedVector(stdKeyedVector);
-}
-
-static KeyedVector<String8, String8> toKeyedVector(const ::KeyedVector&
-        hKeyedVector) {
-    KeyedVector<String8, String8> keyedVector;
-    for (size_t i = 0; i < hKeyedVector.size(); i++) {
-        keyedVector.add(toString8(hKeyedVector[i].key),
-                toString8(hKeyedVector[i].value));
-    }
-    return keyedVector;
-}
-
-static List<Vector<uint8_t>> toSecureStops(const hidl_vec<SecureStop>&
-        hSecureStops) {
-    List<Vector<uint8_t>> secureStops;
-    for (size_t i = 0; i < hSecureStops.size(); i++) {
-        secureStops.push_back(toVector(hSecureStops[i].opaqueData));
-    }
-    return secureStops;
-}
-
-static List<Vector<uint8_t>> toSecureStopIds(const hidl_vec<SecureStopId>&
-        hSecureStopIds) {
-    List<Vector<uint8_t>> secureStopIds;
-    for (size_t i = 0; i < hSecureStopIds.size(); i++) {
-        secureStopIds.push_back(toVector(hSecureStopIds[i]));
-    }
-    return secureStopIds;
-}
-
-static List<Vector<uint8_t>> toKeySetIds(const hidl_vec<KeySetId>&
-        hKeySetIds) {
-    List<Vector<uint8_t>> keySetIds;
-    for (size_t i = 0; i < hKeySetIds.size(); i++) {
-        keySetIds.push_back(toVector(hKeySetIds[i]));
-    }
-    return keySetIds;
-}
-
-Mutex DrmHal::mLock;
-
-struct DrmHal::DrmSessionClient : public aidl::android::media::BnResourceManagerClient {
-    explicit DrmSessionClient(DrmHal* drm, const Vector<uint8_t>& sessionId)
-      : mSessionId(sessionId),
-        mDrm(drm) {}
-
-    ::ndk::ScopedAStatus reclaimResource(bool* _aidl_return) override;
-    ::ndk::ScopedAStatus getName(::std::string* _aidl_return) override;
-
-    const Vector<uint8_t> mSessionId;
-
-    virtual ~DrmSessionClient();
-
-private:
-    wp<DrmHal> mDrm;
-
-    DISALLOW_EVIL_CONSTRUCTORS(DrmSessionClient);
-};
-
-::ndk::ScopedAStatus DrmHal::DrmSessionClient::reclaimResource(bool* _aidl_return) {
-    auto sessionId = mSessionId;
-    sp<DrmHal> drm = mDrm.promote();
-    if (drm == NULL) {
-        *_aidl_return = true;
-        return ::ndk::ScopedAStatus::ok();
-    }
-    status_t err = drm->closeSession(sessionId);
-    if (err != OK) {
-        *_aidl_return = false;
-        return ::ndk::ScopedAStatus::ok();
-    }
-    drm->sendEvent(EventType::SESSION_RECLAIMED,
-            toHidlVec(sessionId), hidl_vec<uint8_t>());
-    *_aidl_return = true;
-    return ::ndk::ScopedAStatus::ok();
-}
-
-::ndk::ScopedAStatus DrmHal::DrmSessionClient::getName(::std::string* _aidl_return) {
-    String8 name;
-    sp<DrmHal> drm = mDrm.promote();
-    if (drm == NULL) {
-        name.append("<deleted>");
-    } else if (drm->getPropertyStringInternal(String8("vendor"), name) != OK
-        || name.isEmpty()) {
-      name.append("<Get vendor failed or is empty>");
-    }
-    name.append("[");
-    for (size_t i = 0; i < mSessionId.size(); ++i) {
-        name.appendFormat("%02x", mSessionId[i]);
-    }
-    name.append("]");
-    *_aidl_return = name;
-    return ::ndk::ScopedAStatus::ok();
-}
-
-DrmHal::DrmSessionClient::~DrmSessionClient() {
-    DrmSessionManager::Instance()->removeSession(mSessionId);
-}
-
-DrmHal::DrmHal()
-   : mFactories(makeDrmFactories()),
-     mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT) {
-}
-
-void DrmHal::closeOpenSessions() {
-    Mutex::Autolock autoLock(mLock);
-    auto openSessions = mOpenSessions;
-    for (size_t i = 0; i < openSessions.size(); i++) {
-        mLock.unlock();
-        closeSession(openSessions[i]->mSessionId);
-        mLock.lock();
-    }
-    mOpenSessions.clear();
-}
-
-DrmHal::~DrmHal() {
-}
-
-void DrmHal::cleanup() {
-    closeOpenSessions();
-
-    Mutex::Autolock autoLock(mLock);
-    reportFrameworkMetrics(reportPluginMetrics());
-
-    setListener(NULL);
-    mInitCheck = NO_INIT;
-    if (mPluginV1_2 != NULL) {
-        if (!mPluginV1_2->setListener(NULL).isOk()) {
-            mInitCheck = DEAD_OBJECT;
-        }
-    } else if (mPlugin != NULL) {
-        if (!mPlugin->setListener(NULL).isOk()) {
-            mInitCheck = DEAD_OBJECT;
-        }
-    }
-    mPlugin.clear();
-    mPluginV1_1.clear();
-    mPluginV1_2.clear();
-    mPluginV1_4.clear();
-}
-
-std::vector<sp<IDrmFactory>> DrmHal::makeDrmFactories() {
-    static std::vector<sp<IDrmFactory>> factories(DrmUtils::MakeDrmFactories());
-    if (factories.size() == 0) {
-        // must be in passthrough mode, load the default passthrough service
-        auto passthrough = IDrmFactory::getService();
-        if (passthrough != NULL) {
-            DrmUtils::LOG2BI("makeDrmFactories: using default passthrough drm instance");
-            factories.push_back(passthrough);
-        } else {
-            DrmUtils::LOG2BE("Failed to find any drm factories");
-        }
-    }
-    return factories;
-}
-
-sp<IDrmPlugin> DrmHal::makeDrmPlugin(const sp<IDrmFactory>& factory,
-        const uint8_t uuid[16], const String8& appPackageName) {
-    mAppPackageName = appPackageName;
-    mMetrics.SetAppPackageName(appPackageName);
-    mMetrics.SetAppUid(AIBinder_getCallingUid());
-
-    sp<IDrmPlugin> plugin;
-    Return<void> hResult = factory->createPlugin(uuid, appPackageName.string(),
-            [&](Status status, const sp<IDrmPlugin>& hPlugin) {
-                if (status != Status::OK) {
-                    DrmUtils::LOG2BE(uuid, "Failed to make drm plugin: %d", status);
-                    return;
-                }
-                plugin = hPlugin;
-            }
-        );
-
-    if (!hResult.isOk()) {
-        DrmUtils::LOG2BE(uuid, "createPlugin remote call failed: %s",
-                         hResult.description().c_str());
-    }
-
-    return plugin;
-}
+DrmHal::~DrmHal() {}
 
 status_t DrmHal::initCheck() const {
-    return mInitCheck;
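+    // OK if either backend initialized, NO_INIT while neither has been set up,
+    // otherwise surface the HIDL backend's error code.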
+    if (mDrmHalAidl->initCheck() == OK || mDrmHalHidl->initCheck() == OK) return OK;
+    if (mDrmHalAidl->initCheck() == NO_INIT || mDrmHalHidl->initCheck() == NO_INIT) return NO_INIT;
+    return mDrmHalHidl->initCheck();
 }
 
-status_t DrmHal::setListener(const sp<IDrmClient>& listener)
-{
-    Mutex::Autolock lock(mEventLock);
-    mListener = listener;
-    return NO_ERROR;
+status_t DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+                                         DrmPlugin::SecurityLevel securityLevel, bool* result) {
+    status_t statusResult;
+    statusResult = mDrmHalAidl->isCryptoSchemeSupported(uuid, mimeType, securityLevel, result);
+    if (*result) return statusResult;
+    return mDrmHalHidl->isCryptoSchemeSupported(uuid, mimeType, securityLevel, result);
 }
 
-Return<void> DrmHal::sendEvent(EventType hEventType,
-        const hidl_vec<uint8_t>& sessionId, const hidl_vec<uint8_t>& data) {
-    mMetrics.mEventCounter.Increment(hEventType);
-
-    mEventLock.lock();
-    sp<IDrmClient> listener = mListener;
-    mEventLock.unlock();
-
-    if (listener != NULL) {
-        Mutex::Autolock lock(mNotifyLock);
-        DrmPlugin::EventType eventType;
-        switch(hEventType) {
-        case EventType::PROVISION_REQUIRED:
-            eventType = DrmPlugin::kDrmPluginEventProvisionRequired;
-            break;
-        case EventType::KEY_NEEDED:
-            eventType = DrmPlugin::kDrmPluginEventKeyNeeded;
-            break;
-        case EventType::KEY_EXPIRED:
-            eventType = DrmPlugin::kDrmPluginEventKeyExpired;
-            break;
-        case EventType::VENDOR_DEFINED:
-            eventType = DrmPlugin::kDrmPluginEventVendorDefined;
-            break;
-        case EventType::SESSION_RECLAIMED:
-            eventType = DrmPlugin::kDrmPluginEventSessionReclaimed;
-            break;
-        default:
-            return Void();
-        }
-        listener->sendEvent(eventType, sessionId, data);
-    }
-    return Void();
-}
-
-Return<void> DrmHal::sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId,
-        int64_t expiryTimeInMS) {
-
-    mEventLock.lock();
-    sp<IDrmClient> listener = mListener;
-    mEventLock.unlock();
-
-    if (listener != NULL) {
-        Mutex::Autolock lock(mNotifyLock);
-        listener->sendExpirationUpdate(sessionId, expiryTimeInMS);
-    }
-    return Void();
-}
-
-Return<void> DrmHal::sendKeysChange(const hidl_vec<uint8_t>& sessionId,
-        const hidl_vec<KeyStatus_V1_0>& keyStatusList_V1_0, bool hasNewUsableKey) {
-    std::vector<KeyStatus> keyStatusVec;
-    for (const auto &keyStatus_V1_0 : keyStatusList_V1_0) {
-        keyStatusVec.push_back({keyStatus_V1_0.keyId,
-                static_cast<KeyStatusType>(keyStatus_V1_0.type)});
-    }
-    hidl_vec<KeyStatus> keyStatusList_V1_2(keyStatusVec);
-    return sendKeysChange_1_2(sessionId, keyStatusList_V1_2, hasNewUsableKey);
-}
-
-Return<void> DrmHal::sendKeysChange_1_2(const hidl_vec<uint8_t>& sessionId,
-        const hidl_vec<KeyStatus>& hKeyStatusList, bool hasNewUsableKey) {
-
-    mEventLock.lock();
-    sp<IDrmClient> listener = mListener;
-    mEventLock.unlock();
-
-    if (listener != NULL) {
-        std::vector<DrmKeyStatus> keyStatusList;
-        size_t nKeys = hKeyStatusList.size();
-        for (size_t i = 0; i < nKeys; ++i) {
-            const KeyStatus &keyStatus = hKeyStatusList[i];
-            uint32_t type;
-            switch(keyStatus.type) {
-            case KeyStatusType::USABLE:
-                type = DrmPlugin::kKeyStatusType_Usable;
-                break;
-            case KeyStatusType::EXPIRED:
-                type = DrmPlugin::kKeyStatusType_Expired;
-                break;
-            case KeyStatusType::OUTPUTNOTALLOWED:
-                type = DrmPlugin::kKeyStatusType_OutputNotAllowed;
-                break;
-            case KeyStatusType::STATUSPENDING:
-                type = DrmPlugin::kKeyStatusType_StatusPending;
-                break;
-            case KeyStatusType::USABLEINFUTURE:
-                type = DrmPlugin::kKeyStatusType_UsableInFuture;
-                break;
-            case KeyStatusType::INTERNALERROR:
-            default:
-                type = DrmPlugin::kKeyStatusType_InternalError;
-                break;
-            }
-            keyStatusList.push_back({type, keyStatus.keyId});
-            mMetrics.mKeyStatusChangeCounter.Increment(keyStatus.type);
-        }
-
-        Mutex::Autolock lock(mNotifyLock);
-        listener->sendKeysChange(sessionId, keyStatusList, hasNewUsableKey);
-    } else {
-        // There's no listener. But we still want to count the key change
-        // events.
-        size_t nKeys = hKeyStatusList.size();
-        for (size_t i = 0; i < nKeys; i++) {
-            mMetrics.mKeyStatusChangeCounter.Increment(hKeyStatusList[i].type);
-        }
-    }
-
-    return Void();
-}
-
-Return<void> DrmHal::sendSessionLostState(
-        const hidl_vec<uint8_t>& sessionId) {
-
-    mEventLock.lock();
-    sp<IDrmClient> listener = mListener;
-    mEventLock.unlock();
-
-    if (listener != NULL) {
-        Mutex::Autolock lock(mNotifyLock);
-        listener->sendSessionLostState(sessionId);
-    }
-    return Void();
-}
-
-status_t DrmHal::matchMimeTypeAndSecurityLevel(const sp<IDrmFactory> &factory,
-                                               const uint8_t uuid[16],
-                                               const String8 &mimeType,
-                                               DrmPlugin::SecurityLevel level,
-                                               bool *isSupported) {
-    *isSupported = false;
-
-    // handle default value cases
-    if (level == DrmPlugin::kSecurityLevelUnknown) {
-        if (mimeType == "") {
-            // isCryptoSchemeSupported(uuid)
-            *isSupported = true;
-        } else {
-            // isCryptoSchemeSupported(uuid, mimeType)
-            *isSupported = factory->isContentTypeSupported(mimeType.string());
-        }
-        return OK;
-    } else if (mimeType == "") {
-        return BAD_VALUE;
-    }
-
-    sp<drm::V1_2::IDrmFactory> factoryV1_2 = drm::V1_2::IDrmFactory::castFrom(factory);
-    if (factoryV1_2 == NULL) {
-        return ERROR_UNSUPPORTED;
-    } else {
-        *isSupported = factoryV1_2->isCryptoSchemeSupported_1_2(uuid,
-                mimeType.string(), toHidlSecurityLevel(level));
-        return OK;
-    }
-}
-
-status_t DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16],
-                                         const String8 &mimeType,
-                                         DrmPlugin::SecurityLevel level,
-                                         bool *isSupported) {
-    Mutex::Autolock autoLock(mLock);
-    *isSupported = false;
-    for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
-        if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
-            return matchMimeTypeAndSecurityLevel(mFactories[i],
-                    uuid, mimeType, level, isSupported);
-        }
-    }
-    return OK;
-}
-
-status_t DrmHal::createPlugin(const uint8_t uuid[16],
-        const String8& appPackageName) {
-    Mutex::Autolock autoLock(mLock);
-
-    for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
-        auto hResult = mFactories[i]->isCryptoSchemeSupported(uuid);
-        if (hResult.isOk() && hResult) {
-            auto plugin = makeDrmPlugin(mFactories[i], uuid, appPackageName);
-            if (plugin != NULL) {
-                mPlugin = plugin;
-                mPluginV1_1 = drm::V1_1::IDrmPlugin::castFrom(mPlugin);
-                mPluginV1_2 = drm::V1_2::IDrmPlugin::castFrom(mPlugin);
-                mPluginV1_4 = drm::V1_4::IDrmPlugin::castFrom(mPlugin);
-                break;
-            }
-        }
-    }
-
-    if (mPlugin == NULL) {
-        DrmUtils::LOG2BE(uuid, "No supported hal instance found");
-        mInitCheck = ERROR_UNSUPPORTED;
-    } else {
-        mInitCheck = OK;
-        if (mPluginV1_2 != NULL) {
-            if (!mPluginV1_2->setListener(this).isOk()) {
-                mInitCheck = DEAD_OBJECT;
-            }
-        } else if (!mPlugin->setListener(this).isOk()) {
-            mInitCheck = DEAD_OBJECT;
-        }
-        if (mInitCheck != OK) {
-            mPlugin.clear();
-            mPluginV1_1.clear();
-            mPluginV1_2.clear();
-            mPluginV1_4.clear();
-        }
-    }
-
-
-    return mInitCheck;
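+// DrmHal is now a thin shim over two backends: createPlugin() tries the AIDL
+// HAL (mDrmHalAidl) first and falls back to the legacy HIDL HAL (mDrmHalHidl)
+// on failure; the remaining methods are routed to whichever backend
+// initialized successfully, as reported by mDrmHalAidl->initCheck().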
+status_t DrmHal::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
+    status_t statusResult = mDrmHalAidl->createPlugin(uuid, appPackageName);
+    if (statusResult != OK) return mDrmHalHidl->createPlugin(uuid, appPackageName);
+    return statusResult;
 }
 
 status_t DrmHal::destroyPlugin() {
-    cleanup();
-    return OK;
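+    // Tear down both backends; report the AIDL failure first if one occurred.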
+    status_t statusResult = mDrmHalAidl->destroyPlugin();
+    status_t statusResultHidl = mDrmHalHidl->destroyPlugin();
+    if (statusResult != OK) return statusResult;
+    return statusResultHidl;
 }
 
-status_t DrmHal::openSession(DrmPlugin::SecurityLevel level,
-        Vector<uint8_t> &sessionId) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    SecurityLevel hSecurityLevel = toHidlSecurityLevel(level);
-    bool setSecurityLevel = true;
-
-    if (level == DrmPlugin::kSecurityLevelMax) {
-        setSecurityLevel = false;
-    } else {
-        if (hSecurityLevel == SecurityLevel::UNKNOWN) {
-            return ERROR_DRM_CANNOT_HANDLE;
-        }
-    }
-
-    status_t  err = UNKNOWN_ERROR;
-    bool retry = true;
-    do {
-        hidl_vec<uint8_t> hSessionId;
-
-        Return<void> hResult;
-        if (mPluginV1_1 == NULL || !setSecurityLevel) {
-            hResult = mPlugin->openSession(
-                    [&](Status status,const hidl_vec<uint8_t>& id) {
-                        if (status == Status::OK) {
-                            sessionId = toVector(id);
-                        }
-                        err = toStatusT(status);
-                    }
-                );
-        } else {
-            hResult = mPluginV1_1->openSession_1_1(hSecurityLevel,
-                    [&](Status status, const hidl_vec<uint8_t>& id) {
-                        if (status == Status::OK) {
-                            sessionId = toVector(id);
-                        }
-                        err = toStatusT(status);
-                    }
-                );
-        }
-
-        if (!hResult.isOk()) {
-            err = DEAD_OBJECT;
-        }
-
-        if (err == ERROR_DRM_RESOURCE_BUSY && retry) {
-            mLock.unlock();
-            // reclaimSession may call back to closeSession, since mLock is
-            // shared between Drm instances, we should unlock here to avoid
-            // deadlock.
-            retry = DrmSessionManager::Instance()->reclaimSession(AIBinder_getCallingPid());
-            mLock.lock();
-        } else {
-            retry = false;
-        }
-    } while (retry);
-
-    if (err == OK) {
-        std::shared_ptr<DrmSessionClient> client =
-                ndk::SharedRefBase::make<DrmSessionClient>(this, sessionId);
-        DrmSessionManager::Instance()->addSession(AIBinder_getCallingPid(),
-                std::static_pointer_cast<IResourceManagerClient>(client), sessionId);
-        mOpenSessions.push_back(client);
-        mMetrics.SetSessionStart(sessionId);
-    }
-
-    mMetrics.mOpenSessionCounter.Increment(err);
-    return err;
+status_t DrmHal::openSession(DrmPlugin::SecurityLevel securityLevel, Vector<uint8_t>& sessionId) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->openSession(securityLevel, sessionId);
+    return mDrmHalHidl->openSession(securityLevel, sessionId);
 }
 
-status_t DrmHal::closeSession(Vector<uint8_t> const &sessionId) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    Return<Status> status = mPlugin->closeSession(toHidlVec(sessionId));
-    if (status.isOk()) {
-        if (status == Status::OK) {
-            DrmSessionManager::Instance()->removeSession(sessionId);
-            for (auto i = mOpenSessions.begin(); i != mOpenSessions.end(); i++) {
-                if (isEqualSessionId((*i)->mSessionId, sessionId)) {
-                    mOpenSessions.erase(i);
-                    break;
-                }
-            }
-        }
-        status_t response = toStatusT(status);
-        mMetrics.SetSessionEnd(sessionId);
-        mMetrics.mCloseSessionCounter.Increment(response);
-        return response;
-    }
-    mMetrics.mCloseSessionCounter.Increment(DEAD_OBJECT);
-    return DEAD_OBJECT;
+status_t DrmHal::closeSession(Vector<uint8_t> const& sessionId) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->closeSession(sessionId);
+    return mDrmHalHidl->closeSession(sessionId);
 }
 
-static DrmPlugin::KeyRequestType toKeyRequestType(
-        KeyRequestType keyRequestType) {
-    switch (keyRequestType) {
-        case KeyRequestType::INITIAL:
-            return DrmPlugin::kKeyRequestType_Initial;
-            break;
-        case KeyRequestType::RENEWAL:
-            return DrmPlugin::kKeyRequestType_Renewal;
-            break;
-        case KeyRequestType::RELEASE:
-            return DrmPlugin::kKeyRequestType_Release;
-            break;
-        default:
-            return DrmPlugin::kKeyRequestType_Unknown;
-            break;
-    }
+status_t DrmHal::getKeyRequest(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& initData,
+                               String8 const& mimeType, DrmPlugin::KeyType keyType,
+                               KeyedVector<String8, String8> const& optionalParameters,
+                               Vector<uint8_t>& request, String8& defaultUrl,
+                               DrmPlugin::KeyRequestType* keyRequestType) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->getKeyRequest(sessionId, initData, mimeType, keyType,
+                                          optionalParameters, request, defaultUrl, keyRequestType);
+    return mDrmHalHidl->getKeyRequest(sessionId, initData, mimeType, keyType, optionalParameters,
+                                      request, defaultUrl, keyRequestType);
 }
 
-static DrmPlugin::KeyRequestType toKeyRequestType_1_1(
-        KeyRequestType_V1_1 keyRequestType) {
-    switch (keyRequestType) {
-        case KeyRequestType_V1_1::NONE:
-            return DrmPlugin::kKeyRequestType_None;
-            break;
-        case KeyRequestType_V1_1::UPDATE:
-            return DrmPlugin::kKeyRequestType_Update;
-            break;
-        default:
-            return toKeyRequestType(static_cast<KeyRequestType>(keyRequestType));
-            break;
-    }
+status_t DrmHal::provideKeyResponse(Vector<uint8_t> const& sessionId,
+                                    Vector<uint8_t> const& response, Vector<uint8_t>& keySetId) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->provideKeyResponse(sessionId, response, keySetId);
+    return mDrmHalHidl->provideKeyResponse(sessionId, response, keySetId);
 }
 
-status_t DrmHal::getKeyRequest(Vector<uint8_t> const &sessionId,
-        Vector<uint8_t> const &initData, String8 const &mimeType,
-        DrmPlugin::KeyType keyType, KeyedVector<String8,
-        String8> const &optionalParameters, Vector<uint8_t> &request,
-        String8 &defaultUrl, DrmPlugin::KeyRequestType *keyRequestType) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-    EventTimer<status_t> keyRequestTimer(&mMetrics.mGetKeyRequestTimeUs);
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    KeyType hKeyType;
-    if (keyType == DrmPlugin::kKeyType_Streaming) {
-        hKeyType = KeyType::STREAMING;
-    } else if (keyType == DrmPlugin::kKeyType_Offline) {
-        hKeyType = KeyType::OFFLINE;
-    } else if (keyType == DrmPlugin::kKeyType_Release) {
-        hKeyType = KeyType::RELEASE;
-    } else {
-        keyRequestTimer.SetAttribute(BAD_VALUE);
-        return BAD_VALUE;
-    }
-
-    ::KeyedVector hOptionalParameters = toHidlKeyedVector(optionalParameters);
-
-    status_t err = UNKNOWN_ERROR;
-    Return<void> hResult;
-
-    if (mPluginV1_2 != NULL) {
-        hResult = mPluginV1_2->getKeyRequest_1_2(
-                toHidlVec(sessionId), toHidlVec(initData),
-                toHidlString(mimeType), hKeyType, hOptionalParameters,
-                [&](Status_V1_2 status, const hidl_vec<uint8_t>& hRequest,
-                        KeyRequestType_V1_1 hKeyRequestType,
-                        const hidl_string& hDefaultUrl) {
-                    if (status == Status_V1_2::OK) {
-                        request = toVector(hRequest);
-                        defaultUrl = toString8(hDefaultUrl);
-                        *keyRequestType = toKeyRequestType_1_1(hKeyRequestType);
-                    }
-                    err = toStatusT(status);
-                });
-    } else if (mPluginV1_1 != NULL) {
-        hResult = mPluginV1_1->getKeyRequest_1_1(
-                toHidlVec(sessionId), toHidlVec(initData),
-                toHidlString(mimeType), hKeyType, hOptionalParameters,
-                [&](Status status, const hidl_vec<uint8_t>& hRequest,
-                        KeyRequestType_V1_1 hKeyRequestType,
-                        const hidl_string& hDefaultUrl) {
-                    if (status == Status::OK) {
-                        request = toVector(hRequest);
-                        defaultUrl = toString8(hDefaultUrl);
-                        *keyRequestType = toKeyRequestType_1_1(hKeyRequestType);
-                    }
-                    err = toStatusT(status);
-                });
-    } else {
-        hResult = mPlugin->getKeyRequest(
-                toHidlVec(sessionId), toHidlVec(initData),
-                toHidlString(mimeType), hKeyType, hOptionalParameters,
-                [&](Status status, const hidl_vec<uint8_t>& hRequest,
-                        KeyRequestType hKeyRequestType,
-                        const hidl_string& hDefaultUrl) {
-                    if (status == Status::OK) {
-                        request = toVector(hRequest);
-                        defaultUrl = toString8(hDefaultUrl);
-                        *keyRequestType = toKeyRequestType(hKeyRequestType);
-                    }
-                    err = toStatusT(status);
-                });
-    }
-
-    err = hResult.isOk() ? err : DEAD_OBJECT;
-    keyRequestTimer.SetAttribute(err);
-    return err;
+status_t DrmHal::removeKeys(Vector<uint8_t> const& keySetId) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->removeKeys(keySetId);
+    return mDrmHalHidl->removeKeys(keySetId);
 }
 
-status_t DrmHal::provideKeyResponse(Vector<uint8_t> const &sessionId,
-        Vector<uint8_t> const &response, Vector<uint8_t> &keySetId) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-    EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->provideKeyResponse(toHidlVec(sessionId),
-            toHidlVec(response),
-            [&](Status status, const hidl_vec<uint8_t>& hKeySetId) {
-                if (status == Status::OK) {
-                    keySetId = toVector(hKeySetId);
-                }
-                err = toStatusT(status);
-            }
-        );
-    err = hResult.isOk() ? err : DEAD_OBJECT;
-    keyResponseTimer.SetAttribute(err);
-    return err;
+status_t DrmHal::restoreKeys(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keySetId) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->restoreKeys(sessionId, keySetId);
+    return mDrmHalHidl->restoreKeys(sessionId, keySetId);
 }
 
-status_t DrmHal::removeKeys(Vector<uint8_t> const &keySetId) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    Return<Status> status = mPlugin->removeKeys(toHidlVec(keySetId));
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+status_t DrmHal::queryKeyStatus(Vector<uint8_t> const& sessionId,
+                                KeyedVector<String8, String8>& infoMap) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->queryKeyStatus(sessionId, infoMap);
+    return mDrmHalHidl->queryKeyStatus(sessionId, infoMap);
 }
 
-status_t DrmHal::restoreKeys(Vector<uint8_t> const &sessionId,
-        Vector<uint8_t> const &keySetId) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    Return<Status> status = mPlugin->restoreKeys(toHidlVec(sessionId),
-            toHidlVec(keySetId));
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+status_t DrmHal::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+                                     Vector<uint8_t>& request, String8& defaultUrl) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->getProvisionRequest(certType, certAuthority, request, defaultUrl);
+    return mDrmHalHidl->getProvisionRequest(certType, certAuthority, request, defaultUrl);
 }
 
-status_t DrmHal::queryKeyStatus(Vector<uint8_t> const &sessionId,
-        KeyedVector<String8, String8> &infoMap) const {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    ::KeyedVector hInfoMap;
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->queryKeyStatus(toHidlVec(sessionId),
-            [&](Status status, const hidl_vec<KeyValue>& map) {
-                if (status == Status::OK) {
-                    infoMap = toKeyedVector(map);
-                }
-                err = toStatusT(status);
-            }
-        );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::provideProvisionResponse(Vector<uint8_t> const& response,
+                                          Vector<uint8_t>& certificate,
+                                          Vector<uint8_t>& wrappedKey) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->provideProvisionResponse(response, certificate, wrappedKey);
+    return mDrmHalHidl->provideProvisionResponse(response, certificate, wrappedKey);
 }
 
-status_t DrmHal::getProvisionRequest(String8 const &certType,
-        String8 const &certAuthority, Vector<uint8_t> &request,
-        String8 &defaultUrl) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    status_t err = UNKNOWN_ERROR;
-    Return<void> hResult;
-
-    if (mPluginV1_2 != NULL) {
-        hResult = mPluginV1_2->getProvisionRequest_1_2(
-                toHidlString(certType), toHidlString(certAuthority),
-                [&](Status_V1_2 status, const hidl_vec<uint8_t>& hRequest,
-                        const hidl_string& hDefaultUrl) {
-                    if (status == Status_V1_2::OK) {
-                        request = toVector(hRequest);
-                        defaultUrl = toString8(hDefaultUrl);
-                    }
-                    err = toStatusT(status);
-                }
-            );
-    } else {
-        hResult = mPlugin->getProvisionRequest(
-                toHidlString(certType), toHidlString(certAuthority),
-                [&](Status status, const hidl_vec<uint8_t>& hRequest,
-                        const hidl_string& hDefaultUrl) {
-                    if (status == Status::OK) {
-                        request = toVector(hRequest);
-                        defaultUrl = toString8(hDefaultUrl);
-                    }
-                    err = toStatusT(status);
-                }
-            );
-    }
-
-    err = hResult.isOk() ? err : DEAD_OBJECT;
-    mMetrics.mGetProvisionRequestCounter.Increment(err);
-    return err;
+status_t DrmHal::getSecureStops(List<Vector<uint8_t>>& secureStops) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getSecureStops(secureStops);
+    return mDrmHalHidl->getSecureStops(secureStops);
 }
 
-status_t DrmHal::provideProvisionResponse(Vector<uint8_t> const &response,
-        Vector<uint8_t> &certificate, Vector<uint8_t> &wrappedKey) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->provideProvisionResponse(toHidlVec(response),
-            [&](Status status, const hidl_vec<uint8_t>& hCertificate,
-                    const hidl_vec<uint8_t>& hWrappedKey) {
-                if (status == Status::OK) {
-                    certificate = toVector(hCertificate);
-                    wrappedKey = toVector(hWrappedKey);
-                }
-                err = toStatusT(status);
-            }
-        );
-
-    err = hResult.isOk() ? err : DEAD_OBJECT;
-    mMetrics.mProvideProvisionResponseCounter.Increment(err);
-    return err;
+status_t DrmHal::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getSecureStopIds(secureStopIds);
+    return mDrmHalHidl->getSecureStopIds(secureStopIds);
 }
 
-status_t DrmHal::getSecureStops(List<Vector<uint8_t>> &secureStops) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->getSecureStops(
-            [&](Status status, const hidl_vec<SecureStop>& hSecureStops) {
-                if (status == Status::OK) {
-                    secureStops = toSecureStops(hSecureStops);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getSecureStop(ssid, secureStop);
+    return mDrmHalHidl->getSecureStop(ssid, secureStop);
 }
 
-
-status_t DrmHal::getSecureStopIds(List<Vector<uint8_t>> &secureStopIds) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    if (mPluginV1_1 == NULL) {
-        return ERROR_DRM_CANNOT_HANDLE;
-    }
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPluginV1_1->getSecureStopIds(
-            [&](Status status, const hidl_vec<SecureStopId>& hSecureStopIds) {
-                if (status == Status::OK) {
-                    secureStopIds = toSecureStopIds(hSecureStopIds);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->releaseSecureStops(ssRelease);
+    return mDrmHalHidl->releaseSecureStops(ssRelease);
 }
 
-
-status_t DrmHal::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->getSecureStop(toHidlVec(ssid),
-            [&](Status status, const SecureStop& hSecureStop) {
-                if (status == Status::OK) {
-                    secureStop = toVector(hSecureStop.opaqueData);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
-}
-
-status_t DrmHal::releaseSecureStops(Vector<uint8_t> const &ssRelease) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    Return<Status> status(Status::ERROR_DRM_UNKNOWN);
-    if (mPluginV1_1 != NULL) {
-        SecureStopRelease secureStopRelease;
-        secureStopRelease.opaqueData = toHidlVec(ssRelease);
-        status = mPluginV1_1->releaseSecureStops(secureStopRelease);
-    } else {
-        status = mPlugin->releaseSecureStop(toHidlVec(ssRelease));
-    }
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
-}
-
-status_t DrmHal::removeSecureStop(Vector<uint8_t> const &ssid) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    if (mPluginV1_1 == NULL) {
-        return ERROR_DRM_CANNOT_HANDLE;
-    }
-
-    Return<Status> status = mPluginV1_1->removeSecureStop(toHidlVec(ssid));
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+status_t DrmHal::removeSecureStop(Vector<uint8_t> const& ssid) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->removeSecureStop(ssid);
+    return mDrmHalHidl->removeSecureStop(ssid);
 }
 
 status_t DrmHal::removeAllSecureStops() {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    Return<Status> status(Status::ERROR_DRM_UNKNOWN);
-    if (mPluginV1_1 != NULL) {
-        status = mPluginV1_1->removeAllSecureStops();
-    } else {
-        status = mPlugin->releaseAllSecureStops();
-    }
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->removeAllSecureStops();
+    return mDrmHalHidl->removeAllSecureStops();
 }
 
-status_t DrmHal::getHdcpLevels(DrmPlugin::HdcpLevel *connected,
-            DrmPlugin::HdcpLevel *max) const {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    if (connected == NULL || max == NULL) {
-        return BAD_VALUE;
-    }
-    status_t err = UNKNOWN_ERROR;
-
-    *connected = DrmPlugin::kHdcpLevelUnknown;
-    *max = DrmPlugin::kHdcpLevelUnknown;
-
-    Return<void> hResult;
-    if (mPluginV1_2 != NULL) {
-        hResult = mPluginV1_2->getHdcpLevels_1_2(
-                [&](Status_V1_2 status, const HdcpLevel_V1_2& hConnected, const HdcpLevel_V1_2& hMax) {
-                    if (status == Status_V1_2::OK) {
-                        *connected = toHdcpLevel(hConnected);
-                        *max = toHdcpLevel(hMax);
-                    }
-                    err = toStatusT(status);
-                });
-    } else if (mPluginV1_1 != NULL) {
-        hResult = mPluginV1_1->getHdcpLevels(
-                [&](Status status, const HdcpLevel& hConnected, const HdcpLevel& hMax) {
-                    if (status == Status::OK) {
-                        *connected = toHdcpLevel(static_cast<HdcpLevel_V1_2>(hConnected));
-                        *max = toHdcpLevel(static_cast<HdcpLevel_V1_2>(hMax));
-                    }
-                    err = toStatusT(status);
-                });
-    } else {
-        return ERROR_DRM_CANNOT_HANDLE;
-    }
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::getHdcpLevels(DrmPlugin::HdcpLevel* connectedLevel,
+                               DrmPlugin::HdcpLevel* maxLevel) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getHdcpLevels(connectedLevel, maxLevel);
+    return mDrmHalHidl->getHdcpLevels(connectedLevel, maxLevel);
 }
 
-status_t DrmHal::getNumberOfSessions(uint32_t *open, uint32_t *max) const {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    if (open == NULL || max == NULL) {
-        return BAD_VALUE;
-    }
-    status_t err = UNKNOWN_ERROR;
-
-    *open = 0;
-    *max = 0;
-
-    if (mPluginV1_1 == NULL) {
-        return ERROR_DRM_CANNOT_HANDLE;
-    }
-
-    Return<void> hResult = mPluginV1_1->getNumberOfSessions(
-            [&](Status status, uint32_t hOpen, uint32_t hMax) {
-                if (status == Status::OK) {
-                    *open = hOpen;
-                    *max = hMax;
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::getNumberOfSessions(uint32_t* currentSessions, uint32_t* maxSessions) const {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->getNumberOfSessions(currentSessions, maxSessions);
+    return mDrmHalHidl->getNumberOfSessions(currentSessions, maxSessions);
 }
 
-status_t DrmHal::getSecurityLevel(Vector<uint8_t> const &sessionId,
-        DrmPlugin::SecurityLevel *level) const {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    if (level == NULL) {
-        return BAD_VALUE;
-    }
-    status_t err = UNKNOWN_ERROR;
-
-    if (mPluginV1_1 == NULL) {
-        return ERROR_DRM_CANNOT_HANDLE;
-    }
-
-    *level = DrmPlugin::kSecurityLevelUnknown;
-
-    Return<void> hResult = mPluginV1_1->getSecurityLevel(toHidlVec(sessionId),
-            [&](Status status, SecurityLevel hLevel) {
-                if (status == Status::OK) {
-                    *level = toSecurityLevel(hLevel);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::getSecurityLevel(Vector<uint8_t> const& sessionId,
+                                  DrmPlugin::SecurityLevel* level) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getSecurityLevel(sessionId, level);
+    return mDrmHalHidl->getSecurityLevel(sessionId, level);
 }
 
-status_t DrmHal::getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    if (mPluginV1_2 == NULL) {
-        return ERROR_UNSUPPORTED;
-    }
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPluginV1_2->getOfflineLicenseKeySetIds(
-            [&](Status status, const hidl_vec<KeySetId>& hKeySetIds) {
-                if (status == Status::OK) {
-                    keySetIds = toKeySetIds(hKeySetIds);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getOfflineLicenseKeySetIds(keySetIds);
+    return mDrmHalHidl->getOfflineLicenseKeySetIds(keySetIds);
 }
 
-status_t DrmHal::removeOfflineLicense(Vector<uint8_t> const &keySetId) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    if (mPluginV1_2 == NULL) {
-        return ERROR_UNSUPPORTED;
-    }
-
-    Return<Status> status = mPluginV1_2->removeOfflineLicense(toHidlVec(keySetId));
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+status_t DrmHal::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->removeOfflineLicense(keySetId);
+    return mDrmHalHidl->removeOfflineLicense(keySetId);
 }
 
-status_t DrmHal::getOfflineLicenseState(Vector<uint8_t> const &keySetId,
-        DrmPlugin::OfflineLicenseState *licenseState) const {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    if (mPluginV1_2 == NULL) {
-        return ERROR_UNSUPPORTED;
-    }
-    *licenseState = DrmPlugin::kOfflineLicenseStateUnknown;
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPluginV1_2->getOfflineLicenseState(toHidlVec(keySetId),
-            [&](Status status, OfflineLicenseState hLicenseState) {
-                if (status == Status::OK) {
-                    *licenseState = toOfflineLicenseState(hLicenseState);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+                                        DrmPlugin::OfflineLicenseState* licenseState) const {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->getOfflineLicenseState(keySetId, licenseState);
+    return mDrmHalHidl->getOfflineLicenseState(keySetId, licenseState);
 }
 
-status_t DrmHal::getPropertyString(String8 const &name, String8 &value ) const {
-    Mutex::Autolock autoLock(mLock);
-    return getPropertyStringInternal(name, value);
+status_t DrmHal::getPropertyString(String8 const& name, String8& value) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getPropertyString(name, value);
+    return mDrmHalHidl->getPropertyString(name, value);
 }
 
-status_t DrmHal::getPropertyStringInternal(String8 const &name, String8 &value) const {
-    // This function is internal to the class and should only be called while
-    // mLock is already held.
-    INIT_CHECK();
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->getPropertyString(toHidlString(name),
-            [&](Status status, const hidl_string& hValue) {
-                if (status == Status::OK) {
-                    value = toString8(hValue);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getPropertyByteArray(name, value);
+    return mDrmHalHidl->getPropertyByteArray(name, value);
 }
 
-status_t DrmHal::getPropertyByteArray(String8 const &name, Vector<uint8_t> &value ) const {
-    Mutex::Autolock autoLock(mLock);
-    return getPropertyByteArrayInternal(name, value);
+status_t DrmHal::setPropertyString(String8 const& name, String8 const& value) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setPropertyString(name, value);
+    return mDrmHalHidl->setPropertyString(name, value);
 }
 
-status_t DrmHal::getPropertyByteArrayInternal(String8 const &name, Vector<uint8_t> &value ) const {
-    // This function is internal to the class and should only be called while
-    // mLock is already held.
-    INIT_CHECK();
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->getPropertyByteArray(toHidlString(name),
-            [&](Status status, const hidl_vec<uint8_t>& hValue) {
-                if (status == Status::OK) {
-                    value = toVector(hValue);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    err = hResult.isOk() ? err : DEAD_OBJECT;
-    if (name == kPropertyDeviceUniqueId) {
-        mMetrics.mGetDeviceUniqueIdCounter.Increment(err);
-    }
-    return err;
+status_t DrmHal::setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setPropertyByteArray(name, value);
+    return mDrmHalHidl->setPropertyByteArray(name, value);
 }
 
-status_t DrmHal::setPropertyString(String8 const &name, String8 const &value ) const {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    Return<Status> status = mPlugin->setPropertyString(toHidlString(name),
-            toHidlString(value));
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+status_t DrmHal::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getMetrics(consumer);
+    return mDrmHalHidl->getMetrics(consumer);
 }
 
-status_t DrmHal::setPropertyByteArray(String8 const &name,
-                                   Vector<uint8_t> const &value ) const {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    Return<Status> status = mPlugin->setPropertyByteArray(toHidlString(name),
-            toHidlVec(value));
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+status_t DrmHal::setCipherAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->setCipherAlgorithm(sessionId, algorithm);
+    return mDrmHalHidl->setCipherAlgorithm(sessionId, algorithm);
 }
 
-status_t DrmHal::getMetrics(const sp<IDrmMetricsConsumer> &consumer) {
-    if (consumer == nullptr) {
-        return UNEXPECTED_NULL;
-    }
-    consumer->consumeFrameworkMetrics(mMetrics);
-
-    // Append vendor metrics if they are supported.
-    if (mPluginV1_1 != NULL) {
-        String8 vendor;
-        String8 description;
-        if (getPropertyStringInternal(String8("vendor"), vendor) != OK
-            || vendor.isEmpty()) {
-          ALOGE("Get vendor failed or is empty");
-          vendor = "NONE";
-        }
-        if (getPropertyStringInternal(String8("description"), description) != OK
-            || description.isEmpty()) {
-          ALOGE("Get description failed or is empty.");
-          description = "NONE";
-        }
-        vendor += ".";
-        vendor += description;
-
-        hidl_vec<DrmMetricGroup> pluginMetrics;
-        status_t err = UNKNOWN_ERROR;
-
-        Return<void> status = mPluginV1_1->getMetrics(
-                [&](Status status, hidl_vec<DrmMetricGroup> pluginMetrics) {
-                    if (status != Status::OK) {
-                      ALOGV("Error getting plugin metrics: %d", status);
-                    } else {
-                      consumer->consumeHidlMetrics(vendor, pluginMetrics);
-                    }
-                    err = toStatusT(status);
-                });
-        return status.isOk() ? err : DEAD_OBJECT;
-    }
-
-    return OK;
+status_t DrmHal::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setMacAlgorithm(sessionId, algorithm);
+    return mDrmHalHidl->setMacAlgorithm(sessionId, algorithm);
 }
 
-status_t DrmHal::setCipherAlgorithm(Vector<uint8_t> const &sessionId,
-                                 String8 const &algorithm) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    Return<Status> status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
-            toHidlString(algorithm));
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+status_t DrmHal::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                         Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+                         Vector<uint8_t>& output) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->encrypt(sessionId, keyId, input, iv, output);
+    return mDrmHalHidl->encrypt(sessionId, keyId, input, iv, output);
 }
 
-status_t DrmHal::setMacAlgorithm(Vector<uint8_t> const &sessionId,
-                              String8 const &algorithm) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    Return<Status> status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
-            toHidlString(algorithm));
-    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+status_t DrmHal::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                         Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+                         Vector<uint8_t>& output) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->decrypt(sessionId, keyId, input, iv, output);
+    return mDrmHalHidl->decrypt(sessionId, keyId, input, iv, output);
 }
 
-status_t DrmHal::encrypt(Vector<uint8_t> const &sessionId,
-        Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
-        Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->encrypt(toHidlVec(sessionId),
-            toHidlVec(keyId), toHidlVec(input), toHidlVec(iv),
-            [&](Status status, const hidl_vec<uint8_t>& hOutput) {
-                if (status == Status::OK) {
-                    output = toVector(hOutput);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                      Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->sign(sessionId, keyId, message, signature);
+    return mDrmHalHidl->sign(sessionId, keyId, message, signature);
 }
 
-status_t DrmHal::decrypt(Vector<uint8_t> const &sessionId,
-        Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
-        Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    status_t  err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->decrypt(toHidlVec(sessionId),
-            toHidlVec(keyId), toHidlVec(input), toHidlVec(iv),
-            [&](Status status, const hidl_vec<uint8_t>& hOutput) {
-                if (status == Status::OK) {
-                    output = toVector(hOutput);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                        Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+                        bool& match) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->verify(sessionId, keyId, message, signature, match);
+    return mDrmHalHidl->verify(sessionId, keyId, message, signature, match);
 }
 
-status_t DrmHal::sign(Vector<uint8_t> const &sessionId,
-        Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
-        Vector<uint8_t> &signature) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->sign(toHidlVec(sessionId),
-            toHidlVec(keyId), toHidlVec(message),
-            [&](Status status, const hidl_vec<uint8_t>& hSignature)  {
-                if (status == Status::OK) {
-                    signature = toVector(hSignature);
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+                         Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+                         Vector<uint8_t>& signature) {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->signRSA(sessionId, algorithm, message, wrappedKey, signature);
+    return mDrmHalHidl->signRSA(sessionId, algorithm, message, wrappedKey, signature);
 }
 
-status_t DrmHal::verify(Vector<uint8_t> const &sessionId,
-        Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
-        Vector<uint8_t> const &signature, bool &match) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->verify(toHidlVec(sessionId),toHidlVec(keyId),
-            toHidlVec(message), toHidlVec(signature),
-            [&](Status status, bool hMatch) {
-                if (status == Status::OK) {
-                    match = hMatch;
-                } else {
-                    match = false;
-                }
-                err = toStatusT(status);
-            }
-    );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::setListener(const sp<IDrmClient>& listener) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setListener(listener);
+    return mDrmHalHidl->setListener(listener);
 }
 
-status_t DrmHal::signRSA(Vector<uint8_t> const &sessionId,
-        String8 const &algorithm, Vector<uint8_t> const &message,
-        Vector<uint8_t> const &wrappedKey, Vector<uint8_t> &signature) {
-    Mutex::Autolock autoLock(mLock);
-    INIT_CHECK();
-
-    DrmSessionManager::Instance()->useSession(sessionId);
-
-    status_t err = UNKNOWN_ERROR;
-
-    Return<void> hResult = mPlugin->signRSA(toHidlVec(sessionId),
-            toHidlString(algorithm), toHidlVec(message), toHidlVec(wrappedKey),
-            [&](Status status, const hidl_vec<uint8_t>& hSignature) {
-                if (status == Status::OK) {
-                    signature = toVector(hSignature);
-                }
-                err = toStatusT(status);
-            }
-        );
-
-    return hResult.isOk() ? err : DEAD_OBJECT;
+status_t DrmHal::requiresSecureDecoder(const char* mime, bool* required) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->requiresSecureDecoder(mime, required);
+    return mDrmHalHidl->requiresSecureDecoder(mime, required);
 }
 
-std::string DrmHal::reportFrameworkMetrics(const std::string& pluginMetrics) const
-{
-    mediametrics_handle_t item(mediametrics_create("mediadrm"));
-    mediametrics_setUid(item, mMetrics.GetAppUid());
-    String8 vendor;
-    String8 description;
-    status_t result = getPropertyStringInternal(String8("vendor"), vendor);
-    if (result != OK) {
-        ALOGE("Failed to get vendor from drm plugin: %d", result);
-    } else {
-        mediametrics_setCString(item, "vendor", vendor.c_str());
-    }
-    result = getPropertyStringInternal(String8("description"), description);
-    if (result != OK) {
-        ALOGE("Failed to get description from drm plugin: %d", result);
-    } else {
-        mediametrics_setCString(item, "description", description.c_str());
-    }
-
-    std::string serializedMetrics;
-    result = mMetrics.GetSerializedMetrics(&serializedMetrics);
-    if (result != OK) {
-        ALOGE("Failed to serialize framework metrics: %d", result);
-    }
-    std::string b64EncodedMetrics = toBase64StringNoPad(serializedMetrics.data(),
-                                                        serializedMetrics.size());
-    if (!b64EncodedMetrics.empty()) {
-        mediametrics_setCString(item, "serialized_metrics", b64EncodedMetrics.c_str());
-    }
-    if (!pluginMetrics.empty()) {
-        mediametrics_setCString(item, "plugin_metrics", pluginMetrics.c_str());
-    }
-    if (!mediametrics_selfRecord(item)) {
-        ALOGE("Failed to self record framework metrics");
-    }
-    mediametrics_delete(item);
-    return serializedMetrics;
+status_t DrmHal::requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
+                                       bool* required) const {
+    if (mDrmHalAidl->initCheck() == OK)
+        return mDrmHalAidl->requiresSecureDecoder(mime, securityLevel, required);
+    return mDrmHalHidl->requiresSecureDecoder(mime, securityLevel, required);
 }
 
-std::string DrmHal::reportPluginMetrics() const
-{
-    Vector<uint8_t> metricsVector;
-    String8 vendor;
-    String8 description;
-    std::string metricsString;
-    if (getPropertyStringInternal(String8("vendor"), vendor) == OK &&
-            getPropertyStringInternal(String8("description"), description) == OK &&
-            getPropertyByteArrayInternal(String8("metrics"), metricsVector) == OK) {
-        metricsString = toBase64StringNoPad(metricsVector.array(),
-                                                        metricsVector.size());
-        status_t res = android::reportDrmPluginMetrics(metricsString, vendor,
-                                                       description, mMetrics.GetAppUid());
-        if (res != OK) {
-            ALOGE("Metrics were retrieved but could not be reported: %d", res);
-        }
-    }
-    return metricsString;
+status_t DrmHal::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setPlaybackId(sessionId, playbackId);
+    return mDrmHalHidl->setPlaybackId(sessionId, playbackId);
 }
 
-status_t DrmHal::requiresSecureDecoder(const char *mime, bool *required) const {
-    Mutex::Autolock autoLock(mLock);
-    if (mPluginV1_4 == NULL) {
-        return false;
-    }
-    auto hResult = mPluginV1_4->requiresSecureDecoderDefault(hidl_string(mime));
-    if (!hResult.isOk()) {
-        DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %s", hResult.description().c_str());
-        return DEAD_OBJECT;
-    }
-    if (required) {
-        *required = hResult;
-    }
-    return OK;
-}
-
-status_t DrmHal::requiresSecureDecoder(const char *mime, DrmPlugin::SecurityLevel securityLevel,
-                                       bool *required) const {
-    Mutex::Autolock autoLock(mLock);
-    if (mPluginV1_4 == NULL) {
-        return false;
-    }
-    auto hLevel = toHidlSecurityLevel(securityLevel);
-    auto hResult = mPluginV1_4->requiresSecureDecoder(hidl_string(mime), hLevel);
-    if (!hResult.isOk()) {
-        DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %s", hResult.description().c_str());
-        return DEAD_OBJECT;
-    }
-    if (required) {
-        *required = hResult;
-    }
-    return OK;
-}
-
-status_t DrmHal::setPlaybackId(Vector<uint8_t> const &sessionId, const char *playbackId) {
-    Mutex::Autolock autoLock(mLock);
-    if (mPluginV1_4 == NULL) {
-        return ERROR_UNSUPPORTED;
-    }
-    auto err = mPluginV1_4->setPlaybackId(toHidlVec(sessionId), hidl_string(playbackId));
-    return err.isOk() ? toStatusT(err) : DEAD_OBJECT;
-}
-
-status_t DrmHal::getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const {
-    Mutex::Autolock autoLock(mLock);
-    return DrmUtils::GetLogMessages<drm::V1_4::IDrmPlugin>(mPlugin, logs);
+status_t DrmHal::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+    if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getLogMessages(logs);
+    return mDrmHalHidl->getLogMessages(logs);
 }
 
 }  // namespace android
diff --git a/drm/libmediadrm/DrmHalAidl.cpp b/drm/libmediadrm/DrmHalAidl.cpp
new file mode 100644
index 0000000..7df57a3
--- /dev/null
+++ b/drm/libmediadrm/DrmHalAidl.cpp
@@ -0,0 +1,1326 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
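+// DrmHalAidl backs the DrmHal shim when the AIDL drm HAL is available: it
+// exposes the same framework-facing methods and implements them on top of
+// aidl::android::hardware::drm::IDrmPlugin.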
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmHalAidl"
+
+#include <android/binder_auto_utils.h>
+#include <android/binder_manager.h>
+#include <media/PluginMetricsReporting.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <mediadrm/DrmHalAidl.h>
+#include <mediadrm/DrmSessionManager.h>
+#include <mediadrm/DrmUtils.h>
+
+using ::android::DrmUtils::toStatusTAidl;
+
+using ::aidl::android::hardware::drm::DrmMetricNamedValue;
+using ::aidl::android::hardware::drm::DrmMetricValue;
+using ::aidl::android::hardware::drm::HdcpLevel;
+using ::aidl::android::hardware::drm::HdcpLevels;
+using ::aidl::android::hardware::drm::KeyRequest;
+using ::aidl::android::hardware::drm::KeyRequestType;
+using ::aidl::android::hardware::drm::KeySetId;
+using ::aidl::android::hardware::drm::KeyStatus;
+using ::aidl::android::hardware::drm::KeyStatusType;
+using ::aidl::android::hardware::drm::KeyType;
+using ::aidl::android::hardware::drm::KeyValue;
+using ::aidl::android::hardware::drm::NumberOfSessions;
+using ::aidl::android::hardware::drm::OfflineLicenseState;
+using ::aidl::android::hardware::drm::OpaqueData;
+using ::aidl::android::hardware::drm::ProvideProvisionResponseResult;
+using ::aidl::android::hardware::drm::ProvisionRequest;
+using ::aidl::android::hardware::drm::SecureStop;
+using ::aidl::android::hardware::drm::SecureStopId;
+using ::aidl::android::hardware::drm::SecurityLevel;
+using ::aidl::android::hardware::drm::Status;
+using ::aidl::android::hardware::drm::Uuid;
+using DrmMetricGroupAidl = ::aidl::android::hardware::drm::DrmMetricGroup;
+using DrmMetricGroupHidl = ::android::hardware::drm::V1_1::DrmMetricGroup;
+using DrmMetricAidl = ::aidl::android::hardware::drm::DrmMetric;
+using DrmMetricHidl = ::android::hardware::drm::V1_1::DrmMetricGroup::Metric;
+using ValueHidl = ::android::hardware::drm::V1_1::DrmMetricGroup::Value;
+using AttributeHidl = ::android::hardware::drm::V1_1::DrmMetricGroup::Attribute;
+using IDrmPluginAidl = ::aidl::android::hardware::drm::IDrmPlugin;
+using EventTypeAidl = ::aidl::android::hardware::drm::EventType;
+using KeyStatusAidl = ::aidl::android::hardware::drm::KeyStatus;
+using ::android::hardware::hidl_vec;
+
+namespace {
+
+constexpr char kPropertyDeviceUniqueId[] = "deviceUniqueId";
+constexpr char kEqualsSign[] = "=";
+
+template <typename T>
+std::string toBase64StringNoPad(const T* data, size_t size) {
+    // Note that the base 64 conversion only works with arrays of single-byte
+    // values. If the source is empty or is not an array of single-byte values,
+    // return an empty string.
+    if (size == 0 || sizeof(data[0]) != 1) {
+        return "";
+    }
+
+    android::AString outputString;
+    encodeBase64(data, size, &outputString);
+    // Remove trailing equals padding if it exists.
+    while (outputString.size() > 0 && outputString.endsWith(kEqualsSign)) {
+        outputString.erase(outputString.size() - 1, 1);
+    }
+
+    return std::string(outputString.c_str(), outputString.size());
+}
+
+}  // anonymous namespace
+
+namespace android {
+
+#define INIT_CHECK()                             \
+    {                                            \
+        if (mInitCheck != OK) return mInitCheck; \
+    }
+
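+// Conversion helpers between framework types (Vector, String8, KeyedVector,
+// DrmPlugin enums) and their AIDL counterparts.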
+static Uuid toAidlUuid(const uint8_t* uuid) {
+    Uuid uuidAidl;
+    uuidAidl.uuid = std::vector<uint8_t>(uuid, uuid + 16);
+    return uuidAidl;
+}
+
+template <typename Byte = uint8_t>
+static std::vector<Byte> toStdVec(const Vector<uint8_t>& vector) {
+    auto v = reinterpret_cast<const Byte*>(vector.array());
+    std::vector<Byte> vec(v, v + vector.size());
+    return vec;
+}
+
+static const Vector<uint8_t> toVector(const std::vector<uint8_t>& vec) {
+    Vector<uint8_t> vector;
+    vector.appendArray(vec.data(), vec.size());
+    return *const_cast<const Vector<uint8_t>*>(&vector);
+}
+
+static String8 toString8(const std::string& string) {
+    return String8(string.c_str());
+}
+
+static std::string toStdString(const String8& string8) {
+    return std::string(string8.string());
+}
+
+static std::vector<KeyValue> toKeyValueVector(const KeyedVector<String8, String8>& keyedVector) {
+    std::vector<KeyValue> stdKeyedVector;
+    for (size_t i = 0; i < keyedVector.size(); i++) {
+        KeyValue keyValue;
+        keyValue.key = toStdString(keyedVector.keyAt(i));
+        keyValue.value = toStdString(keyedVector.valueAt(i));
+        stdKeyedVector.push_back(keyValue);
+    }
+    return stdKeyedVector;
+}
+
+static KeyedVector<String8, String8> toKeyedVector(const std::vector<KeyValue>& keyValueVec) {
+    KeyedVector<String8, String8> keyedVector;
+    for (size_t i = 0; i < keyValueVec.size(); i++) {
+        keyedVector.add(toString8(keyValueVec[i].key), toString8(keyValueVec[i].value));
+    }
+    return keyedVector;
+}
+
+static DrmPlugin::KeyRequestType toKeyRequestType(KeyRequestType keyRequestType) {
+    switch (keyRequestType) {
+        case KeyRequestType::INITIAL:
+            return DrmPlugin::kKeyRequestType_Initial;
+            break;
+        case KeyRequestType::RENEWAL:
+            return DrmPlugin::kKeyRequestType_Renewal;
+            break;
+        case KeyRequestType::RELEASE:
+            return DrmPlugin::kKeyRequestType_Release;
+            break;
+        case KeyRequestType::NONE:
+            return DrmPlugin::kKeyRequestType_None;
+            break;
+        case KeyRequestType::UPDATE:
+            return DrmPlugin::kKeyRequestType_Update;
+            break;
+        default:
+            return DrmPlugin::kKeyRequestType_Unknown;
+            break;
+    }
+}
+
+static List<Vector<uint8_t>> toSecureStops(const std::vector<SecureStop>& aSecureStops) {
+    List<Vector<uint8_t>> secureStops;
+    for (size_t i = 0; i < aSecureStops.size(); i++) {
+        secureStops.push_back(toVector(aSecureStops[i].opaqueData));
+    }
+    return secureStops;
+}
+
+static List<Vector<uint8_t>> toSecureStopIds(const std::vector<SecureStopId>& aSecureStopIds) {
+    List<Vector<uint8_t>> secureStopIds;
+    for (size_t i = 0; i < aSecureStopIds.size(); i++) {
+        secureStopIds.push_back(toVector(aSecureStopIds[i].secureStopId));
+    }
+    return secureStopIds;
+}
+
+static DrmPlugin::HdcpLevel toHdcpLevel(HdcpLevel level) {
+    switch (level) {
+        case HdcpLevel::HDCP_NONE:
+            return DrmPlugin::kHdcpNone;
+        case HdcpLevel::HDCP_V1:
+            return DrmPlugin::kHdcpV1;
+        case HdcpLevel::HDCP_V2:
+            return DrmPlugin::kHdcpV2;
+        case HdcpLevel::HDCP_V2_1:
+            return DrmPlugin::kHdcpV2_1;
+        case HdcpLevel::HDCP_V2_2:
+            return DrmPlugin::kHdcpV2_2;
+        case HdcpLevel::HDCP_V2_3:
+            return DrmPlugin::kHdcpV2_3;
+        case HdcpLevel::HDCP_NO_OUTPUT:
+            return DrmPlugin::kHdcpNoOutput;
+        default:
+            return DrmPlugin::kHdcpLevelUnknown;
+    }
+}
+
+static DrmPlugin::SecurityLevel toSecurityLevel(SecurityLevel level) {
+    switch (level) {
+        case SecurityLevel::SW_SECURE_CRYPTO:
+            return DrmPlugin::kSecurityLevelSwSecureCrypto;
+        case SecurityLevel::SW_SECURE_DECODE:
+            return DrmPlugin::kSecurityLevelSwSecureDecode;
+        case SecurityLevel::HW_SECURE_CRYPTO:
+            return DrmPlugin::kSecurityLevelHwSecureCrypto;
+        case SecurityLevel::HW_SECURE_DECODE:
+            return DrmPlugin::kSecurityLevelHwSecureDecode;
+        case SecurityLevel::HW_SECURE_ALL:
+            return DrmPlugin::kSecurityLevelHwSecureAll;
+        case SecurityLevel::DEFAULT:
+            return DrmPlugin::kSecurityLevelMax;
+        default:
+            return DrmPlugin::kSecurityLevelUnknown;
+    }
+}
+
+static SecurityLevel toAidlSecurityLevel(DrmPlugin::SecurityLevel level) {
+    switch (level) {
+        case DrmPlugin::kSecurityLevelSwSecureCrypto:
+            return SecurityLevel::SW_SECURE_CRYPTO;
+        case DrmPlugin::kSecurityLevelSwSecureDecode:
+            return SecurityLevel::SW_SECURE_DECODE;
+        case DrmPlugin::kSecurityLevelHwSecureCrypto:
+            return SecurityLevel::HW_SECURE_CRYPTO;
+        case DrmPlugin::kSecurityLevelHwSecureDecode:
+            return SecurityLevel::HW_SECURE_DECODE;
+        case DrmPlugin::kSecurityLevelHwSecureAll:
+            return SecurityLevel::HW_SECURE_ALL;
+        case DrmPlugin::kSecurityLevelMax:
+            return SecurityLevel::DEFAULT;
+        default:
+            return SecurityLevel::UNKNOWN;
+    }
+}
+
+static List<Vector<uint8_t>> toKeySetIds(const std::vector<KeySetId>& hKeySetIds) {
+    List<Vector<uint8_t>> keySetIds;
+    for (size_t i = 0; i < hKeySetIds.size(); i++) {
+        keySetIds.push_back(toVector(hKeySetIds[i].keySetId));
+    }
+    return keySetIds;
+}
+
+static DrmPlugin::OfflineLicenseState toOfflineLicenseState(OfflineLicenseState licenseState) {
+    switch (licenseState) {
+        case OfflineLicenseState::USABLE:
+            return DrmPlugin::kOfflineLicenseStateUsable;
+        case OfflineLicenseState::INACTIVE:
+            return DrmPlugin::kOfflineLicenseStateReleased;
+        default:
+            return DrmPlugin::kOfflineLicenseStateUnknown;
+    }
+}
+
+template <typename T = uint8_t>
+static hidl_vec<T> toHidlVec(const Vector<T>& vector) {
+    hidl_vec<T> vec;
+    vec.setToExternal(const_cast<T*>(vector.array()), vector.size());
+    return vec;
+}
+
+Mutex DrmHalAidl::mLock;
+
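+// Converts the AIDL metric groups returned by IDrmPlugin::getMetrics() into the
+// HIDL DrmMetricGroup form still consumed by IDrmMetricsConsumer::consumeHidlMetrics(),
+// copying each metric's attributes and values tag by tag.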
+static hidl_vec<DrmMetricGroupHidl> toDrmMetricGroupHidl(
+        const std::vector<DrmMetricGroupAidl>& result) {
+    Vector<DrmMetricGroupHidl> resultHidl;
+    for (const auto& r : result) {
+        DrmMetricGroupHidl re;
+        Vector<DrmMetricHidl> tmpMetric;
+        for (const auto& m : r.metrics) {
+            DrmMetricHidl me;
+            me.name = m.name;
+            Vector<AttributeHidl> aTmp;
+            for (const auto& attr : m.attributes) {
+                AttributeHidl attrHidl;
+                attrHidl.name = attr.name;
+
+                switch (attr.value.getTag()) {
+                    case DrmMetricValue::Tag::int64Value:
+                        attrHidl.type = DrmMetricGroupHidl::ValueType::INT64_TYPE;
+                        attrHidl.int64Value = attr.value.get<DrmMetricValue::Tag::int64Value>();
+                        break;
+                    case DrmMetricValue::Tag::doubleValue:
+                        attrHidl.type = DrmMetricGroupHidl::ValueType::DOUBLE_TYPE;
+                        attrHidl.doubleValue = attr.value.get<DrmMetricValue::Tag::doubleValue>();
+                        break;
+                    case DrmMetricValue::Tag::stringValue:
+                        attrHidl.type = DrmMetricGroupHidl::ValueType::STRING_TYPE;
+                        attrHidl.stringValue = attr.value.get<DrmMetricValue::Tag::stringValue>();
+                        break;
+                    default:
+                        break;
+                }
+
+                aTmp.push_back(attrHidl);
+            }
+
+            me.attributes = toHidlVec<AttributeHidl>(aTmp);
+
+            Vector<ValueHidl> vTmp;
+            for (const auto& value : m.values) {
+                ValueHidl valueHidl;
+                valueHidl.componentName = value.name;
+                switch (value.value.getTag()) {
+                    case DrmMetricValue::Tag::int64Value:
+                        valueHidl.type = DrmMetricGroupHidl::ValueType::INT64_TYPE;
+                        valueHidl.int64Value = value.value.get<DrmMetricValue::Tag::int64Value>();
+                        break;
+                    case DrmMetricValue::Tag::doubleValue:
+                        valueHidl.type = DrmMetricGroupHidl::ValueType::DOUBLE_TYPE;
+                        valueHidl.doubleValue = value.value.get<DrmMetricValue::Tag::doubleValue>();
+                        break;
+                    case DrmMetricValue::Tag::stringValue:
+                        valueHidl.type = DrmMetricGroupHidl::ValueType::STRING_TYPE;
+                        valueHidl.stringValue = value.value.get<DrmMetricValue::Tag::stringValue>();
+                        break;
+                    default:
+                        break;
+                }
+
+                vTmp.push_back(valueHidl);
+            }
+
+            me.values = toHidlVec<ValueHidl>(vTmp);
+            tmpMetric.push_back(me);
+        }
+
+        re.metrics = toHidlVec<DrmMetricHidl>(tmpMetric);
+        resultHidl.push_back(re);
+    }
+
+    return toHidlVec<DrmMetricGroupHidl>(resultHidl);
+}
+
+// DrmSessionClient Definition
+
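+// Per-session resource-manager client: openSession() registers one of these with the
+// DrmSessionManager so that, under resource pressure, reclaimResource() can close the
+// session, while getName() reports a "<vendor>[<hex session id>]" label for debugging.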
+struct DrmHalAidl::DrmSessionClient : public aidl::android::media::BnResourceManagerClient {
+    explicit DrmSessionClient(DrmHalAidl* drm, const Vector<uint8_t>& sessionId)
+        : mSessionId(sessionId), mDrm(drm) {}
+
+    ::ndk::ScopedAStatus reclaimResource(bool* _aidl_return) override;
+    ::ndk::ScopedAStatus getName(::std::string* _aidl_return) override;
+
+    const Vector<uint8_t> mSessionId;
+
+    virtual ~DrmSessionClient();
+
+  private:
+    wp<DrmHalAidl> mDrm;
+
+    DISALLOW_EVIL_CONSTRUCTORS(DrmSessionClient);
+};
+
+::ndk::ScopedAStatus DrmHalAidl::DrmSessionClient::reclaimResource(bool* _aidl_return) {
+    auto sessionId = mSessionId;
+    sp<DrmHalAidl> drm = mDrm.promote();
+    if (drm == NULL) {
+        *_aidl_return = true;
+        return ::ndk::ScopedAStatus::ok();
+    }
+    status_t err = drm->closeSession(sessionId);
+    if (err != OK) {
+        *_aidl_return = false;
+        return ::ndk::ScopedAStatus::ok();
+    }
+    drm->onEvent(EventTypeAidl::SESSION_RECLAIMED, toHidlVec(sessionId), hidl_vec<uint8_t>());
+    *_aidl_return = true;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus DrmHalAidl::DrmSessionClient::getName(::std::string* _aidl_return) {
+    String8 name;
+    sp<DrmHalAidl> drm = mDrm.promote();
+    if (drm == NULL) {
+        name.append("<deleted>");
+    } else if (drm->getPropertyStringInternal(String8("vendor"), name) != OK || name.isEmpty()) {
+        name.append("<Get vendor failed or is empty>");
+    }
+    name.append("[");
+    for (size_t i = 0; i < mSessionId.size(); ++i) {
+        name.appendFormat("%02x", mSessionId[i]);
+    }
+    name.append("]");
+    *_aidl_return = name;
+    return ::ndk::ScopedAStatus::ok();
+}
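+// Illustrative example (hypothetical vendor string): with vendor "AcmeDRM" and a
+// session id of {0xab, 0xcd}, getName() returns "AcmeDRM[abcd]".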
+
+DrmHalAidl::DrmSessionClient::~DrmSessionClient() {
+    DrmSessionManager::Instance()->removeSession(mSessionId);
+}
+
+// DrmHalAidl methods
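+// mInitCheck starts at ERROR_UNSUPPORTED when no AIDL drm factories are declared and
+// stays at NO_INIT until createPlugin() finds a matching plugin.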
+DrmHalAidl::DrmHalAidl()
+    : mFactories(makeDrmFactories()),
+      mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT) {}
+
+status_t DrmHalAidl::initCheck() const {
+    return mInitCheck;
+}
+
+DrmHalAidl::~DrmHalAidl() {}
+
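+// Enumerates every declared AIDL IDrmFactory instance via servicemanager and keeps the
+// ones that can actually be fetched; full instance names have the form
+// "<IDrmFactory descriptor>/<instance>" as built below.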
+std::vector<std::shared_ptr<IDrmFactoryAidl>> DrmHalAidl::makeDrmFactories() {
+    std::vector<std::shared_ptr<IDrmFactoryAidl>> factories;
+    AServiceManager_forEachDeclaredInstance(
+            IDrmFactoryAidl::descriptor, static_cast<void*>(&factories),
+            [](const char* instance, void* context) {
+                auto fullName = std::string(IDrmFactoryAidl::descriptor) + "/" + std::string(instance);
+                auto factory = IDrmFactoryAidl::fromBinder(
+                        ::ndk::SpAIBinder(AServiceManager_getService(fullName.c_str())));
+                if (factory == nullptr) {
+                    ALOGE("not found IDrmFactory. Instance name:[%s]", fullName.c_str());
+                    return;
+                }
+
+                ALOGI("found IDrmFactory. Instance name:[%s]", fullName.c_str());
+                static_cast<std::vector<std::shared_ptr<IDrmFactoryAidl>>*>(context)->emplace_back(
+                        factory);
+            });
+
+    return factories;
+}
+
+status_t DrmHalAidl::setListener(const sp<IDrmClient>& listener) {
+    Mutex::Autolock lock(mEventLock);
+    mListener = listener;
+    return NO_ERROR;
+}
+
+status_t DrmHalAidl::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+                                             DrmPlugin::SecurityLevel level, bool* isSupported) {
+    Mutex::Autolock autoLock(mLock);
+    *isSupported = false;
+    Uuid uuidAidl = toAidlUuid(uuid);
+    SecurityLevel levelAidl = static_cast<SecurityLevel>((int32_t)level);
+    std::string mimeTypeStr = mimeType.string();
+    for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
+        if (mFactories[i]
+                    ->isCryptoSchemeSupported(uuidAidl, mimeTypeStr, levelAidl, isSupported)
+                    .isOk()) {
+            if (*isSupported) break;
+        }
+    }
+
+    return OK;
+}
+
+status_t DrmHalAidl::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
+    Mutex::Autolock autoLock(mLock);
+
+    Uuid uuidAidl = toAidlUuid(uuid);
+    std::string appPackageNameAidl = toStdString(appPackageName);
+    std::shared_ptr<IDrmPluginAidl> pluginAidl;
+    mMetrics.SetAppPackageName(appPackageName);
+    mMetrics.SetAppUid(AIBinder_getCallingUid());
+    for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
+        ::ndk::ScopedAStatus status =
+                mFactories[i]->createPlugin(uuidAidl, appPackageNameAidl, &pluginAidl);
+        if (status.isOk()) {
+            if (pluginAidl != NULL) {
+                mPlugin = pluginAidl;
+                break;
+            }
+        } else {
+            DrmUtils::LOG2BE(uuid, "Failed to make drm plugin: %d",
+                             status.getServiceSpecificError());
+        }
+    }
+
+    if (mPlugin == NULL) {
+        DrmUtils::LOG2BE(uuid, "No supported hal instance found");
+        mInitCheck = ERROR_UNSUPPORTED;
+    } else {
+        mInitCheck = OK;
+
+        if (!mPlugin->setListener(shared_from_this()).isOk()) {
+            mInitCheck = DEAD_OBJECT;
+        }
+
+        if (mInitCheck != OK) {
+            mPlugin.reset();
+        }
+    }
+
+    return mInitCheck;
+}
+
+status_t DrmHalAidl::openSession(DrmPlugin::SecurityLevel level, Vector<uint8_t>& sessionId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    SecurityLevel aSecurityLevel = toAidlSecurityLevel(level);
+
+    if (aSecurityLevel == SecurityLevel::UNKNOWN) {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+    bool retry = true;
+    do {
+        std::vector<uint8_t> aSessionId;
+
+        ::ndk::ScopedAStatus status = mPlugin->openSession(aSecurityLevel, &aSessionId);
+        if (status.isOk()) sessionId = toVector(aSessionId);
+        err = status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+
+        if (err == ERROR_DRM_RESOURCE_BUSY && retry) {
+            mLock.unlock();
+            // reclaimSession may call back into closeSession. Since mLock is
+            // shared between Drm instances, we must unlock here to avoid
+            // deadlock.
+            retry = DrmSessionManager::Instance()->reclaimSession(AIBinder_getCallingPid());
+            mLock.lock();
+        } else {
+            retry = false;
+        }
+    } while (retry);
+
+    if (err == OK) {
+        std::shared_ptr<DrmSessionClient> client =
+                ndk::SharedRefBase::make<DrmSessionClient>(this, sessionId);
+        DrmSessionManager::Instance()->addSession(
+                AIBinder_getCallingPid(), std::static_pointer_cast<IResourceManagerClient>(client),
+                sessionId);
+        mOpenSessions.push_back(client);
+        mMetrics.SetSessionStart(sessionId);
+    }
+
+    mMetrics.mOpenSessionCounter.Increment(err);
+    return err;
+}
+
+status_t DrmHalAidl::closeSession(Vector<uint8_t> const& sessionId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    std::vector<uint8_t> sessionIdAidl = toStdVec(sessionId);
+    ::ndk::ScopedAStatus status = mPlugin->closeSession(sessionIdAidl);
+    if (status.isOk()) {
+        DrmSessionManager::Instance()->removeSession(sessionId);
+        for (auto i = mOpenSessions.begin(); i != mOpenSessions.end(); i++) {
+            if (isEqualSessionId((*i)->mSessionId, sessionId)) {
+                mOpenSessions.erase(i);
+                break;
+            }
+        }
+
+        status_t response = toStatusTAidl(status.getServiceSpecificError());
+        mMetrics.SetSessionEnd(sessionId);
+        mMetrics.mCloseSessionCounter.Increment(response);
+        return response;
+    }
+    mMetrics.mCloseSessionCounter.Increment(DEAD_OBJECT);
+    return DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getKeyRequest(Vector<uint8_t> const& sessionId,
+                                   Vector<uint8_t> const& initData, String8 const& mimeType,
+                                   DrmPlugin::KeyType keyType,
+                                   KeyedVector<String8, String8> const& optionalParameters,
+                                   Vector<uint8_t>& request, String8& defaultUrl,
+                                   DrmPlugin::KeyRequestType* keyRequestType) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+    EventTimer<status_t> keyRequestTimer(&mMetrics.mGetKeyRequestTimeUs);
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    KeyType aKeyType;
+    if (keyType == DrmPlugin::kKeyType_Streaming) {
+        aKeyType = KeyType::STREAMING;
+    } else if (keyType == DrmPlugin::kKeyType_Offline) {
+        aKeyType = KeyType::OFFLINE;
+    } else if (keyType == DrmPlugin::kKeyType_Release) {
+        aKeyType = KeyType::RELEASE;
+    } else {
+        keyRequestTimer.SetAttribute(BAD_VALUE);
+        return BAD_VALUE;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    std::vector<uint8_t> sessionIdAidl = toStdVec(sessionId);
+    std::vector<uint8_t> initDataAidl = toStdVec(initData);
+    KeyRequest keyRequest;
+
+    ::ndk::ScopedAStatus status =
+            mPlugin->getKeyRequest(sessionIdAidl, initDataAidl, toStdString(mimeType), aKeyType,
+                                   toKeyValueVector(optionalParameters), &keyRequest);
+    if (status.isOk()) {
+        request = toVector(keyRequest.request);
+        defaultUrl = toString8(keyRequest.defaultUrl);
+        *keyRequestType = toKeyRequestType(keyRequest.requestType);
+    }
+
+    err = status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+    keyRequestTimer.SetAttribute(err);
+    return err;
+}
+
+status_t DrmHalAidl::provideKeyResponse(Vector<uint8_t> const& sessionId,
+                                        Vector<uint8_t> const& response,
+                                        Vector<uint8_t>& keySetId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+    EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    std::vector<uint8_t> sessionIdAidl = toStdVec(sessionId);
+    std::vector<uint8_t> responseAidl = toStdVec(response);
+    KeySetId keySetIdsAidl;
+    ::ndk::ScopedAStatus status =
+            mPlugin->provideKeyResponse(sessionIdAidl, responseAidl, &keySetIdsAidl);
+
+    if (status.isOk()) keySetId = toVector(keySetIdsAidl.keySetId);
+    err = status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+    keyResponseTimer.SetAttribute(err);
+    return err;
+}
+
+status_t DrmHalAidl::removeKeys(Vector<uint8_t> const& keySetId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    ::ndk::ScopedAStatus status = mPlugin->removeKeys(toStdVec(keySetId));
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::restoreKeys(Vector<uint8_t> const& sessionId,
+                                 Vector<uint8_t> const& keySetId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    KeySetId keySetIdsAidl;
+    keySetIdsAidl.keySetId = toStdVec(keySetId);
+    ::ndk::ScopedAStatus status = mPlugin->restoreKeys(toStdVec(sessionId), keySetIdsAidl);
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::queryKeyStatus(Vector<uint8_t> const& sessionId,
+                                    KeyedVector<String8, String8>& infoMap) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    std::vector<KeyValue> infoMapAidl;
+    ::ndk::ScopedAStatus status = mPlugin->queryKeyStatus(toStdVec(sessionId), &infoMapAidl);
+
+    infoMap = toKeyedVector(infoMapAidl);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+                                         Vector<uint8_t>& request, String8& defaultUrl) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+
+    ProvisionRequest requestAidl;
+    ::ndk::ScopedAStatus status = mPlugin->getProvisionRequest(
+            toStdString(certType), toStdString(certAuthority), &requestAidl);
+
+    request = toVector(requestAidl.request);
+    defaultUrl = toString8(requestAidl.defaultUrl);
+
+    err = status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+    mMetrics.mGetProvisionRequestCounter.Increment(err);
+    return err;
+}
+
+status_t DrmHalAidl::provideProvisionResponse(Vector<uint8_t> const& response,
+                                              Vector<uint8_t>& certificate,
+                                              Vector<uint8_t>& wrappedKey) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+    ProvideProvisionResponseResult result;
+    ::ndk::ScopedAStatus status = mPlugin->provideProvisionResponse(toStdVec(response), &result);
+
+    certificate = toVector(result.certificate);
+    wrappedKey = toVector(result.wrappedKey);
+    err = status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+    mMetrics.mProvideProvisionResponseCounter.Increment(err);
+    return err;
+}
+
+status_t DrmHalAidl::getSecureStops(List<Vector<uint8_t>>& secureStops) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    std::vector<SecureStop> result;
+    ::ndk::ScopedAStatus status = mPlugin->getSecureStops(&result);
+
+    secureStops = toSecureStops(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    std::vector<SecureStopId> result;
+    ::ndk::ScopedAStatus status = mPlugin->getSecureStopIds(&result);
+
+    secureStopIds = toSecureStopIds(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    SecureStopId ssidAidl;
+    ssidAidl.secureStopId = toStdVec(ssid);
+
+    SecureStop result;
+    ::ndk::ScopedAStatus status = mPlugin->getSecureStop(ssidAidl, &result);
+
+    secureStop = toVector(result.opaqueData);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    OpaqueData ssId;
+    ssId.opaqueData = toStdVec(ssRelease);
+    ::ndk::ScopedAStatus status = mPlugin->releaseSecureStops(ssId);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::removeSecureStop(Vector<uint8_t> const& ssid) {
+    Mutex::Autolock autoLock(mLock);
+
+    INIT_CHECK();
+
+    SecureStopId ssidAidl;
+    ssidAidl.secureStopId = toStdVec(ssid);
+    ::ndk::ScopedAStatus status = mPlugin->removeSecureStop(ssidAidl);
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::removeAllSecureStops() {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    ::ndk::ScopedAStatus status = mPlugin->releaseAllSecureStops();
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getHdcpLevels(DrmPlugin::HdcpLevel* connected,
+                                   DrmPlugin::HdcpLevel* max) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    if (connected == NULL || max == NULL) {
+        return BAD_VALUE;
+    }
+
+    *connected = DrmPlugin::kHdcpLevelUnknown;
+    *max = DrmPlugin::kHdcpLevelUnknown;
+
+    HdcpLevels lvlsAidl;
+    ::ndk::ScopedAStatus status = mPlugin->getHdcpLevels(&lvlsAidl);
+
+    *connected = toHdcpLevel(lvlsAidl.connectedLevel);
+    *max = toHdcpLevel(lvlsAidl.maxLevel);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getNumberOfSessions(uint32_t* open, uint32_t* max) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    if (open == NULL || max == NULL) {
+        return BAD_VALUE;
+    }
+
+    *open = 0;
+    *max = 0;
+
+    NumberOfSessions result;
+    ::ndk::ScopedAStatus status = mPlugin->getNumberOfSessions(&result);
+
+    *open = result.currentSessions;
+    *max = result.maxSessions;
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getSecurityLevel(Vector<uint8_t> const& sessionId,
+                                      DrmPlugin::SecurityLevel* level) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    if (level == NULL) {
+        return BAD_VALUE;
+    }
+
+    *level = DrmPlugin::kSecurityLevelUnknown;
+
+    SecurityLevel result;
+    ::ndk::ScopedAStatus status = mPlugin->getSecurityLevel(toStdVec(sessionId), &result);
+
+    *level = toSecurityLevel(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    std::vector<KeySetId> result;
+    ::ndk::ScopedAStatus status = mPlugin->getOfflineLicenseKeySetIds(&result);
+
+    keySetIds = toKeySetIds(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    KeySetId keySetIdAidl;
+    keySetIdAidl.keySetId = toStdVec(keySetId);
+    ::ndk::ScopedAStatus status = mPlugin->removeOfflineLicense(keySetIdAidl);
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+                                            DrmPlugin::OfflineLicenseState* licenseState) const {
+    Mutex::Autolock autoLock(mLock);
+
+    INIT_CHECK();
+    *licenseState = DrmPlugin::kOfflineLicenseStateUnknown;
+
+    KeySetId keySetIdAidl;
+    keySetIdAidl.keySetId = toStdVec(keySetId);
+
+    OfflineLicenseState result;
+    ::ndk::ScopedAStatus status = mPlugin->getOfflineLicenseState(keySetIdAidl, &result);
+
+    *licenseState = toOfflineLicenseState(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getPropertyString(String8 const& name, String8& value) const {
+    Mutex::Autolock autoLock(mLock);
+    return getPropertyStringInternal(name, value);
+}
+
+status_t DrmHalAidl::getPropertyStringInternal(String8 const& name, String8& value) const {
+    // This function is internal to the class and should only be called while
+    // mLock is already held.
+    INIT_CHECK();
+
+    std::string result;
+    ::ndk::ScopedAStatus status = mPlugin->getPropertyString(toStdString(name), &result);
+
+    value = toString8(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
+    Mutex::Autolock autoLock(mLock);
+    return getPropertyByteArrayInternal(name, value);
+}
+
+status_t DrmHalAidl::getPropertyByteArrayInternal(String8 const& name,
+                                                  Vector<uint8_t>& value) const {
+    // This function is internal to the class and should only be called while
+    // mLock is already held.
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+
+    std::vector<uint8_t> result;
+    ::ndk::ScopedAStatus status = mPlugin->getPropertyByteArray(toStdString(name), &result);
+
+    value = toVector(result);
+    err = status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+    if (name == kPropertyDeviceUniqueId) {
+        mMetrics.mGetDeviceUniqueIdCounter.Increment(err);
+    }
+    return err;
+}
+
+status_t DrmHalAidl::setPropertyString(String8 const& name, String8 const& value) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    ::ndk::ScopedAStatus status = mPlugin->setPropertyString(toStdString(name), toStdString(value));
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    ::ndk::ScopedAStatus status = mPlugin->setPropertyByteArray(toStdString(name), toStdVec(value));
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
+    if (consumer == nullptr) {
+        return UNEXPECTED_NULL;
+    }
+    consumer->consumeFrameworkMetrics(mMetrics);
+
+    // Append vendor metrics if they are supported.
+
+    String8 vendor;
+    String8 description;
+    if (getPropertyStringInternal(String8("vendor"), vendor) != OK || vendor.isEmpty()) {
+        ALOGE("Get vendor failed or is empty");
+        vendor = "NONE";
+    }
+    if (getPropertyStringInternal(String8("description"), description) != OK ||
+        description.isEmpty()) {
+        ALOGE("Get description failed or is empty.");
+        description = "NONE";
+    }
+    vendor += ".";
+    vendor += description;
+
+    hidl_vec<DrmMetricGroupHidl> pluginMetrics;
+    status_t err = UNKNOWN_ERROR;
+
+    std::vector<DrmMetricGroupAidl> result;
+    ::ndk::ScopedAStatus status = mPlugin->getMetrics(&result);
+
+    if (status.isOk()) {
+        pluginMetrics = toDrmMetricGroupHidl(result);
+        consumer->consumeHidlMetrics(vendor, pluginMetrics);
+    }
+
+    err = status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+
+    return err;
+}
+
+status_t DrmHalAidl::setCipherAlgorithm(Vector<uint8_t> const& sessionId,
+                                        String8 const& algorithm) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    ::ndk::ScopedAStatus status =
+            mPlugin->setCipherAlgorithm(toStdVec(sessionId), toStdString(algorithm));
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    ::ndk::ScopedAStatus status =
+            mPlugin->setMacAlgorithm(toStdVec(sessionId), toStdString(algorithm));
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                             Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+                             Vector<uint8_t>& output) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    std::vector<uint8_t> result;
+    ::ndk::ScopedAStatus status = mPlugin->encrypt(toStdVec(sessionId), toStdVec(keyId),
+                                                   toStdVec(input), toStdVec(iv), &result);
+
+    output = toVector(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                             Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+                             Vector<uint8_t>& output) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    std::vector<uint8_t> result;
+    ::ndk::ScopedAStatus status = mPlugin->decrypt(toStdVec(sessionId), toStdVec(keyId),
+                                                   toStdVec(input), toStdVec(iv), &result);
+
+    output = toVector(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                          Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    std::vector<uint8_t> result;
+    ::ndk::ScopedAStatus status =
+            mPlugin->sign(toStdVec(sessionId), toStdVec(keyId), toStdVec(message), &result);
+
+    signature = toVector(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                            Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+                            bool& match) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    ::ndk::ScopedAStatus status = mPlugin->verify(toStdVec(sessionId), toStdVec(keyId),
+                                                  toStdVec(message), toStdVec(signature), &match);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+                             Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+                             Vector<uint8_t>& signature) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    std::vector<uint8_t> result;
+    ::ndk::ScopedAStatus status =
+            mPlugin->signRSA(toStdVec(sessionId), toStdString(algorithm), toStdVec(message),
+                             toStdVec(wrappedKey), &result);
+
+    signature = toVector(result);
+
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::requiresSecureDecoder(const char* mime, bool* required) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    std::string mimeAidl(mime);
+    ::ndk::ScopedAStatus status = mPlugin->requiresSecureDecoderDefault(mimeAidl, required);
+    if (!status.isOk()) {
+        DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %d", status.getServiceSpecificError());
+        return DEAD_OBJECT;
+    }
+
+    return OK;
+}
+
+status_t DrmHalAidl::requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
+                                           bool* required) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    auto aLevel = toAidlSecurityLevel(securityLevel);
+    std::string mimeAidl(mime);
+    ::ndk::ScopedAStatus status = mPlugin->requiresSecureDecoder(mimeAidl, aLevel, required);
+    if (!status.isOk()) {
+        DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %d", status.getServiceSpecificError());
+        return DEAD_OBJECT;
+    }
+
+    return OK;
+}
+
+status_t DrmHalAidl::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+    std::string playbackIdAidl(playbackId);
+    ::ndk::ScopedAStatus status = mPlugin->setPlaybackId(toStdVec(sessionId), playbackIdAidl);
+    return status.isOk() ? toStatusTAidl(status.getServiceSpecificError()) : DEAD_OBJECT;
+}
+
+status_t DrmHalAidl::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+    Mutex::Autolock autoLock(mLock);
+    return DrmUtils::GetLogMessagesAidl<IDrmPluginAidl>(mPlugin, logs);
+}
+
+void DrmHalAidl::closeOpenSessions() {
+    Mutex::Autolock autoLock(mLock);
+    auto openSessions = mOpenSessions;
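+    // Iterate over a copy: closeSession() re-acquires the class-wide mLock and erases
+    // the corresponding entry from the live mOpenSessions list.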
+    for (size_t i = 0; i < openSessions.size(); i++) {
+        mLock.unlock();
+        closeSession(openSessions[i]->mSessionId);
+        mLock.lock();
+    }
+    mOpenSessions.clear();
+}
+
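+// Fetches the plugin's "vendor", "description" and raw "metrics" properties, base64
+// encodes the metrics blob without padding and forwards it via reportDrmPluginMetrics();
+// the encoded string is returned so cleanup() can attach it to the framework record too.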
+std::string DrmHalAidl::reportPluginMetrics() const {
+    Vector<uint8_t> metricsVector;
+    String8 vendor;
+    String8 description;
+    std::string metricsString;
+    if (getPropertyStringInternal(String8("vendor"), vendor) == OK &&
+        getPropertyStringInternal(String8("description"), description) == OK &&
+        getPropertyByteArrayInternal(String8("metrics"), metricsVector) == OK) {
+        metricsString = toBase64StringNoPad(metricsVector.array(), metricsVector.size());
+        status_t res = android::reportDrmPluginMetrics(metricsString, vendor, description,
+                                                       mMetrics.GetAppUid());
+        if (res != OK) {
+            ALOGE("Metrics were retrieved but could not be reported: %d", res);
+        }
+    }
+    return metricsString;
+}
+
+std::string DrmHalAidl::reportFrameworkMetrics(const std::string& pluginMetrics) const {
+    mediametrics_handle_t item(mediametrics_create("mediadrm"));
+    mediametrics_setUid(item, mMetrics.GetAppUid());
+    String8 vendor;
+    String8 description;
+    status_t result = getPropertyStringInternal(String8("vendor"), vendor);
+    if (result != OK) {
+        ALOGE("Failed to get vendor from drm plugin: %d", result);
+    } else {
+        mediametrics_setCString(item, "vendor", vendor.c_str());
+    }
+    result = getPropertyStringInternal(String8("description"), description);
+    if (result != OK) {
+        ALOGE("Failed to get description from drm plugin: %d", result);
+    } else {
+        mediametrics_setCString(item, "description", description.c_str());
+    }
+
+    std::string serializedMetrics;
+    result = mMetrics.GetSerializedMetrics(&serializedMetrics);
+    if (result != OK) {
+        ALOGE("Failed to serialize framework metrics: %d", result);
+    }
+    std::string b64EncodedMetrics =
+            toBase64StringNoPad(serializedMetrics.data(), serializedMetrics.size());
+    if (!b64EncodedMetrics.empty()) {
+        mediametrics_setCString(item, "serialized_metrics", b64EncodedMetrics.c_str());
+    }
+    if (!pluginMetrics.empty()) {
+        mediametrics_setCString(item, "plugin_metrics", pluginMetrics.c_str());
+    }
+    if (!mediametrics_selfRecord(item)) {
+        ALOGE("Failed to self record framework metrics");
+    }
+    mediametrics_delete(item);
+    return serializedMetrics;
+}
+
+void DrmHalAidl::cleanup() {
+    closeOpenSessions();
+
+    Mutex::Autolock autoLock(mLock);
+    reportFrameworkMetrics(reportPluginMetrics());
+
+    setListener(NULL);
+    mInitCheck = NO_INIT;
+    if (mPlugin != NULL) {
+        if (!mPlugin->setListener(NULL).isOk()) {
+            mInitCheck = DEAD_OBJECT;
+        }
+    }
+
+    mPlugin.reset();
+}
+
+status_t DrmHalAidl::destroyPlugin() {
+    cleanup();
+    return OK;
+}
+
+::ndk::ScopedAStatus DrmHalAidl::onEvent(EventTypeAidl eventTypeAidl,
+                                         const std::vector<uint8_t>& sessionId,
+                                         const std::vector<uint8_t>& data) {
+    ::ndk::ScopedAStatus _aidl_status;
+    mMetrics.mEventCounter.Increment((uint32_t)eventTypeAidl);
+
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Mutex::Autolock lock(mNotifyLock);
+        DrmPlugin::EventType eventType;
+        switch (eventTypeAidl) {
+            case EventTypeAidl::PROVISION_REQUIRED:
+                eventType = DrmPlugin::kDrmPluginEventProvisionRequired;
+                break;
+            case EventTypeAidl::KEY_NEEDED:
+                eventType = DrmPlugin::kDrmPluginEventKeyNeeded;
+                break;
+            case EventTypeAidl::KEY_EXPIRED:
+                eventType = DrmPlugin::kDrmPluginEventKeyExpired;
+                break;
+            case EventTypeAidl::VENDOR_DEFINED:
+                eventType = DrmPlugin::kDrmPluginEventVendorDefined;
+                break;
+            case EventTypeAidl::SESSION_RECLAIMED:
+                eventType = DrmPlugin::kDrmPluginEventSessionReclaimed;
+                break;
+            default:
+                return _aidl_status;
+        }
+
+        listener->sendEvent(eventType, toHidlVec(toVector(sessionId)), toHidlVec(toVector(data)));
+    }
+
+    return _aidl_status;
+}
+
+::ndk::ScopedAStatus DrmHalAidl::onExpirationUpdate(const std::vector<uint8_t>& sessionId,
+                                                    int64_t expiryTimeInMS) {
+    ::ndk::ScopedAStatus _aidl_status;
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Mutex::Autolock lock(mNotifyLock);
+        listener->sendExpirationUpdate(toHidlVec(toVector(sessionId)), expiryTimeInMS);
+    }
+
+    return _aidl_status;
+}
+
+::ndk::ScopedAStatus DrmHalAidl::onKeysChange(const std::vector<uint8_t>& sessionId,
+                                              const std::vector<KeyStatus>& keyStatusListAidl,
+                                              bool hasNewUsableKey) {
+    ::ndk::ScopedAStatus _aidl_status;
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        std::vector<DrmKeyStatus> keyStatusList;
+        size_t nKeys = keyStatusListAidl.size();
+        for (size_t i = 0; i < nKeys; ++i) {
+            const KeyStatus& keyStatus = keyStatusListAidl[i];
+            uint32_t type;
+            switch (keyStatus.type) {
+                case KeyStatusType::USABLE:
+                    type = DrmPlugin::kKeyStatusType_Usable;
+                    break;
+                case KeyStatusType::EXPIRED:
+                    type = DrmPlugin::kKeyStatusType_Expired;
+                    break;
+                case KeyStatusType::OUTPUTNOTALLOWED:
+                    type = DrmPlugin::kKeyStatusType_OutputNotAllowed;
+                    break;
+                case KeyStatusType::STATUSPENDING:
+                    type = DrmPlugin::kKeyStatusType_StatusPending;
+                    break;
+                case KeyStatusType::USABLEINFUTURE:
+                    type = DrmPlugin::kKeyStatusType_UsableInFuture;
+                    break;
+                case KeyStatusType::INTERNALERROR:
+                default:
+                    type = DrmPlugin::kKeyStatusType_InternalError;
+                    break;
+            }
+            keyStatusList.push_back({type, toHidlVec(toVector(keyStatus.keyId))});
+            mMetrics.mKeyStatusChangeCounter.Increment((uint32_t)keyStatus.type);
+        }
+
+        Mutex::Autolock lock(mNotifyLock);
+        listener->sendKeysChange(toHidlVec(toVector(sessionId)), keyStatusList, hasNewUsableKey);
+    } else {
+        // There's no listener. But we still want to count the key change
+        // events.
+        size_t nKeys = keyStatusListAidl.size();
+
+        for (size_t i = 0; i < nKeys; i++) {
+            mMetrics.mKeyStatusChangeCounter.Increment((uint32_t)keyStatusListAidl[i].type);
+        }
+    }
+
+    return _aidl_status;
+}
+
+::ndk::ScopedAStatus DrmHalAidl::onSessionLostState(const std::vector<uint8_t>& sessionId) {
+    ::ndk::ScopedAStatus _aidl_status;
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Mutex::Autolock lock(mNotifyLock);
+        listener->sendSessionLostState(toHidlVec(toVector(sessionId)));
+    }
+
+    return _aidl_status;
+}
+
+}  // namespace android
\ No newline at end of file
diff --git a/drm/libmediadrm/DrmHalHidl.cpp b/drm/libmediadrm/DrmHalHidl.cpp
new file mode 100644
index 0000000..a5dd4d7
--- /dev/null
+++ b/drm/libmediadrm/DrmHalHidl.cpp
@@ -0,0 +1,1516 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmHalHidl"
+
+#include <aidl/android/media/BnResourceManagerClient.h>
+#include <android/binder_manager.h>
+#include <android/hardware/drm/1.2/types.h>
+#include <android/hidl/manager/1.2/IServiceManager.h>
+#include <hidl/ServiceManagement.h>
+#include <media/EventMetric.h>
+#include <media/MediaMetrics.h>
+#include <media/PluginMetricsReporting.h>
+#include <media/drm/DrmAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <mediadrm/DrmHalHidl.h>
+#include <mediadrm/DrmSessionClientInterface.h>
+#include <mediadrm/DrmSessionManager.h>
+#include <mediadrm/DrmUtils.h>
+#include <mediadrm/IDrmMetricsConsumer.h>
+#include <utils/Log.h>
+
+#include <iomanip>
+#include <vector>
+
+using ::android::sp;
+using ::android::DrmUtils::toStatusT;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::drm::V1_1::DrmMetricGroup;
+using ::android::os::PersistableBundle;
+using drm::V1_0::KeyedVector;
+using drm::V1_0::KeyRequestType;
+using drm::V1_0::KeyType;
+using drm::V1_0::KeyValue;
+using drm::V1_0::SecureStop;
+using drm::V1_0::SecureStopId;
+using drm::V1_0::Status;
+using drm::V1_1::HdcpLevel;
+using drm::V1_1::SecureStopRelease;
+using drm::V1_1::SecurityLevel;
+using drm::V1_2::KeySetId;
+using drm::V1_2::KeyStatusType;
+
+typedef drm::V1_1::KeyRequestType KeyRequestType_V1_1;
+typedef drm::V1_2::Status Status_V1_2;
+typedef drm::V1_2::HdcpLevel HdcpLevel_V1_2;
+
+namespace {
+
+// This constant corresponds to the PROPERTY_DEVICE_UNIQUE_ID constant
+// in the MediaDrm API.
+constexpr char kPropertyDeviceUniqueId[] = "deviceUniqueId";
+constexpr char kEqualsSign[] = "=";
+
+template <typename T>
+std::string toBase64StringNoPad(const T* data, size_t size) {
+    // Note that the base 64 conversion only works with arrays of single-byte
+    // values. If the source is empty or is not an array of single-byte values,
+    // return an empty string.
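+    // Example (illustrative): encoding the single byte 'a' yields "YQ==",
+    // which this helper returns as "YQ".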
+    if (size == 0 || sizeof(data[0]) != 1) {
+        return "";
+    }
+
+    android::AString outputString;
+    encodeBase64(data, size, &outputString);
+    // Remove trailing equals padding if it exists.
+    while (outputString.size() > 0 && outputString.endsWith(kEqualsSign)) {
+        outputString.erase(outputString.size() - 1, 1);
+    }
+
+    return std::string(outputString.c_str(), outputString.size());
+}
+
+}  // anonymous namespace
+
+namespace android {
+
+#define INIT_CHECK()                             \
+    {                                            \
+        if (mInitCheck != OK) return mInitCheck; \
+    }
+
+static const Vector<uint8_t> toVector(const hidl_vec<uint8_t>& vec) {
+    Vector<uint8_t> vector;
+    vector.appendArray(vec.data(), vec.size());
+    return vector;
+}
+
+static hidl_vec<uint8_t> toHidlVec(const Vector<uint8_t>& vector) {
+    hidl_vec<uint8_t> vec;
+    vec.setToExternal(const_cast<uint8_t*>(vector.array()), vector.size());
+    return vec;
+}
+
+static String8 toString8(const hidl_string& string) {
+    return String8(string.c_str());
+}
+
+static hidl_string toHidlString(const String8& string) {
+    return hidl_string(string.string());
+}
+
+static DrmPlugin::SecurityLevel toSecurityLevel(SecurityLevel level) {
+    switch (level) {
+        case SecurityLevel::SW_SECURE_CRYPTO:
+            return DrmPlugin::kSecurityLevelSwSecureCrypto;
+        case SecurityLevel::SW_SECURE_DECODE:
+            return DrmPlugin::kSecurityLevelSwSecureDecode;
+        case SecurityLevel::HW_SECURE_CRYPTO:
+            return DrmPlugin::kSecurityLevelHwSecureCrypto;
+        case SecurityLevel::HW_SECURE_DECODE:
+            return DrmPlugin::kSecurityLevelHwSecureDecode;
+        case SecurityLevel::HW_SECURE_ALL:
+            return DrmPlugin::kSecurityLevelHwSecureAll;
+        default:
+            return DrmPlugin::kSecurityLevelUnknown;
+    }
+}
+
+static SecurityLevel toHidlSecurityLevel(DrmPlugin::SecurityLevel level) {
+    switch (level) {
+        case DrmPlugin::kSecurityLevelSwSecureCrypto:
+            return SecurityLevel::SW_SECURE_CRYPTO;
+        case DrmPlugin::kSecurityLevelSwSecureDecode:
+            return SecurityLevel::SW_SECURE_DECODE;
+        case DrmPlugin::kSecurityLevelHwSecureCrypto:
+            return SecurityLevel::HW_SECURE_CRYPTO;
+        case DrmPlugin::kSecurityLevelHwSecureDecode:
+            return SecurityLevel::HW_SECURE_DECODE;
+        case DrmPlugin::kSecurityLevelHwSecureAll:
+            return SecurityLevel::HW_SECURE_ALL;
+        default:
+            return SecurityLevel::UNKNOWN;
+    }
+}
+
+static DrmPlugin::OfflineLicenseState toOfflineLicenseState(OfflineLicenseState licenseState) {
+    switch (licenseState) {
+        case OfflineLicenseState::USABLE:
+            return DrmPlugin::kOfflineLicenseStateUsable;
+        case OfflineLicenseState::INACTIVE:
+            return DrmPlugin::kOfflineLicenseStateReleased;
+        default:
+            return DrmPlugin::kOfflineLicenseStateUnknown;
+    }
+}
+
+static DrmPlugin::HdcpLevel toHdcpLevel(HdcpLevel_V1_2 level) {
+    switch (level) {
+        case HdcpLevel_V1_2::HDCP_NONE:
+            return DrmPlugin::kHdcpNone;
+        case HdcpLevel_V1_2::HDCP_V1:
+            return DrmPlugin::kHdcpV1;
+        case HdcpLevel_V1_2::HDCP_V2:
+            return DrmPlugin::kHdcpV2;
+        case HdcpLevel_V1_2::HDCP_V2_1:
+            return DrmPlugin::kHdcpV2_1;
+        case HdcpLevel_V1_2::HDCP_V2_2:
+            return DrmPlugin::kHdcpV2_2;
+        case HdcpLevel_V1_2::HDCP_V2_3:
+            return DrmPlugin::kHdcpV2_3;
+        case HdcpLevel_V1_2::HDCP_NO_OUTPUT:
+            return DrmPlugin::kHdcpNoOutput;
+        default:
+            return DrmPlugin::kHdcpLevelUnknown;
+    }
+}
+
+static ::KeyedVector toHidlKeyedVector(const KeyedVector<String8, String8>& keyedVector) {
+    std::vector<KeyValue> stdKeyedVector;
+    for (size_t i = 0; i < keyedVector.size(); i++) {
+        KeyValue keyValue;
+        keyValue.key = toHidlString(keyedVector.keyAt(i));
+        keyValue.value = toHidlString(keyedVector.valueAt(i));
+        stdKeyedVector.push_back(keyValue);
+    }
+    return ::KeyedVector(stdKeyedVector);
+}
+
+static KeyedVector<String8, String8> toKeyedVector(const ::KeyedVector& hKeyedVector) {
+    KeyedVector<String8, String8> keyedVector;
+    for (size_t i = 0; i < hKeyedVector.size(); i++) {
+        keyedVector.add(toString8(hKeyedVector[i].key), toString8(hKeyedVector[i].value));
+    }
+    return keyedVector;
+}
+
+static List<Vector<uint8_t>> toSecureStops(const hidl_vec<SecureStop>& hSecureStops) {
+    List<Vector<uint8_t>> secureStops;
+    for (size_t i = 0; i < hSecureStops.size(); i++) {
+        secureStops.push_back(toVector(hSecureStops[i].opaqueData));
+    }
+    return secureStops;
+}
+
+static List<Vector<uint8_t>> toSecureStopIds(const hidl_vec<SecureStopId>& hSecureStopIds) {
+    List<Vector<uint8_t>> secureStopIds;
+    for (size_t i = 0; i < hSecureStopIds.size(); i++) {
+        secureStopIds.push_back(toVector(hSecureStopIds[i]));
+    }
+    return secureStopIds;
+}
+
+static List<Vector<uint8_t>> toKeySetIds(const hidl_vec<KeySetId>& hKeySetIds) {
+    List<Vector<uint8_t>> keySetIds;
+    for (size_t i = 0; i < hKeySetIds.size(); i++) {
+        keySetIds.push_back(toVector(hKeySetIds[i]));
+    }
+    return keySetIds;
+}
+
+Mutex DrmHalHidl::mLock;
+
+struct DrmHalHidl::DrmSessionClient : public aidl::android::media::BnResourceManagerClient {
+    explicit DrmSessionClient(DrmHalHidl* drm, const Vector<uint8_t>& sessionId)
+        : mSessionId(sessionId), mDrm(drm) {}
+
+    ::ndk::ScopedAStatus reclaimResource(bool* _aidl_return) override;
+    ::ndk::ScopedAStatus getName(::std::string* _aidl_return) override;
+
+    const Vector<uint8_t> mSessionId;
+
+    virtual ~DrmSessionClient();
+
+  private:
+    wp<DrmHalHidl> mDrm;
+
+    DISALLOW_EVIL_CONSTRUCTORS(DrmSessionClient);
+};
+
+::ndk::ScopedAStatus DrmHalHidl::DrmSessionClient::reclaimResource(bool* _aidl_return) {
+    auto sessionId = mSessionId;
+    sp<DrmHalHidl> drm = mDrm.promote();
+    if (drm == NULL) {
+        *_aidl_return = true;
+        return ::ndk::ScopedAStatus::ok();
+    }
+    status_t err = drm->closeSession(sessionId);
+    if (err != OK) {
+        *_aidl_return = false;
+        return ::ndk::ScopedAStatus::ok();
+    }
+    drm->sendEvent(EventType::SESSION_RECLAIMED, toHidlVec(sessionId), hidl_vec<uint8_t>());
+    *_aidl_return = true;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus DrmHalHidl::DrmSessionClient::getName(::std::string* _aidl_return) {
+    String8 name;
+    sp<DrmHalHidl> drm = mDrm.promote();
+    if (drm == NULL) {
+        name.append("<deleted>");
+    } else if (drm->getPropertyStringInternal(String8("vendor"), name) != OK || name.isEmpty()) {
+        name.append("<Get vendor failed or is empty>");
+    }
+    name.append("[");
+    for (size_t i = 0; i < mSessionId.size(); ++i) {
+        name.appendFormat("%02x", mSessionId[i]);
+    }
+    name.append("]");
+    *_aidl_return = name;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+DrmHalHidl::DrmSessionClient::~DrmSessionClient() {
+    DrmSessionManager::Instance()->removeSession(mSessionId);
+}
+
+DrmHalHidl::DrmHalHidl()
+    : mFactories(makeDrmFactories()),
+      mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT) {}
+
+void DrmHalHidl::closeOpenSessions() {
+    Mutex::Autolock autoLock(mLock);
+    auto openSessions = mOpenSessions;
+    for (size_t i = 0; i < openSessions.size(); i++) {
+        mLock.unlock();
+        closeSession(openSessions[i]->mSessionId);
+        mLock.lock();
+    }
+    mOpenSessions.clear();
+}
+
+DrmHalHidl::~DrmHalHidl() {}
+
+void DrmHalHidl::cleanup() {
+    closeOpenSessions();
+
+    Mutex::Autolock autoLock(mLock);
+    reportFrameworkMetrics(reportPluginMetrics());
+
+    setListener(NULL);
+    mInitCheck = NO_INIT;
+    if (mPluginV1_2 != NULL) {
+        if (!mPluginV1_2->setListener(NULL).isOk()) {
+            mInitCheck = DEAD_OBJECT;
+        }
+    } else if (mPlugin != NULL) {
+        if (!mPlugin->setListener(NULL).isOk()) {
+            mInitCheck = DEAD_OBJECT;
+        }
+    }
+    mPlugin.clear();
+    mPluginV1_1.clear();
+    mPluginV1_2.clear();
+    mPluginV1_4.clear();
+}
+
+std::vector<sp<IDrmFactory>> DrmHalHidl::makeDrmFactories() {
+    static std::vector<sp<IDrmFactory>> factories(DrmUtils::MakeDrmFactories());
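+    // Function-local static: the HIDL factory list is resolved once per process and
+    // reused on subsequent calls.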
+    if (factories.size() == 0) {
+        // Must be in passthrough mode; load the default passthrough service.
+        auto passthrough = IDrmFactory::getService();
+        if (passthrough != NULL) {
+            DrmUtils::LOG2BI("makeDrmFactories: using default passthrough drm instance");
+            factories.push_back(passthrough);
+        } else {
+            DrmUtils::LOG2BE("Failed to find any drm factories");
+        }
+    }
+    return factories;
+}
+
+sp<IDrmPlugin> DrmHalHidl::makeDrmPlugin(const sp<IDrmFactory>& factory, const uint8_t uuid[16],
+                                         const String8& appPackageName) {
+    mAppPackageName = appPackageName;
+    mMetrics.SetAppPackageName(appPackageName);
+    mMetrics.SetAppUid(AIBinder_getCallingUid());
+
+    sp<IDrmPlugin> plugin;
+    Return<void> hResult = factory->createPlugin(
+            uuid, appPackageName.string(), [&](Status status, const sp<IDrmPlugin>& hPlugin) {
+                if (status != Status::OK) {
+                    DrmUtils::LOG2BE(uuid, "Failed to make drm plugin: %d", status);
+                    return;
+                }
+                plugin = hPlugin;
+            });
+
+    if (!hResult.isOk()) {
+        DrmUtils::LOG2BE(uuid, "createPlugin remote call failed: %s",
+                         hResult.description().c_str());
+    }
+
+    return plugin;
+}
+
+status_t DrmHalHidl::initCheck() const {
+    return mInitCheck;
+}
+
+status_t DrmHalHidl::setListener(const sp<IDrmClient>& listener) {
+    Mutex::Autolock lock(mEventLock);
+    mListener = listener;
+    return NO_ERROR;
+}
+
+Return<void> DrmHalHidl::sendEvent(EventType hEventType, const hidl_vec<uint8_t>& sessionId,
+                                   const hidl_vec<uint8_t>& data) {
+    mMetrics.mEventCounter.Increment((uint32_t)hEventType);
+
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Mutex::Autolock lock(mNotifyLock);
+        DrmPlugin::EventType eventType;
+        switch (hEventType) {
+            case EventType::PROVISION_REQUIRED:
+                eventType = DrmPlugin::kDrmPluginEventProvisionRequired;
+                break;
+            case EventType::KEY_NEEDED:
+                eventType = DrmPlugin::kDrmPluginEventKeyNeeded;
+                break;
+            case EventType::KEY_EXPIRED:
+                eventType = DrmPlugin::kDrmPluginEventKeyExpired;
+                break;
+            case EventType::VENDOR_DEFINED:
+                eventType = DrmPlugin::kDrmPluginEventVendorDefined;
+                break;
+            case EventType::SESSION_RECLAIMED:
+                eventType = DrmPlugin::kDrmPluginEventSessionReclaimed;
+                break;
+            default:
+                return Void();
+        }
+        listener->sendEvent(eventType, sessionId, data);
+    }
+    return Void();
+}
+
+Return<void> DrmHalHidl::sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId,
+                                              int64_t expiryTimeInMS) {
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Mutex::Autolock lock(mNotifyLock);
+        listener->sendExpirationUpdate(sessionId, expiryTimeInMS);
+    }
+    return Void();
+}
+
+Return<void> DrmHalHidl::sendKeysChange(const hidl_vec<uint8_t>& sessionId,
+                                        const hidl_vec<KeyStatus_V1_0>& keyStatusList_V1_0,
+                                        bool hasNewUsableKey) {
+    std::vector<KeyStatus> keyStatusVec;
+    for (const auto& keyStatus_V1_0 : keyStatusList_V1_0) {
+        keyStatusVec.push_back(
+                {keyStatus_V1_0.keyId, static_cast<KeyStatusType>(keyStatus_V1_0.type)});
+    }
+    hidl_vec<KeyStatus> keyStatusList_V1_2(keyStatusVec);
+    return sendKeysChange_1_2(sessionId, keyStatusList_V1_2, hasNewUsableKey);
+}
+
+Return<void> DrmHalHidl::sendKeysChange_1_2(const hidl_vec<uint8_t>& sessionId,
+                                            const hidl_vec<KeyStatus>& hKeyStatusList,
+                                            bool hasNewUsableKey) {
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        std::vector<DrmKeyStatus> keyStatusList;
+        size_t nKeys = hKeyStatusList.size();
+        for (size_t i = 0; i < nKeys; ++i) {
+            const KeyStatus& keyStatus = hKeyStatusList[i];
+            uint32_t type;
+            switch (keyStatus.type) {
+                case KeyStatusType::USABLE:
+                    type = DrmPlugin::kKeyStatusType_Usable;
+                    break;
+                case KeyStatusType::EXPIRED:
+                    type = DrmPlugin::kKeyStatusType_Expired;
+                    break;
+                case KeyStatusType::OUTPUTNOTALLOWED:
+                    type = DrmPlugin::kKeyStatusType_OutputNotAllowed;
+                    break;
+                case KeyStatusType::STATUSPENDING:
+                    type = DrmPlugin::kKeyStatusType_StatusPending;
+                    break;
+                case KeyStatusType::USABLEINFUTURE:
+                    type = DrmPlugin::kKeyStatusType_UsableInFuture;
+                    break;
+                case KeyStatusType::INTERNALERROR:
+                default:
+                    type = DrmPlugin::kKeyStatusType_InternalError;
+                    break;
+            }
+            keyStatusList.push_back({type, keyStatus.keyId});
+            mMetrics.mKeyStatusChangeCounter.Increment((uint32_t)keyStatus.type);
+        }
+
+        Mutex::Autolock lock(mNotifyLock);
+        listener->sendKeysChange(sessionId, keyStatusList, hasNewUsableKey);
+    } else {
+        // There's no listener. But we still want to count the key change
+        // events.
+        size_t nKeys = hKeyStatusList.size();
+        for (size_t i = 0; i < nKeys; i++) {
+            mMetrics.mKeyStatusChangeCounter.Increment((uint32_t)hKeyStatusList[i].type);
+        }
+    }
+
+    return Void();
+}
+
+Return<void> DrmHalHidl::sendSessionLostState(const hidl_vec<uint8_t>& sessionId) {
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Mutex::Autolock lock(mNotifyLock);
+        listener->sendSessionLostState(sessionId);
+    }
+    return Void();
+}
+
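+// Applies the defaulting rules of the isCryptoSchemeSupported overloads: an
+// unknown level with an empty mimeType means "uuid only", a mimeType alone
+// uses isContentTypeSupported, and an explicit level requires a v1.2 factory.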
+status_t DrmHalHidl::matchMimeTypeAndSecurityLevel(const sp<IDrmFactory>& factory,
+                                                   const uint8_t uuid[16], const String8& mimeType,
+                                                   DrmPlugin::SecurityLevel level,
+                                                   bool* isSupported) {
+    *isSupported = false;
+
+    // handle default value cases
+    if (level == DrmPlugin::kSecurityLevelUnknown) {
+        if (mimeType == "") {
+            // isCryptoSchemeSupported(uuid)
+            *isSupported = true;
+        } else {
+            // isCryptoSchemeSupported(uuid, mimeType)
+            *isSupported = factory->isContentTypeSupported(mimeType.string());
+        }
+        return OK;
+    } else if (mimeType == "") {
+        return BAD_VALUE;
+    }
+
+    sp<drm::V1_2::IDrmFactory> factoryV1_2 = drm::V1_2::IDrmFactory::castFrom(factory);
+    if (factoryV1_2 == NULL) {
+        return ERROR_UNSUPPORTED;
+    } else {
+        *isSupported = factoryV1_2->isCryptoSchemeSupported_1_2(uuid, mimeType.string(),
+                                                                toHidlSecurityLevel(level));
+        return OK;
+    }
+}
+
+status_t DrmHalHidl::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+                                             DrmPlugin::SecurityLevel level, bool* isSupported) {
+    Mutex::Autolock autoLock(mLock);
+    *isSupported = false;
+    for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
+        if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
+            return matchMimeTypeAndSecurityLevel(mFactories[i], uuid, mimeType, level, isSupported);
+        }
+    }
+    return OK;
+}
+
+status_t DrmHalHidl::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
+    Mutex::Autolock autoLock(mLock);
+
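+    // Pick a factory that reports support for the UUID, then probe for the
+    // newer v1.1/v1.2/v1.4 plugin interfaces it implements via castFrom.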
+    for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
+        auto hResult = mFactories[i]->isCryptoSchemeSupported(uuid);
+        if (hResult.isOk() && hResult) {
+            auto plugin = makeDrmPlugin(mFactories[i], uuid, appPackageName);
+            if (plugin != NULL) {
+                mPlugin = plugin;
+                mPluginV1_1 = drm::V1_1::IDrmPlugin::castFrom(mPlugin);
+                mPluginV1_2 = drm::V1_2::IDrmPlugin::castFrom(mPlugin);
+                mPluginV1_4 = drm::V1_4::IDrmPlugin::castFrom(mPlugin);
+                break;
+            }
+        }
+    }
+
+    if (mPlugin == NULL) {
+        DrmUtils::LOG2BE(uuid, "No supported hal instance found");
+        mInitCheck = ERROR_UNSUPPORTED;
+    } else {
+        mInitCheck = OK;
+        if (mPluginV1_2 != NULL) {
+            if (!mPluginV1_2->setListener(this).isOk()) {
+                mInitCheck = DEAD_OBJECT;
+            }
+        } else if (!mPlugin->setListener(this).isOk()) {
+            mInitCheck = DEAD_OBJECT;
+        }
+        if (mInitCheck != OK) {
+            mPlugin.clear();
+            mPluginV1_1.clear();
+            mPluginV1_2.clear();
+            mPluginV1_4.clear();
+        }
+    }
+
+    return mInitCheck;
+}
+
+status_t DrmHalHidl::destroyPlugin() {
+    cleanup();
+    return OK;
+}
+
+status_t DrmHalHidl::openSession(DrmPlugin::SecurityLevel level, Vector<uint8_t>& sessionId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    SecurityLevel hSecurityLevel = toHidlSecurityLevel(level);
+    bool setSecurityLevel = true;
+
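+    // kSecurityLevelMax is treated as "no explicit level": the plain
+    // openSession() call is used so the plugin applies its default (typically
+    // the maximum it supports); any other value must map to a known HIDL level.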
+    if (level == DrmPlugin::kSecurityLevelMax) {
+        setSecurityLevel = false;
+    } else {
+        if (hSecurityLevel == SecurityLevel::UNKNOWN) {
+            return ERROR_DRM_CANNOT_HANDLE;
+        }
+    }
+
+    status_t err = UNKNOWN_ERROR;
+    bool retry = true;
+    do {
+        hidl_vec<uint8_t> hSessionId;
+
+        Return<void> hResult;
+        if (mPluginV1_1 == NULL || !setSecurityLevel) {
+            hResult = mPlugin->openSession([&](Status status, const hidl_vec<uint8_t>& id) {
+                if (status == Status::OK) {
+                    sessionId = toVector(id);
+                }
+                err = toStatusT(status);
+            });
+        } else {
+            hResult = mPluginV1_1->openSession_1_1(hSecurityLevel,
+                                                   [&](Status status, const hidl_vec<uint8_t>& id) {
+                                                       if (status == Status::OK) {
+                                                           sessionId = toVector(id);
+                                                       }
+                                                       err = toStatusT(status);
+                                                   });
+        }
+
+        if (!hResult.isOk()) {
+            err = DEAD_OBJECT;
+        }
+
+        if (err == ERROR_DRM_RESOURCE_BUSY && retry) {
+            mLock.unlock();
+            // reclaimSession may call back to closeSession. Since mLock is
+            // shared between Drm instances, we unlock here to avoid
+            // deadlock.
+            retry = DrmSessionManager::Instance()->reclaimSession(AIBinder_getCallingPid());
+            mLock.lock();
+        } else {
+            retry = false;
+        }
+    } while (retry);
+
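+    // Register the new session with DrmSessionManager so it can be reclaimed
+    // under resource contention, and start its metrics lifespan.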
+    if (err == OK) {
+        std::shared_ptr<DrmSessionClient> client =
+                ndk::SharedRefBase::make<DrmSessionClient>(this, sessionId);
+        DrmSessionManager::Instance()->addSession(
+                AIBinder_getCallingPid(), std::static_pointer_cast<IResourceManagerClient>(client),
+                sessionId);
+        mOpenSessions.push_back(client);
+        mMetrics.SetSessionStart(sessionId);
+    }
+
+    mMetrics.mOpenSessionCounter.Increment(err);
+    return err;
+}
+
+status_t DrmHalHidl::closeSession(Vector<uint8_t> const& sessionId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
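+    // On success, drop the session from DrmSessionManager and from the local
+    // open-session list, then record close metrics.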
+    Return<Status> status = mPlugin->closeSession(toHidlVec(sessionId));
+    if (status.isOk()) {
+        if (status == Status::OK) {
+            DrmSessionManager::Instance()->removeSession(sessionId);
+            for (auto i = mOpenSessions.begin(); i != mOpenSessions.end(); i++) {
+                if (isEqualSessionId((*i)->mSessionId, sessionId)) {
+                    mOpenSessions.erase(i);
+                    break;
+                }
+            }
+        }
+        status_t response = toStatusT(status);
+        mMetrics.SetSessionEnd(sessionId);
+        mMetrics.mCloseSessionCounter.Increment(response);
+        return response;
+    }
+    mMetrics.mCloseSessionCounter.Increment(DEAD_OBJECT);
+    return DEAD_OBJECT;
+}
+
+static DrmPlugin::KeyRequestType toKeyRequestType(KeyRequestType keyRequestType) {
+    switch (keyRequestType) {
+        case KeyRequestType::INITIAL:
+            return DrmPlugin::kKeyRequestType_Initial;
+            break;
+        case KeyRequestType::RENEWAL:
+            return DrmPlugin::kKeyRequestType_Renewal;
+            break;
+        case KeyRequestType::RELEASE:
+            return DrmPlugin::kKeyRequestType_Release;
+            break;
+        default:
+            return DrmPlugin::kKeyRequestType_Unknown;
+            break;
+    }
+}
+
+static DrmPlugin::KeyRequestType toKeyRequestType_1_1(KeyRequestType_V1_1 keyRequestType) {
+    switch (keyRequestType) {
+        case KeyRequestType_V1_1::NONE:
+            return DrmPlugin::kKeyRequestType_None;
+            break;
+        case KeyRequestType_V1_1::UPDATE:
+            return DrmPlugin::kKeyRequestType_Update;
+            break;
+        default:
+            return toKeyRequestType(static_cast<KeyRequestType>(keyRequestType));
+            break;
+    }
+}
+
+status_t DrmHalHidl::getKeyRequest(Vector<uint8_t> const& sessionId,
+                                   Vector<uint8_t> const& initData, String8 const& mimeType,
+                                   DrmPlugin::KeyType keyType,
+                                   KeyedVector<String8, String8> const& optionalParameters,
+                                   Vector<uint8_t>& request, String8& defaultUrl,
+                                   DrmPlugin::KeyRequestType* keyRequestType) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+    EventTimer<status_t> keyRequestTimer(&mMetrics.mGetKeyRequestTimeUs);
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    KeyType hKeyType;
+    if (keyType == DrmPlugin::kKeyType_Streaming) {
+        hKeyType = KeyType::STREAMING;
+    } else if (keyType == DrmPlugin::kKeyType_Offline) {
+        hKeyType = KeyType::OFFLINE;
+    } else if (keyType == DrmPlugin::kKeyType_Release) {
+        hKeyType = KeyType::RELEASE;
+    } else {
+        keyRequestTimer.SetAttribute(BAD_VALUE);
+        return BAD_VALUE;
+    }
+
+    ::KeyedVector hOptionalParameters = toHidlKeyedVector(optionalParameters);
+
+    status_t err = UNKNOWN_ERROR;
+    Return<void> hResult;
+
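+    // Prefer the newest plugin interface available: v1.2 reports the richer
+    // Status_V1_2 codes, v1.1 adds the NONE/UPDATE request types, and the
+    // v1.0 call is the fallback.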
+    if (mPluginV1_2 != NULL) {
+        hResult = mPluginV1_2->getKeyRequest_1_2(
+                toHidlVec(sessionId), toHidlVec(initData), toHidlString(mimeType), hKeyType,
+                hOptionalParameters,
+                [&](Status_V1_2 status, const hidl_vec<uint8_t>& hRequest,
+                    KeyRequestType_V1_1 hKeyRequestType, const hidl_string& hDefaultUrl) {
+                    if (status == Status_V1_2::OK) {
+                        request = toVector(hRequest);
+                        defaultUrl = toString8(hDefaultUrl);
+                        *keyRequestType = toKeyRequestType_1_1(hKeyRequestType);
+                    }
+                    err = toStatusT(status);
+                });
+    } else if (mPluginV1_1 != NULL) {
+        hResult = mPluginV1_1->getKeyRequest_1_1(
+                toHidlVec(sessionId), toHidlVec(initData), toHidlString(mimeType), hKeyType,
+                hOptionalParameters,
+                [&](Status status, const hidl_vec<uint8_t>& hRequest,
+                    KeyRequestType_V1_1 hKeyRequestType, const hidl_string& hDefaultUrl) {
+                    if (status == Status::OK) {
+                        request = toVector(hRequest);
+                        defaultUrl = toString8(hDefaultUrl);
+                        *keyRequestType = toKeyRequestType_1_1(hKeyRequestType);
+                    }
+                    err = toStatusT(status);
+                });
+    } else {
+        hResult = mPlugin->getKeyRequest(
+                toHidlVec(sessionId), toHidlVec(initData), toHidlString(mimeType), hKeyType,
+                hOptionalParameters,
+                [&](Status status, const hidl_vec<uint8_t>& hRequest,
+                    KeyRequestType hKeyRequestType, const hidl_string& hDefaultUrl) {
+                    if (status == Status::OK) {
+                        request = toVector(hRequest);
+                        defaultUrl = toString8(hDefaultUrl);
+                        *keyRequestType = toKeyRequestType(hKeyRequestType);
+                    }
+                    err = toStatusT(status);
+                });
+    }
+
+    err = hResult.isOk() ? err : DEAD_OBJECT;
+    keyRequestTimer.SetAttribute(err);
+    return err;
+}
+
+status_t DrmHalHidl::provideKeyResponse(Vector<uint8_t> const& sessionId,
+                                        Vector<uint8_t> const& response,
+                                        Vector<uint8_t>& keySetId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+    EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult =
+            mPlugin->provideKeyResponse(toHidlVec(sessionId), toHidlVec(response),
+                                        [&](Status status, const hidl_vec<uint8_t>& hKeySetId) {
+                                            if (status == Status::OK) {
+                                                keySetId = toVector(hKeySetId);
+                                            }
+                                            err = toStatusT(status);
+                                        });
+    err = hResult.isOk() ? err : DEAD_OBJECT;
+    keyResponseTimer.SetAttribute(err);
+    return err;
+}
+
+status_t DrmHalHidl::removeKeys(Vector<uint8_t> const& keySetId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    Return<Status> status = mPlugin->removeKeys(toHidlVec(keySetId));
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::restoreKeys(Vector<uint8_t> const& sessionId,
+                                 Vector<uint8_t> const& keySetId) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    Return<Status> status = mPlugin->restoreKeys(toHidlVec(sessionId), toHidlVec(keySetId));
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::queryKeyStatus(Vector<uint8_t> const& sessionId,
+                                    KeyedVector<String8, String8>& infoMap) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    ::KeyedVector hInfoMap;
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->queryKeyStatus(
+            toHidlVec(sessionId), [&](Status status, const hidl_vec<KeyValue>& map) {
+                if (status == Status::OK) {
+                    infoMap = toKeyedVector(map);
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+                                         Vector<uint8_t>& request, String8& defaultUrl) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+    Return<void> hResult;
+
+    if (mPluginV1_2 != NULL) {
+        hResult = mPluginV1_2->getProvisionRequest_1_2(
+                toHidlString(certType), toHidlString(certAuthority),
+                [&](Status_V1_2 status, const hidl_vec<uint8_t>& hRequest,
+                    const hidl_string& hDefaultUrl) {
+                    if (status == Status_V1_2::OK) {
+                        request = toVector(hRequest);
+                        defaultUrl = toString8(hDefaultUrl);
+                    }
+                    err = toStatusT(status);
+                });
+    } else {
+        hResult = mPlugin->getProvisionRequest(toHidlString(certType), toHidlString(certAuthority),
+                                               [&](Status status, const hidl_vec<uint8_t>& hRequest,
+                                                   const hidl_string& hDefaultUrl) {
+                                                   if (status == Status::OK) {
+                                                       request = toVector(hRequest);
+                                                       defaultUrl = toString8(hDefaultUrl);
+                                                   }
+                                                   err = toStatusT(status);
+                                               });
+    }
+
+    err = hResult.isOk() ? err : DEAD_OBJECT;
+    mMetrics.mGetProvisionRequestCounter.Increment(err);
+    return err;
+}
+
+status_t DrmHalHidl::provideProvisionResponse(Vector<uint8_t> const& response,
+                                              Vector<uint8_t>& certificate,
+                                              Vector<uint8_t>& wrappedKey) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->provideProvisionResponse(
+            toHidlVec(response), [&](Status status, const hidl_vec<uint8_t>& hCertificate,
+                                     const hidl_vec<uint8_t>& hWrappedKey) {
+                if (status == Status::OK) {
+                    certificate = toVector(hCertificate);
+                    wrappedKey = toVector(hWrappedKey);
+                }
+                err = toStatusT(status);
+            });
+
+    err = hResult.isOk() ? err : DEAD_OBJECT;
+    mMetrics.mProvideProvisionResponseCounter.Increment(err);
+    return err;
+}
+
+status_t DrmHalHidl::getSecureStops(List<Vector<uint8_t>>& secureStops) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult =
+            mPlugin->getSecureStops([&](Status status, const hidl_vec<SecureStop>& hSecureStops) {
+                if (status == Status::OK) {
+                    secureStops = toSecureStops(hSecureStops);
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mPluginV1_1 == NULL) {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPluginV1_1->getSecureStopIds(
+            [&](Status status, const hidl_vec<SecureStopId>& hSecureStopIds) {
+                if (status == Status::OK) {
+                    secureStopIds = toSecureStopIds(hSecureStopIds);
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getSecureStop(
+            toHidlVec(ssid), [&](Status status, const SecureStop& hSecureStop) {
+                if (status == Status::OK) {
+                    secureStop = toVector(hSecureStop.opaqueData);
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    Return<Status> status(Status::ERROR_DRM_UNKNOWN);
+    if (mPluginV1_1 != NULL) {
+        SecureStopRelease secureStopRelease;
+        secureStopRelease.opaqueData = toHidlVec(ssRelease);
+        status = mPluginV1_1->releaseSecureStops(secureStopRelease);
+    } else {
+        status = mPlugin->releaseSecureStop(toHidlVec(ssRelease));
+    }
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::removeSecureStop(Vector<uint8_t> const& ssid) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mPluginV1_1 == NULL) {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
+
+    Return<Status> status = mPluginV1_1->removeSecureStop(toHidlVec(ssid));
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::removeAllSecureStops() {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    Return<Status> status(Status::ERROR_DRM_UNKNOWN);
+    if (mPluginV1_1 != NULL) {
+        status = mPluginV1_1->removeAllSecureStops();
+    } else {
+        status = mPlugin->releaseAllSecureStops();
+    }
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getHdcpLevels(DrmPlugin::HdcpLevel* connected,
+                                   DrmPlugin::HdcpLevel* max) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    if (connected == NULL || max == NULL) {
+        return BAD_VALUE;
+    }
+    status_t err = UNKNOWN_ERROR;
+
+    *connected = DrmPlugin::kHdcpLevelUnknown;
+    *max = DrmPlugin::kHdcpLevelUnknown;
+
+    Return<void> hResult;
+    if (mPluginV1_2 != NULL) {
+        hResult = mPluginV1_2->getHdcpLevels_1_2([&](Status_V1_2 status,
+                                                     const HdcpLevel_V1_2& hConnected,
+                                                     const HdcpLevel_V1_2& hMax) {
+            if (status == Status_V1_2::OK) {
+                *connected = toHdcpLevel(hConnected);
+                *max = toHdcpLevel(hMax);
+            }
+            err = toStatusT(status);
+        });
+    } else if (mPluginV1_1 != NULL) {
+        hResult = mPluginV1_1->getHdcpLevels(
+                [&](Status status, const HdcpLevel& hConnected, const HdcpLevel& hMax) {
+                    if (status == Status::OK) {
+                        *connected = toHdcpLevel(static_cast<HdcpLevel_V1_2>(hConnected));
+                        *max = toHdcpLevel(static_cast<HdcpLevel_V1_2>(hMax));
+                    }
+                    err = toStatusT(status);
+                });
+    } else {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getNumberOfSessions(uint32_t* open, uint32_t* max) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    if (open == NULL || max == NULL) {
+        return BAD_VALUE;
+    }
+    status_t err = UNKNOWN_ERROR;
+
+    *open = 0;
+    *max = 0;
+
+    if (mPluginV1_1 == NULL) {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
+
+    Return<void> hResult =
+            mPluginV1_1->getNumberOfSessions([&](Status status, uint32_t hOpen, uint32_t hMax) {
+                if (status == Status::OK) {
+                    *open = hOpen;
+                    *max = hMax;
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getSecurityLevel(Vector<uint8_t> const& sessionId,
+                                      DrmPlugin::SecurityLevel* level) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    if (level == NULL) {
+        return BAD_VALUE;
+    }
+    status_t err = UNKNOWN_ERROR;
+
+    if (mPluginV1_1 == NULL) {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
+
+    *level = DrmPlugin::kSecurityLevelUnknown;
+
+    Return<void> hResult = mPluginV1_1->getSecurityLevel(toHidlVec(sessionId),
+                                                         [&](Status status, SecurityLevel hLevel) {
+                                                             if (status == Status::OK) {
+                                                                 *level = toSecurityLevel(hLevel);
+                                                             }
+                                                             err = toStatusT(status);
+                                                         });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mPluginV1_2 == NULL) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPluginV1_2->getOfflineLicenseKeySetIds(
+            [&](Status status, const hidl_vec<KeySetId>& hKeySetIds) {
+                if (status == Status::OK) {
+                    keySetIds = toKeySetIds(hKeySetIds);
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mPluginV1_2 == NULL) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    Return<Status> status = mPluginV1_2->removeOfflineLicense(toHidlVec(keySetId));
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+                                            DrmPlugin::OfflineLicenseState* licenseState) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (mPluginV1_2 == NULL) {
+        return ERROR_UNSUPPORTED;
+    }
+    *licenseState = DrmPlugin::kOfflineLicenseStateUnknown;
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPluginV1_2->getOfflineLicenseState(
+            toHidlVec(keySetId), [&](Status status, OfflineLicenseState hLicenseState) {
+                if (status == Status::OK) {
+                    *licenseState = toOfflineLicenseState(hLicenseState);
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getPropertyString(String8 const& name, String8& value) const {
+    Mutex::Autolock autoLock(mLock);
+    return getPropertyStringInternal(name, value);
+}
+
+status_t DrmHalHidl::getPropertyStringInternal(String8 const& name, String8& value) const {
+    // This function is internal to the class and should only be called while
+    // mLock is already held.
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getPropertyString(
+            toHidlString(name), [&](Status status, const hidl_string& hValue) {
+                if (status == Status::OK) {
+                    value = toString8(hValue);
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
+    Mutex::Autolock autoLock(mLock);
+    return getPropertyByteArrayInternal(name, value);
+}
+
+status_t DrmHalHidl::getPropertyByteArrayInternal(String8 const& name,
+                                                  Vector<uint8_t>& value) const {
+    // This function is internal to the class and should only be called while
+    // mLock is already held.
+    INIT_CHECK();
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getPropertyByteArray(
+            toHidlString(name), [&](Status status, const hidl_vec<uint8_t>& hValue) {
+                if (status == Status::OK) {
+                    value = toVector(hValue);
+                }
+                err = toStatusT(status);
+            });
+
+    err = hResult.isOk() ? err : DEAD_OBJECT;
+    if (name == kPropertyDeviceUniqueId) {
+        mMetrics.mGetDeviceUniqueIdCounter.Increment(err);
+    }
+    return err;
+}
+
+status_t DrmHalHidl::setPropertyString(String8 const& name, String8 const& value) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    Return<Status> status = mPlugin->setPropertyString(toHidlString(name), toHidlString(value));
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    Return<Status> status = mPlugin->setPropertyByteArray(toHidlString(name), toHidlVec(value));
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
+    if (consumer == nullptr) {
+        return UNEXPECTED_NULL;
+    }
+    consumer->consumeFrameworkMetrics(mMetrics);
+
+    // Append vendor metrics if they are supported.
+    if (mPluginV1_1 != NULL) {
+        String8 vendor;
+        String8 description;
+        if (getPropertyStringInternal(String8("vendor"), vendor) != OK || vendor.isEmpty()) {
+            ALOGE("Get vendor failed or is empty");
+            vendor = "NONE";
+        }
+        if (getPropertyStringInternal(String8("description"), description) != OK ||
+            description.isEmpty()) {
+            ALOGE("Get description failed or is empty.");
+            description = "NONE";
+        }
+        vendor += ".";
+        vendor += description;
+
+        hidl_vec<DrmMetricGroup> pluginMetrics;
+        status_t err = UNKNOWN_ERROR;
+
+        Return<void> status =
+                mPluginV1_1->getMetrics([&](Status status, hidl_vec<DrmMetricGroup> pluginMetrics) {
+                    if (status != Status::OK) {
+                        ALOGV("Error getting plugin metrics: %d", status);
+                    } else {
+                        consumer->consumeHidlMetrics(vendor, pluginMetrics);
+                    }
+                    err = toStatusT(status);
+                });
+        return status.isOk() ? err : DEAD_OBJECT;
+    }
+
+    return OK;
+}
+
+status_t DrmHalHidl::setCipherAlgorithm(Vector<uint8_t> const& sessionId,
+                                        String8 const& algorithm) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    Return<Status> status =
+            mPlugin->setCipherAlgorithm(toHidlVec(sessionId), toHidlString(algorithm));
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    Return<Status> status = mPlugin->setMacAlgorithm(toHidlVec(sessionId), toHidlString(algorithm));
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                             Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+                             Vector<uint8_t>& output) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult =
+            mPlugin->encrypt(toHidlVec(sessionId), toHidlVec(keyId), toHidlVec(input),
+                             toHidlVec(iv), [&](Status status, const hidl_vec<uint8_t>& hOutput) {
+                                 if (status == Status::OK) {
+                                     output = toVector(hOutput);
+                                 }
+                                 err = toStatusT(status);
+                             });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                             Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+                             Vector<uint8_t>& output) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult =
+            mPlugin->decrypt(toHidlVec(sessionId), toHidlVec(keyId), toHidlVec(input),
+                             toHidlVec(iv), [&](Status status, const hidl_vec<uint8_t>& hOutput) {
+                                 if (status == Status::OK) {
+                                     output = toVector(hOutput);
+                                 }
+                                 err = toStatusT(status);
+                             });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                          Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->sign(toHidlVec(sessionId), toHidlVec(keyId), toHidlVec(message),
+                                         [&](Status status, const hidl_vec<uint8_t>& hSignature) {
+                                             if (status == Status::OK) {
+                                                 signature = toVector(hSignature);
+                                             }
+                                             err = toStatusT(status);
+                                         });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                            Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+                            bool& match) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult =
+            mPlugin->verify(toHidlVec(sessionId), toHidlVec(keyId), toHidlVec(message),
+                            toHidlVec(signature), [&](Status status, bool hMatch) {
+                                if (status == Status::OK) {
+                                    match = hMatch;
+                                } else {
+                                    match = false;
+                                }
+                                err = toStatusT(status);
+                            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+                             Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+                             Vector<uint8_t>& signature) {
+    Mutex::Autolock autoLock(mLock);
+    INIT_CHECK();
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->signRSA(
+            toHidlVec(sessionId), toHidlString(algorithm), toHidlVec(message),
+            toHidlVec(wrappedKey), [&](Status status, const hidl_vec<uint8_t>& hSignature) {
+                if (status == Status::OK) {
+                    signature = toVector(hSignature);
+                }
+                err = toStatusT(status);
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
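+// Records framework-level DRM metrics to a "mediadrm" mediametrics item; the
+// serialized metrics are attached base64-encoded (unpadded) alongside any
+// plugin-supplied metrics string.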
+std::string DrmHalHidl::reportFrameworkMetrics(const std::string& pluginMetrics) const {
+    mediametrics_handle_t item(mediametrics_create("mediadrm"));
+    mediametrics_setUid(item, mMetrics.GetAppUid());
+    String8 vendor;
+    String8 description;
+    status_t result = getPropertyStringInternal(String8("vendor"), vendor);
+    if (result != OK) {
+        ALOGE("Failed to get vendor from drm plugin: %d", result);
+    } else {
+        mediametrics_setCString(item, "vendor", vendor.c_str());
+    }
+    result = getPropertyStringInternal(String8("description"), description);
+    if (result != OK) {
+        ALOGE("Failed to get description from drm plugin: %d", result);
+    } else {
+        mediametrics_setCString(item, "description", description.c_str());
+    }
+
+    std::string serializedMetrics;
+    result = mMetrics.GetSerializedMetrics(&serializedMetrics);
+    if (result != OK) {
+        ALOGE("Failed to serialize framework metrics: %d", result);
+    }
+    std::string b64EncodedMetrics =
+            toBase64StringNoPad(serializedMetrics.data(), serializedMetrics.size());
+    if (!b64EncodedMetrics.empty()) {
+        mediametrics_setCString(item, "serialized_metrics", b64EncodedMetrics.c_str());
+    }
+    if (!pluginMetrics.empty()) {
+        mediametrics_setCString(item, "plugin_metrics", pluginMetrics.c_str());
+    }
+    if (!mediametrics_selfRecord(item)) {
+        ALOGE("Failed to self record framework metrics");
+    }
+    mediametrics_delete(item);
+    return serializedMetrics;
+}
+
+std::string DrmHalHidl::reportPluginMetrics() const {
+    Vector<uint8_t> metricsVector;
+    String8 vendor;
+    String8 description;
+    std::string metricsString;
+    if (getPropertyStringInternal(String8("vendor"), vendor) == OK &&
+        getPropertyStringInternal(String8("description"), description) == OK &&
+        getPropertyByteArrayInternal(String8("metrics"), metricsVector) == OK) {
+        metricsString = toBase64StringNoPad(metricsVector.array(), metricsVector.size());
+        status_t res = android::reportDrmPluginMetrics(metricsString, vendor, description,
+                                                       mMetrics.GetAppUid());
+        if (res != OK) {
+            ALOGE("Metrics were retrieved but could not be reported: %d", res);
+        }
+    }
+    return metricsString;
+}
+
+status_t DrmHalHidl::requiresSecureDecoder(const char* mime, bool* required) const {
+    Mutex::Autolock autoLock(mLock);
+    if (mPluginV1_4 == NULL) {
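+        // Without a v1.4 plugin, false converts to status_t 0 (OK) and
+        // *required is left untouched.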
+        return false;
+    }
+    auto hResult = mPluginV1_4->requiresSecureDecoderDefault(hidl_string(mime));
+    if (!hResult.isOk()) {
+        DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %s", hResult.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (required) {
+        *required = hResult;
+    }
+    return OK;
+}
+
+status_t DrmHalHidl::requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
+                                           bool* required) const {
+    Mutex::Autolock autoLock(mLock);
+    if (mPluginV1_4 == NULL) {
+        return false;
+    }
+    auto hLevel = toHidlSecurityLevel(securityLevel);
+    auto hResult = mPluginV1_4->requiresSecureDecoder(hidl_string(mime), hLevel);
+    if (!hResult.isOk()) {
+        DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %s", hResult.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (required) {
+        *required = hResult;
+    }
+    return OK;
+}
+
+status_t DrmHalHidl::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
+    Mutex::Autolock autoLock(mLock);
+    if (mPluginV1_4 == NULL) {
+        return ERROR_UNSUPPORTED;
+    }
+    auto err = mPluginV1_4->setPlaybackId(toHidlVec(sessionId), hidl_string(playbackId));
+    return err.isOk() ? toStatusT(err) : DEAD_OBJECT;
+}
+
+status_t DrmHalHidl::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+    Mutex::Autolock autoLock(mLock);
+    return DrmUtils::GetLogMessages<drm::V1_4::IDrmPlugin>(mPlugin, logs);
+}
+
+}  // namespace android
diff --git a/drm/libmediadrm/DrmMetrics.cpp b/drm/libmediadrm/DrmMetrics.cpp
index 996fd19..77b5343 100644
--- a/drm/libmediadrm/DrmMetrics.cpp
+++ b/drm/libmediadrm/DrmMetrics.cpp
@@ -123,20 +123,19 @@
         });
 
     mKeyStatusChangeCounter.ExportValues(
-        [&](const KeyStatusType key_status_type, const int64_t value) {
+        [&](const uint32_t key_status_type, const int64_t value) {
             DrmFrameworkMetrics::Counter *counter =
                 metrics.add_key_status_change_counter();
             counter->set_count(value);
-            counter->mutable_attributes()->set_key_status_type(
-                (uint32_t)key_status_type);
+            counter->mutable_attributes()->set_key_status_type(key_status_type);
         });
 
     mEventCounter.ExportValues(
-        [&](const EventType event_type, const int64_t value) {
+        [&](const uint32_t event_type, const int64_t value) {
             DrmFrameworkMetrics::Counter *counter =
                 metrics.add_event_callback_counter();
             counter->set_count(value);
-            counter->mutable_attributes()->set_event_type((uint32_t)event_type);
+            counter->mutable_attributes()->set_event_type(event_type);
         });
 
     mGetDeviceUniqueIdCounter.ExportValues(
diff --git a/drm/libmediadrm/DrmMetricsConsumer.cpp b/drm/libmediadrm/DrmMetricsConsumer.cpp
index 5f0b26e..dca3050 100644
--- a/drm/libmediadrm/DrmMetricsConsumer.cpp
+++ b/drm/libmediadrm/DrmMetricsConsumer.cpp
@@ -32,26 +32,24 @@
 
 namespace {
 
-template <typename T> std::string GetAttributeName(T type);
-
-template <> std::string GetAttributeName<KeyStatusType>(KeyStatusType type) {
-    static const char *type_names[] = {"USABLE", "EXPIRED",
+std::string GetAttributeName(std::string typeName, uint32_t attribute) {
+    if (typeName == "KeyStatusChange") {
+        static const char *type_names[] = {"USABLE", "EXPIRED",
                                        "OUTPUT_NOT_ALLOWED", "STATUS_PENDING",
                                        "INTERNAL_ERROR", "USABLE_IN_FUTURE"};
-    if (((size_t)type) >= arraysize(type_names)) {
-        return "UNKNOWN_TYPE";
+        if (attribute >= arraysize(type_names)) {
+            return "UNKNOWN_TYPE";
+        }
+        return type_names[attribute];
     }
-    return type_names[(size_t)type];
-}
-
-template <> std::string GetAttributeName<EventType>(EventType type) {
+
     static const char *type_names[] = {"PROVISION_REQUIRED", "KEY_NEEDED",
                                        "KEY_EXPIRED", "VENDOR_DEFINED",
                                        "SESSION_RECLAIMED"};
-    if (((size_t)type) >= arraysize(type_names)) {
+    if (attribute >= arraysize(type_names)) {
         return "UNKNOWN_TYPE";
     }
-    return type_names[(size_t)type];
+    return type_names[attribute];
 }
 
 template <typename T>
@@ -87,14 +85,14 @@
 
 template <typename T>
 void ExportCounterMetricWithAttributeNames(
-    const android::CounterMetric<T> &counter, PersistableBundle *metrics) {
+    const android::CounterMetric<T> &counter, std::string typeName, PersistableBundle *metrics) {
     if (!metrics) {
         ALOGE("metrics was unexpectedly null.");
         return;
     }
-    counter.ExportValues([&](const T &attribute, const int64_t value) {
+    counter.ExportValues([&](const uint32_t attribute, const int64_t value) {
         std::string name = counter.metric_name() + "." +
-                           GetAttributeName(attribute) + ".count";
+                           GetAttributeName(typeName, attribute) + ".count";
         metrics->putLong(android::String16(name.c_str()), value);
     });
 }
@@ -196,8 +194,8 @@
     ExportEventMetric(metrics.mProvideKeyResponseTimeUs, mBundle);
     ExportCounterMetric(metrics.mGetProvisionRequestCounter, mBundle);
     ExportCounterMetric(metrics.mProvideProvisionResponseCounter, mBundle);
-    ExportCounterMetricWithAttributeNames(metrics.mKeyStatusChangeCounter, mBundle);
-    ExportCounterMetricWithAttributeNames(metrics.mEventCounter, mBundle);
+    ExportCounterMetricWithAttributeNames(metrics.mKeyStatusChangeCounter, "KeyStatusChange", mBundle);
+    ExportCounterMetricWithAttributeNames(metrics.mEventCounter, "Event", mBundle);
     ExportCounterMetric(metrics.mGetDeviceUniqueIdCounter, mBundle);
     ExportSessionLifespans(metrics.GetSessionLifespans(), mBundle);
     return android::OK;
diff --git a/drm/libmediadrm/DrmUtils.cpp b/drm/libmediadrm/DrmUtils.cpp
index 0b117a3..bca292d 100644
--- a/drm/libmediadrm/DrmUtils.cpp
+++ b/drm/libmediadrm/DrmUtils.cpp
@@ -32,10 +32,10 @@
 #include <android/hidl/manager/1.2/IServiceManager.h>
 #include <hidl/HidlSupport.h>
 
+#include <cutils/properties.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/String16.h>
-#include <cutils/properties.h>
 
 #include <mediadrm/CryptoHal.h>
 #include <mediadrm/DrmHal.h>
@@ -57,10 +57,10 @@
 
 namespace {
 
-template<typename Hal>
-Hal *MakeObject(status_t *pstatus) {
+template <typename Hal>
+Hal* MakeObject(status_t* pstatus) {
     status_t err = OK;
-    status_t &status = pstatus ? *pstatus : err;
+    status_t& status = pstatus ? *pstatus : err;
     auto obj = new Hal();
     status = obj->initCheck();
     if (status != OK && status != NO_INIT) {
@@ -70,43 +70,44 @@
 }
 
 template <typename Hal, typename V, typename M>
-void MakeHidlFactories(const uint8_t uuid[16], V &factories, M& instances) {
+void MakeHidlFactories(const uint8_t uuid[16], V& factories, M& instances) {
     sp<HServiceManager> serviceManager = HServiceManager::getService();
     if (serviceManager == nullptr) {
         LOG2BE("Failed to get service manager");
         return;
     }
 
-    serviceManager->listManifestByInterface(Hal::descriptor, [&](const hidl_vec<hidl_string> &registered) {
-        for (const auto &instance : registered) {
-            auto factory = Hal::getService(instance);
-            if (factory != nullptr) {
-                instances[instance.c_str()] = Hal::descriptor;
-                if (!uuid) {
-                    factories.push_back(factory);
-                    continue;
+    serviceManager->listManifestByInterface(
+            Hal::descriptor, [&](const hidl_vec<hidl_string>& registered) {
+                for (const auto& instance : registered) {
+                    auto factory = Hal::getService(instance);
+                    if (factory != nullptr) {
+                        instances[instance.c_str()] = Hal::descriptor;
+                        if (!uuid) {
+                            factories.push_back(factory);
+                            continue;
+                        }
+                        auto supported = factory->isCryptoSchemeSupported(uuid);
+                        if (!supported.isOk()) {
+                            LOG2BE(uuid, "isCryptoSchemeSupported txn failed: %s",
+                                   supported.description().c_str());
+                            continue;
+                        }
+                        if (supported) {
+                            factories.push_back(factory);
+                        }
+                    }
                 }
-                auto supported = factory->isCryptoSchemeSupported(uuid);
-                if (!supported.isOk()) {
-                    LOG2BE(uuid, "isCryptoSchemeSupported txn failed: %s",
-                           supported.description().c_str());
-                    continue;
-                }
-                if (supported) {
-                    factories.push_back(factory);
-                }
-            }
-        }
-    });
+            });
 }
 
 template <typename Hal, typename V>
-void MakeHidlFactories(const uint8_t uuid[16], V &factories) {
+void MakeHidlFactories(const uint8_t uuid[16], V& factories) {
     std::map<std::string, std::string> instances;
     MakeHidlFactories<Hal>(uuid, factories, instances);
 }
 
-hidl_vec<uint8_t> toHidlVec(const void *ptr, size_t size) {
+hidl_vec<uint8_t> toHidlVec(const void* ptr, size_t size) {
     hidl_vec<uint8_t> vec(size);
     if (ptr != nullptr) {
         memcpy(vec.data(), ptr, size);
@@ -114,19 +115,19 @@
     return vec;
 }
 
-hidl_array<uint8_t, 16> toHidlArray16(const uint8_t *ptr) {
+hidl_array<uint8_t, 16> toHidlArray16(const uint8_t* ptr) {
     if (ptr == nullptr) {
         return hidl_array<uint8_t, 16>();
     }
     return hidl_array<uint8_t, 16>(ptr);
 }
 
-sp<::V1_0::IDrmPlugin> MakeDrmPlugin(const sp<::V1_0::IDrmFactory> &factory,
-                                     const uint8_t uuid[16], const char *appPackageName) {
+sp<::V1_0::IDrmPlugin> MakeDrmPlugin(const sp<::V1_0::IDrmFactory>& factory, const uint8_t uuid[16],
+                                     const char* appPackageName) {
     sp<::V1_0::IDrmPlugin> plugin;
     auto err = factory->createPlugin(
             toHidlArray16(uuid), hidl_string(appPackageName),
-            [&](::V1_0::Status status, const sp<::V1_0::IDrmPlugin> &hPlugin) {
+            [&](::V1_0::Status status, const sp<::V1_0::IDrmPlugin>& hPlugin) {
                 if (status != ::V1_0::Status::OK) {
                     LOG2BE(uuid, "MakeDrmPlugin failed: %d", status);
                     return;
@@ -141,13 +142,13 @@
     }
 }
 
-sp<::V1_0::ICryptoPlugin> MakeCryptoPlugin(const sp<::V1_0::ICryptoFactory> &factory,
-                                           const uint8_t uuid[16], const void *initData,
+sp<::V1_0::ICryptoPlugin> MakeCryptoPlugin(const sp<::V1_0::ICryptoFactory>& factory,
+                                           const uint8_t uuid[16], const void* initData,
                                            size_t initDataSize) {
     sp<::V1_0::ICryptoPlugin> plugin;
     auto err = factory->createPlugin(
             toHidlArray16(uuid), toHidlVec(initData, initDataSize),
-            [&](::V1_0::Status status, const sp<::V1_0::ICryptoPlugin> &hPlugin) {
+            [&](::V1_0::Status status, const sp<::V1_0::ICryptoPlugin>& hPlugin) {
                 if (status != ::V1_0::Status::OK) {
                     LOG2BE(uuid, "MakeCryptoPlugin failed: %d", status);
                     return;
@@ -162,17 +163,17 @@
     }
 }
 
-} // namespace
+}  // namespace
 
 bool UseDrmService() {
     return property_get_bool("mediadrm.use_mediadrmserver", true);
 }
 
-sp<IDrm> MakeDrm(status_t *pstatus) {
+sp<IDrm> MakeDrm(status_t* pstatus) {
     return MakeObject<DrmHal>(pstatus);
 }
 
-sp<ICrypto> MakeCrypto(status_t *pstatus) {
+sp<ICrypto> MakeCrypto(status_t* pstatus) {
     return MakeObject<CryptoHal>(pstatus);
 }
 
@@ -191,9 +192,9 @@
 }
 
 std::vector<sp<::V1_0::IDrmPlugin>> MakeDrmPlugins(const uint8_t uuid[16],
-                                              const char *appPackageName) {
+                                                   const char* appPackageName) {
     std::vector<sp<::V1_0::IDrmPlugin>> plugins;
-    for (const auto &factory : MakeDrmFactories(uuid)) {
+    for (const auto& factory : MakeDrmFactories(uuid)) {
         plugins.push_back(MakeDrmPlugin(factory, uuid, appPackageName));
     }
     return plugins;
@@ -209,10 +210,11 @@
     return cryptoFactories;
 }
 
-std::vector<sp<ICryptoPlugin>> MakeCryptoPlugins(const uint8_t uuid[16], const void *initData,
-                                                 size_t initDataSize) {
-    std::vector<sp<ICryptoPlugin>> plugins;
-    for (const auto &factory : MakeCryptoFactories(uuid)) {
+std::vector<sp<::V1_0::ICryptoPlugin>> MakeCryptoPlugins(const uint8_t uuid[16],
+                                                         const void* initData,
+                                                         size_t initDataSize) {
+    std::vector<sp<::V1_0::ICryptoPlugin>> plugins;
+    for (const auto& factory : MakeCryptoFactories(uuid)) {
         plugins.push_back(MakeCryptoPlugin(factory, uuid, initData, initDataSize));
     }
     return plugins;
@@ -220,90 +222,90 @@
 
 status_t toStatusT_1_4(::V1_4::Status status) {
     switch (status) {
-    case ::V1_4::Status::OK:
-        return OK;
-    case ::V1_4::Status::BAD_VALUE:
-        return BAD_VALUE;
-    case ::V1_4::Status::ERROR_DRM_CANNOT_HANDLE:
-        return ERROR_DRM_CANNOT_HANDLE;
-    case ::V1_4::Status::ERROR_DRM_DECRYPT:
-        return ERROR_DRM_DECRYPT;
-    case ::V1_4::Status::ERROR_DRM_DEVICE_REVOKED:
-        return ERROR_DRM_DEVICE_REVOKED;
-    case ::V1_4::Status::ERROR_DRM_FRAME_TOO_LARGE:
-        return ERROR_DRM_FRAME_TOO_LARGE;
-    case ::V1_4::Status::ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION:
-        return ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION;
-    case ::V1_4::Status::ERROR_DRM_INSUFFICIENT_SECURITY:
-        return ERROR_DRM_INSUFFICIENT_SECURITY;
-    case ::V1_4::Status::ERROR_DRM_INVALID_STATE:
-        return ERROR_DRM_INVALID_STATE;
-    case ::V1_4::Status::ERROR_DRM_LICENSE_EXPIRED:
-        return ERROR_DRM_LICENSE_EXPIRED;
-    case ::V1_4::Status::ERROR_DRM_NO_LICENSE:
-        return ERROR_DRM_NO_LICENSE;
-    case ::V1_4::Status::ERROR_DRM_NOT_PROVISIONED:
-        return ERROR_DRM_NOT_PROVISIONED;
-    case ::V1_4::Status::ERROR_DRM_RESOURCE_BUSY:
-        return ERROR_DRM_RESOURCE_BUSY;
-    case ::V1_4::Status::ERROR_DRM_RESOURCE_CONTENTION:
-        return ERROR_DRM_RESOURCE_CONTENTION;
-    case ::V1_4::Status::ERROR_DRM_SESSION_LOST_STATE:
-        return ERROR_DRM_SESSION_LOST_STATE;
-    case ::V1_4::Status::ERROR_DRM_SESSION_NOT_OPENED:
-        return ERROR_DRM_SESSION_NOT_OPENED;
+        case ::V1_4::Status::OK:
+            return OK;
+        case ::V1_4::Status::BAD_VALUE:
+            return BAD_VALUE;
+        case ::V1_4::Status::ERROR_DRM_CANNOT_HANDLE:
+            return ERROR_DRM_CANNOT_HANDLE;
+        case ::V1_4::Status::ERROR_DRM_DECRYPT:
+            return ERROR_DRM_DECRYPT;
+        case ::V1_4::Status::ERROR_DRM_DEVICE_REVOKED:
+            return ERROR_DRM_DEVICE_REVOKED;
+        case ::V1_4::Status::ERROR_DRM_FRAME_TOO_LARGE:
+            return ERROR_DRM_FRAME_TOO_LARGE;
+        case ::V1_4::Status::ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION:
+            return ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION;
+        case ::V1_4::Status::ERROR_DRM_INSUFFICIENT_SECURITY:
+            return ERROR_DRM_INSUFFICIENT_SECURITY;
+        case ::V1_4::Status::ERROR_DRM_INVALID_STATE:
+            return ERROR_DRM_INVALID_STATE;
+        case ::V1_4::Status::ERROR_DRM_LICENSE_EXPIRED:
+            return ERROR_DRM_LICENSE_EXPIRED;
+        case ::V1_4::Status::ERROR_DRM_NO_LICENSE:
+            return ERROR_DRM_NO_LICENSE;
+        case ::V1_4::Status::ERROR_DRM_NOT_PROVISIONED:
+            return ERROR_DRM_NOT_PROVISIONED;
+        case ::V1_4::Status::ERROR_DRM_RESOURCE_BUSY:
+            return ERROR_DRM_RESOURCE_BUSY;
+        case ::V1_4::Status::ERROR_DRM_RESOURCE_CONTENTION:
+            return ERROR_DRM_RESOURCE_CONTENTION;
+        case ::V1_4::Status::ERROR_DRM_SESSION_LOST_STATE:
+            return ERROR_DRM_SESSION_LOST_STATE;
+        case ::V1_4::Status::ERROR_DRM_SESSION_NOT_OPENED:
+            return ERROR_DRM_SESSION_NOT_OPENED;
 
-    // New in S / drm@1.4:
-    case ::V1_4::Status::CANNOT_DECRYPT_ZERO_SUBSAMPLES:
-        return ERROR_DRM_ZERO_SUBSAMPLES;
-    case ::V1_4::Status::CRYPTO_LIBRARY_ERROR:
-        return ERROR_DRM_CRYPTO_LIBRARY;
-    case ::V1_4::Status::GENERAL_OEM_ERROR:
-        return ERROR_DRM_GENERIC_OEM;
-    case ::V1_4::Status::GENERAL_PLUGIN_ERROR:
-        return ERROR_DRM_GENERIC_PLUGIN;
-    case ::V1_4::Status::INIT_DATA_INVALID:
-        return ERROR_DRM_INIT_DATA;
-    case ::V1_4::Status::KEY_NOT_LOADED:
-        return ERROR_DRM_KEY_NOT_LOADED;
-    case ::V1_4::Status::LICENSE_PARSE_ERROR:
-        return ERROR_DRM_LICENSE_PARSE;
-    case ::V1_4::Status::LICENSE_POLICY_ERROR:
-        return ERROR_DRM_LICENSE_POLICY;
-    case ::V1_4::Status::LICENSE_RELEASE_ERROR:
-        return ERROR_DRM_LICENSE_RELEASE;
-    case ::V1_4::Status::LICENSE_REQUEST_REJECTED:
-        return ERROR_DRM_LICENSE_REQUEST_REJECTED;
-    case ::V1_4::Status::LICENSE_RESTORE_ERROR:
-        return ERROR_DRM_LICENSE_RESTORE;
-    case ::V1_4::Status::LICENSE_STATE_ERROR:
-        return ERROR_DRM_LICENSE_STATE;
-    case ::V1_4::Status::MALFORMED_CERTIFICATE:
-        return ERROR_DRM_CERTIFICATE_MALFORMED;
-    case ::V1_4::Status::MEDIA_FRAMEWORK_ERROR:
-        return ERROR_DRM_MEDIA_FRAMEWORK;
-    case ::V1_4::Status::MISSING_CERTIFICATE:
-        return ERROR_DRM_CERTIFICATE_MISSING;
-    case ::V1_4::Status::PROVISIONING_CERTIFICATE_ERROR:
-        return ERROR_DRM_PROVISIONING_CERTIFICATE;
-    case ::V1_4::Status::PROVISIONING_CONFIGURATION_ERROR:
-        return ERROR_DRM_PROVISIONING_CONFIG;
-    case ::V1_4::Status::PROVISIONING_PARSE_ERROR:
-        return ERROR_DRM_PROVISIONING_PARSE;
-    case ::V1_4::Status::PROVISIONING_REQUEST_REJECTED:
-        return ERROR_DRM_PROVISIONING_REQUEST_REJECTED;
-    case ::V1_4::Status::RETRYABLE_PROVISIONING_ERROR:
-        return ERROR_DRM_PROVISIONING_RETRY;
-    case ::V1_4::Status::SECURE_STOP_RELEASE_ERROR:
-        return ERROR_DRM_SECURE_STOP_RELEASE;
-    case ::V1_4::Status::STORAGE_READ_FAILURE:
-        return ERROR_DRM_STORAGE_READ;
-    case ::V1_4::Status::STORAGE_WRITE_FAILURE:
-        return ERROR_DRM_STORAGE_WRITE;
+        // New in S / drm@1.4:
+        case ::V1_4::Status::CANNOT_DECRYPT_ZERO_SUBSAMPLES:
+            return ERROR_DRM_ZERO_SUBSAMPLES;
+        case ::V1_4::Status::CRYPTO_LIBRARY_ERROR:
+            return ERROR_DRM_CRYPTO_LIBRARY;
+        case ::V1_4::Status::GENERAL_OEM_ERROR:
+            return ERROR_DRM_GENERIC_OEM;
+        case ::V1_4::Status::GENERAL_PLUGIN_ERROR:
+            return ERROR_DRM_GENERIC_PLUGIN;
+        case ::V1_4::Status::INIT_DATA_INVALID:
+            return ERROR_DRM_INIT_DATA;
+        case ::V1_4::Status::KEY_NOT_LOADED:
+            return ERROR_DRM_KEY_NOT_LOADED;
+        case ::V1_4::Status::LICENSE_PARSE_ERROR:
+            return ERROR_DRM_LICENSE_PARSE;
+        case ::V1_4::Status::LICENSE_POLICY_ERROR:
+            return ERROR_DRM_LICENSE_POLICY;
+        case ::V1_4::Status::LICENSE_RELEASE_ERROR:
+            return ERROR_DRM_LICENSE_RELEASE;
+        case ::V1_4::Status::LICENSE_REQUEST_REJECTED:
+            return ERROR_DRM_LICENSE_REQUEST_REJECTED;
+        case ::V1_4::Status::LICENSE_RESTORE_ERROR:
+            return ERROR_DRM_LICENSE_RESTORE;
+        case ::V1_4::Status::LICENSE_STATE_ERROR:
+            return ERROR_DRM_LICENSE_STATE;
+        case ::V1_4::Status::MALFORMED_CERTIFICATE:
+            return ERROR_DRM_CERTIFICATE_MALFORMED;
+        case ::V1_4::Status::MEDIA_FRAMEWORK_ERROR:
+            return ERROR_DRM_MEDIA_FRAMEWORK;
+        case ::V1_4::Status::MISSING_CERTIFICATE:
+            return ERROR_DRM_CERTIFICATE_MISSING;
+        case ::V1_4::Status::PROVISIONING_CERTIFICATE_ERROR:
+            return ERROR_DRM_PROVISIONING_CERTIFICATE;
+        case ::V1_4::Status::PROVISIONING_CONFIGURATION_ERROR:
+            return ERROR_DRM_PROVISIONING_CONFIG;
+        case ::V1_4::Status::PROVISIONING_PARSE_ERROR:
+            return ERROR_DRM_PROVISIONING_PARSE;
+        case ::V1_4::Status::PROVISIONING_REQUEST_REJECTED:
+            return ERROR_DRM_PROVISIONING_REQUEST_REJECTED;
+        case ::V1_4::Status::RETRYABLE_PROVISIONING_ERROR:
+            return ERROR_DRM_PROVISIONING_RETRY;
+        case ::V1_4::Status::SECURE_STOP_RELEASE_ERROR:
+            return ERROR_DRM_SECURE_STOP_RELEASE;
+        case ::V1_4::Status::STORAGE_READ_FAILURE:
+            return ERROR_DRM_STORAGE_READ;
+        case ::V1_4::Status::STORAGE_WRITE_FAILURE:
+            return ERROR_DRM_STORAGE_WRITE;
 
-    case ::V1_4::Status::ERROR_DRM_UNKNOWN:
-    default:
-        return ERROR_DRM_UNKNOWN;
+        case ::V1_4::Status::ERROR_DRM_UNKNOWN:
+        default:
+            return ERROR_DRM_UNKNOWN;
     }
     return ERROR_DRM_UNKNOWN;
 }
@@ -312,20 +314,34 @@
 char logPriorityToChar(::V1_4::LogPriority priority) {
     char p = 'U';
     switch (priority) {
-        case ::V1_4::LogPriority::VERBOSE:  p = 'V'; break;
-        case ::V1_4::LogPriority::DEBUG:    p = 'D'; break;
-        case ::V1_4::LogPriority::INFO:     p = 'I'; break;
-        case ::V1_4::LogPriority::WARN:     p = 'W'; break;
-        case ::V1_4::LogPriority::ERROR:    p = 'E'; break;
-        case ::V1_4::LogPriority::FATAL:    p = 'F'; break;
-        default: p = 'U'; break;
+        case ::V1_4::LogPriority::VERBOSE:
+            p = 'V';
+            break;
+        case ::V1_4::LogPriority::DEBUG:
+            p = 'D';
+            break;
+        case ::V1_4::LogPriority::INFO:
+            p = 'I';
+            break;
+        case ::V1_4::LogPriority::WARN:
+            p = 'W';
+            break;
+        case ::V1_4::LogPriority::ERROR:
+            p = 'E';
+            break;
+        case ::V1_4::LogPriority::FATAL:
+            p = 'F';
+            break;
+        default:
+            p = 'U';
+            break;
     }
     return p;
 }
 }  // namespace
 
-std::string GetExceptionMessage(status_t err, const char *msg,
-                                const Vector<::V1_4::LogMessage> &logs) {
+std::string GetExceptionMessage(status_t err, const char* msg,
+                                const Vector<::V1_4::LogMessage>& logs) {
     std::string ruler("==============================");
     std::string header("Beginning of DRM Plugin Log");
     std::string footer("End of DRM Plugin Log");
@@ -355,7 +371,7 @@
     return msg8.c_str();
 }
 
-void LogBuffer::addLog(const ::V1_4::LogMessage &log) {
+void LogBuffer::addLog(const ::V1_4::LogMessage& log) {
     std::unique_lock<std::mutex> lock(mMutex);
     mBuffer.push_back(log);
     while (mBuffer.size() > MAX_CAPACITY) {
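For orientation, a minimal caller-side sketch of how the helpers reformatted above fit together; the enclosing function, namespace qualification, and the literal values are assumptions, not part of this change:

    // Sketch only: relies on the types and helpers visible in the hunk above.
    static std::string buildDrmErrorDetail() {
        Vector<::V1_4::LogMessage> logs;
        ::V1_4::LogMessage entry;
        entry.timeMs = 1000;                          // hypothetical timestamp, in ms
        entry.priority = ::V1_4::LogPriority::ERROR;  // rendered as 'E' by logPriorityToChar()
        entry.message = "license request rejected";   // hypothetical plugin log line
        logs.push_back(entry);
        // GetExceptionMessage() combines the caller's status/message with the
        // collected entries between the "Beginning/End of DRM Plugin Log" rulers.
        return GetExceptionMessage(ERROR_DRM_NO_LICENSE, "provideKeyResponse failed", logs);
    }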
diff --git a/drm/libmediadrm/fuzzer/Android.bp b/drm/libmediadrm/fuzzer/Android.bp
index 7281066..a85e3cf 100644
--- a/drm/libmediadrm/fuzzer/Android.bp
+++ b/drm/libmediadrm/fuzzer/Android.bp
@@ -35,7 +35,8 @@
     static_libs: [
         "libmediadrm",
         "liblog",
-        "resourcemanager_aidl_interface-ndk_platform",
+        "resourcemanager_aidl_interface-ndk",
+        "libaidlcommonsupport",
     ],
     header_libs: [
         "libmedia_headers",
@@ -59,6 +60,7 @@
         "android.hardware.drm@1.4",
         "libhidlallocatorutils",
         "libhidlbase",
+        "android.hardware.drm-V1-ndk",
     ],
     fuzz_config: {
         cc: [
diff --git a/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp b/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
index 8df0477..eabd41f 100644
--- a/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
+++ b/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
@@ -24,6 +24,8 @@
 #include <mediadrm/DrmHal.h>
 #include <utils/String8.h>
 #include "fuzzer/FuzzedDataProvider.h"
+#include <binder/PersistableBundle.h>
+#include <android/hardware/drm/1.0/types.h>
 
 #define AES_BLOCK_SIZE 16
 #define UNUSED_PARAM __attribute__((unused))
@@ -33,6 +35,7 @@
 using android::hardware::fromHeap;
 using ::android::os::PersistableBundle;
 using drm::V1_0::BufferType;
+using ::android::hardware::drm::V1_0::DestinationBuffer;
 
 enum {
     INVALID_UUID = 0,
diff --git a/drm/libmediadrm/include/mediadrm/CryptoHal.h b/drm/libmediadrm/include/mediadrm/CryptoHal.h
index 5fd39e6..5be59f0 100644
--- a/drm/libmediadrm/include/mediadrm/CryptoHal.h
+++ b/drm/libmediadrm/include/mediadrm/CryptoHal.h
@@ -15,25 +15,12 @@
  */
 
 #ifndef CRYPTO_HAL_H_
-
 #define CRYPTO_HAL_H_
 
-#include <android/hardware/drm/1.0/ICryptoFactory.h>
-#include <android/hardware/drm/1.0/ICryptoPlugin.h>
-#include <android/hardware/drm/1.1/ICryptoFactory.h>
-#include <android/hardware/drm/1.2/ICryptoPlugin.h>
-#include <android/hardware/drm/1.4/ICryptoPlugin.h>
-
 #include <mediadrm/ICrypto.h>
 #include <utils/KeyedVector.h>
 #include <utils/threads.h>
 
-namespace drm = ::android::hardware::drm;
-using drm::V1_0::ICryptoFactory;
-using drm::V1_0::ICryptoPlugin;
-using drm::V1_0::SharedBuffer;
-using drm::V1_0::DestinationBuffer;
-
 using ::android::hardware::HidlMemory;
 
 class IMemoryHeap;
@@ -43,67 +30,30 @@
 struct CryptoHal : public ICrypto {
     CryptoHal();
     virtual ~CryptoHal();
-
     virtual status_t initCheck() const;
-
     virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]);
-
     virtual status_t createPlugin(
             const uint8_t uuid[16], const void *data, size_t size);
-
     virtual status_t destroyPlugin();
-
     virtual bool requiresSecureDecoderComponent(
             const char *mime) const;
-
     virtual void notifyResolution(uint32_t width, uint32_t height);
-
     virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
-
     virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
             CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
-            const ::SharedBuffer &source, size_t offset,
+            const drm::V1_0::SharedBuffer &source, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
-            const ::DestinationBuffer &destination,
+            const drm::V1_0::DestinationBuffer &destination,
             AString *errorDetailMsg);
-
-    virtual int32_t setHeap(const sp<HidlMemory>& heap) {
-        return setHeapBase(heap);
-    }
-    virtual void unsetHeap(int32_t seqNum) { clearHeapBase(seqNum); }
-
+    virtual int32_t setHeap(const sp<HidlMemory>& heap);
+    virtual void unsetHeap(int32_t seqNum);
     virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
-
 private:
-    mutable Mutex mLock;
-
-    const Vector<sp<ICryptoFactory>> mFactories;
-    sp<ICryptoPlugin> mPlugin;
-    sp<drm::V1_2::ICryptoPlugin> mPluginV1_2;
-
-    /**
-     * mInitCheck is:
-     *   NO_INIT if a plugin hasn't been created yet
-     *   ERROR_UNSUPPORTED if a plugin can't be created for the uuid
-     *   OK after a plugin has been created and mPlugin is valid
-     */
-    status_t mInitCheck;
-
-    KeyedVector<int32_t, size_t> mHeapSizes;
-    int32_t mHeapSeqNum;
-
-    Vector<sp<ICryptoFactory>> makeCryptoFactories();
-    sp<ICryptoPlugin> makeCryptoPlugin(const sp<ICryptoFactory>& factory,
-            const uint8_t uuid[16], const void *initData, size_t size);
-
-    int32_t setHeapBase(const sp<HidlMemory>& heap);
-    void clearHeapBase(int32_t seqNum);
-
-    status_t checkSharedBuffer(const ::SharedBuffer& buffer);
-
+    sp<ICrypto> mCryptoHalHidl;
+    sp<ICrypto> mCryptoHalAidl;
     DISALLOW_EVIL_CONSTRUCTORS(CryptoHal);
 };
 
-}  // namespace android
+}
 
-#endif  // CRYPTO_HAL_H_
+#endif
\ No newline at end of file
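CryptoHal is now a facade that owns an AIDL-backed and a HIDL-backed ICrypto (mCryptoHalAidl / mCryptoHalHidl). CryptoHal.cpp is not part of this excerpt, so the delegation below is only a plausible sketch under that assumption, not the actual implementation:

    // Hypothetical delegation: try the AIDL backend first, fall back to HIDL.
    status_t CryptoHal::createPlugin(const uint8_t uuid[16], const void* data, size_t size) {
        if (mCryptoHalAidl->createPlugin(uuid, data, size) == OK) {
            return OK;
        }
        return mCryptoHalHidl->createPlugin(uuid, data, size);
    }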
diff --git a/drm/libmediadrm/include/mediadrm/CryptoHalAidl.h b/drm/libmediadrm/include/mediadrm/CryptoHalAidl.h
new file mode 100644
index 0000000..a25b091
--- /dev/null
+++ b/drm/libmediadrm/include/mediadrm/CryptoHalAidl.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CRYPTO_HAL_AIDL_H_
+#define CRYPTO_HAL_AIDL_H_
+
+#include <aidl/android/hardware/drm/ICryptoFactory.h>
+#include <aidl/android/hardware/drm/ICryptoPlugin.h>
+
+#include <mediadrm/ICrypto.h>
+#include <utils/KeyedVector.h>
+#include <utils/threads.h>
+
+using ICryptoFactoryAidl = ::aidl::android::hardware::drm::ICryptoFactory;
+using ICryptoPluginAidl = ::aidl::android::hardware::drm::ICryptoPlugin;
+using ::aidl::android::hardware::drm::Uuid;
+
+// -------Hidl interface related-----------------
+// TODO: replace these HIDL types before the HIDL interface is removed
+using ::android::hardware::drm::V1_0::DestinationBuffer;
+using ::android::hardware::drm::V1_0::SharedBuffer;
+
+using ::android::hardware::HidlMemory;
+
+// -------Hidl interface related end-------------
+
+class IMemoryHeap;
+
+namespace android {
+
+struct CryptoHalAidl : public ICrypto {
+    CryptoHalAidl();
+    virtual ~CryptoHalAidl();
+    virtual status_t initCheck() const;
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]);
+    virtual status_t createPlugin(const uint8_t uuid[16], const void* data, size_t size);
+    virtual status_t destroyPlugin();
+    virtual bool requiresSecureDecoderComponent(const char* mime) const;
+    virtual void notifyResolution(uint32_t width, uint32_t height);
+    virtual status_t setMediaDrmSession(const Vector<uint8_t>& sessionId);
+    virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16], CryptoPlugin::Mode mode,
+                            const CryptoPlugin::Pattern& pattern, const ::SharedBuffer& source,
+                            size_t offset, const CryptoPlugin::SubSample* subSamples,
+                            size_t numSubSamples, const ::DestinationBuffer& destination,
+                            AString* errorDetailMsg);
+    virtual int32_t setHeap(const sp<HidlMemory>& heap);
+    virtual void unsetHeap(int32_t seqNum);
+    virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const;
+
+  private:
+    mutable Mutex mLock;
+
+    const std::vector<std::shared_ptr<ICryptoFactoryAidl>> mFactories;
+    std::shared_ptr<ICryptoPluginAidl> mPlugin;
+
+    /**
+     * mInitCheck is:
+     *   NO_INIT if a plugin hasn't been created yet
+     *   ERROR_UNSUPPORTED if a plugin can't be created for the uuid
+     *   OK after a plugin has been created and mPlugin is valid
+     */
+    status_t mInitCheck;
+
+    KeyedVector<int32_t, size_t> mHeapSizes;
+    int32_t mHeapSeqNum;
+
+    std::vector<std::shared_ptr<ICryptoFactoryAidl>> makeCryptoFactories();
+    std::shared_ptr<ICryptoPluginAidl> makeCryptoPlugin(
+            const std::shared_ptr<ICryptoFactoryAidl>& factory, const Uuid& uuidAidl,
+            const std::vector<uint8_t> initData);
+
+    status_t checkSharedBuffer(const ::SharedBuffer& buffer);
+
+    DISALLOW_EVIL_CONSTRUCTORS(CryptoHalAidl);
+};
+
+}  // namespace android
+
+#endif  // CRYPTO_HAL_AIDL_H_
diff --git a/drm/libmediadrm/include/mediadrm/CryptoHalHidl.h b/drm/libmediadrm/include/mediadrm/CryptoHalHidl.h
new file mode 100644
index 0000000..6db1e89
--- /dev/null
+++ b/drm/libmediadrm/include/mediadrm/CryptoHalHidl.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CRYPTO_HAL_HIDL_H_
+#define CRYPTO_HAL_HIDL_H_
+
+#include <android/hardware/drm/1.0/ICryptoFactory.h>
+#include <android/hardware/drm/1.0/ICryptoPlugin.h>
+#include <android/hardware/drm/1.1/ICryptoFactory.h>
+#include <android/hardware/drm/1.2/ICryptoPlugin.h>
+#include <android/hardware/drm/1.4/ICryptoPlugin.h>
+
+#include <mediadrm/ICrypto.h>
+#include <utils/KeyedVector.h>
+#include <utils/threads.h>
+
+namespace drm = ::android::hardware::drm;
+using drm::V1_0::ICryptoFactory;
+using drm::V1_0::ICryptoPlugin;
+using drm::V1_0::SharedBuffer;
+using drm::V1_0::DestinationBuffer;
+
+using ::android::hardware::HidlMemory;
+
+class IMemoryHeap;
+
+namespace android {
+
+struct CryptoHalHidl : public ICrypto {
+    CryptoHalHidl();
+    virtual ~CryptoHalHidl();
+
+    virtual status_t initCheck() const;
+
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]);
+
+    virtual status_t createPlugin(
+            const uint8_t uuid[16], const void *data, size_t size);
+
+    virtual status_t destroyPlugin();
+
+    virtual bool requiresSecureDecoderComponent(
+            const char *mime) const;
+
+    virtual void notifyResolution(uint32_t width, uint32_t height);
+
+    virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
+
+    virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
+            CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+            const ::SharedBuffer &source, size_t offset,
+            const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+            const ::DestinationBuffer &destination,
+            AString *errorDetailMsg);
+
+    virtual int32_t setHeap(const sp<HidlMemory>& heap) {
+        return setHeapBase(heap);
+    }
+    virtual void unsetHeap(int32_t seqNum) { clearHeapBase(seqNum); }
+
+    virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
+
+private:
+    mutable Mutex mLock;
+
+    const Vector<sp<ICryptoFactory>> mFactories;
+    sp<ICryptoPlugin> mPlugin;
+    sp<drm::V1_2::ICryptoPlugin> mPluginV1_2;
+
+    /**
+     * mInitCheck is:
+     *   NO_INIT if a plugin hasn't been created yet
+     *   ERROR_UNSUPPORTED if a plugin can't be created for the uuid
+     *   OK after a plugin has been created and mPlugin is valid
+     */
+    status_t mInitCheck;
+
+    KeyedVector<int32_t, size_t> mHeapSizes;
+    int32_t mHeapSeqNum;
+
+    Vector<sp<ICryptoFactory>> makeCryptoFactories();
+    sp<ICryptoPlugin> makeCryptoPlugin(const sp<ICryptoFactory>& factory,
+            const uint8_t uuid[16], const void *initData, size_t size);
+
+    int32_t setHeapBase(const sp<HidlMemory>& heap);
+    void clearHeapBase(int32_t seqNum);
+
+    status_t checkSharedBuffer(const ::SharedBuffer& buffer);
+
+    DISALLOW_EVIL_CONSTRUCTORS(CryptoHalHidl);
+};
+
+}  // namespace android
+
+#endif  // CRYPTO_HAL_HIDL_H_
diff --git a/drm/libmediadrm/include/mediadrm/DrmHal.h b/drm/libmediadrm/include/mediadrm/DrmHal.h
index 7eb1dec..bb58585 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHal.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHal.h
@@ -14,77 +14,27 @@
  * limitations under the License.
  */
 
-#ifndef DRM_HAL_H_
-
-#define DRM_HAL_H_
-
-#include <android/hardware/drm/1.0/IDrmFactory.h>
-#include <android/hardware/drm/1.0/IDrmPlugin.h>
-#include <android/hardware/drm/1.1/IDrmFactory.h>
-#include <android/hardware/drm/1.1/IDrmPlugin.h>
-#include <android/hardware/drm/1.2/IDrmFactory.h>
-#include <android/hardware/drm/1.2/IDrmPlugin.h>
-#include <android/hardware/drm/1.2/IDrmPluginListener.h>
-#include <android/hardware/drm/1.4/IDrmPlugin.h>
-#include <android/hardware/drm/1.4/types.h>
-
-#include <media/drm/DrmAPI.h>
-#include <mediadrm/DrmMetrics.h>
-#include <mediadrm/DrmSessionManager.h>
 #include <mediadrm/IDrm.h>
-#include <mediadrm/IDrmClient.h>
-#include <mediadrm/IDrmMetricsConsumer.h>
-#include <utils/threads.h>
 
-namespace drm = ::android::hardware::drm;
-using drm::V1_0::EventType;
-using drm::V1_0::IDrmFactory;
-using drm::V1_0::IDrmPlugin;
-using drm::V1_0::IDrmPluginListener;
-using drm::V1_1::SecurityLevel;
-using drm::V1_2::KeyStatus;
-using drm::V1_2::OfflineLicenseState;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-
-typedef drm::V1_2::IDrmPluginListener IDrmPluginListener_V1_2;
-typedef drm::V1_0::KeyStatus KeyStatus_V1_0;
+#ifndef DRM_HAL_H_
+#define DRM_HAL_H_
 
 namespace android {
 
-struct DrmSessionClientInterface;
-
-inline bool operator==(const Vector<uint8_t> &l, const Vector<uint8_t> &r) {
-    if (l.size() != r.size()) return false;
-    return memcmp(l.array(), r.array(), l.size()) == 0;
-}
-
-struct DrmHal : public IDrm,
-                public IDrmPluginListener_V1_2 {
-
-    struct DrmSessionClient;
-
+struct DrmHal : public IDrm {
     DrmHal();
     virtual ~DrmHal();
-
     virtual status_t initCheck() const;
-
     virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16],
-                                             const String8& mimeType,
-                                             DrmPlugin::SecurityLevel level,
-                                             bool *isSupported);
-
+                                             const String8 &mimeType,
+                                             DrmPlugin::SecurityLevel securityLevel,
+                                             bool *result);
     virtual status_t createPlugin(const uint8_t uuid[16],
                                   const String8 &appPackageName);
-
     virtual status_t destroyPlugin();
-
-    virtual status_t openSession(DrmPlugin::SecurityLevel level,
+    virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
             Vector<uint8_t> &sessionId);
-
     virtual status_t closeSession(Vector<uint8_t> const &sessionId);
-
     virtual status_t
         getKeyRequest(Vector<uint8_t> const &sessionId,
                       Vector<uint8_t> const &initData,
@@ -92,168 +42,88 @@
                       KeyedVector<String8, String8> const &optionalParameters,
                       Vector<uint8_t> &request, String8 &defaultUrl,
                       DrmPlugin::KeyRequestType *keyRequestType);
-
     virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
                                         Vector<uint8_t> const &response,
                                         Vector<uint8_t> &keySetId);
-
     virtual status_t removeKeys(Vector<uint8_t> const &keySetId);
-
     virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
                                  Vector<uint8_t> const &keySetId);
-
     virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
                                     KeyedVector<String8, String8> &infoMap) const;
-
     virtual status_t getProvisionRequest(String8 const &certType,
                                          String8 const &certAuthority,
                                          Vector<uint8_t> &request,
-                                         String8 &defaulUrl);
-
+                                         String8 &defaultUrl);
     virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
                                               Vector<uint8_t> &certificate,
                                               Vector<uint8_t> &wrappedKey);
-
     virtual status_t getSecureStops(List<Vector<uint8_t>> &secureStops);
     virtual status_t getSecureStopIds(List<Vector<uint8_t>> &secureStopIds);
     virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
-
     virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
     virtual status_t removeSecureStop(Vector<uint8_t> const &ssid);
     virtual status_t removeAllSecureStops();
-
     virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
             DrmPlugin::HdcpLevel *maxLevel) const;
     virtual status_t getNumberOfSessions(uint32_t *currentSessions,
             uint32_t *maxSessions) const;
     virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
             DrmPlugin::SecurityLevel *level) const;
-
     virtual status_t getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const;
     virtual status_t removeOfflineLicense(Vector<uint8_t> const &keySetId);
     virtual status_t getOfflineLicenseState(Vector<uint8_t> const &keySetId,
             DrmPlugin::OfflineLicenseState *licenseState) const;
-
-    virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
+    virtual status_t getPropertyString(String8 const &name, String8 &value) const;
     virtual status_t getPropertyByteArray(String8 const &name,
-                                          Vector<uint8_t> &value ) const;
-    virtual status_t setPropertyString(String8 const &name, String8 const &value ) const;
+                                          Vector<uint8_t> &value) const;
+    virtual status_t setPropertyString(String8 const &name,
+                                       String8 const &value) const;
     virtual status_t setPropertyByteArray(String8 const &name,
-                                          Vector<uint8_t> const &value ) const;
+                                          Vector<uint8_t> const &value) const;
     virtual status_t getMetrics(const sp<IDrmMetricsConsumer> &consumer);
-
     virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
                                         String8 const &algorithm);
-
     virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
                                      String8 const &algorithm);
-
     virtual status_t encrypt(Vector<uint8_t> const &sessionId,
                              Vector<uint8_t> const &keyId,
                              Vector<uint8_t> const &input,
                              Vector<uint8_t> const &iv,
                              Vector<uint8_t> &output);
-
     virtual status_t decrypt(Vector<uint8_t> const &sessionId,
                              Vector<uint8_t> const &keyId,
                              Vector<uint8_t> const &input,
                              Vector<uint8_t> const &iv,
                              Vector<uint8_t> &output);
-
     virtual status_t sign(Vector<uint8_t> const &sessionId,
                           Vector<uint8_t> const &keyId,
                           Vector<uint8_t> const &message,
                           Vector<uint8_t> &signature);
-
     virtual status_t verify(Vector<uint8_t> const &sessionId,
                             Vector<uint8_t> const &keyId,
                             Vector<uint8_t> const &message,
                             Vector<uint8_t> const &signature,
                             bool &match);
-
     virtual status_t signRSA(Vector<uint8_t> const &sessionId,
                              String8 const &algorithm,
                              Vector<uint8_t> const &message,
                              Vector<uint8_t> const &wrappedKey,
                              Vector<uint8_t> &signature);
-
     virtual status_t setListener(const sp<IDrmClient>& listener);
-
     virtual status_t requiresSecureDecoder(const char *mime, bool *required) const;
-
     virtual status_t requiresSecureDecoder(const char *mime, DrmPlugin::SecurityLevel securityLevel,
                                            bool *required) const;
-
     virtual status_t setPlaybackId(
             Vector<uint8_t> const &sessionId,
             const char *playbackId);
-
     virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
 
-    // Methods of IDrmPluginListener
-    Return<void> sendEvent(EventType eventType,
-            const hidl_vec<uint8_t>& sessionId, const hidl_vec<uint8_t>& data);
-
-    Return<void> sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId,
-            int64_t expiryTimeInMS);
-
-    Return<void> sendKeysChange(const hidl_vec<uint8_t>& sessionId,
-            const hidl_vec<KeyStatus_V1_0>& keyStatusList, bool hasNewUsableKey);
-
-    Return<void> sendKeysChange_1_2(const hidl_vec<uint8_t>& sessionId,
-            const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey);
-
-    Return<void> sendSessionLostState(const hidl_vec<uint8_t>& sessionId);
-
 private:
-    static Mutex mLock;
-
-    sp<IDrmClient> mListener;
-    mutable Mutex mEventLock;
-    mutable Mutex mNotifyLock;
-
-    const std::vector<sp<IDrmFactory>> mFactories;
-    sp<IDrmPlugin> mPlugin;
-    sp<drm::V1_1::IDrmPlugin> mPluginV1_1;
-    sp<drm::V1_2::IDrmPlugin> mPluginV1_2;
-    sp<drm::V1_4::IDrmPlugin> mPluginV1_4;
-    String8 mAppPackageName;
-
-    // Mutable to allow modification within GetPropertyByteArray.
-    mutable MediaDrmMetrics mMetrics;
-
-    std::vector<std::shared_ptr<DrmSessionClient>> mOpenSessions;
-    void closeOpenSessions();
-    void cleanup();
-
-    /**
-     * mInitCheck is:
-     *   NO_INIT if a plugin hasn't been created yet
-     *   ERROR_UNSUPPORTED if a plugin can't be created for the uuid
-     *   OK after a plugin has been created and mPlugin is valid
-     */
-    status_t mInitCheck;
-
-    std::vector<sp<IDrmFactory>> makeDrmFactories();
-    sp<IDrmPlugin> makeDrmPlugin(const sp<IDrmFactory>& factory,
-            const uint8_t uuid[16], const String8& appPackageName);
-
-    void writeByteArray(Parcel &obj, const hidl_vec<uint8_t>& array);
-
-    std::string reportPluginMetrics() const;
-    std::string reportFrameworkMetrics(const std::string& pluginMetrics) const;
-    status_t getPropertyStringInternal(String8 const &name, String8 &value) const;
-    status_t getPropertyByteArrayInternal(String8 const &name,
-                                          Vector<uint8_t> &value) const;
-    status_t matchMimeTypeAndSecurityLevel(const sp<IDrmFactory> &factory,
-                                           const uint8_t uuid[16],
-                                           const String8 &mimeType,
-                                           DrmPlugin::SecurityLevel level,
-                                           bool *isSupported);
-
+    sp<IDrm> mDrmHalHidl;
+    std::shared_ptr<IDrm> mDrmHalAidl;
     DISALLOW_EVIL_CONSTRUCTORS(DrmHal);
 };
 
-}  // namespace android
+}  // namespace android
 
-#endif  // DRM_HAL_H_
+#endif
\ No newline at end of file
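DrmHal likewise shrinks to a wrapper holding one AIDL and one HIDL IDrm backend. A client-side sketch based only on the signatures kept in this header; the UUID, package name, and security level are illustrative placeholders:

    // Illustrative use of the IDrm facade; error handling omitted.
    const uint8_t kSchemeUuid[16] = {};  // placeholder: a real DRM scheme UUID goes here
    sp<IDrm> drm = new DrmHal();
    drm->createPlugin(kSchemeUuid, String8("com.example.player"));
    if (drm->initCheck() == OK) {        // OK once a plugin has been created
        Vector<uint8_t> sessionId;
        drm->openSession(DrmPlugin::kSecurityLevelMax, sessionId);  // kSecurityLevelMax: assumed default level
    }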
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalAidl.h b/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
new file mode 100644
index 0000000..6720734
--- /dev/null
+++ b/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_HAL_AIDL_H_
+#define DRM_HAL_AIDL_H_
+
+#include <aidl/android/hardware/drm/BnDrmPluginListener.h>
+#include <aidl/android/hardware/drm/IDrmFactory.h>
+#include <aidl/android/hardware/drm/IDrmPlugin.h>
+#include <aidl/android/media/BnResourceManagerClient.h>
+#include <mediadrm/DrmMetrics.h>
+#include <mediadrm/DrmSessionManager.h>
+#include <mediadrm/IDrm.h>
+#include <memory>
+
+using ::aidl::android::hardware::drm::BnDrmPluginListener;
+using IDrmPluginAidl = ::aidl::android::hardware::drm::IDrmPlugin;
+using IDrmFactoryAidl = ::aidl::android::hardware::drm::IDrmFactory;
+using EventTypeAidl = ::aidl::android::hardware::drm::EventType;
+using KeyStatusAidl = ::aidl::android::hardware::drm::KeyStatus;
+using ::aidl::android::hardware::drm::Uuid;
+
+namespace android {
+struct DrmHalAidl : public IDrm,
+                    public BnDrmPluginListener,
+                    std::enable_shared_from_this<BnDrmPluginListener> {
+    struct DrmSessionClient;
+    DrmHalAidl();
+    virtual ~DrmHalAidl();
+    virtual status_t initCheck() const;
+    virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+                                             DrmPlugin::SecurityLevel securityLevel, bool* result);
+    virtual status_t createPlugin(const uint8_t uuid[16], const String8& appPackageName);
+    virtual status_t destroyPlugin();
+    virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
+                                 Vector<uint8_t>& sessionId);
+    virtual status_t closeSession(Vector<uint8_t> const& sessionId);
+    virtual status_t getKeyRequest(Vector<uint8_t> const& sessionId,
+                                   Vector<uint8_t> const& initData, String8 const& mimeType,
+                                   DrmPlugin::KeyType keyType,
+                                   KeyedVector<String8, String8> const& optionalParameters,
+                                   Vector<uint8_t>& request, String8& defaultUrl,
+                                   DrmPlugin::KeyRequestType* keyRequestType);
+    virtual status_t provideKeyResponse(Vector<uint8_t> const& sessionId,
+                                        Vector<uint8_t> const& response, Vector<uint8_t>& keySetId);
+    virtual status_t removeKeys(Vector<uint8_t> const& keySetId);
+    virtual status_t restoreKeys(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keySetId);
+    virtual status_t queryKeyStatus(Vector<uint8_t> const& sessionId,
+                                    KeyedVector<String8, String8>& infoMap) const;
+    virtual status_t getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+                                         Vector<uint8_t>& request, String8& defaultUrl);
+    virtual status_t provideProvisionResponse(Vector<uint8_t> const& response,
+                                              Vector<uint8_t>& certificate,
+                                              Vector<uint8_t>& wrappedKey);
+    virtual status_t getSecureStops(List<Vector<uint8_t>>& secureStops);
+    virtual status_t getSecureStopIds(List<Vector<uint8_t>>& secureStopIds);
+    virtual status_t getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop);
+    virtual status_t releaseSecureStops(Vector<uint8_t> const& ssRelease);
+    virtual status_t removeSecureStop(Vector<uint8_t> const& ssid);
+    virtual status_t removeAllSecureStops();
+    virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel* connectedLevel,
+                                   DrmPlugin::HdcpLevel* maxLevel) const;
+    virtual status_t getNumberOfSessions(uint32_t* currentSessions, uint32_t* maxSessions) const;
+    virtual status_t getSecurityLevel(Vector<uint8_t> const& sessionId,
+                                      DrmPlugin::SecurityLevel* level) const;
+    virtual status_t getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const;
+    virtual status_t removeOfflineLicense(Vector<uint8_t> const& keySetId);
+    virtual status_t getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+                                            DrmPlugin::OfflineLicenseState* licenseState) const;
+    virtual status_t getPropertyString(String8 const& name, String8& value) const;
+    virtual status_t getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const;
+    virtual status_t setPropertyString(String8 const& name, String8 const& value) const;
+    virtual status_t setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const;
+    virtual status_t getMetrics(const sp<IDrmMetricsConsumer>& consumer);
+    virtual status_t setCipherAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm);
+    virtual status_t setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm);
+    virtual status_t encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                             Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+                             Vector<uint8_t>& output);
+    virtual status_t decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                             Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+                             Vector<uint8_t>& output);
+    virtual status_t sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                          Vector<uint8_t> const& message, Vector<uint8_t>& signature);
+    virtual status_t verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+                            Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+                            bool& match);
+    virtual status_t signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+                             Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+                             Vector<uint8_t>& signature);
+    virtual status_t setListener(const sp<IDrmClient>& listener);
+    virtual status_t requiresSecureDecoder(const char* mime, bool* required) const;
+    virtual status_t requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
+                                           bool* required) const;
+    virtual status_t setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId);
+    virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const;
+    // Methods of IDrmPluginListenerAidl
+    ::ndk::ScopedAStatus onEvent(EventTypeAidl in_eventType,
+                                 const std::vector<uint8_t>& in_sessionId,
+                                 const std::vector<uint8_t>& in_data);
+    ::ndk::ScopedAStatus onExpirationUpdate(const std::vector<uint8_t>& in_sessionId,
+                                            int64_t in_expiryTimeInMS);
+    ::ndk::ScopedAStatus onKeysChange(const std::vector<uint8_t>& in_sessionId,
+                                      const std::vector<KeyStatusAidl>& in_keyStatusList,
+                                      bool in_hasNewUsableKey);
+    ::ndk::ScopedAStatus onSessionLostState(const std::vector<uint8_t>& in_sessionId);
+
+  private:
+    static Mutex mLock;
+    sp<IDrmClient> mListener;
+    mutable Mutex mEventLock;
+    mutable Mutex mNotifyLock;
+    const std::vector<std::shared_ptr<IDrmFactoryAidl>> mFactories;
+    std::shared_ptr<IDrmPluginAidl> mPlugin;
+    std::vector<std::shared_ptr<IDrmFactoryAidl>> makeDrmFactories();
+    status_t mInitCheck;
+    mutable MediaDrmMetrics mMetrics;
+    std::vector<std::shared_ptr<DrmSessionClient>> mOpenSessions;
+    void cleanup();
+    void closeOpenSessions();
+    std::string reportPluginMetrics() const;
+    std::string reportFrameworkMetrics(const std::string& pluginMetrics) const;
+    status_t getPropertyStringInternal(String8 const& name, String8& value) const;
+    status_t getPropertyByteArrayInternal(String8 const& name, Vector<uint8_t>& value) const;
+    DISALLOW_EVIL_CONSTRUCTORS(DrmHalAidl);
+};
+
+}  // namespace android
+
+#endif  // DRM_HAL_AIDL_H_
\ No newline at end of file
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalHidl.h b/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
new file mode 100644
index 0000000..91dc700
--- /dev/null
+++ b/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_HAL_HIDL_H_
+#define DRM_HAL_HIDL_H_
+
+#include <android/hardware/drm/1.0/IDrmFactory.h>
+#include <android/hardware/drm/1.0/IDrmPlugin.h>
+#include <android/hardware/drm/1.1/IDrmFactory.h>
+#include <android/hardware/drm/1.1/IDrmPlugin.h>
+#include <android/hardware/drm/1.2/IDrmFactory.h>
+#include <android/hardware/drm/1.2/IDrmPlugin.h>
+#include <android/hardware/drm/1.2/IDrmPluginListener.h>
+#include <android/hardware/drm/1.4/IDrmPlugin.h>
+#include <android/hardware/drm/1.4/types.h>
+
+#include <media/drm/DrmAPI.h>
+#include <mediadrm/DrmMetrics.h>
+#include <mediadrm/DrmSessionManager.h>
+#include <mediadrm/IDrm.h>
+#include <mediadrm/IDrmClient.h>
+#include <mediadrm/IDrmMetricsConsumer.h>
+#include <utils/threads.h>
+
+namespace drm = ::android::hardware::drm;
+using drm::V1_0::EventType;
+using drm::V1_0::IDrmFactory;
+using drm::V1_0::IDrmPlugin;
+using drm::V1_0::IDrmPluginListener;
+using drm::V1_1::SecurityLevel;
+using drm::V1_2::KeyStatus;
+using drm::V1_2::OfflineLicenseState;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+typedef drm::V1_2::IDrmPluginListener IDrmPluginListener_V1_2;
+typedef drm::V1_0::KeyStatus KeyStatus_V1_0;
+
+namespace android {
+
+struct DrmSessionClientInterface;
+
+inline bool operator==(const Vector<uint8_t> &l, const Vector<uint8_t> &r) {
+    if (l.size() != r.size()) return false;
+    return memcmp(l.array(), r.array(), l.size()) == 0;
+}
+
+struct DrmHalHidl : public IDrm,
+                public IDrmPluginListener_V1_2 {
+
+    struct DrmSessionClient;
+
+    DrmHalHidl();
+    virtual ~DrmHalHidl();
+
+    virtual status_t initCheck() const;
+
+    virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16],
+                                             const String8& mimeType,
+                                             DrmPlugin::SecurityLevel level,
+                                             bool *isSupported);
+
+    virtual status_t createPlugin(const uint8_t uuid[16],
+                                  const String8 &appPackageName);
+
+    virtual status_t destroyPlugin();
+
+    virtual status_t openSession(DrmPlugin::SecurityLevel level,
+            Vector<uint8_t> &sessionId);
+
+    virtual status_t closeSession(Vector<uint8_t> const &sessionId);
+
+    virtual status_t
+        getKeyRequest(Vector<uint8_t> const &sessionId,
+                      Vector<uint8_t> const &initData,
+                      String8 const &mimeType, DrmPlugin::KeyType keyType,
+                      KeyedVector<String8, String8> const &optionalParameters,
+                      Vector<uint8_t> &request, String8 &defaultUrl,
+                      DrmPlugin::KeyRequestType *keyRequestType);
+
+    virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
+                                        Vector<uint8_t> const &response,
+                                        Vector<uint8_t> &keySetId);
+
+    virtual status_t removeKeys(Vector<uint8_t> const &keySetId);
+
+    virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
+                                 Vector<uint8_t> const &keySetId);
+
+    virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
+                                    KeyedVector<String8, String8> &infoMap) const;
+
+    virtual status_t getProvisionRequest(String8 const &certType,
+                                         String8 const &certAuthority,
+                                         Vector<uint8_t> &request,
+                                         String8 &defaultUrl);
+
+    virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
+                                              Vector<uint8_t> &certificate,
+                                              Vector<uint8_t> &wrappedKey);
+
+    virtual status_t getSecureStops(List<Vector<uint8_t>> &secureStops);
+    virtual status_t getSecureStopIds(List<Vector<uint8_t>> &secureStopIds);
+    virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
+
+    virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
+    virtual status_t removeSecureStop(Vector<uint8_t> const &ssid);
+    virtual status_t removeAllSecureStops();
+
+    virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
+            DrmPlugin::HdcpLevel *maxLevel) const;
+    virtual status_t getNumberOfSessions(uint32_t *currentSessions,
+            uint32_t *maxSessions) const;
+    virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
+            DrmPlugin::SecurityLevel *level) const;
+
+    virtual status_t getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const;
+    virtual status_t removeOfflineLicense(Vector<uint8_t> const &keySetId);
+    virtual status_t getOfflineLicenseState(Vector<uint8_t> const &keySetId,
+            DrmPlugin::OfflineLicenseState *licenseState) const;
+
+    virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
+    virtual status_t getPropertyByteArray(String8 const &name,
+                                          Vector<uint8_t> &value ) const;
+    virtual status_t setPropertyString(String8 const &name, String8 const &value ) const;
+    virtual status_t setPropertyByteArray(String8 const &name,
+                                          Vector<uint8_t> const &value ) const;
+    virtual status_t getMetrics(const sp<IDrmMetricsConsumer> &consumer);
+
+    virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+                                        String8 const &algorithm);
+
+    virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
+                                     String8 const &algorithm);
+
+    virtual status_t encrypt(Vector<uint8_t> const &sessionId,
+                             Vector<uint8_t> const &keyId,
+                             Vector<uint8_t> const &input,
+                             Vector<uint8_t> const &iv,
+                             Vector<uint8_t> &output);
+
+    virtual status_t decrypt(Vector<uint8_t> const &sessionId,
+                             Vector<uint8_t> const &keyId,
+                             Vector<uint8_t> const &input,
+                             Vector<uint8_t> const &iv,
+                             Vector<uint8_t> &output);
+
+    virtual status_t sign(Vector<uint8_t> const &sessionId,
+                          Vector<uint8_t> const &keyId,
+                          Vector<uint8_t> const &message,
+                          Vector<uint8_t> &signature);
+
+    virtual status_t verify(Vector<uint8_t> const &sessionId,
+                            Vector<uint8_t> const &keyId,
+                            Vector<uint8_t> const &message,
+                            Vector<uint8_t> const &signature,
+                            bool &match);
+
+    virtual status_t signRSA(Vector<uint8_t> const &sessionId,
+                             String8 const &algorithm,
+                             Vector<uint8_t> const &message,
+                             Vector<uint8_t> const &wrappedKey,
+                             Vector<uint8_t> &signature);
+
+    virtual status_t setListener(const sp<IDrmClient>& listener);
+
+    virtual status_t requiresSecureDecoder(const char *mime, bool *required) const;
+
+    virtual status_t requiresSecureDecoder(const char *mime, DrmPlugin::SecurityLevel securityLevel,
+                                           bool *required) const;
+
+    virtual status_t setPlaybackId(
+            Vector<uint8_t> const &sessionId,
+            const char *playbackId);
+
+    virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
+
+    // Methods of IDrmPluginListener
+    Return<void> sendEvent(EventType eventType,
+            const hidl_vec<uint8_t>& sessionId, const hidl_vec<uint8_t>& data);
+
+    Return<void> sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId,
+            int64_t expiryTimeInMS);
+
+    Return<void> sendKeysChange(const hidl_vec<uint8_t>& sessionId,
+            const hidl_vec<KeyStatus_V1_0>& keyStatusList, bool hasNewUsableKey);
+
+    Return<void> sendKeysChange_1_2(const hidl_vec<uint8_t>& sessionId,
+            const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey);
+
+    Return<void> sendSessionLostState(const hidl_vec<uint8_t>& sessionId);
+
+private:
+    static Mutex mLock;
+
+    sp<IDrmClient> mListener;
+    mutable Mutex mEventLock;
+    mutable Mutex mNotifyLock;
+
+    const std::vector<sp<IDrmFactory>> mFactories;
+    sp<IDrmPlugin> mPlugin;
+    sp<drm::V1_1::IDrmPlugin> mPluginV1_1;
+    sp<drm::V1_2::IDrmPlugin> mPluginV1_2;
+    sp<drm::V1_4::IDrmPlugin> mPluginV1_4;
+    String8 mAppPackageName;
+
+    // Mutable to allow modification within GetPropertyByteArray.
+    mutable MediaDrmMetrics mMetrics;
+
+    std::vector<std::shared_ptr<DrmSessionClient>> mOpenSessions;
+    void closeOpenSessions();
+    void cleanup();
+
+    /**
+     * mInitCheck is:
+     *   NO_INIT if a plugin hasn't been created yet
+     *   ERROR_UNSUPPORTED if a plugin can't be created for the uuid
+     *   OK after a plugin has been created and mPlugin is valid
+     */
+    status_t mInitCheck;
+
+    std::vector<sp<IDrmFactory>> makeDrmFactories();
+    sp<IDrmPlugin> makeDrmPlugin(const sp<IDrmFactory>& factory,
+            const uint8_t uuid[16], const String8& appPackageName);
+
+    void writeByteArray(Parcel &obj, const hidl_vec<uint8_t>& array);
+
+    std::string reportPluginMetrics() const;
+    std::string reportFrameworkMetrics(const std::string& pluginMetrics) const;
+    status_t getPropertyStringInternal(String8 const &name, String8 &value) const;
+    status_t getPropertyByteArrayInternal(String8 const &name,
+                                          Vector<uint8_t> &value) const;
+    status_t matchMimeTypeAndSecurityLevel(const sp<IDrmFactory> &factory,
+                                           const uint8_t uuid[16],
+                                           const String8 &mimeType,
+                                           DrmPlugin::SecurityLevel level,
+                                           bool *isSupported);
+
+    DISALLOW_EVIL_CONSTRUCTORS(DrmHalHidl);
+};
+
+}  // namespace android
+
+#endif  // DRM_HAL_HIDL_H_
diff --git a/drm/libmediadrm/include/mediadrm/DrmMetrics.h b/drm/libmediadrm/include/mediadrm/DrmMetrics.h
index 100b8f7..e1775c7 100644
--- a/drm/libmediadrm/include/mediadrm/DrmMetrics.h
+++ b/drm/libmediadrm/include/mediadrm/DrmMetrics.h
@@ -50,12 +50,10 @@
   CounterMetric<status_t> mGetProvisionRequestCounter;
   // Count of provideProvisionResponse calls.
   CounterMetric<status_t> mProvideProvisionResponseCounter;
-
   // Count of key status events broken out by status type.
-  CounterMetric<::android::hardware::drm::V1_2::KeyStatusType>
-      mKeyStatusChangeCounter;
+  CounterMetric<uint32_t> mKeyStatusChangeCounter;
   // Count of events broken out by event type
-  CounterMetric<::android::hardware::drm::V1_0::EventType> mEventCounter;
+  CounterMetric<uint32_t> mEventCounter;
 
   // Count getPropertyByteArray calls to retrieve the device unique id.
   CounterMetric<status_t> mGetDeviceUniqueIdCounter;
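The two counters switch from HIDL enum keys to plain uint32_t, presumably so the same MediaDrmMetrics object can be fed from either the HIDL or the AIDL enum values; callers now cast explicitly, as the test updates below show:

    // Either enum family can feed the same counter once cast to uint32_t.
    metrics.mEventCounter.Increment(static_cast<uint32_t>(EventType::PROVISION_REQUIRED));
    metrics.mKeyStatusChangeCounter.Increment(static_cast<uint32_t>(KeyStatusType::USABLE));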
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index ec0b878..5679cfd 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -38,12 +38,18 @@
 #include <mutex>
 #include <string>
 #include <vector>
+#include <aidl/android/hardware/drm/LogMessage.h>
+#include <aidl/android/hardware/drm/Status.h>
 
 
 using namespace ::android::hardware::drm;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::Return;
 
+using ::aidl::android::hardware::drm::LogPriority;
+using ::aidl::android::hardware::drm::LogMessage;
+using StatusAidl = ::aidl::android::hardware::drm::Status;
+
 namespace android {
 
 struct ICrypto;
@@ -180,6 +186,136 @@
     return toStatusT_1_4(err);
 }
 
+inline status_t toStatusTAidl(int32_t serviceError) {
+    auto status = static_cast<StatusAidl>(serviceError);
+    switch (status) {
+    case StatusAidl::OK:
+        return OK;
+    case StatusAidl::BAD_VALUE:
+        return BAD_VALUE;
+    case StatusAidl::ERROR_DRM_CANNOT_HANDLE:
+        return ERROR_DRM_CANNOT_HANDLE;
+    case StatusAidl::ERROR_DRM_DECRYPT:
+        return ERROR_DRM_DECRYPT;
+    case StatusAidl::ERROR_DRM_DEVICE_REVOKED:
+        return ERROR_DRM_DEVICE_REVOKED;
+    case StatusAidl::ERROR_DRM_FRAME_TOO_LARGE:
+        return ERROR_DRM_FRAME_TOO_LARGE;
+    case StatusAidl::ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION:
+        return ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION;
+    case StatusAidl::ERROR_DRM_INSUFFICIENT_SECURITY:
+        return ERROR_DRM_INSUFFICIENT_SECURITY;
+    case StatusAidl::ERROR_DRM_INVALID_STATE:
+        return ERROR_DRM_INVALID_STATE;
+    case StatusAidl::ERROR_DRM_LICENSE_EXPIRED:
+        return ERROR_DRM_LICENSE_EXPIRED;
+    case StatusAidl::ERROR_DRM_NO_LICENSE:
+        return ERROR_DRM_NO_LICENSE;
+    case StatusAidl::ERROR_DRM_NOT_PROVISIONED:
+        return ERROR_DRM_NOT_PROVISIONED;
+    case StatusAidl::ERROR_DRM_RESOURCE_BUSY:
+        return ERROR_DRM_RESOURCE_BUSY;
+    case StatusAidl::ERROR_DRM_RESOURCE_CONTENTION:
+        return ERROR_DRM_RESOURCE_CONTENTION;
+    case StatusAidl::ERROR_DRM_SESSION_LOST_STATE:
+        return ERROR_DRM_SESSION_LOST_STATE;
+    case StatusAidl::ERROR_DRM_SESSION_NOT_OPENED:
+        return ERROR_DRM_SESSION_NOT_OPENED;
+
+    // New in S / drm@1.4:
+    case StatusAidl::CANNOT_DECRYPT_ZERO_SUBSAMPLES:
+        return ERROR_DRM_ZERO_SUBSAMPLES;
+    case StatusAidl::CRYPTO_LIBRARY_ERROR:
+        return ERROR_DRM_CRYPTO_LIBRARY;
+    case StatusAidl::GENERAL_OEM_ERROR:
+        return ERROR_DRM_GENERIC_OEM;
+    case StatusAidl::GENERAL_PLUGIN_ERROR:
+        return ERROR_DRM_GENERIC_PLUGIN;
+    case StatusAidl::INIT_DATA_INVALID:
+        return ERROR_DRM_INIT_DATA;
+    case StatusAidl::KEY_NOT_LOADED:
+        return ERROR_DRM_KEY_NOT_LOADED;
+    case StatusAidl::LICENSE_PARSE_ERROR:
+        return ERROR_DRM_LICENSE_PARSE;
+    case StatusAidl::LICENSE_POLICY_ERROR:
+        return ERROR_DRM_LICENSE_POLICY;
+    case StatusAidl::LICENSE_RELEASE_ERROR:
+        return ERROR_DRM_LICENSE_RELEASE;
+    case StatusAidl::LICENSE_REQUEST_REJECTED:
+        return ERROR_DRM_LICENSE_REQUEST_REJECTED;
+    case StatusAidl::LICENSE_RESTORE_ERROR:
+        return ERROR_DRM_LICENSE_RESTORE;
+    case StatusAidl::LICENSE_STATE_ERROR:
+        return ERROR_DRM_LICENSE_STATE;
+    case StatusAidl::MALFORMED_CERTIFICATE:
+        return ERROR_DRM_CERTIFICATE_MALFORMED;
+    case StatusAidl::MEDIA_FRAMEWORK_ERROR:
+        return ERROR_DRM_MEDIA_FRAMEWORK;
+    case StatusAidl::MISSING_CERTIFICATE:
+        return ERROR_DRM_CERTIFICATE_MISSING;
+    case StatusAidl::PROVISIONING_CERTIFICATE_ERROR:
+        return ERROR_DRM_PROVISIONING_CERTIFICATE;
+    case StatusAidl::PROVISIONING_CONFIGURATION_ERROR:
+        return ERROR_DRM_PROVISIONING_CONFIG;
+    case StatusAidl::PROVISIONING_PARSE_ERROR:
+        return ERROR_DRM_PROVISIONING_PARSE;
+    case StatusAidl::PROVISIONING_REQUEST_REJECTED:
+        return ERROR_DRM_PROVISIONING_REQUEST_REJECTED;
+    case StatusAidl::RETRYABLE_PROVISIONING_ERROR:
+        return ERROR_DRM_PROVISIONING_RETRY;
+    case StatusAidl::SECURE_STOP_RELEASE_ERROR:
+        return ERROR_DRM_SECURE_STOP_RELEASE;
+    case StatusAidl::STORAGE_READ_FAILURE:
+        return ERROR_DRM_STORAGE_READ;
+    case StatusAidl::STORAGE_WRITE_FAILURE:
+        return ERROR_DRM_STORAGE_WRITE;
+
+    case StatusAidl::ERROR_DRM_UNKNOWN:
+    default:
+        return ERROR_DRM_UNKNOWN;
+    }
+    return ERROR_DRM_UNKNOWN;
+}
+
+template<typename T, typename U>
+status_t GetLogMessagesAidl(const std::shared_ptr<U> &obj, Vector<::V1_4::LogMessage> &logs) {
+    std::shared_ptr<T> plugin = obj;
+    if (obj == NULL) {
+        LOG2BW("%s obj is null", U::descriptor);
+    } else if (plugin == NULL) {
+        LOG2BW("Cannot cast %s obj to %s plugin", U::descriptor, T::descriptor);
+    }
+
+    std::vector<LogMessage> pluginLogsAidl;
+    if (plugin != NULL) {
+        if (!plugin->getLogMessages(&pluginLogsAidl).isOk()) {
+            LOG2BW("%s::getLogMessages remote call failed", T::descriptor);
+        }
+    }
+
+    std::vector<::V1_4::LogMessage> pluginLogs;
+    for (const LogMessage& log : pluginLogsAidl) {
+        ::V1_4::LogMessage logHidl;
+        logHidl.timeMs = log.timeMs;
+        // Skip the negative-value check: the LogPriority enum has only 7 elements.
+        logHidl.priority = static_cast<::V1_4::LogPriority>((int32_t)log.priority);
+        logHidl.message = log.message;
+        pluginLogs.push_back(logHidl);
+    }
+
+    auto allLogs(gLogBuf.getLogs());
+    LOG2BD("framework logs size %zu; plugin logs size %zu",
+           allLogs.size(), pluginLogs.size());
+    std::copy(pluginLogs.begin(), pluginLogs.end(), std::back_inserter(allLogs));
+    std::sort(allLogs.begin(), allLogs.end(),
+              [](const ::V1_4::LogMessage &a, const ::V1_4::LogMessage &b) {
+                  return a.timeMs < b.timeMs;
+              });
+
+    logs.appendVector(allLogs);
+    return OK;
+}
+
 template<typename T, typename U>
 status_t GetLogMessages(const sp<U> &obj, Vector<::V1_4::LogMessage> &logs) {
     sp<T> plugin = T::castFrom(obj);
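toStatusTAidl() and GetLogMessagesAidl() mirror the existing HIDL helpers for the AIDL drm HAL. A hedged call-site sketch; the plugin proxy, the method invoked, and the variable names are assumptions:

    // Map a failed AIDL binder call onto a framework status_t.
    ::ndk::ScopedAStatus ret = plugin->removeKeys(keySetId);  // 'plugin': some AIDL IDrmPlugin proxy
    status_t err = ret.isOk() ? OK : toStatusTAidl(ret.getServiceSpecificError());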
diff --git a/drm/libmediadrm/tests/DrmMetrics_test.cpp b/drm/libmediadrm/tests/DrmMetrics_test.cpp
index f362d60..237a88b 100644
--- a/drm/libmediadrm/tests/DrmMetrics_test.cpp
+++ b/drm/libmediadrm/tests/DrmMetrics_test.cpp
@@ -83,8 +83,8 @@
   metrics.mProvideProvisionResponseCounter.Increment(OK);
   metrics.mGetDeviceUniqueIdCounter.Increment(OK);
 
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::USABLE);
-  metrics.mEventCounter.Increment(EventType::PROVISION_REQUIRED);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::USABLE);
+  metrics.mEventCounter.Increment((uint32_t)EventType::PROVISION_REQUIRED);
 
   PersistableBundle bundle;
   DrmMetricsConsumer consumer(&bundle);
@@ -151,16 +151,16 @@
   metrics.mGetDeviceUniqueIdCounter.Increment(OK);
   metrics.mGetDeviceUniqueIdCounter.Increment(UNEXPECTED_NULL);
 
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::USABLE);
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::EXPIRED);
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::OUTPUTNOTALLOWED);
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::STATUSPENDING);
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::INTERNALERROR);
-  metrics.mEventCounter.Increment(EventType::PROVISION_REQUIRED);
-  metrics.mEventCounter.Increment(EventType::KEY_NEEDED);
-  metrics.mEventCounter.Increment(EventType::KEY_EXPIRED);
-  metrics.mEventCounter.Increment(EventType::VENDOR_DEFINED);
-  metrics.mEventCounter.Increment(EventType::SESSION_RECLAIMED);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::USABLE);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::EXPIRED);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::OUTPUTNOTALLOWED);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::STATUSPENDING);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::INTERNALERROR);
+  metrics.mEventCounter.Increment((uint32_t)EventType::PROVISION_REQUIRED);
+  metrics.mEventCounter.Increment((uint32_t)EventType::KEY_NEEDED);
+  metrics.mEventCounter.Increment((uint32_t)EventType::KEY_EXPIRED);
+  metrics.mEventCounter.Increment((uint32_t)EventType::VENDOR_DEFINED);
+  metrics.mEventCounter.Increment((uint32_t)EventType::SESSION_RECLAIMED);
 
   android::Vector<uint8_t> sessionId1;
   sessionId1.push_back(1);
@@ -284,16 +284,16 @@
   metrics.mGetDeviceUniqueIdCounter.Increment(OK);
   metrics.mGetDeviceUniqueIdCounter.Increment(UNEXPECTED_NULL);
 
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::USABLE);
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::EXPIRED);
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::OUTPUTNOTALLOWED);
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::STATUSPENDING);
-  metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::INTERNALERROR);
-  metrics.mEventCounter.Increment(EventType::PROVISION_REQUIRED);
-  metrics.mEventCounter.Increment(EventType::KEY_NEEDED);
-  metrics.mEventCounter.Increment(EventType::KEY_EXPIRED);
-  metrics.mEventCounter.Increment(EventType::VENDOR_DEFINED);
-  metrics.mEventCounter.Increment(EventType::SESSION_RECLAIMED);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::USABLE);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::EXPIRED);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::OUTPUTNOTALLOWED);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::STATUSPENDING);
+  metrics.mKeyStatusChangeCounter.Increment((uint32_t)KeyStatusType::INTERNALERROR);
+  metrics.mEventCounter.Increment((uint32_t)EventType::PROVISION_REQUIRED);
+  metrics.mEventCounter.Increment((uint32_t)EventType::KEY_NEEDED);
+  metrics.mEventCounter.Increment((uint32_t)EventType::KEY_EXPIRED);
+  metrics.mEventCounter.Increment((uint32_t)EventType::VENDOR_DEFINED);
+  metrics.mEventCounter.Increment((uint32_t)EventType::SESSION_RECLAIMED);
 
   std::string serializedMetrics;
   ASSERT_EQ(OK, metrics.GetSerializedMetrics(&serializedMetrics));
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index 7bd1568..fd4ef95 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -1,16 +1,19 @@
 {
   "presubmit": [
     {
-      "name": "CtsMediaTestCases",
+      "name": "CtsMediaDrmTestCases",
       "options" : [
         {
           "include-annotation": "android.platform.test.annotations.Presubmit"
         },
         {
-          "include-filter": "android.media.cts.MediaDrmClearkeyTest"
+          "include-filter": "android.mediadrm.cts.MediaDrmClearkeyTest"
         },
         {
-          "include-filter": "android.media.cts.MediaDrmMetricsTest"
+          "include-filter": "android.mediadrm.cts.MediaDrmMetricsTest"
+        },
+        {
+          "include-filter": "android.mediadrm.cts.NativeMediaDrmClearkeyTest"
         }
       ]
     }
diff --git a/drm/mediadrm/plugins/clearkey/aidl/Android.bp b/drm/mediadrm/plugins/clearkey/aidl/Android.bp
new file mode 100644
index 0000000..2997b67
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/Android.bp
@@ -0,0 +1,72 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_defaults {
+    name: "aidl_clearkey_service_defaults",
+    vendor: true,
+
+    srcs: [
+        "CreatePluginFactories.cpp",
+        "CryptoFactory.cpp",
+        "CryptoPlugin.cpp",
+        "DrmFactory.cpp",
+        "DrmPlugin.cpp",
+    ],
+
+    relative_install_path: "hw",
+
+    cflags: ["-Wall", "-Werror", "-Wthread-safety"],
+
+    include_dirs: ["frameworks/av/include"],
+
+    shared_libs: [
+        "libbase",
+        "libbinder_ndk",
+        "libcrypto",
+        "liblog",
+        "libprotobuf-cpp-lite",
+        "libutils",
+        "android.hardware.drm-V1-ndk",
+    ],
+
+    static_libs: [
+        "android.hardware.common-V2-ndk",
+        "libclearkeybase",
+    ],
+
+    local_include_dirs: ["include"],
+
+    sanitize: {
+        integer_overflow: true,
+    },
+}
+
+cc_binary {
+    name: "android.hardware.drm-service.clearkey",
+    defaults: ["aidl_clearkey_service_defaults"],
+    srcs: ["Service.cpp"],
+    init_rc: ["android.hardware.drm-service.clearkey.rc"],
+    vintf_fragments: ["android.hardware.drm-service.clearkey.xml"],
+}
+
+cc_binary {
+    name: "android.hardware.drm-service-lazy.clearkey",
+    defaults: ["aidl_clearkey_service_defaults"],
+    overrides: ["android.hardware.drm-service.clearkey"],
+    srcs: ["ServiceLazy.cpp"],
+    init_rc: ["android.hardware.drm-service-lazy.clearkey.rc"],
+    vintf_fragments: ["android.hardware.drm-service.clearkey.xml"],
+}
+
+phony {
+    name: "android.hardware.drm@latest-service.clearkey",
+    required: [
+        "android.hardware.drm-service.clearkey",
+    ],
+}
diff --git a/drm/mediadrm/plugins/clearkey/aidl/CreatePluginFactories.cpp b/drm/mediadrm/plugins/clearkey/aidl/CreatePluginFactories.cpp
new file mode 100644
index 0000000..5f6bfe8
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/CreatePluginFactories.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "CreatePluginFactories.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+std::shared_ptr<DrmFactory> createDrmFactory() {
+  return ::ndk::SharedRefBase::make<DrmFactory>();
+}
+
+std::shared_ptr<CryptoFactory> createCryptoFactory() {
+    return ::ndk::SharedRefBase::make<CryptoFactory>();
+}
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/aidl/CryptoFactory.cpp b/drm/mediadrm/plugins/clearkey/aidl/CryptoFactory.cpp
new file mode 100644
index 0000000..43b325d
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/CryptoFactory.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-CryptoFactory"
+#include <utils/Log.h>
+
+#include "CryptoFactory.h"
+
+#include "ClearKeyUUID.h"
+#include "CryptoPlugin.h"
+#include "AidlUtils.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+using ::aidl::android::hardware::drm::Status;
+using ::aidl::android::hardware::drm::Uuid;
+
+using std::vector;
+
+::ndk::ScopedAStatus CryptoFactory::createPlugin(
+        const ::aidl::android::hardware::drm::Uuid& in_uuid,
+        const std::vector<uint8_t>& in_initData,
+        std::shared_ptr<::aidl::android::hardware::drm::ICryptoPlugin>* _aidl_return) {
+    if (!isClearKeyUUID(in_uuid.uuid.data())) {
+        ALOGE("Clearkey Drm HAL: failed to create crypto plugin, "
+              "invalid crypto scheme");
+        *_aidl_return = nullptr;
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    std::shared_ptr<CryptoPlugin> plugin = ::ndk::SharedRefBase::make<CryptoPlugin>(in_initData);
+    Status status = plugin->getInitStatus();
+    if (status != Status::OK) {
+        plugin.reset();
+        plugin = nullptr;
+    }
+    *_aidl_return = plugin;
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus CryptoFactory::isCryptoSchemeSupported(const Uuid& in_uuid,
+                                                            bool* _aidl_return) {
+    *_aidl_return = isClearKeyUUID(in_uuid.uuid.data());
+    return ::ndk::ScopedAStatus::ok();
+}
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp
new file mode 100644
index 0000000..b65d40f
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-CryptoPlugin"
+
+#include <utils/Log.h>
+#include <cerrno>
+#include <cstring>
+
+#include "CryptoPlugin.h"
+#include "SessionLibrary.h"
+#include "AidlUtils.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+using ::aidl::android::hardware::drm::Status;
+
+::ndk::ScopedAStatus CryptoPlugin::decrypt(
+        bool in_secure, const std::vector<uint8_t>& in_keyId, const std::vector<uint8_t>& in_iv,
+        ::aidl::android::hardware::drm::Mode in_mode,
+        const ::aidl::android::hardware::drm::Pattern& in_pattern,
+        const std::vector<::aidl::android::hardware::drm::SubSample>& in_subSamples,
+        const ::aidl::android::hardware::drm::SharedBuffer& in_source, int64_t in_offset,
+        const ::aidl::android::hardware::drm::DestinationBuffer& in_destination,
+        ::aidl::android::hardware::drm::DecryptResult* _aidl_return) {
+    UNUSED(in_pattern);
+
+    std::string detailedError;
+
+    _aidl_return->bytesWritten = 0;
+    if (in_secure) {
+        _aidl_return->detailedError = "secure decryption is not supported with ClearKey";
+        return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+    }
+
+    std::lock_guard<std::mutex> shared_buffer_lock(mSharedBufferLock);
+    if (mSharedBufferMap.find(in_source.bufferId) == mSharedBufferMap.end()) {
+        _aidl_return->detailedError = "source decrypt buffer base not set";
+        return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+    }
+
+    if (in_destination.type == BufferType::SHARED_MEMORY) {
+        const SharedBuffer& dest = in_destination.nonsecureMemory;
+        if (mSharedBufferMap.find(dest.bufferId) == mSharedBufferMap.end()) {
+            _aidl_return->detailedError = "destination decrypt buffer base not set";
+            return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+        }
+    } else {
+        _aidl_return->detailedError = "destination type not supported";
+        return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+    }
+
+    auto src = mSharedBufferMap[in_source.bufferId];
+    if (src->mBase == nullptr) {
+        _aidl_return->detailedError = "source is a nullptr";
+        return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+    }
+
+    size_t totalSize = 0;
+    if (__builtin_add_overflow(in_source.offset, in_offset, &totalSize) ||
+        __builtin_add_overflow(totalSize, in_source.size, &totalSize) || totalSize > src->mSize) {
+        android_errorWriteLog(0x534e4554, "176496160");
+        _aidl_return->detailedError = "invalid buffer size";
+        return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+    }
+
+    // destination.type == BufferType::SHARED_MEMORY
+    const SharedBuffer& destBuffer = in_destination.nonsecureMemory;
+    auto dest = mSharedBufferMap[destBuffer.bufferId];
+    if (dest->mBase == nullptr) {
+        _aidl_return->detailedError = "destination is a nullptr";
+        return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+    }
+
+    totalSize = 0;
+    if (__builtin_add_overflow(destBuffer.offset, destBuffer.size, &totalSize) ||
+        totalSize > dest->mSize) {
+        android_errorWriteLog(0x534e4554, "176444622");
+        _aidl_return->detailedError = "invalid buffer size";
+        return toNdkScopedAStatus(Status::ERROR_DRM_FRAME_TOO_LARGE);
+    }
+
+    // Calculate the output buffer size and determine if any subsamples are
+    // encrypted.
+    uint8_t* srcPtr = src->mBase + in_source.offset + in_offset;
+    uint8_t* destPtr = dest->mBase + in_destination.nonsecureMemory.offset;
+    size_t destSize = 0;
+    size_t srcSize = 0;
+    bool haveEncryptedSubsamples = false;
+    for (size_t i = 0; i < in_subSamples.size(); i++) {
+        const SubSample& subSample = in_subSamples[i];
+        if (__builtin_add_overflow(destSize, subSample.numBytesOfClearData, &destSize) ||
+            __builtin_add_overflow(srcSize, subSample.numBytesOfClearData, &srcSize)) {
+            _aidl_return->detailedError = "subsample clear size overflow";
+            return toNdkScopedAStatus(Status::ERROR_DRM_FRAME_TOO_LARGE);
+        }
+        if (__builtin_add_overflow(destSize, subSample.numBytesOfEncryptedData, &destSize) ||
+            __builtin_add_overflow(srcSize, subSample.numBytesOfEncryptedData, &srcSize)) {
+            _aidl_return->detailedError = "subsample encrypted size overflow";
+            return toNdkScopedAStatus(Status::ERROR_DRM_FRAME_TOO_LARGE);
+        }
+        if (subSample.numBytesOfEncryptedData > 0) {
+            haveEncryptedSubsamples = true;
+        }
+    }
+
+    if (destSize > destBuffer.size || srcSize > in_source.size) {
+        _aidl_return->detailedError = "subsample sum too large";
+        return toNdkScopedAStatus(Status::ERROR_DRM_FRAME_TOO_LARGE);
+    }
+
+    if (in_mode == Mode::UNENCRYPTED) {
+        if (haveEncryptedSubsamples) {
+            _aidl_return->detailedError =
+                    "Encrypted subsamples found in allegedly unencrypted data.";
+            return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+        }
+
+        size_t offset = 0;
+        for (size_t i = 0; i < in_subSamples.size(); ++i) {
+            const SubSample& subSample = in_subSamples[i];
+            if (subSample.numBytesOfClearData != 0) {
+                memcpy(reinterpret_cast<uint8_t*>(destPtr) + offset,
+                       reinterpret_cast<const uint8_t*>(srcPtr) + offset,
+                       subSample.numBytesOfClearData);
+                offset += subSample.numBytesOfClearData;
+            }
+        }
+
+        _aidl_return->bytesWritten = static_cast<ssize_t>(offset);
+        _aidl_return->detailedError = "";
+        return toNdkScopedAStatus(Status::OK);
+    } else if (in_mode == Mode::AES_CTR) {
+        size_t bytesDecrypted{};
+        std::vector<int32_t> clearDataLengths;
+        std::vector<int32_t> encryptedDataLengths;
+        for (auto ss : in_subSamples) {
+            clearDataLengths.push_back(ss.numBytesOfClearData);
+            encryptedDataLengths.push_back(ss.numBytesOfEncryptedData);
+        }
+        auto res =
+                mSession->decrypt(in_keyId.data(), in_iv.data(),
+                                  srcPtr, static_cast<uint8_t*>(destPtr),
+                                  clearDataLengths, encryptedDataLengths,
+                                  &bytesDecrypted);
+        if (res == clearkeydrm::OK) {
+            _aidl_return->bytesWritten = static_cast<ssize_t>(bytesDecrypted);
+            _aidl_return->detailedError = "";
+            return toNdkScopedAStatus(Status::OK);
+        } else {
+            _aidl_return->bytesWritten = 0;
+            _aidl_return->detailedError = "Decryption Error";
+            return toNdkScopedAStatus(static_cast<Status>(res));
+        }
+    } else {
+        _aidl_return->bytesWritten = 0;
+        _aidl_return->detailedError =
+                "selected encryption mode is not supported by the ClearKey DRM \
+Plugin";
+        return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+    }
+}
+
+::ndk::ScopedAStatus CryptoPlugin::getLogMessages(
+        std::vector<::aidl::android::hardware::drm::LogMessage>* _aidl_return) {
+    using std::chrono::duration_cast;
+    using std::chrono::milliseconds;
+    using std::chrono::system_clock;
+
+    auto timeMillis = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
+
+    std::vector<::aidl::android::hardware::drm::LogMessage> logs = {
+            {timeMillis, ::aidl::android::hardware::drm::LogPriority::ERROR,
+             std::string("Not implemented")}};
+    *_aidl_return = logs;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus CryptoPlugin::notifyResolution(int32_t in_width, int32_t in_height) {
+    UNUSED(in_width);
+    UNUSED(in_height);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus CryptoPlugin::requiresSecureDecoderComponent(const std::string& in_mime,
+                                                                  bool* _aidl_return) {
+    UNUSED(in_mime);
+    *_aidl_return = false;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus CryptoPlugin::setMediaDrmSession(const std::vector<uint8_t>& in_sessionId) {
+    Status status = Status::OK;
+    if (!in_sessionId.size()) {
+        mSession = nullptr;
+    } else {
+        mSession = SessionLibrary::get()->findSession(in_sessionId);
+        if (!mSession.get()) {
+            status = Status::ERROR_DRM_SESSION_NOT_OPENED;
+        }
+    }
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus CryptoPlugin::setSharedBufferBase(
+        const ::aidl::android::hardware::common::Ashmem& in_base, int32_t in_bufferId) {
+    std::lock_guard<std::mutex> shared_buffer_lock(mSharedBufferLock);
+    mSharedBufferMap[in_bufferId] = std::make_shared<SharedBufferBase>(in_base);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+SharedBufferBase::SharedBufferBase(const ::aidl::android::hardware::common::Ashmem& mem)
+        : mBase(nullptr),
+          mSize(mem.size) {
+    if (mem.fd.get() < 0) {
+        return;
+    }
+    auto addr = mmap(nullptr, mem.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                     mem.fd.get(), 0);
+    if (addr == MAP_FAILED) {
+        ALOGE("mmap err: fd %d; errno %s",
+              mem.fd.get(), strerror(errno));
+    } else {
+        mBase = static_cast<uint8_t*>(addr);
+    }
+}
+
+SharedBufferBase::~SharedBufferBase() {
+    if (munmap(mBase, mSize)) {
+        ALOGE("munmap err: base %p; errno %s",
+              mBase, strerror(errno));
+    }
+}
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
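
A note on the bounds checks in CryptoPlugin::decrypt above: every offset and subsample size is accumulated with __builtin_add_overflow before being compared against the mapped buffer sizes, so a hostile SubSample list cannot wrap the counters and slip past the range check. A self-contained sketch of the same pattern, with illustrative names not taken from this patch:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct SubSampleSizes {
        uint32_t clearBytes;
        uint32_t encryptedBytes;
    };

    // Returns false if the subsample totals overflow or exceed the buffer capacity.
    bool sumSubSamples(const std::vector<SubSampleSizes>& subSamples,
                       size_t bufferCapacity, size_t* total) {
        size_t sum = 0;
        for (const auto& ss : subSamples) {
            if (__builtin_add_overflow(sum, ss.clearBytes, &sum) ||
                __builtin_add_overflow(sum, ss.encryptedBytes, &sum)) {
                return false;  // arithmetic overflow
            }
        }
        if (sum > bufferCapacity) {
            return false;  // would read or write past the mapped buffer
        }
        *total = sum;
        return true;
    }
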
diff --git a/drm/mediadrm/plugins/clearkey/aidl/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/aidl/DrmFactory.cpp
new file mode 100644
index 0000000..168a661
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/DrmFactory.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-DrmFactory"
+
+#include <utils/Log.h>
+
+#include "DrmFactory.h"
+
+#include "ClearKeyUUID.h"
+#include "DrmPlugin.h"
+#include "MimeTypeStdStr.h"
+#include "SessionLibrary.h"
+#include "AidlUtils.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+using std::string;
+using std::vector;
+
+using ::aidl::android::hardware::drm::SecurityLevel;
+using ::aidl::android::hardware::drm::Status;
+using ::aidl::android::hardware::drm::Uuid;
+
+::ndk::ScopedAStatus DrmFactory::createPlugin(
+        const Uuid& in_uuid, const string& in_appPackageName,
+        std::shared_ptr<::aidl::android::hardware::drm::IDrmPlugin>* _aidl_return) {
+    UNUSED(in_appPackageName);
+
+    if (!isClearKeyUUID(in_uuid.uuid.data())) {
+        ALOGE("Clearkey Drm HAL: failed to create drm plugin, "
+              "invalid crypto scheme");
+        *_aidl_return = nullptr;
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    std::shared_ptr<DrmPlugin> plugin =
+            ::ndk::SharedRefBase::make<DrmPlugin>(SessionLibrary::get());
+    *_aidl_return = plugin;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmFactory::getSupportedCryptoSchemes(vector<Uuid>* _aidl_return) {
+    vector<Uuid> schemes;
+    Uuid scheme;
+    for (const auto& uuid : ::aidl::android::hardware::drm::clearkey::getSupportedCryptoSchemes()) {
+        scheme.uuid.assign(uuid.begin(), uuid.end());
+        schemes.push_back(scheme);
+    }
+    *_aidl_return = schemes;
+    return ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus DrmFactory::isContentTypeSupported(const string& in_mimeType,
+                                                        bool* _aidl_return) {
+    // This should match the MIME types handled by InitDataParser.
+    *_aidl_return = in_mimeType == kIsoBmffVideoMimeType || in_mimeType == kIsoBmffAudioMimeType ||
+                    in_mimeType == kCencInitDataFormat || in_mimeType == kWebmVideoMimeType ||
+                    in_mimeType == kWebmAudioMimeType || in_mimeType == kWebmInitDataFormat;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus DrmFactory::isCryptoSchemeSupported(const Uuid& in_uuid,
+                                                         const string& in_mimeType,
+                                                         SecurityLevel in_securityLevel,
+                                                         bool* _aidl_return) {
+    bool isSupportedMimeType = false;
+    if (!isContentTypeSupported(in_mimeType, &isSupportedMimeType).isOk()) {
+        ALOGD("%s mime type is not supported by crypto scheme", in_mimeType.c_str());
+    }
+    *_aidl_return = isClearKeyUUID(in_uuid.uuid.data()) && isSupportedMimeType &&
+                    in_securityLevel == SecurityLevel::SW_SECURE_CRYPTO;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+binder_status_t DrmFactory::dump(int fd, const char** args, uint32_t numArgs) {
+    UNUSED(args);
+    UNUSED(numArgs);
+
+    if (fd < 0) {
+        ALOGE("%s: negative fd", __FUNCTION__);
+        return STATUS_BAD_VALUE;
+    }
+
+    uint32_t currentSessions = SessionLibrary::get()->numOpenSessions();
+    dprintf(fd, "current open sessions: %u\n", currentSessions);
+
+    return STATUS_OK;
+}
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
new file mode 100644
index 0000000..92bea66
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
@@ -0,0 +1,1032 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-DrmPlugin"
+
+#include <aidl/android/hardware/drm/DrmMetric.h>
+#include <utils/Log.h>
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <chrono>
+
+#include "AidlUtils.h"
+#include "ClearKeyDrmProperties.h"
+#include "DrmPlugin.h"
+#include "Session.h"
+#include "Utils.h"
+
+namespace {
+const std::string kKeySetIdPrefix("ckid");
+const int kKeySetIdLength = 16;
+const int kSecureStopIdStart = 100;
+const std::string kOfflineLicense("\"type\":\"persistent-license\"");
+const std::string kStreaming("Streaming");
+const std::string kTemporaryLicense("\"type\":\"temporary\"");
+const std::string kTrue("True");
+
+const std::string kQueryKeyLicenseType("LicenseType");
+// Value: "Streaming" or "Offline"
+const std::string kQueryKeyPlayAllowed("PlayAllowed");
+// Value: "True" or "False"
+const std::string kQueryKeyRenewAllowed("RenewAllowed");
+// Value: "True" or "False"
+
+const int kSecureStopIdSize = 10;
+
+std::vector<uint8_t> uint32ToVector(uint32_t value) {
+    // kSecureStopIdSize bytes hold the decimal digits of the id; snprintf
+    // NUL-terminates within the buffer, truncating ids longer than 9 digits.
+    char buffer[kSecureStopIdSize];
+    memset(buffer, 0, kSecureStopIdSize);
+    snprintf(buffer, kSecureStopIdSize, "%" PRIu32, value);
+    return std::vector<uint8_t>(buffer, buffer + sizeof(buffer));
+}
+
+};  // unnamed namespace
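
uint32ToVector() above always yields a fixed-width id: kSecureStopIdSize (10) bytes holding the ASCII digits of the counter followed by NUL padding. That fixed width is what lets releaseSecureStops() further down treat each secure stop as an equal-length record. A quick illustration, assuming the helper and constant are visible in the same translation unit:

    #include <cassert>

    void checkSecureStopIdFormat() {
        std::vector<uint8_t> id = uint32ToVector(101);
        assert(id.size() == kSecureStopIdSize);           // always 10 bytes
        assert(id[0] == '1' && id[1] == '0' && id[2] == '1');
        assert(id[3] == 0);                               // NUL padding fills the rest
    }
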
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+using ::android::Mutex;
+
+DrmPlugin::DrmPlugin(SessionLibrary* sessionLibrary)
+    : mSessionLibrary(sessionLibrary),
+      mOpenSessionOkCount(0),
+      mCloseSessionOkCount(0),
+      mCloseSessionNotOpenedCount(0),
+      mNextSecureStopId(kSecureStopIdStart),
+      mMockError(Status::OK) {
+    mPlayPolicy.clear();
+    initProperties();
+    mSecureStops.clear();
+    mReleaseKeysMap.clear();
+    std::srand(std::time(nullptr));
+}
+
+void DrmPlugin::initProperties() {
+    mStringProperties.clear();
+    mStringProperties[kVendorKey] = kVendorValue;
+    mStringProperties[kVersionKey] = kVersionValue;
+    mStringProperties[kPluginDescriptionKey] = kPluginDescriptionValue;
+    mStringProperties[kAlgorithmsKey] = kAlgorithmsValue;
+    mStringProperties[kListenerTestSupportKey] = kListenerTestSupportValue;
+    mStringProperties[kDrmErrorTestKey] = kDrmErrorTestValue;
+
+    std::vector<uint8_t> valueVector;
+    valueVector.clear();
+    valueVector.insert(valueVector.end(), kTestDeviceIdData,
+                       kTestDeviceIdData + sizeof(kTestDeviceIdData) / sizeof(uint8_t));
+    mByteArrayProperties[kDeviceIdKey] = valueVector;
+
+    valueVector.clear();
+    valueVector.insert(valueVector.end(), kMetricsData,
+                       kMetricsData + sizeof(kMetricsData) / sizeof(uint8_t));
+    mByteArrayProperties[kMetricsKey] = valueVector;
+}
+
+// The secure stop in the ClearKey implementation is not installed securely.
+// This function merely creates a test environment for exercising the secure
+// stop APIs. The content of this secure stop is implementation dependent;
+// the clearkey secureStop does not serve as a reference implementation.
+void DrmPlugin::installSecureStop(const std::vector<uint8_t>& sessionId) {
+    ::android::Mutex::Autolock lock(mSecureStopLock);
+
+    ClearkeySecureStop clearkeySecureStop;
+    clearkeySecureStop.id = uint32ToVector(++mNextSecureStopId);
+    clearkeySecureStop.data.assign(sessionId.begin(), sessionId.end());
+
+    mSecureStops.insert(std::pair<std::vector<uint8_t>, ClearkeySecureStop>(clearkeySecureStop.id,
+                                                                            clearkeySecureStop));
+}
+
+::ndk::ScopedAStatus DrmPlugin::closeSession(const std::vector<uint8_t>& in_sessionId) {
+    if (in_sessionId.size() == 0) {
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    ::android::sp<Session> session = mSessionLibrary->findSession(in_sessionId);
+    if (session.get()) {
+        mSessionLibrary->destroySession(session);
+        if (session->getMockError() != clearkeydrm::OK) {
+            sendSessionLostState(in_sessionId);
+            return toNdkScopedAStatus(Status::ERROR_DRM_INVALID_STATE);
+        }
+        mCloseSessionOkCount++;
+        return toNdkScopedAStatus(Status::OK);
+    }
+    mCloseSessionNotOpenedCount++;
+    return toNdkScopedAStatus(Status::ERROR_DRM_SESSION_NOT_OPENED);
+}
+
+::ndk::ScopedAStatus DrmPlugin::decrypt(const std::vector<uint8_t>& in_sessionId,
+                                        const std::vector<uint8_t>& in_keyId,
+                                        const std::vector<uint8_t>& in_input,
+                                        const std::vector<uint8_t>& in_iv,
+                                        std::vector<uint8_t>* _aidl_return) {
+    *_aidl_return = {};
+    if (in_sessionId.size() == 0 || in_keyId.size() == 0 || in_input.size() == 0 ||
+        in_iv.size() == 0) {
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+::ndk::ScopedAStatus DrmPlugin::encrypt(const std::vector<uint8_t>& in_sessionId,
+                                        const std::vector<uint8_t>& in_keyId,
+                                        const std::vector<uint8_t>& in_input,
+                                        const std::vector<uint8_t>& in_iv,
+                                        std::vector<uint8_t>* _aidl_return) {
+    *_aidl_return = {};
+    if (in_sessionId.size() == 0 || in_keyId.size() == 0 || in_input.size() == 0 ||
+        in_iv.size() == 0) {
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getHdcpLevels(
+        ::aidl::android::hardware::drm::HdcpLevels* _aidl_return) {
+    _aidl_return->connectedLevel = HdcpLevel::HDCP_NONE;
+    _aidl_return->maxLevel = HdcpLevel::HDCP_NO_OUTPUT;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getKeyRequest(
+        const std::vector<uint8_t>& in_scope, const std::vector<uint8_t>& in_initData,
+        const std::string& in_mimeType, ::aidl::android::hardware::drm::KeyType in_keyType,
+        const std::vector<::aidl::android::hardware::drm::KeyValue>& in_optionalParameters,
+        ::aidl::android::hardware::drm::KeyRequest* _aidl_return) {
+    UNUSED(in_optionalParameters);
+
+    KeyRequestType keyRequestType = KeyRequestType::UNKNOWN;
+    std::string defaultUrl("");
+
+    _aidl_return->request = {};
+    _aidl_return->requestType = keyRequestType;
+    _aidl_return->defaultUrl = defaultUrl;
+
+    if (in_scope.size() == 0 ||
+        (in_keyType != KeyType::STREAMING && in_keyType != KeyType::OFFLINE &&
+         in_keyType != KeyType::RELEASE)) {
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    const std::vector<uint8_t> scopeId = in_scope;
+    ::android::sp<Session> session;
+    if (in_keyType == KeyType::STREAMING || in_keyType == KeyType::OFFLINE) {
+        std::vector<uint8_t> sessionId(scopeId.begin(), scopeId.end());
+        session = mSessionLibrary->findSession(sessionId);
+        if (!session.get()) {
+            return toNdkScopedAStatus(Status::ERROR_DRM_SESSION_NOT_OPENED);
+        } else if (session->getMockError() != clearkeydrm::OK) {
+            return toNdkScopedAStatus(session->getMockError());
+        }
+        keyRequestType = KeyRequestType::INITIAL;
+    }
+
+    std::vector<uint8_t> request = {};
+    auto keyType = static_cast<CdmKeyType>(in_keyType);
+    auto status = session->getKeyRequest(in_initData, in_mimeType, keyType, &request);
+
+    if (in_keyType == KeyType::RELEASE) {
+        std::vector<uint8_t> keySetId(scopeId.begin(), scopeId.end());
+        std::string requestString(request.begin(), request.end());
+        if (requestString.find(kOfflineLicense) != std::string::npos) {
+            std::string emptyResponse;
+            std::string keySetIdString(keySetId.begin(), keySetId.end());
+            if (!mFileHandle.StoreLicense(keySetIdString, DeviceFiles::kLicenseStateReleasing,
+                                          emptyResponse)) {
+                ALOGE("Problem releasing offline license");
+                return toNdkScopedAStatus(Status::ERROR_DRM_UNKNOWN);
+            }
+            if (mReleaseKeysMap.find(keySetIdString) == mReleaseKeysMap.end()) {
+                ::android::sp<Session> session = mSessionLibrary->createSession();
+                mReleaseKeysMap[keySetIdString] = session->sessionId();
+            } else {
+                ALOGI("key is in use, ignore release request");
+            }
+        } else {
+            ALOGE("Offline license not found, nothing to release");
+        }
+        keyRequestType = KeyRequestType::RELEASE;
+    }
+    _aidl_return->request = request;
+    _aidl_return->requestType = keyRequestType;
+    _aidl_return->defaultUrl = defaultUrl;
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getLogMessages(
+        std::vector<::aidl::android::hardware::drm::LogMessage>* _aidl_return) {
+    using std::chrono::duration_cast;
+    using std::chrono::milliseconds;
+    using std::chrono::system_clock;
+
+    auto timeMillis = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
+
+    std::vector<::aidl::android::hardware::drm::LogMessage> logs = {
+            {timeMillis, ::aidl::android::hardware::drm::LogPriority::ERROR,
+             std::string("Not implemented")}};
+    *_aidl_return = logs;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getMetrics(
+        std::vector<::aidl::android::hardware::drm::DrmMetricGroup>* _aidl_return) {
+    // Set the open session count metric.
+    DrmMetricNamedValue openSessionOkAttribute = {"status", static_cast<int64_t>(Status::OK)};
+    DrmMetricNamedValue openSessionMetricValue = {"count", mOpenSessionOkCount};
+    DrmMetric openSessionMetric = {
+            "open_session", {openSessionOkAttribute}, {openSessionMetricValue}};
+
+    // Set the close session count metric.
+    DrmMetricNamedValue closeSessionOkAttribute = {"status", static_cast<int64_t>(Status::OK)};
+    DrmMetricNamedValue closeSessionMetricValue = {"count", mCloseSessionOkCount};
+    DrmMetric closeSessionMetric = {
+            "close_session", {closeSessionOkAttribute}, {closeSessionMetricValue}};
+
+    // Set the close session, not opened metric.
+    DrmMetricNamedValue closeSessionNotOpenedAttribute = {"status",
+            static_cast<int64_t>(Status::ERROR_DRM_SESSION_NOT_OPENED)};
+    DrmMetricNamedValue closeSessionNotOpenedMetricValue = {"count", mCloseSessionNotOpenedCount};
+    DrmMetric closeSessionNotOpenedMetric = {
+            "close_session", {closeSessionNotOpenedAttribute}, {closeSessionNotOpenedMetricValue}};
+
+    // Set the setPlaybackId metric.
+    std::vector<DrmMetricNamedValue> sids = {};
+    std::vector<DrmMetricNamedValue> playbackIds = {};
+    for (const auto& [key, value] : mPlaybackId) {
+        std::string sid(key.begin(), key.end());
+        DrmMetricNamedValue sessionIdAttribute = {"sid", sid};
+        sids.push_back(sessionIdAttribute);
+
+        DrmMetricNamedValue playbackIdMetricValue = {"playbackId", value};
+        playbackIds.push_back(playbackIdMetricValue);
+    }
+    DrmMetric setPlaybackIdMetric = {"set_playback_id", sids, playbackIds};
+
+    DrmMetricGroup metrics = {{openSessionMetric, closeSessionMetric, closeSessionNotOpenedMetric,
+            setPlaybackIdMetric}};
+
+    *_aidl_return = {metrics};
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getNumberOfSessions(
+        ::aidl::android::hardware::drm::NumberOfSessions* _aidl_return) {
+    _aidl_return->currentSessions = mSessionLibrary->numOpenSessions();
+    _aidl_return->maxSessions = 10;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getOfflineLicenseKeySetIds(
+        std::vector<::aidl::android::hardware::drm::KeySetId>* _aidl_return) {
+    std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
+    std::vector<KeySetId> keySetIds = {};
+    if (mMockError != Status::OK) {
+        *_aidl_return = keySetIds;
+        return toNdkScopedAStatus(toMockStatus(mMockError));
+    }
+    for (const auto& name : licenseNames) {
+        std::vector<uint8_t> keySetId(name.begin(), name.end());
+        KeySetId id = {};
+        id.keySetId = keySetId;
+        keySetIds.push_back(id);
+    }
+    *_aidl_return = keySetIds;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getOfflineLicenseState(
+        const ::aidl::android::hardware::drm::KeySetId& in_keySetId,
+        ::aidl::android::hardware::drm::OfflineLicenseState* _aidl_return) {
+    std::string licenseName(in_keySetId.keySetId.begin(), in_keySetId.keySetId.end());
+    DeviceFiles::LicenseState state;
+    std::string license;
+    OfflineLicenseState licenseState = OfflineLicenseState::UNKNOWN;
+    Status status = Status::OK;
+    if (mMockError != Status::OK) {
+        *_aidl_return = licenseState;
+        return toNdkScopedAStatus(toMockStatus(mMockError));
+    } else if (mFileHandle.RetrieveLicense(licenseName, &state, &license)) {
+        switch (state) {
+            case DeviceFiles::kLicenseStateActive:
+                licenseState = OfflineLicenseState::USABLE;
+                break;
+            case DeviceFiles::kLicenseStateReleasing:
+                licenseState = OfflineLicenseState::INACTIVE;
+                break;
+            case DeviceFiles::kLicenseStateUnknown:
+                licenseState = OfflineLicenseState::UNKNOWN;
+                break;
+        }
+    } else {
+        status = Status::BAD_VALUE;
+    }
+    *_aidl_return = licenseState;
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getPropertyByteArray(const std::string& in_propertyName,
+                                                     std::vector<uint8_t>* _aidl_return) {
+    std::map<std::string, std::vector<uint8_t>>::iterator itr =
+            mByteArrayProperties.find(std::string(in_propertyName.c_str()));
+    Status status = Status::OK;
+    if (itr != mByteArrayProperties.end()) {
+        *_aidl_return = itr->second;
+    } else {
+        ALOGE("App requested unknown property: %s", in_propertyName.c_str());
+        status = Status::BAD_VALUE;
+        *_aidl_return = {};
+    }
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getPropertyString(const std::string& in_propertyName,
+                                                  std::string* _aidl_return) {
+    std::string name(in_propertyName.c_str());
+    std::string value;
+    Status status = Status::OK;
+
+    if (name == kVendorKey) {
+        value = mStringProperties[kVendorKey];
+    } else if (name == kVersionKey) {
+        value = mStringProperties[kVersionKey];
+    } else if (name == kPluginDescriptionKey) {
+        value = mStringProperties[kPluginDescriptionKey];
+    } else if (name == kAlgorithmsKey) {
+        value = mStringProperties[kAlgorithmsKey];
+    } else if (name == kListenerTestSupportKey) {
+        value = mStringProperties[kListenerTestSupportKey];
+    } else if (name == kDrmErrorTestKey) {
+        value = mStringProperties[kDrmErrorTestKey];
+    } else {
+        ALOGE("App requested unknown string property %s", name.c_str());
+        status = Status::ERROR_DRM_CANNOT_HANDLE;
+    }
+    *_aidl_return = value;
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getProvisionRequest(
+        const std::string& in_certificateType, const std::string& in_certificateAuthority,
+        ::aidl::android::hardware::drm::ProvisionRequest* _aidl_return) {
+    UNUSED(in_certificateType);
+    UNUSED(in_certificateAuthority);
+    _aidl_return->request = {};
+    _aidl_return->defaultUrl = {};
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getSecureStop(
+        const ::aidl::android::hardware::drm::SecureStopId& in_secureStopId,
+        ::aidl::android::hardware::drm::SecureStop* _aidl_return) {
+    std::vector<uint8_t> stop = {};
+
+    mSecureStopLock.lock();
+    auto itr = mSecureStops.find(in_secureStopId.secureStopId);
+    if (itr != mSecureStops.end()) {
+        ClearkeySecureStop clearkeyStop = itr->second;
+        stop.assign(clearkeyStop.id.begin(), clearkeyStop.id.end());
+        stop.assign(clearkeyStop.data.begin(), clearkeyStop.data.end());
+    }
+    mSecureStopLock.unlock();
+
+    SecureStop secureStop = {};
+    Status status = Status::OK;
+    if (!stop.empty()) {
+        secureStop.opaqueData = stop;
+    } else {
+        status = Status::BAD_VALUE;
+    }
+    *_aidl_return = secureStop;
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getSecureStopIds(
+        std::vector<::aidl::android::hardware::drm::SecureStopId>* _aidl_return) {
+    mSecureStopLock.lock();
+    std::vector<::aidl::android::hardware::drm::SecureStopId> ids;
+    for (auto itr = mSecureStops.begin(); itr != mSecureStops.end(); ++itr) {
+        SecureStopId id;
+        id.secureStopId = itr->first;
+        ids.push_back(id);
+    }
+    mSecureStopLock.unlock();
+
+    *_aidl_return = ids;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getSecureStops(
+        std::vector<::aidl::android::hardware::drm::SecureStop>* _aidl_return) {
+    mSecureStopLock.lock();
+    std::vector<::aidl::android::hardware::drm::SecureStop> stops;
+    for (auto itr = mSecureStops.begin(); itr != mSecureStops.end(); ++itr) {
+        ClearkeySecureStop clearkeyStop = itr->second;
+        std::vector<uint8_t> stop = {};
+        stop.assign(clearkeyStop.id.begin(), clearkeyStop.id.end());
+        stop.assign(clearkeyStop.data.begin(), clearkeyStop.data.end());
+
+        SecureStop secureStop;
+        secureStop.opaqueData = stop;
+        stops.push_back(secureStop);
+    }
+    mSecureStopLock.unlock();
+
+    *_aidl_return = stops;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::getSecurityLevel(
+        const std::vector<uint8_t>& in_sessionId,
+        ::aidl::android::hardware::drm::SecurityLevel* _aidl_return) {
+    if (in_sessionId.size() == 0) {
+        *_aidl_return = ::aidl::android::hardware::drm::SecurityLevel::UNKNOWN;
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    std::vector<uint8_t> sid = in_sessionId;
+    ::android::sp<Session> session = mSessionLibrary->findSession(sid);
+    if (!session.get()) {
+        *_aidl_return = SecurityLevel::UNKNOWN;
+        return toNdkScopedAStatus(Status::ERROR_DRM_SESSION_NOT_OPENED);
+    }
+
+    std::map<std::vector<uint8_t>, ::aidl::android::hardware::drm::SecurityLevel>::iterator itr =
+            mSecurityLevel.find(sid);
+    if (itr == mSecurityLevel.end()) {
+        ALOGE("Session id not found");
+        *_aidl_return = SecurityLevel::UNKNOWN;
+        return toNdkScopedAStatus(Status::ERROR_DRM_INVALID_STATE);
+    }
+
+    *_aidl_return = itr->second;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::openSession(
+        ::aidl::android::hardware::drm::SecurityLevel in_securityLevel,
+        std::vector<uint8_t>* _aidl_return) {
+    ::android::sp<Session> session = mSessionLibrary->createSession();
+    processMockError(session);
+    std::vector<uint8_t> sessionId = session->sessionId();
+
+    Status status = setSecurityLevel(sessionId, in_securityLevel);
+    if (status == Status::OK) {
+        mOpenSessionOkCount++;
+    } else {
+        mSessionLibrary->destroySession(session);
+        sessionId.clear();
+    }
+    *_aidl_return = sessionId;
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::provideKeyResponse(
+        const std::vector<uint8_t>& in_scope, const std::vector<uint8_t>& in_response,
+        ::aidl::android::hardware::drm::KeySetId* _aidl_return) {
+    if (in_scope.size() == 0 || in_response.size() == 0) {
+        // Returns empty keySetId
+        *_aidl_return = {};
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    std::string responseString(reinterpret_cast<const char*>(in_response.data()),
+                               in_response.size());
+    const std::vector<uint8_t> scopeId = in_scope;
+    std::vector<uint8_t> sessionId = {};
+    std::string keySetId;
+
+    bool isOfflineLicense = responseString.find(kOfflineLicense) != std::string::npos;
+    if (scopeId.size() < kKeySetIdPrefix.size()) {
+        android_errorWriteLog(0x534e4554, "144507096");
+        *_aidl_return = {};
+        return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+    }
+    bool isRelease = (memcmp(scopeId.data(), kKeySetIdPrefix.data(), kKeySetIdPrefix.size()) == 0);
+    if (isRelease) {
+        keySetId.assign(scopeId.begin(), scopeId.end());
+
+        auto iter = mReleaseKeysMap.find(std::string(keySetId.begin(), keySetId.end()));
+        if (iter != mReleaseKeysMap.end()) {
+            sessionId.assign(iter->second.begin(), iter->second.end());
+        }
+    } else {
+        sessionId.assign(scopeId.begin(), scopeId.end());
+        // a non-offline license returns an empty keySetId
+        keySetId.clear();
+    }
+
+    ::android::sp<Session> session = mSessionLibrary->findSession(sessionId);
+    if (!session.get()) {
+        *_aidl_return = {};
+        return toNdkScopedAStatus(Status::ERROR_DRM_SESSION_NOT_OPENED);
+    }
+    setPlayPolicy();
+
+    auto res = session->provideKeyResponse(in_response);
+    if (res == clearkeydrm::OK) {
+        if (isOfflineLicense) {
+            if (isRelease) {
+                mFileHandle.DeleteLicense(keySetId);
+                mSessionLibrary->destroySession(session);
+            } else {
+                if (!makeKeySetId(&keySetId)) {
+                    *_aidl_return = {};
+                    return toNdkScopedAStatus(Status::ERROR_DRM_UNKNOWN);
+                }
+
+                bool ok = mFileHandle.StoreLicense(
+                        keySetId, DeviceFiles::kLicenseStateActive,
+                        std::string(in_response.begin(), in_response.end()));
+                if (!ok) {
+                    ALOGE("Failed to store offline license");
+                }
+            }
+        }
+
+        // Test calling AMediaDrm listeners.
+        sendEvent(EventType::VENDOR_DEFINED, sessionId, sessionId);
+
+        sendExpirationUpdate(sessionId, 100);
+
+        std::vector<KeyStatus> keysStatus = {};
+        KeyStatus keyStatus;
+
+        std::vector<uint8_t> keyId1 = {0xA, 0xB, 0xC};
+        keyStatus.keyId = keyId1;
+        keyStatus.type = KeyStatusType::USABLE;
+        keysStatus.push_back(keyStatus);
+
+        std::vector<uint8_t> keyId2 = {0xD, 0xE, 0xF};
+        keyStatus.keyId = keyId2;
+        keyStatus.type = KeyStatusType::EXPIRED;
+        keysStatus.push_back(keyStatus);
+
+        std::vector<uint8_t> keyId3 = {0x0, 0x1, 0x2};
+        keyStatus.keyId = keyId3;
+        keyStatus.type = KeyStatusType::USABLEINFUTURE;
+        keysStatus.push_back(keyStatus);
+
+        sendKeysChange(sessionId, keysStatus, true);
+
+        installSecureStop(sessionId);
+    } else {
+        ALOGE("provideKeyResponse returns error=%d", res);
+    }
+
+    std::vector<uint8_t> keySetIdVec(keySetId.begin(), keySetId.end());
+    _aidl_return->keySetId = keySetIdVec;
+    return toNdkScopedAStatus(res);
+}
+
+::ndk::ScopedAStatus DrmPlugin::provideProvisionResponse(
+        const std::vector<uint8_t>& in_response,
+        ::aidl::android::hardware::drm::ProvideProvisionResponseResult* _aidl_return) {
+    Status status = Status::ERROR_DRM_CANNOT_HANDLE;
+    _aidl_return->certificate = {};
+    _aidl_return->wrappedKey = {};
+    if (in_response.size() == 0) {
+        status = Status::BAD_VALUE;
+    }
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::queryKeyStatus(
+        const std::vector<uint8_t>& in_sessionId,
+        std::vector<::aidl::android::hardware::drm::KeyValue>* _aidl_return) {
+    if (in_sessionId.size() == 0) {
+        // Returns an empty list of key-status KeyValue pairs
+        *_aidl_return = {};
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    std::vector<KeyValue> infoMap = {};
+    mPlayPolicyLock.lock();
+    KeyValue keyValuePair;
+    for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
+        keyValuePair.key = mPlayPolicy[i].key;
+        keyValuePair.value = mPlayPolicy[i].value;
+        infoMap.push_back(keyValuePair);
+    }
+    mPlayPolicyLock.unlock();
+    *_aidl_return = infoMap;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::releaseAllSecureStops() {
+    Status status = Status::OK;
+    const auto res = removeAllSecureStops();
+    if (!res.isOk() && res.getExceptionCode() == EX_SERVICE_SPECIFIC) {
+        status = static_cast<Status>(res.getServiceSpecificError());
+    }
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::releaseSecureStop(
+        const ::aidl::android::hardware::drm::SecureStopId& in_secureStopId) {
+    Status status = Status::OK;
+    const auto res = removeSecureStop(in_secureStopId);
+    if (!res.isOk() && res.getExceptionCode() == EX_SERVICE_SPECIFIC) {
+        status = static_cast<Status>(res.getServiceSpecificError());
+    }
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::releaseSecureStops(
+        const ::aidl::android::hardware::drm::OpaqueData& in_ssRelease) {
+    // OpaqueData starts with a 4-byte decimal integer string
+    const size_t kFourBytesOffset = 4;
+    if (in_ssRelease.opaqueData.size() < kFourBytesOffset) {
+        ALOGE("Invalid secureStopRelease length");
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    Status status = Status::OK;
+    std::vector<uint8_t> input = in_ssRelease.opaqueData;
+
+    if (input.size() < kSecureStopIdSize + kFourBytesOffset) {
+        // The minimum secureStops payload must contain
+        // a 4-byte count and one secureStop id
+        ALOGE("Total size of secureStops is too short");
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    // The format of opaqueData is shared between the server
+    // and the drm service. The clearkey implementation consists of:
+    //    count - number of secure stops
+    //    list of fixed length secure stops
+    size_t countBufferSize = sizeof(uint32_t);
+    if (input.size() < countBufferSize) {
+        // SafetyNet logging
+        android_errorWriteLog(0x534e4554, "144766455");
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+    uint32_t count = 0;
+    sscanf(reinterpret_cast<char*>(input.data()), "%04" PRIu32, &count);
+
+    // Avoid divide by 0 below.
+    if (count == 0) {
+        ALOGE("Invalid 0 secureStop count");
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    // Computes the fixed length secureStop size
+    size_t secureStopSize = (input.size() - kFourBytesOffset) / count;
+    if (secureStopSize < kSecureStopIdSize) {
+        // A valid secureStop contains the id plus data
+        ALOGE("Invalid secureStop size");
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+    uint8_t* buffer = new uint8_t[secureStopSize];
+    size_t offset = kFourBytesOffset;  // skip the count
+    for (size_t i = 0; i < count; ++i, offset += secureStopSize) {
+        memcpy(buffer, input.data() + offset, secureStopSize);
+
+        // A secureStop contains id+data; we only use the id for removal
+        std::vector<uint8_t> id(buffer, buffer + kSecureStopIdSize);
+        ::aidl::android::hardware::drm::SecureStopId secureStopId{id};
+        const auto res = removeSecureStop(secureStopId);
+        if (!res.isOk() && res.getExceptionCode() == EX_SERVICE_SPECIFIC) {
+            status = static_cast<Status>(res.getServiceSpecificError());
+        }
+        if (Status::OK != status) break;
+    }
+
+    delete[] buffer;
+    return toNdkScopedAStatus(status);
+}
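
The opaqueData layout parsed above is a 4-character decimal count followed by count equal-length records, each beginning with a kSecureStopIdSize-byte id. A minimal sketch of building such a payload for testing, assuming every record has already been padded to the same fixed length (the helper name is illustrative, not part of this patch):

    #include <cstdio>
    #include <vector>

    std::vector<uint8_t> makeSsRelease(const std::vector<std::vector<uint8_t>>& records) {
        // "%04zu" matches the 4-character count prefix expected by releaseSecureStops();
        // counts above 9999 would not fit and are out of scope for this sketch.
        char prefix[5];
        snprintf(prefix, sizeof(prefix), "%04zu", records.size());

        std::vector<uint8_t> out(prefix, prefix + 4);
        for (const auto& record : records) {
            // each record: kSecureStopIdSize-byte id followed by opaque data
            out.insert(out.end(), record.begin(), record.end());
        }
        return out;
    }
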
+
+::ndk::ScopedAStatus DrmPlugin::removeAllSecureStops() {
+    Mutex::Autolock lock(mSecureStopLock);
+
+    mSecureStops.clear();
+    mNextSecureStopId = kSecureStopIdStart;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::removeKeys(const std::vector<uint8_t>& in_sessionId) {
+    Status status = Status::ERROR_DRM_CANNOT_HANDLE;
+    if (in_sessionId.size() == 0) {
+        status = Status::BAD_VALUE;
+    }
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::removeOfflineLicense(
+        const ::aidl::android::hardware::drm::KeySetId& in_keySetId) {
+    if (mMockError != Status::OK) {
+        return toNdkScopedAStatus(toMockStatus(mMockError));
+    }
+    Status status = Status::BAD_VALUE;
+    std::string licenseName(in_keySetId.keySetId.begin(), in_keySetId.keySetId.end());
+    if (mFileHandle.DeleteLicense(licenseName)) {
+        status = Status::OK;
+    }
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::removeSecureStop(
+        const ::aidl::android::hardware::drm::SecureStopId& in_secureStopId) {
+    Mutex::Autolock lock(mSecureStopLock);
+
+    Status status = Status::OK;
+    if (1 != mSecureStops.erase(in_secureStopId.secureStopId)) {
+        status = Status::BAD_VALUE;
+    }
+    return toNdkScopedAStatus(status);
+}
+
+::ndk::ScopedAStatus DrmPlugin::requiresSecureDecoder(
+        const std::string& in_mime, ::aidl::android::hardware::drm::SecurityLevel in_level,
+        bool* _aidl_return) {
+    UNUSED(in_mime);
+    UNUSED(in_level);
+    *_aidl_return = false;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus DrmPlugin::requiresSecureDecoderDefault(const std::string& in_mime,
+                                                             bool* _aidl_return) {
+    UNUSED(in_mime);
+    // Clearkey only supports SW_SECURE_CRYPTO, so we always return false
+    // regardless of mime type.
+    *_aidl_return = false;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus DrmPlugin::restoreKeys(
+        const std::vector<uint8_t>& in_sessionId,
+        const ::aidl::android::hardware::drm::KeySetId& in_keySetId) {
+    if (in_sessionId.size() == 0 || in_keySetId.keySetId.size() == 0) {
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    DeviceFiles::LicenseState licenseState;
+    std::string offlineLicense;
+    if (!mFileHandle.RetrieveLicense(
+                std::string(in_keySetId.keySetId.begin(), in_keySetId.keySetId.end()),
+                &licenseState, &offlineLicense)) {
+        ALOGE("Failed to restore offline license");
+        return toNdkScopedAStatus(Status::ERROR_DRM_NO_LICENSE);
+    }
+
+    if (DeviceFiles::kLicenseStateUnknown == licenseState ||
+        DeviceFiles::kLicenseStateReleasing == licenseState) {
+        ALOGE("Invalid license state=%d", licenseState);
+        return toNdkScopedAStatus(Status::ERROR_DRM_NO_LICENSE);
+    }
+
+    ::android::sp<Session> session = mSessionLibrary->findSession(in_sessionId);
+    if (!session.get()) {
+        return toNdkScopedAStatus(Status::ERROR_DRM_SESSION_NOT_OPENED);
+    }
+    auto res = session->provideKeyResponse(
+            std::vector<uint8_t>(offlineLicense.begin(), offlineLicense.end()));
+    if (res != clearkeydrm::OK) {
+        ALOGE("Failed to restore keys");
+    }
+    return toNdkScopedAStatus(res);
+}
+
+void DrmPlugin::sendEvent(::aidl::android::hardware::drm::EventType in_eventType,
+                                          const std::vector<uint8_t>& in_sessionId,
+                                          const std::vector<uint8_t>& in_data) {
+    if (mListener != nullptr) {
+        mListener->onEvent(in_eventType, in_sessionId, in_data);
+    } else {
+        ALOGE("Null event listener, event not sent");
+    }
+    return;
+}
+
+void DrmPlugin::sendExpirationUpdate(const std::vector<uint8_t>& in_sessionId,
+                                                     int64_t in_expiryTimeInMS) {
+    if (mListener != nullptr) {
+        mListener->onExpirationUpdate(in_sessionId, in_expiryTimeInMS);
+    } else {
+        ALOGE("Null event listener, event not sent");
+    }
+    return;
+}
+
+void DrmPlugin::sendKeysChange(
+        const std::vector<uint8_t>& in_sessionId,
+        const std::vector<::aidl::android::hardware::drm::KeyStatus>& in_keyStatusList,
+        bool in_hasNewUsableKey) {
+    if (mListener != nullptr) {
+        mListener->onKeysChange(in_sessionId, in_keyStatusList, in_hasNewUsableKey);
+    } else {
+        ALOGE("Null event listener, event not sent");
+    }
+    return;
+}
+
+void DrmPlugin::sendSessionLostState(const std::vector<uint8_t>& in_sessionId) {
+    if (mListener != nullptr) {
+        mListener->onSessionLostState(in_sessionId);
+    }
+    return;
+}
+
+::ndk::ScopedAStatus DrmPlugin::setCipherAlgorithm(const std::vector<uint8_t>& /*in_sessionId*/,
+                                                   const std::string& /*in_algorithm*/) {
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+::ndk::ScopedAStatus DrmPlugin::setListener(
+        const std::shared_ptr<
+                ::aidl::android::hardware::drm::IDrmPluginListener>& in_listener) {
+    mListener = in_listener;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::setMacAlgorithm(const std::vector<uint8_t>& /*in_sessionId*/,
+                                                const std::string& /*in_algorithm*/) {
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+::ndk::ScopedAStatus DrmPlugin::setPlaybackId(const std::vector<uint8_t>& in_sessionId,
+                                              const std::string& in_playbackId) {
+    if (in_sessionId.size() == 0) {
+        ALOGE("Invalid empty session id");
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    std::vector<uint8_t> sid = in_sessionId;
+    mPlaybackId[sid] = in_playbackId;
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::setPropertyByteArray(const std::string& in_propertyName,
+                                                     const std::vector<uint8_t>& in_value) {
+    if (in_propertyName == kDeviceIdKey) {
+        ALOGD("Cannot set immutable property: %s", in_propertyName.c_str());
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    } else if (in_propertyName == kClientIdKey) {
+        mByteArrayProperties[kClientIdKey] = in_value;
+        return toNdkScopedAStatus(Status::OK);
+    }
+
+    // Setting of undefined properties is not supported
+    ALOGE("Failed to set property byte array, key=%s", in_propertyName.c_str());
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+::ndk::ScopedAStatus DrmPlugin::setPropertyString(const std::string& in_propertyName,
+                                                  const std::string& in_value) {
+    std::string immutableKeys;
+    immutableKeys.append(kAlgorithmsKey + ",");
+    immutableKeys.append(kPluginDescriptionKey + ",");
+    immutableKeys.append(kVendorKey + ",");
+    immutableKeys.append(kVersionKey + ",");
+
+    std::string key = std::string(in_propertyName.c_str());
+    if (immutableKeys.find(key) != std::string::npos) {
+        ALOGD("Cannot set immutable property: %s", key.c_str());
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    std::map<std::string, std::string>::iterator itr = mStringProperties.find(key);
+    if (itr == mStringProperties.end()) {
+        ALOGE("Cannot set undefined property string, key=%s", key.c_str());
+        return toNdkScopedAStatus(Status::BAD_VALUE);
+    }
+
+    if (in_propertyName == kDrmErrorTestKey) {
+        if (in_value == kResourceContentionValue) {
+            mMockError = Status::ERROR_DRM_RESOURCE_CONTENTION;
+        } else if (in_value == kLostStateValue) {
+            mMockError = Status::ERROR_DRM_SESSION_LOST_STATE;
+        } else if (in_value == kFrameTooLargeValue) {
+            mMockError = Status::ERROR_DRM_FRAME_TOO_LARGE;
+        } else if (in_value == kInvalidStateValue) {
+            mMockError = Status::ERROR_DRM_INVALID_STATE;
+        } else {
+            mMockError = Status::ERROR_DRM_UNKNOWN;
+        }
+    }
+
+    mStringProperties[key] = std::string(in_value.c_str());
+    return toNdkScopedAStatus(Status::OK);
+}
+
+::ndk::ScopedAStatus DrmPlugin::sign(const std::vector<uint8_t>& /*in_sessionId*/,
+                                     const std::vector<uint8_t>& /*in_keyId*/,
+                                     const std::vector<uint8_t>& /*in_message*/,
+                                     std::vector<uint8_t>* _aidl_return) {
+    *_aidl_return = {};
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+::ndk::ScopedAStatus DrmPlugin::signRSA(const std::vector<uint8_t>& /*in_sessionId*/,
+                                        const std::string& /*in_algorithm*/,
+                                        const std::vector<uint8_t>& /*in_message*/,
+                                        const std::vector<uint8_t>& /*in_wrappedkey*/,
+                                        std::vector<uint8_t>* _aidl_return) {
+    *_aidl_return = {};
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+::ndk::ScopedAStatus DrmPlugin::verify(const std::vector<uint8_t>& /*in_sessionId*/,
+                                       const std::vector<uint8_t>& /*in_keyId*/,
+                                       const std::vector<uint8_t>& /*in_message*/,
+                                       const std::vector<uint8_t>& /*in_signature*/,
+                                       bool* _aidl_return) {
+    *_aidl_return = false;
+    return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE);
+}
+
+// Private methods below.
+void DrmPlugin::setPlayPolicy() {
+    ::android::Mutex::Autolock lock(mPlayPolicyLock);
+    mPlayPolicy.clear();
+
+    KeyValue policy;
+    policy.key = kQueryKeyLicenseType;
+    policy.value = kStreaming;
+    mPlayPolicy.push_back(policy);
+
+    policy.key = kQueryKeyPlayAllowed;
+    policy.value = kTrue;
+    mPlayPolicy.push_back(policy);
+
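+    // policy.value still holds kTrue from the previous entry, so renew is reported as allowed.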
+    policy.key = kQueryKeyRenewAllowed;
+    mPlayPolicy.push_back(policy);
+}
+
+bool DrmPlugin::makeKeySetId(std::string* keySetId) {
+    if (!keySetId) {
+        ALOGE("keySetId destination not provided");
+        return false;
+    }
+    std::vector<uint8_t> ksid(kKeySetIdPrefix.begin(), kKeySetIdPrefix.end());
+    ksid.resize(kKeySetIdLength);
+    std::vector<uint8_t> randomData((kKeySetIdLength - kKeySetIdPrefix.size()) / 2, 0);
+
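+    // Keep regenerating random ids until one does not collide with an existing license file.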
+    while (keySetId->empty()) {
+        for (auto itr = randomData.begin(); itr != randomData.end(); ++itr) {
+            *itr = std::rand() % 0xff;
+        }
+        auto id = reinterpret_cast<const uint8_t*>(randomData.data());
+        *keySetId = kKeySetIdPrefix + ::android::ByteArrayToHexString(id, randomData.size());
+        if (mFileHandle.LicenseExists(*keySetId)) {
+            // collision, regenerate
+            ALOGV("Retry generating KeySetId");
+            keySetId->clear();
+        }
+    }
+    return true;
+}
+
+Status DrmPlugin::setSecurityLevel(const std::vector<uint8_t>& sessionId, SecurityLevel level) {
+    if (sessionId.size() == 0) {
+        ALOGE("Invalid empty session id");
+        return Status::BAD_VALUE;
+    }
+
+    if (level != SecurityLevel::DEFAULT && level != SecurityLevel::SW_SECURE_CRYPTO) {
+        ALOGE("Cannot set security level > max");
+        return Status::ERROR_DRM_CANNOT_HANDLE;
+    }
+
+    std::vector<uint8_t> sid = sessionId;
+    ::android::sp<Session> session = mSessionLibrary->findSession(sid);
+    if (!session.get()) {
+        return Status::ERROR_DRM_SESSION_NOT_OPENED;
+    }
+
+    std::map<std::vector<uint8_t>, SecurityLevel>::iterator itr = mSecurityLevel.find(sid);
+    if (itr != mSecurityLevel.end()) {
+        mSecurityLevel[sid] = level;
+    } else {
+        if (!mSecurityLevel.insert(std::pair<std::vector<uint8_t>, SecurityLevel>(sid, level))
+                     .second) {
+            ALOGE("Failed to set security level");
+            return Status::ERROR_DRM_INVALID_STATE;
+        }
+    }
+    return Status::OK;
+}
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/aidl/Service.cpp b/drm/mediadrm/plugins/clearkey/aidl/Service.cpp
new file mode 100644
index 0000000..7d342f3
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/Service.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_NDEBUG 1
+#define LOG_TAG "clearkey-main"
+
+#include "CreatePluginFactories.h"
+
+#include <android-base/logging.h>
+
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+
+using ::android::base::InitLogging;
+using ::android::base::LogdLogger;
+
+using ::aidl::android::hardware::drm::clearkey::createCryptoFactory;
+using ::aidl::android::hardware::drm::clearkey::createDrmFactory;
+using ::aidl::android::hardware::drm::clearkey::CryptoFactory;
+using ::aidl::android::hardware::drm::clearkey::DrmFactory;
+
+int main(int /*argc*/, char* argv[]) {
+    InitLogging(argv, LogdLogger());
+    ::android::base::SetMinimumLogSeverity(::android::base::VERBOSE);
+    ABinderProcess_setThreadPoolMaxThreadCount(8);
+
+    std::shared_ptr<DrmFactory> drmFactory = createDrmFactory();
+    const std::string drmInstance = std::string() + DrmFactory::descriptor + "/clearkey";
+    binder_status_t status =
+            AServiceManager_addService(drmFactory->asBinder().get(), drmInstance.c_str());
+    CHECK(status == STATUS_OK);
+
+    std::shared_ptr<CryptoFactory> cryptoFactory = createCryptoFactory();
+    const std::string cryptoInstance = std::string() + CryptoFactory::descriptor + "/clearkey";
+    status = AServiceManager_addService(cryptoFactory->asBinder().get(), cryptoInstance.c_str());
+    CHECK(status == STATUS_OK);
+
+    ABinderProcess_joinThreadPool();
+    return EXIT_FAILURE;  // should not be reached
+}
diff --git a/drm/mediadrm/plugins/clearkey/aidl/ServiceLazy.cpp b/drm/mediadrm/plugins/clearkey/aidl/ServiceLazy.cpp
new file mode 100644
index 0000000..55aa6e0
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/ServiceLazy.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_NDEBUG 1
+#define LOG_TAG "clearkey-main"
+
+#include "CreatePluginFactories.h"
+
+#include <android-base/logging.h>
+
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+
+using ::android::base::InitLogging;
+using ::android::base::StderrLogger;
+
+using ::aidl::android::hardware::drm::clearkey::createCryptoFactory;
+using ::aidl::android::hardware::drm::clearkey::createDrmFactory;
+using ::aidl::android::hardware::drm::clearkey::CryptoFactory;
+using ::aidl::android::hardware::drm::clearkey::DrmFactory;
+
+int main(int /*argc*/, char* argv[]) {
+    InitLogging(argv, StderrLogger);
+    ::android::base::SetMinimumLogSeverity(::android::base::VERBOSE);
+    ABinderProcess_setThreadPoolMaxThreadCount(8);
+
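+    // Register as lazy services so servicemanager can start the process on demand.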
+    binder_status_t status{};
+    std::shared_ptr<DrmFactory> drmFactory = createDrmFactory();
+    const std::string drmInstance = std::string() + DrmFactory::descriptor + "/clearkey";
+    status = AServiceManager_registerLazyService(drmFactory->asBinder().get(),
+                                                 drmInstance.c_str());
+    CHECK(status == STATUS_OK);
+
+    std::shared_ptr<CryptoFactory> cryptoFactory = createCryptoFactory();
+    const std::string cryptoInstance = std::string() + CryptoFactory::descriptor + "/clearkey";
+    status = AServiceManager_registerLazyService(cryptoFactory->asBinder().get(),
+                                                 cryptoInstance.c_str());
+    CHECK(status == STATUS_OK);
+
+    ABinderProcess_joinThreadPool();
+    return EXIT_FAILURE;  // should not be reached
+}
diff --git a/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc
new file mode 100644
index 0000000..019c726
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc
@@ -0,0 +1,9 @@
+service vendor.drm-clearkey-service /vendor/bin/hw/android.hardware.drm-service.clearkey
+    disabled
+    class hal
+    user media
+    group mediadrm drmrpc
+    ioprio rt 4
+    task_profiles ProcessCapacityHigh
+    interface aidl android.hardware.drm.IDrmFactory/clearkey
+    interface aidl android.hardware.drm.ICryptoFactory/clearkey
diff --git a/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service.clearkey.rc
new file mode 100644
index 0000000..2b2637f
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service.clearkey.rc
@@ -0,0 +1,8 @@
+service vendor.drm-clearkey-service /vendor/bin/hw/android.hardware.drm-service.clearkey
+    class hal
+    user media
+    group mediadrm drmrpc
+    ioprio rt 4
+    task_profiles ProcessCapacityHigh
+    interface aidl android.hardware.drm.IDrmFactory/clearkey
+    interface aidl android.hardware.drm.ICryptoFactory/clearkey
diff --git a/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service.clearkey.xml b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service.clearkey.xml
new file mode 100644
index 0000000..73c15f3
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service.clearkey.xml
@@ -0,0 +1,8 @@
+<manifest version="1.0" type="device">
+    <hal format="aidl">
+        <name>android.hardware.drm</name>
+        <version>1</version>
+        <fqname>ICryptoFactory/clearkey</fqname>
+        <fqname>IDrmFactory/clearkey</fqname>
+    </hal>
+</manifest>
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/AidlUtils.h b/drm/mediadrm/plugins/clearkey/aidl/include/AidlUtils.h
new file mode 100644
index 0000000..0370ebe
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/AidlUtils.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include <android/binder_auto_utils.h>
+#include "aidl/android/hardware/drm/Status.h"
+#include "ClearKeyTypes.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
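+// Maps ERROR_DRM_INSUFFICIENT_SECURITY, ERROR_DRM_FRAME_TOO_LARGE and
+// ERROR_DRM_SESSION_LOST_STATE to ERROR_DRM_UNKNOWN; other statuses pass through unchanged.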
+inline ::aidl::android::hardware::drm::Status toMockStatus(
+        ::aidl::android::hardware::drm::Status status) {
+    switch (status) {
+        case ::aidl::android::hardware::drm::Status::ERROR_DRM_INSUFFICIENT_SECURITY:
+        case ::aidl::android::hardware::drm::Status::ERROR_DRM_FRAME_TOO_LARGE:
+        case ::aidl::android::hardware::drm::Status::ERROR_DRM_SESSION_LOST_STATE:
+            return ::aidl::android::hardware::drm::Status::ERROR_DRM_UNKNOWN;
+        default:
+            return status;
+    }
+}
+
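+// Status::OK maps to an ok ScopedAStatus; any other Status becomes a service-specific error.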
+inline ::ndk::ScopedAStatus toNdkScopedAStatus(::aidl::android::hardware::drm::Status status) {
+    if (Status::OK == status) {
+        return ::ndk::ScopedAStatus::ok();
+    } else {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+}
+
+inline ::ndk::ScopedAStatus toNdkScopedAStatus(clearkeydrm::CdmResponseType res) {
+    return toNdkScopedAStatus(static_cast<::aidl::android::hardware::drm::Status>(res));
+}
+
+#define UNUSED(x) (void)(x);
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/drm/mediadrm/plugins/clearkey/aidl/include/CreatePluginFactories.h
similarity index 60%
copy from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
copy to drm/mediadrm/plugins/clearkey/aidl/include/CreatePluginFactories.h
index b08a604..5a90fb8 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/CreatePluginFactories.h
@@ -13,17 +13,23 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+#pragma once
 
-package android.media;
+#include "CryptoFactory.h"
+#include "DrmFactory.h"
 
-/**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationType {
-    NONE     = 0,
-    IEC61937 = 1,
-}
\ No newline at end of file
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+//extern "C" {
+std::shared_ptr<DrmFactory> createDrmFactory();
+std::shared_ptr<CryptoFactory> createCryptoFactory();
+//}
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/CryptoFactory.h b/drm/mediadrm/plugins/clearkey/aidl/include/CryptoFactory.h
new file mode 100644
index 0000000..0d6c4ce
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/CryptoFactory.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <aidl/android/hardware/drm/BnCryptoFactory.h>
+#include <aidl/android/hardware/drm/ICryptoFactory.h>
+#include <aidl/android/hardware/drm/ICryptoPlugin.h>
+
+#include "ClearKeyTypes.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+struct CryptoFactory : public BnCryptoFactory {
+    CryptoFactory() {}
+    virtual ~CryptoFactory() {}
+
+    ::ndk::ScopedAStatus createPlugin(
+            const ::aidl::android::hardware::drm::Uuid& in_uuid,
+            const std::vector<uint8_t>& in_initData,
+            std::shared_ptr<::aidl::android::hardware::drm::ICryptoPlugin>* _aidl_return) override;
+
+    ::ndk::ScopedAStatus isCryptoSchemeSupported(
+            const ::aidl::android::hardware::drm::Uuid& in_uuid, bool* _aidl_return) override;
+
+  private:
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(CryptoFactory);
+};
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/CryptoPlugin.h b/drm/mediadrm/plugins/clearkey/aidl/include/CryptoPlugin.h
new file mode 100644
index 0000000..f98829d
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/CryptoPlugin.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <aidl/android/hardware/drm/BnCryptoPlugin.h>
+#include <aidl/android/hardware/drm/Status.h>
+
+#include <aidl/android/hardware/common/Ashmem.h>
+
+#include <android/binder_auto_utils.h>
+
+#include <memory>
+#include <mutex>
+
+#include "ClearKeyTypes.h"
+#include "Session.h"
+
+namespace {
+static const size_t KEY_ID_SIZE = 16;
+static const size_t KEY_IV_SIZE = 16;
+}  // namespace
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+using namespace clearkeydrm;
+using ::aidl::android::hardware::drm::Status;
+
+struct SharedBufferBase {
+    uint8_t* mBase;
+    int64_t mSize;
+    SharedBufferBase(const ::aidl::android::hardware::common::Ashmem& mem);
+    ~SharedBufferBase();
+};
+
+struct CryptoPlugin : public BnCryptoPlugin {
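+    // Binds the plugin to the given session; a service-specific error from
+    // setMediaDrmSession is recorded in mInitStatus.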
+    explicit CryptoPlugin(const std::vector<uint8_t>& sessionId) {
+        const auto res = setMediaDrmSession(sessionId);
+        mInitStatus = Status::OK;
+        if (!res.isOk() && res.getExceptionCode() == EX_SERVICE_SPECIFIC) {
+            mInitStatus = static_cast<Status>(res.getServiceSpecificError());
+        }
+    }
+    virtual ~CryptoPlugin() {}
+
+    ::ndk::ScopedAStatus decrypt(
+            bool in_secure, const std::vector<uint8_t>& in_keyId, const std::vector<uint8_t>& in_iv,
+            ::aidl::android::hardware::drm::Mode in_mode,
+            const ::aidl::android::hardware::drm::Pattern& in_pattern,
+            const std::vector<::aidl::android::hardware::drm::SubSample>& in_subSamples,
+            const ::aidl::android::hardware::drm::SharedBuffer& in_source, int64_t in_offset,
+            const ::aidl::android::hardware::drm::DestinationBuffer& in_destination,
+            ::aidl::android::hardware::drm::DecryptResult* _aidl_return) override;
+
+    ::ndk::ScopedAStatus getLogMessages(
+            std::vector<::aidl::android::hardware::drm::LogMessage>* _aidl_return) override;
+
+    ::ndk::ScopedAStatus notifyResolution(int32_t in_width, int32_t in_height) override;
+
+    ::ndk::ScopedAStatus requiresSecureDecoderComponent(const std::string& in_mime,
+                                                        bool* _aidl_return) override;
+
+    ::ndk::ScopedAStatus setMediaDrmSession(const std::vector<uint8_t>& in_sessionId) override;
+
+    ::ndk::ScopedAStatus setSharedBufferBase(
+            const ::aidl::android::hardware::common::Ashmem& in_base, int32_t in_bufferId) override;
+
+    ::aidl::android::hardware::drm::Status getInitStatus() const { return mInitStatus; }
+
+  private:
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(CryptoPlugin);
+
+    std::mutex mSharedBufferLock;
+    std::map<uint32_t, std::shared_ptr<SharedBufferBase>> mSharedBufferMap
+            GUARDED_BY(mSharedBufferLock);
+    ::android::sp<Session> mSession;
+    ::aidl::android::hardware::drm::Status mInitStatus;
+};
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/DrmFactory.h b/drm/mediadrm/plugins/clearkey/aidl/include/DrmFactory.h
new file mode 100644
index 0000000..0143dc7
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/DrmFactory.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <aidl/android/hardware/drm/BnDrmFactory.h>
+#include <aidl/android/hardware/drm/IDrmFactory.h>
+#include <aidl/android/hardware/drm/IDrmPlugin.h>
+
+#include <string>
+#include <vector>
+
+#include "ClearKeyTypes.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+struct DrmFactory : public BnDrmFactory {
+    DrmFactory() {}
+    virtual ~DrmFactory() {}
+
+    ::ndk::ScopedAStatus createPlugin(
+            const ::aidl::android::hardware::drm::Uuid& in_uuid,
+            const std::string& in_appPackageName,
+            std::shared_ptr<::aidl::android::hardware::drm::IDrmPlugin>* _aidl_return) override;
+
+    ::ndk::ScopedAStatus getSupportedCryptoSchemes(
+            std::vector<::aidl::android::hardware::drm::Uuid>* _aidl_return) override;
+
+    ::ndk::ScopedAStatus isContentTypeSupported(const std::string& in_mimeType,
+                                                bool* _aidl_return) override;
+
+    ::ndk::ScopedAStatus isCryptoSchemeSupported(
+            const ::aidl::android::hardware::drm::Uuid& in_uuid, const std::string& in_mimeType,
+            ::aidl::android::hardware::drm::SecurityLevel in_securityLevel,
+            bool* _aidl_return) override;
+
+    binder_status_t dump(int fd, const char** args, uint32_t numArgs) override;
+
+
+  private:
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(DrmFactory);
+};
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/aidl/include/DrmPlugin.h
new file mode 100644
index 0000000..44db1d5
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/DrmPlugin.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <aidl/android/hardware/drm/BnDrmPlugin.h>
+#include <aidl/android/hardware/drm/IDrmPluginListener.h>
+#include <aidl/android/hardware/drm/Status.h>
+
+#include <stdio.h>
+#include <map>
+
+#include <utils/List.h>
+
+#include "DeviceFiles.h"
+#include "SessionLibrary.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace drm {
+namespace clearkey {
+
+using namespace clearkeydrm;
+using ::aidl::android::hardware::drm::KeyType;
+using ::aidl::android::hardware::drm::Status;
+
+struct DrmPlugin : public BnDrmPlugin {
+  public:
+    explicit DrmPlugin(SessionLibrary* sessionLibrary);
+    virtual ~DrmPlugin() { mFileHandle.DeleteAllLicenses(); }
+
+    ::ndk::ScopedAStatus closeSession(const std::vector<uint8_t>& in_sessionId) override;
+    ::ndk::ScopedAStatus decrypt(const std::vector<uint8_t>& in_sessionId,
+                                 const std::vector<uint8_t>& in_keyId,
+                                 const std::vector<uint8_t>& in_input,
+                                 const std::vector<uint8_t>& in_iv,
+                                 std::vector<uint8_t>* _aidl_return) override;
+    ::ndk::ScopedAStatus encrypt(const std::vector<uint8_t>& in_sessionId,
+                                 const std::vector<uint8_t>& in_keyId,
+                                 const std::vector<uint8_t>& in_input,
+                                 const std::vector<uint8_t>& in_iv,
+                                 std::vector<uint8_t>* _aidl_return) override;
+    ::ndk::ScopedAStatus getHdcpLevels(
+            ::aidl::android::hardware::drm::HdcpLevels* _aidl_return) override;
+    ::ndk::ScopedAStatus getKeyRequest(
+            const std::vector<uint8_t>& in_scope, const std::vector<uint8_t>& in_initData,
+            const std::string& in_mimeType, ::aidl::android::hardware::drm::KeyType in_keyType,
+            const std::vector<::aidl::android::hardware::drm::KeyValue>& in_optionalParameters,
+            ::aidl::android::hardware::drm::KeyRequest* _aidl_return) override;
+    ::ndk::ScopedAStatus getLogMessages(
+            std::vector<::aidl::android::hardware::drm::LogMessage>* _aidl_return) override;
+
+    ::ndk::ScopedAStatus getMetrics(
+            std::vector<::aidl::android::hardware::drm::DrmMetricGroup>* _aidl_return) override;
+    ::ndk::ScopedAStatus getNumberOfSessions(
+            ::aidl::android::hardware::drm::NumberOfSessions* _aidl_return) override;
+    ::ndk::ScopedAStatus getOfflineLicenseKeySetIds(
+            std::vector<::aidl::android::hardware::drm::KeySetId>* _aidl_return) override;
+    ::ndk::ScopedAStatus getOfflineLicenseState(
+            const ::aidl::android::hardware::drm::KeySetId& in_keySetId,
+            ::aidl::android::hardware::drm::OfflineLicenseState* _aidl_return) override;
+    ::ndk::ScopedAStatus getPropertyByteArray(const std::string& in_propertyName,
+                                              std::vector<uint8_t>* _aidl_return) override;
+    ::ndk::ScopedAStatus getPropertyString(const std::string& in_propertyName,
+                                           std::string* _aidl_return) override;
+    ::ndk::ScopedAStatus getProvisionRequest(
+            const std::string& in_certificateType, const std::string& in_certificateAuthority,
+            ::aidl::android::hardware::drm::ProvisionRequest* _aidl_return) override;
+    ::ndk::ScopedAStatus getSecureStop(
+            const ::aidl::android::hardware::drm::SecureStopId& in_secureStopId,
+            ::aidl::android::hardware::drm::SecureStop* _aidl_return) override;
+    ::ndk::ScopedAStatus getSecureStopIds(
+            std::vector<::aidl::android::hardware::drm::SecureStopId>* _aidl_return) override;
+    ::ndk::ScopedAStatus getSecureStops(
+            std::vector<::aidl::android::hardware::drm::SecureStop>* _aidl_return) override;
+    ::ndk::ScopedAStatus getSecurityLevel(
+            const std::vector<uint8_t>& in_sessionId,
+            ::aidl::android::hardware::drm::SecurityLevel* _aidl_return) override;
+    ::ndk::ScopedAStatus openSession(::aidl::android::hardware::drm::SecurityLevel in_securityLevel,
+                                     std::vector<uint8_t>* _aidl_return) override;
+    ::ndk::ScopedAStatus provideKeyResponse(
+            const std::vector<uint8_t>& in_scope, const std::vector<uint8_t>& in_response,
+            ::aidl::android::hardware::drm::KeySetId* _aidl_return) override;
+    ::ndk::ScopedAStatus provideProvisionResponse(
+            const std::vector<uint8_t>& in_response,
+            ::aidl::android::hardware::drm::ProvideProvisionResponseResult* _aidl_return) override;
+    ::ndk::ScopedAStatus queryKeyStatus(
+            const std::vector<uint8_t>& in_sessionId,
+            std::vector<::aidl::android::hardware::drm::KeyValue>* _aidl_return) override;
+    ::ndk::ScopedAStatus releaseAllSecureStops() override;
+    ::ndk::ScopedAStatus releaseSecureStop(
+            const ::aidl::android::hardware::drm::SecureStopId& in_secureStopId) override;
+    ::ndk::ScopedAStatus releaseSecureStops(
+            const ::aidl::android::hardware::drm::OpaqueData& in_ssRelease) override;
+    ::ndk::ScopedAStatus removeAllSecureStops() override;
+    ::ndk::ScopedAStatus removeKeys(const std::vector<uint8_t>& in_sessionId) override;
+    ::ndk::ScopedAStatus removeOfflineLicense(
+            const ::aidl::android::hardware::drm::KeySetId& in_keySetId) override;
+    ::ndk::ScopedAStatus removeSecureStop(
+            const ::aidl::android::hardware::drm::SecureStopId& in_secureStopId) override;
+    ::ndk::ScopedAStatus requiresSecureDecoder(
+            const std::string& in_mime, ::aidl::android::hardware::drm::SecurityLevel in_level,
+            bool* _aidl_return) override;
+    ::ndk::ScopedAStatus requiresSecureDecoderDefault(const std::string& in_mime,
+                                                      bool* _aidl_return) override;
+    ::ndk::ScopedAStatus restoreKeys(
+            const std::vector<uint8_t>& in_sessionId,
+            const ::aidl::android::hardware::drm::KeySetId& in_keySetId) override;
+    ::ndk::ScopedAStatus setCipherAlgorithm(const std::vector<uint8_t>& in_sessionId,
+                                            const std::string& in_algorithm) override;
+    ::ndk::ScopedAStatus setListener(
+            const std::shared_ptr<IDrmPluginListener>& in_listener) override;
+    ::ndk::ScopedAStatus setMacAlgorithm(const std::vector<uint8_t>& in_sessionId,
+                                         const std::string& in_algorithm) override;
+    ::ndk::ScopedAStatus setPlaybackId(const std::vector<uint8_t>& in_sessionId,
+                                       const std::string& in_playbackId) override;
+    ::ndk::ScopedAStatus setPropertyByteArray(const std::string& in_propertyName,
+                                              const std::vector<uint8_t>& in_value) override;
+    ::ndk::ScopedAStatus setPropertyString(const std::string& in_propertyName,
+                                           const std::string& in_value) override;
+    ::ndk::ScopedAStatus sign(const std::vector<uint8_t>& in_sessionId,
+                              const std::vector<uint8_t>& in_keyId,
+                              const std::vector<uint8_t>& in_message,
+                              std::vector<uint8_t>* _aidl_return) override;
+    ::ndk::ScopedAStatus signRSA(const std::vector<uint8_t>& in_sessionId,
+                                 const std::string& in_algorithm,
+                                 const std::vector<uint8_t>& in_message,
+                                 const std::vector<uint8_t>& in_wrappedkey,
+                                 std::vector<uint8_t>* _aidl_return) override;
+    ::ndk::ScopedAStatus verify(const std::vector<uint8_t>& in_sessionId,
+                                const std::vector<uint8_t>& in_keyId,
+                                const std::vector<uint8_t>& in_message,
+                                const std::vector<uint8_t>& in_signature,
+                                bool* _aidl_return) override;
+
+  private:
+    void initProperties();
+    void installSecureStop(const std::vector<uint8_t>& sessionId);
+    bool makeKeySetId(std::string* keySetId);
+    void setPlayPolicy();
+
+    void sendEvent(::aidl::android::hardware::drm::EventType in_eventType,
+                   const std::vector<uint8_t>& in_sessionId,
+                   const std::vector<uint8_t>& in_data);
+    void sendExpirationUpdate(const std::vector<uint8_t>& in_sessionId,
+                              int64_t in_expiryTimeInMS);
+    void sendKeysChange(
+            const std::vector<uint8_t>& in_sessionId,
+            const std::vector<::aidl::android::hardware::drm::KeyStatus>& in_keyStatusList,
+            bool in_hasNewUsableKey);
+    void sendSessionLostState(const std::vector<uint8_t>& in_sessionId);
+
+    Status setSecurityLevel(const std::vector<uint8_t>& sessionId, SecurityLevel level);
+
+    Status getKeyRequestCommon(const std::vector<uint8_t>& scope,
+                               const std::vector<uint8_t>& initData, const std::string& mimeType,
+                               KeyType keyType, const std::vector<KeyValue>& optionalParameters,
+                               std::vector<uint8_t>* request, KeyRequestType* getKeyRequestType,
+                               std::string* defaultUrl);
+
+    struct ClearkeySecureStop {
+        std::vector<uint8_t> id;
+        std::vector<uint8_t> data;
+    };
+
+    std::map<std::vector<uint8_t>, ClearkeySecureStop> mSecureStops;
+    std::vector<KeyValue> mPlayPolicy;
+    std::map<std::string, std::string> mStringProperties;
+    std::map<std::string, std::vector<uint8_t>> mByteArrayProperties;
+    std::map<std::string, std::vector<uint8_t>> mReleaseKeysMap;
+    std::map<std::vector<uint8_t>, std::string> mPlaybackId;
+    std::map<std::vector<uint8_t>, SecurityLevel> mSecurityLevel;
+    ::std::shared_ptr<IDrmPluginListener> mListener;
+    SessionLibrary* mSessionLibrary;
+    int64_t mOpenSessionOkCount;
+    int64_t mCloseSessionOkCount;
+    int64_t mCloseSessionNotOpenedCount;
+    uint32_t mNextSecureStopId;
+    ::android::Mutex mPlayPolicyLock;
+
+    // set by property to mock error scenarios
+    Status mMockError;
+
+    void processMockError(const ::android::sp<Session>& session) {
+        session->setMockError(static_cast<CdmResponseType>(mMockError));
+        mMockError = Status::OK;
+    }
+
+    DeviceFiles mFileHandle;
+    ::android::Mutex mSecureStopLock;
+
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
+};
+
+}  // namespace clearkey
+}  // namespace drm
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/drm/mediadrm/plugins/clearkey/common/AesCtrDecryptor.cpp b/drm/mediadrm/plugins/clearkey/common/AesCtrDecryptor.cpp
new file mode 100644
index 0000000..0b97820
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/AesCtrDecryptor.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-AesDecryptor"
+
+#include <utils/Log.h>
+
+#include <openssl/aes.h>
+
+#include "AesCtrDecryptor.h"
+#include "ClearKeyTypes.h"
+
+namespace clearkeydrm {
+
+static const size_t kBlockBitCount = kBlockSize * 8;
+
+CdmResponseType AesCtrDecryptor::decrypt(const std::vector<uint8_t>& key, const Iv iv,
+                                         const uint8_t* source, uint8_t* destination,
+                                         const std::vector<int32_t>& clearDataLengths,
+                                         const std::vector<int32_t>& encryptedDataLengths,
+                                         size_t* bytesDecryptedOut) {
+
+    if (key.size() != kBlockSize || clearDataLengths.size() != encryptedDataLengths.size()) {
+        android_errorWriteLog(0x534e4554, "63982768");
+        return clearkeydrm::ERROR_DECRYPT;
+    }
+
+    uint32_t blockOffset = 0;
+    uint8_t previousEncryptedCounter[kBlockSize];
+    memset(previousEncryptedCounter, 0, kBlockSize);
+
+    size_t offset = 0;
+    AES_KEY opensslKey;
+    AES_set_encrypt_key(key.data(), kBlockBitCount, &opensslKey);
+    Iv opensslIv;
+    memcpy(opensslIv, iv, sizeof(opensslIv));
+
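+    // Each subsample is a run of clear bytes (copied verbatim) followed by a run of
+    // encrypted bytes (AES-CTR decrypted); the counter state carries across subsamples.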
+    for (size_t i = 0; i < clearDataLengths.size(); ++i) {
+        int32_t numBytesOfClearData = clearDataLengths[i];
+        if (numBytesOfClearData > 0) {
+            memcpy(destination + offset, source + offset, numBytesOfClearData);
+            offset += numBytesOfClearData;
+        }
+
+        int32_t numBytesOfEncryptedData = encryptedDataLengths[i];
+        if (numBytesOfEncryptedData > 0) {
+            AES_ctr128_encrypt(source + offset, destination + offset,
+                               numBytesOfEncryptedData, &opensslKey, opensslIv,
+                               previousEncryptedCounter, &blockOffset);
+            offset += numBytesOfEncryptedData;
+        }
+    }
+
+    *bytesDecryptedOut = offset;
+    return clearkeydrm::OK;
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/Android.bp b/drm/mediadrm/plugins/clearkey/common/Android.bp
index 7ed8b88..a6a5b28 100644
--- a/drm/mediadrm/plugins/clearkey/common/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/common/Android.bp
@@ -44,3 +44,56 @@
         integer_overflow: true,
     },
 }
+
+cc_library_static {
+    name: "libclearkeydevicefiles-protos.common",
+    vendor: true,
+
+    proto: {
+        export_proto_headers: true,
+        type: "lite",
+    },
+    srcs: ["protos/DeviceFiles.proto"],
+}
+
+cc_library_static {
+    name: "libclearkeybase",
+    vendor: true,
+
+    srcs: [
+        "AesCtrDecryptor.cpp",
+        "Base64.cpp",
+        "Buffer.cpp",
+        "ClearKeyUUID.cpp",
+        "DeviceFiles.cpp",
+        "InitDataParser.cpp",
+        "JsonWebKey.cpp",
+        "MemoryFileSystem.cpp",
+        "Session.cpp",
+        "SessionLibrary.cpp",
+        "Utils.cpp",
+    ],
+
+    cflags: ["-Wall", "-Werror"],
+
+    include_dirs: ["frameworks/av/include"],
+
+    shared_libs: [
+        "libutils",
+        "libcrypto",
+    ],
+
+    whole_static_libs: [
+        "libjsmn",
+        "libclearkeydevicefiles-protos.common",
+    ],
+
+    export_include_dirs: [
+        "include",
+        "include/clearkeydrm",
+    ],
+
+    sanitize: {
+        integer_overflow: true,
+    },
+}
diff --git a/drm/mediadrm/plugins/clearkey/common/Base64.cpp b/drm/mediadrm/plugins/clearkey/common/Base64.cpp
new file mode 100644
index 0000000..6499793
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/Base64.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-Base64"
+
+#include "Base64.h"
+
+#include <string>
+
+namespace clearkeydrm {
+
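+// Decodes base64 input, accepting both the standard and URL-safe alphabets;
+// returns nullptr on malformed input.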
+::android::sp<Buffer> decodeBase64(const std::string& s) {
+    size_t n = s.size();
+
+    if ((n % 4) != 0) {
+        return nullptr;
+    }
+
+    size_t padding = 0;
+    if (n >= 1 && s.c_str()[n - 1] == '=') {
+        padding = 1;
+
+        if (n >= 2 && s.c_str()[n - 2] == '=') {
+            padding = 2;
+
+            if (n >= 3 && s.c_str()[n - 3] == '=') {
+                padding = 3;
+            }
+        }
+    }
+
+    // We divide first to avoid overflow. It's OK to do this because we
+    // already made sure that n % 4 == 0.
+    size_t outLen = (n / 4) * 3 - padding;
+
+    ::android::sp<Buffer> buffer = new Buffer(outLen);
+    uint8_t* out = buffer->data();
+    if (out == nullptr || buffer->size() < outLen) {
+        return nullptr;
+    }
+
+    size_t j = 0;
+    uint32_t accum = 0;
+    for (size_t i = 0; i < n; ++i) {
+        char c = s.c_str()[i];
+        unsigned value;
+        if (c >= 'A' && c <= 'Z') {
+            value = c - 'A';
+        } else if (c >= 'a' && c <= 'z') {
+            value = 26 + c - 'a';
+        } else if (c >= '0' && c <= '9') {
+            value = 52 + c - '0';
+        } else if (c == '+' || c == '-') {
+            value = 62;
+        } else if (c == '/' || c == '_') {
+            value = 63;
+        } else if (c != '=') {
+            return nullptr;
+        } else {
+            if (i < n - padding) {
+                return nullptr;
+            }
+
+            value = 0;
+        }
+
+        accum = (accum << 6) | value;
+
+        if (((i + 1) % 4) == 0) {
+            if (j < outLen) {
+                out[j++] = (accum >> 16);
+            }
+            if (j < outLen) {
+                out[j++] = (accum >> 8) & 0xff;
+            }
+            if (j < outLen) {
+                out[j++] = accum & 0xff;
+            }
+
+            accum = 0;
+        }
+    }
+
+    return buffer;
+}
+
+static char encode6Bit(unsigned x) {
+    if (x <= 25) {
+        return 'A' + x;
+    } else if (x <= 51) {
+        return 'a' + x - 26;
+    } else if (x <= 61) {
+        return '0' + x - 52;
+    } else if (x == 62) {
+        return '+';
+    } else {
+        return '/';
+    }
+}
+
+void encodeBase64(const void* _data, size_t size, std::string* out) {
+    out->clear();
+
+    const uint8_t* data = (const uint8_t*)_data;
+
+    size_t i;
+    for (i = 0; i < (size / 3) * 3; i += 3) {
+        uint8_t x1 = data[i];
+        uint8_t x2 = data[i + 1];
+        uint8_t x3 = data[i + 2];
+
+        out->push_back(encode6Bit(x1 >> 2));
+        out->push_back(encode6Bit((x1 << 4 | x2 >> 4) & 0x3f));
+        out->push_back(encode6Bit((x2 << 2 | x3 >> 6) & 0x3f));
+        out->push_back(encode6Bit(x3 & 0x3f));
+    }
+    switch (size % 3) {
+        case 0:
+            break;
+        case 2: {
+            uint8_t x1 = data[i];
+            uint8_t x2 = data[i + 1];
+            out->push_back(encode6Bit(x1 >> 2));
+            out->push_back(encode6Bit((x1 << 4 | x2 >> 4) & 0x3f));
+            out->push_back(encode6Bit((x2 << 2) & 0x3f));
+            out->push_back('=');
+            break;
+        }
+        default: {
+            uint8_t x1 = data[i];
+            out->push_back(encode6Bit(x1 >> 2));
+            out->push_back(encode6Bit((x1 << 4) & 0x3f));
+            out->append("==");
+            break;
+        }
+    }
+}
+
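+// Produces URL-safe base64 by rewriting '+' to '-' and '/' to '_' in the standard encoding.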
+void encodeBase64Url(const void* _data, size_t size, std::string* out) {
+    encodeBase64(_data, size, out);
+
+    if ((std::string::npos != out->find("+")) || (std::string::npos != out->find("/"))) {
+        size_t outLen = out->size();
+        char* base64url = new char[outLen];
+        for (size_t i = 0; i < outLen; ++i) {
+            if (out->c_str()[i] == '+')
+                base64url[i] = '-';
+            else if (out->c_str()[i] == '/')
+                base64url[i] = '_';
+            else
+                base64url[i] = out->c_str()[i];
+        }
+
+        out->assign(base64url, outLen);
+        delete[] base64url;
+    }
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/Buffer.cpp b/drm/mediadrm/plugins/clearkey/common/Buffer.cpp
new file mode 100644
index 0000000..1671598
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/Buffer.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Buffer.h"
+
+namespace clearkeydrm {
+
+Buffer::Buffer(size_t capacity) : mRangeOffset(0), mOwnsData(true) {
+    mData = malloc(capacity);
+    if (mData == nullptr) {
+        mCapacity = 0;
+        mRangeLength = 0;
+    } else {
+        mCapacity = capacity;
+        mRangeLength = capacity;
+    }
+}
+
+Buffer::~Buffer() {
+    if (mOwnsData) {
+        if (mData != nullptr) {
+            free(mData);
+            mData = nullptr;
+        }
+    }
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/DeviceFiles.cpp b/drm/mediadrm/plugins/clearkey/common/DeviceFiles.cpp
new file mode 100644
index 0000000..2299249
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/DeviceFiles.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <utils/Log.h>
+
+#include <sys/stat.h>
+#include <string>
+
+#include "DeviceFiles.h"
+#include "protos/DeviceFiles.pb.h"
+
+#include <openssl/sha.h>
+
+// Protobuf generated classes.
+using clearkeydrm::HashedFile;
+using clearkeydrm::License;
+using clearkeydrm::License_LicenseState_ACTIVE;
+using clearkeydrm::License_LicenseState_RELEASING;
+using clearkeydrm::OfflineFile;
+
+namespace {
+const char kLicenseFileNameExt[] = ".lic";
+
+bool Hash(const std::string& data, std::string* hash) {
+    if (!hash) return false;
+
+    hash->resize(SHA256_DIGEST_LENGTH);
+
+    const unsigned char* input = reinterpret_cast<const unsigned char*>(data.data());
+    unsigned char* output = reinterpret_cast<unsigned char*>(&(*hash)[0]);
+    SHA256(input, data.size(), output);
+    return true;
+}
+
+}  // namespace
+
+namespace clearkeydrm {
+
+bool DeviceFiles::StoreLicense(const std::string& keySetId, LicenseState state,
+                               const std::string& licenseResponse) {
+    OfflineFile file;
+    file.set_type(OfflineFile::LICENSE);
+    file.set_version(OfflineFile::VERSION_1);
+
+    License* license = file.mutable_license();
+    switch (state) {
+        case kLicenseStateActive:
+            license->set_state(License_LicenseState_ACTIVE);
+            license->set_license(licenseResponse);
+            break;
+        case kLicenseStateReleasing:
+            license->set_state(License_LicenseState_RELEASING);
+            license->set_license(licenseResponse);
+            break;
+        default:
+            ALOGW("StoreLicense: Unknown license state: %u", state);
+            return false;
+    }
+
+    std::string serializedFile;
+    file.SerializeToString(&serializedFile);
+
+    return StoreFileWithHash(keySetId + kLicenseFileNameExt, serializedFile);
+}
+
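+// Wraps the serialized payload in a HashedFile carrying its SHA-256 digest before writing.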
+bool DeviceFiles::StoreFileWithHash(const std::string& fileName,
+                                    const std::string& serializedFile) {
+    std::string hash;
+    if (!Hash(serializedFile, &hash)) {
+        ALOGE("StoreFileWithHash: Failed to compute hash");
+        return false;
+    }
+
+    HashedFile hashFile;
+    hashFile.set_file(serializedFile);
+    hashFile.set_hash(hash);
+
+    std::string serializedHashFile;
+    hashFile.SerializeToString(&serializedHashFile);
+
+    return StoreFileRaw(fileName, serializedHashFile);
+}
+
+bool DeviceFiles::StoreFileRaw(const std::string& fileName, const std::string& serializedHashFile) {
+    MemoryFileSystem::MemoryFile memFile;
+    memFile.setFileName(fileName);
+    memFile.setContent(serializedHashFile);
+    memFile.setFileSize(serializedHashFile.size());
+    size_t len = mFileHandle.Write(fileName, memFile);
+
+    if (len != static_cast<size_t>(serializedHashFile.size())) {
+        ALOGE("StoreFileRaw: Failed to write %s", fileName.c_str());
+        ALOGD("StoreFileRaw: expected=%zd, actual=%zu", serializedHashFile.size(), len);
+        return false;
+    }
+
+    ALOGD("StoreFileRaw: wrote %zu bytes to %s", serializedHashFile.size(), fileName.c_str());
+    return true;
+}
+
+bool DeviceFiles::RetrieveLicense(const std::string& keySetId, LicenseState* state,
+                                  std::string* offlineLicense) {
+    OfflineFile file;
+    if (!RetrieveHashedFile(keySetId + kLicenseFileNameExt, &file)) {
+        return false;
+    }
+
+    if (file.type() != OfflineFile::LICENSE) {
+        ALOGE("RetrieveLicense: Invalid file type");
+        return false;
+    }
+
+    if (file.version() != OfflineFile::VERSION_1) {
+        ALOGE("RetrieveLicense: Invalid file version");
+        return false;
+    }
+
+    if (!file.has_license()) {
+        ALOGE("RetrieveLicense: License not present");
+        return false;
+    }
+
+    License license = file.license();
+    switch (license.state()) {
+        case License_LicenseState_ACTIVE:
+            *state = kLicenseStateActive;
+            break;
+        case License_LicenseState_RELEASING:
+            *state = kLicenseStateReleasing;
+            break;
+        default:
+            ALOGW("RetrieveLicense: Unrecognized license state: %u", kLicenseStateUnknown);
+            *state = kLicenseStateUnknown;
+            break;
+    }
+    *offlineLicense = license.license();
+    return true;
+}
+
+bool DeviceFiles::DeleteLicense(const std::string& keySetId) {
+    return mFileHandle.RemoveFile(keySetId + kLicenseFileNameExt);
+}
+
+bool DeviceFiles::DeleteAllLicenses() {
+    return mFileHandle.RemoveAllFiles();
+}
+
+bool DeviceFiles::LicenseExists(const std::string& keySetId) {
+    return mFileHandle.FileExists(keySetId + kLicenseFileNameExt);
+}
+
+std::vector<std::string> DeviceFiles::ListLicenses() const {
+    std::vector<std::string> licenses = mFileHandle.ListFiles();
+    for (size_t i = 0; i < licenses.size(); i++) {
+        std::string& license = licenses[i];
+        license = license.substr(0, license.size() - strlen(kLicenseFileNameExt));
+    }
+    return licenses;
+}
+
+bool DeviceFiles::RetrieveHashedFile(const std::string& fileName, OfflineFile* deSerializedFile) {
+    if (!deSerializedFile) {
+        ALOGE("RetrieveHashedFile: invalid file parameter");
+        return false;
+    }
+
+    if (!FileExists(fileName)) {
+        ALOGE("RetrieveHashedFile: %s does not exist", fileName.c_str());
+        return false;
+    }
+
+    ssize_t bytes = GetFileSize(fileName);
+    if (bytes <= 0) {
+        ALOGE("RetrieveHashedFile: invalid file size: %s", fileName.c_str());
+        // Remove the corrupted file so repeated attempts to access it do not
+        // keep failing with the same error and stall the system.
+        RemoveFile(fileName);
+        return false;
+    }
+
+    std::string serializedHashFile;
+    serializedHashFile.resize(bytes);
+    bytes = mFileHandle.Read(fileName, &serializedHashFile);
+
+    if (bytes != static_cast<ssize_t>(serializedHashFile.size())) {
+        ALOGE("RetrieveHashedFile: Failed to read from %s", fileName.c_str());
+        ALOGV("RetrieveHashedFile: expected: %zd, actual: %zd", serializedHashFile.size(), bytes);
+        // Remove the corrupted file so repeated attempts to access it do not
+        // keep failing with the same error and stall the system.
+        RemoveFile(fileName);
+        return false;
+    }
+
+    ALOGV("RetrieveHashedFile: read %zd from %s", bytes, fileName.c_str());
+
+    HashedFile hashFile;
+    if (!hashFile.ParseFromString(serializedHashFile)) {
+        ALOGE("RetrieveHashedFile: Unable to parse hash file");
+        // Remove corrupt file.
+        RemoveFile(fileName);
+        return false;
+    }
+
+    std::string hash;
+    if (!Hash(hashFile.file(), &hash)) {
+        ALOGE("RetrieveHashedFile: Hash computation failed");
+        return false;
+    }
+
+    if (hash != hashFile.hash()) {
+        ALOGE("RetrieveHashedFile: Hash mismatch");
+        // Remove corrupt file.
+        RemoveFile(fileName);
+        return false;
+    }
+
+    if (!deSerializedFile->ParseFromString(hashFile.file())) {
+        ALOGE("RetrieveHashedFile: Unable to parse file");
+        // Remove corrupt file.
+        RemoveFile(fileName);
+        return false;
+    }
+
+    return true;
+}
+
+bool DeviceFiles::FileExists(const std::string& fileName) const {
+    return mFileHandle.FileExists(fileName);
+}
+
+bool DeviceFiles::RemoveFile(const std::string& fileName) {
+    return mFileHandle.RemoveFile(fileName);
+}
+
+ssize_t DeviceFiles::GetFileSize(const std::string& fileName) const {
+    return mFileHandle.GetFileSize(fileName);
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/InitDataParser.cpp b/drm/mediadrm/plugins/clearkey/common/InitDataParser.cpp
new file mode 100644
index 0000000..fc839e9
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/InitDataParser.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "clearkey-InitDataParser"
+
+#include <algorithm>
+#include <arpa/inet.h>
+#include <utils/Log.h>
+
+#include "InitDataParser.h"
+
+#include "Base64.h"
+
+#include "ClearKeyUUID.h"
+#include "MimeType.h"
+
+namespace {
+const size_t kKeyIdSize = 16;
+const size_t kSystemIdSize = 16;
+}  // namespace
+
+namespace clearkeydrm {
+
+std::vector<uint8_t> StrToVector(const std::string& str) {
+    std::vector<uint8_t> vec(str.begin(), str.end());
+    return vec;
+}
+
+CdmResponseType InitDataParser::parse(const std::vector<uint8_t>& initData,
+                                      const std::string& mimeType,
+                                      CdmKeyType keyType,
+                                      std::vector<uint8_t>* licenseRequest) {
+    // Build a list of the key IDs
+    std::vector<const uint8_t*> keyIds;
+
+    if (mimeType == kIsoBmffVideoMimeType.c_str() || mimeType == kIsoBmffAudioMimeType.c_str() ||
+        mimeType == kCencInitDataFormat.c_str()) {
+        auto res = parsePssh(initData, &keyIds);
+        if (res != clearkeydrm::OK) {
+            return res;
+        }
+    } else if (mimeType == kWebmVideoMimeType.c_str() || mimeType == kWebmAudioMimeType.c_str() ||
+               mimeType == kWebmInitDataFormat.c_str()) {
+        // WebM "init data" is just a single key ID
+        if (initData.size() != kKeyIdSize) {
+            return clearkeydrm::ERROR_CANNOT_HANDLE;
+        }
+        keyIds.push_back(initData.data());
+    } else {
+        return clearkeydrm::ERROR_CANNOT_HANDLE;
+    }
+
+    if (keyType == clearkeydrm::KEY_TYPE_RELEASE) {
+        // restore key
+    }
+
+    // Build the request
+    std::string requestJson = generateRequest(keyType, keyIds);
+    std::vector<uint8_t> requestJsonVec = StrToVector(requestJson);
+
+    licenseRequest->clear();
+    licenseRequest->insert(licenseRequest->end(), requestJsonVec.begin(), requestJsonVec.end());
+    return clearkeydrm::OK;
+}
+
+CdmResponseType InitDataParser::parsePssh(const std::vector<uint8_t>& initData,
+                                          std::vector<const uint8_t*>* keyIds) {
+    // Description of PSSH format:
+    // https://w3c.github.io/encrypted-media/format-registry/initdata/cenc.html
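+    // For reference, the version-1 PSSH fields consumed below are:
+    //    4 bytes  box size (big-endian)
+    //    4 bytes  'pssh' identifier
+    //    4 bytes  version (1) plus flags (0)
+    //   16 bytes  system ID (must be the ClearKey UUID)
+    //    4 bytes  key ID count (big-endian)
+    //   16 bytes  per key ID
+    //    4 bytes  trailing DataSize, expected to be 0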
+    size_t readPosition = 0;
+
+    uint32_t expectedSize = initData.size();
+    const char psshIdentifier[4] = {'p', 's', 's', 'h'};
+    const uint8_t psshVersion1[4] = {1, 0, 0, 0};
+    uint32_t keyIdCount = 0;
+    size_t headerSize = sizeof(expectedSize) + sizeof(psshIdentifier) + sizeof(psshVersion1) +
+                        kSystemIdSize + sizeof(keyIdCount);
+    if (initData.size() < headerSize) {
+        return clearkeydrm::ERROR_CANNOT_HANDLE;
+    }
+
+    // Validate size field
+    expectedSize = htonl(expectedSize);
+    if (memcmp(&initData[readPosition], &expectedSize, sizeof(expectedSize)) != 0) {
+        return clearkeydrm::ERROR_CANNOT_HANDLE;
+    }
+    readPosition += sizeof(expectedSize);
+
+    // Validate PSSH box identifier
+    if (memcmp(&initData[readPosition], psshIdentifier, sizeof(psshIdentifier)) != 0) {
+        return clearkeydrm::ERROR_CANNOT_HANDLE;
+    }
+    readPosition += sizeof(psshIdentifier);
+
+    // Validate EME version number
+    if (memcmp(&initData[readPosition], psshVersion1, sizeof(psshVersion1)) != 0) {
+        return clearkeydrm::ERROR_CANNOT_HANDLE;
+    }
+    readPosition += sizeof(psshVersion1);
+
+    // Validate system ID
+    if (!isClearKeyUUID(&initData[readPosition])) {
+        return clearkeydrm::ERROR_CANNOT_HANDLE;
+    }
+    readPosition += kSystemIdSize;
+
+    // Read key ID count
+    memcpy(&keyIdCount, &initData[readPosition], sizeof(keyIdCount));
+    keyIdCount = ntohl(keyIdCount);
+    readPosition += sizeof(keyIdCount);
+
+    uint64_t psshSize = 0;
+    if (__builtin_mul_overflow(keyIdCount, kKeyIdSize, &psshSize) ||
+        __builtin_add_overflow(readPosition, psshSize, &psshSize) ||
+        psshSize != initData.size() - sizeof(uint32_t) /* DataSize(0) */) {
+        return clearkeydrm::ERROR_CANNOT_HANDLE;
+    }
+
+    // Calculate the key ID offsets
+    for (uint32_t i = 0; i < keyIdCount; ++i) {
+        size_t keyIdPosition = readPosition + (i * kKeyIdSize);
+        keyIds->push_back(&initData[keyIdPosition]);
+    }
+    return clearkeydrm::OK;
+}
+
+std::string InitDataParser::generateRequest(CdmKeyType keyType,
+                                            const std::vector<const uint8_t*>& keyIds) {
+    const std::string kRequestPrefix("{\"kids\":[");
+    const std::string kTemporarySession("],\"type\":\"temporary\"}");
+    const std::string kPersistentSession("],\"type\":\"persistent-license\"}");
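+    // Illustrative result for a single all-zero 16-byte key id with a
+    // streaming key type: {"kids":["AAAAAAAAAAAAAAAAAAAAAA"],"type":"temporary"}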
+
+    std::string request(kRequestPrefix);
+    std::string encodedId;
+    for (size_t i = 0; i < keyIds.size(); ++i) {
+        encodedId.clear();
+        encodeBase64Url(keyIds[i], kKeyIdSize, &encodedId);
+        if (i != 0) {
+            request.append(",");
+        }
+        request.push_back('\"');
+        request.append(encodedId);
+        request.push_back('\"');
+    }
+    if (keyType == clearkeydrm::KEY_TYPE_STREAMING) {
+        request.append(kTemporarySession);
+    } else if (keyType == clearkeydrm::KEY_TYPE_OFFLINE ||
+               keyType == clearkeydrm::KEY_TYPE_RELEASE) {
+        request.append(kPersistentSession);
+    }
+
+    // Android's Base64 encoder produces padding. EME forbids padding.
+    const char kBase64Padding = '=';
+    request.erase(std::remove(request.begin(), request.end(), kBase64Padding), request.end());
+
+    return request;
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/JsonWebKey.cpp b/drm/mediadrm/plugins/clearkey/common/JsonWebKey.cpp
new file mode 100644
index 0000000..ddbc594
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/JsonWebKey.cpp
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-JsonWebKey"
+
+#include <utils/Log.h>
+
+#include "JsonWebKey.h"
+
+#include "Base64.h"
+
+namespace {
+const std::string kBase64Padding("=");
+const std::string kKeysTag("keys");
+const std::string kKeyTypeTag("kty");
+const std::string kKeyTag("k");
+const std::string kKeyIdTag("kid");
+const std::string kMediaSessionType("type");
+const std::string kPersistentLicenseSession("persistent-license");
+const std::string kSymmetricKeyValue("oct");
+const std::string kTemporaryLicenseSession("temporary");
+}  // namespace
+
+namespace clearkeydrm {
+
+JsonWebKey::JsonWebKey() {}
+
+JsonWebKey::~JsonWebKey() {}
+
+/*
+ * Parses a JSON Web Key Set string, initializes a KeyMap with key id:key
+ * pairs from the JSON Web Key Set. Both key ids and keys are base64url
+ * encoded. The KeyMap contains base64url decoded key id:key pairs.
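+ *
+ * Illustrative input, with the base64url values elided:
+ *   {"keys":[{"kty":"oct","kid":"<base64url key id>","k":"<base64url key>"}]}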
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonWebKey::extractKeysFromJsonWebKeySet(const std::string& jsonWebKeySet, KeyMap* keys) {
+    keys->clear();
+
+    if (!parseJsonWebKeySet(jsonWebKeySet, &mJsonObjects)) {
+        return false;
+    }
+
+    // mJsonObjects[0] contains the entire JSON Web Key Set, including
+    // all the base64 encoded keys. Each key is also stored separately as
+    // a JSON object in mJsonObjects[1..n] where n is the total
+    // number of keys in the set.
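+    // For example, a set containing two keys yields three entries here: the
+    // full set at index 0 followed by one JSON object per key.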
+    if (mJsonObjects.size() == 0 || !isJsonWebKeySet(mJsonObjects[0])) {
+        return false;
+    }
+
+    std::string encodedKey, encodedKeyId;
+    std::vector<uint8_t> decodedKey, decodedKeyId;
+
+    // mJsonObjects[1] contains the first JSON Web Key in the set
+    for (size_t i = 1; i < mJsonObjects.size(); ++i) {
+        encodedKeyId.clear();
+        encodedKey.clear();
+
+        if (!parseJsonObject(mJsonObjects[i], &mTokens)) return false;
+
+        if (findKey(mJsonObjects[i], &encodedKeyId, &encodedKey)) {
+            if (encodedKeyId.empty() || encodedKey.empty()) {
+                ALOGE("Must have both key id and key in the JsonWebKey set.");
+                continue;
+            }
+
+            if (!decodeBase64String(encodedKeyId, &decodedKeyId)) {
+                ALOGE("Failed to decode key id(%s)", encodedKeyId.c_str());
+                continue;
+            }
+
+            if (!decodeBase64String(encodedKey, &decodedKey)) {
+                ALOGE("Failed to decode key(%s)", encodedKey.c_str());
+                continue;
+            }
+
+            keys->insert(std::pair<std::vector<uint8_t>, std::vector<uint8_t>>(decodedKeyId,
+                                                                               decodedKey));
+        }
+    }
+    return true;
+}
+
+bool JsonWebKey::decodeBase64String(const std::string& encodedText,
+                                    std::vector<uint8_t>* decodedText) {
+    decodedText->clear();
+
+    // encodedText should not contain padding characters as per EME spec.
+    if (encodedText.find(kBase64Padding) != std::string::npos) {
+        return false;
+    }
+
+    // Since decodeBase64() requires padding characters,
+    // add them so length of encodedText is exactly a multiple of 4.
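+    // For example, a 22-character base64url-encoded key id gets "==" appended,
+    // bringing its length to 24.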
+    int remainder = encodedText.length() % 4;
+    std::string paddedText(encodedText);
+    if (remainder > 0) {
+        for (int i = 0; i < 4 - remainder; ++i) {
+            paddedText.append(kBase64Padding);
+        }
+    }
+
+    ::android::sp<Buffer> buffer = decodeBase64(paddedText);
+    if (buffer == nullptr) {
+        ALOGE("Malformed base64 encoded content found.");
+        return false;
+    }
+
+    decodedText->insert(decodedText->end(), buffer->base(), buffer->base() + buffer->size());
+    return true;
+}
+
+bool JsonWebKey::findKey(const std::string& jsonObject, std::string* keyId,
+                         std::string* encodedKey) {
+    std::string key, value;
+
+    // Only allow symmetric key, i.e. "kty":"oct" pair.
+    if (jsonObject.find(kKeyTypeTag) != std::string::npos) {
+        findValue(kKeyTypeTag, &value);
+        if (0 != value.compare(kSymmetricKeyValue)) return false;
+    }
+
+    if (jsonObject.find(kKeyIdTag) != std::string::npos) {
+        findValue(kKeyIdTag, keyId);
+    }
+
+    if (jsonObject.find(kKeyTag) != std::string::npos) {
+        findValue(kKeyTag, encodedKey);
+    }
+    return true;
+}
+
+void JsonWebKey::findValue(const std::string& key, std::string* value) {
+    value->clear();
+    const char* valueToken;
+    for (std::vector<std::string>::const_iterator nextToken = mTokens.begin();
+         nextToken != mTokens.end(); ++nextToken) {
+        if (0 == (*nextToken).compare(key)) {
+            if (nextToken + 1 == mTokens.end()) break;
+            valueToken = (*(nextToken + 1)).c_str();
+            value->assign(valueToken);
+            nextToken++;
+            break;
+        }
+    }
+}
+
+bool JsonWebKey::isJsonWebKeySet(const std::string& jsonObject) const {
+    if (jsonObject.find(kKeysTag) == std::string::npos) {
+        ALOGE("JSON Web Key does not contain keys.");
+        return false;
+    }
+    return true;
+}
+
+/*
+ * Parses a JSON object string and initializes a vector of tokens.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonWebKey::parseJsonObject(const std::string& jsonObject, std::vector<std::string>* tokens) {
+    jsmn_parser parser;
+
+    jsmn_init(&parser);
+    int numTokens = jsmn_parse(&parser, jsonObject.c_str(), jsonObject.size(), nullptr, 0);
+    if (numTokens < 0) {
+        ALOGE("Parser returns error code=%d", numTokens);
+        return false;
+    }
+
+    mJsmnTokens.clear();
+    mJsmnTokens.resize(numTokens);
+
+    jsmn_init(&parser);
+    int status = jsmn_parse(&parser, jsonObject.c_str(), jsonObject.size(), mJsmnTokens.data(),
+                            numTokens);
+    if (status < 0) {
+        ALOGE("Parser returns error code=%d", status);
+        return false;
+    }
+
+    tokens->clear();
+    std::string token;
+    const char* pjs;
+    for (int j = 0; j < numTokens; ++j) {
+        pjs = jsonObject.c_str() + mJsmnTokens[j].start;
+        if (mJsmnTokens[j].type == JSMN_STRING || mJsmnTokens[j].type == JSMN_PRIMITIVE) {
+            token.assign(pjs, mJsmnTokens[j].end - mJsmnTokens[j].start);
+            tokens->push_back(token);
+        }
+    }
+    return true;
+}
+
+/*
+ * Parses JSON Web Key Set string and initializes a vector of JSON objects.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonWebKey::parseJsonWebKeySet(const std::string& jsonWebKeySet,
+                                    std::vector<std::string>* jsonObjects) {
+    if (jsonWebKeySet.empty()) {
+        ALOGE("Empty JSON Web Key");
+        return false;
+    }
+
+    // The jsmn parser only supports unicode encoding.
+    jsmn_parser parser;
+
+    // Compute the number of tokens. A token records its type and its
+    // offsets within the original string.
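+    // The first jsmn_parse() call passes a null token array so it only counts
+    // tokens; the second call below fills mJsmnTokens.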
+    jsmn_init(&parser);
+    int numTokens = jsmn_parse(&parser, jsonWebKeySet.c_str(), jsonWebKeySet.size(), nullptr, 0);
+    if (numTokens < 0) {
+        ALOGE("Parser returns error code=%d", numTokens);
+        return false;
+    }
+
+    mJsmnTokens.resize(numTokens);
+
+    jsmn_init(&parser);
+    int status = jsmn_parse(&parser, jsonWebKeySet.c_str(), jsonWebKeySet.size(),
+                            mJsmnTokens.data(), numTokens);
+    if (status < 0) {
+        ALOGE("Parser returns error code=%d", status);
+        return false;
+    }
+
+    std::string token;
+    const char* pjs;
+    for (int i = 0; i < numTokens; ++i) {
+        pjs = jsonWebKeySet.c_str() + mJsmnTokens[i].start;
+        if (mJsmnTokens[i].type == JSMN_OBJECT) {
+            token.assign(pjs, mJsmnTokens[i].end - mJsmnTokens[i].start);
+            jsonObjects->push_back(token);
+        }
+    }
+    return true;
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/common/MemoryFileSystem.cpp
new file mode 100644
index 0000000..1045458
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/MemoryFileSystem.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <utils/Log.h>
+#include <string>
+#include <vector>
+
+#include "MemoryFileSystem.h"
+
+namespace clearkeydrm {
+
+std::string MemoryFileSystem::GetFileName(const std::string& path) {
+    size_t index = path.find_last_of('/');
+    if (index != std::string::npos) {
+        return path.substr(index + 1);
+    } else {
+        return path;
+    }
+}
+
+bool MemoryFileSystem::FileExists(const std::string& fileName) const {
+    auto result = mMemoryFileSystem.find(fileName);
+    return result != mMemoryFileSystem.end();
+}
+
+ssize_t MemoryFileSystem::GetFileSize(const std::string& fileName) const {
+    auto result = mMemoryFileSystem.find(fileName);
+    if (result != mMemoryFileSystem.end()) {
+        return static_cast<ssize_t>(result->second.getFileSize());
+    } else {
+        ALOGE("Failed to get size for %s", fileName.c_str());
+        return -1;
+    }
+}
+
+std::vector<std::string> MemoryFileSystem::ListFiles() const {
+    std::vector<std::string> list;
+    for (const auto& filename : mMemoryFileSystem) {
+        list.push_back(filename.first);
+    }
+    return list;
+}
+
+size_t MemoryFileSystem::Read(const std::string& path, std::string* buffer) {
+    std::string key = GetFileName(path);
+    auto result = mMemoryFileSystem.find(key);
+    if (result != mMemoryFileSystem.end()) {
+        std::string serializedHashFile = result->second.getContent();
+        buffer->assign(serializedHashFile);
+        return buffer->size();
+    } else {
+        ALOGE("Failed to read from %s", path.c_str());
+        return -1;
+    }
+}
+
+size_t MemoryFileSystem::Write(const std::string& path, const MemoryFile& memoryFile) {
+    std::string key = GetFileName(path);
+    auto result = mMemoryFileSystem.find(key);
+    if (result != mMemoryFileSystem.end()) {
+        mMemoryFileSystem.erase(key);
+    }
+    mMemoryFileSystem.insert(std::pair<std::string, MemoryFile>(key, memoryFile));
+    return memoryFile.getFileSize();
+}
+
+bool MemoryFileSystem::RemoveFile(const std::string& fileName) {
+    auto result = mMemoryFileSystem.find(fileName);
+    if (result != mMemoryFileSystem.end()) {
+        mMemoryFileSystem.erase(result);
+        return true;
+    } else {
+        ALOGE("Cannot find license to remove: %s", fileName.c_str());
+        return false;
+    }
+}
+
+bool MemoryFileSystem::RemoveAllFiles() {
+    mMemoryFileSystem.clear();
+    return mMemoryFileSystem.empty();
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/Session.cpp b/drm/mediadrm/plugins/clearkey/common/Session.cpp
new file mode 100644
index 0000000..d7fd13a
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/Session.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-Session"
+
+#include <utils/Log.h>
+
+#include "Session.h"
+
+#include "AesCtrDecryptor.h"
+#include "InitDataParser.h"
+#include "JsonWebKey.h"
+
+namespace clearkeydrm {
+
+using ::android::Mutex;
+using ::android::sp;
+
+CdmResponseType Session::getKeyRequest(const std::vector<uint8_t>& initData,
+                                       const std::string& mimeType,
+                                       CdmKeyType keyType,
+                                       std::vector<uint8_t>* keyRequest) const {
+    InitDataParser parser;
+    return parser.parse(initData, mimeType, keyType, keyRequest);
+}
+
+CdmResponseType Session::provideKeyResponse(const std::vector<uint8_t>& response) {
+    std::string responseString(reinterpret_cast<const char*>(response.data()), response.size());
+    KeyMap keys;
+
+    Mutex::Autolock lock(mMapLock);
+    JsonWebKey parser;
+    if (parser.extractKeysFromJsonWebKeySet(responseString, &keys)) {
+        for (auto& key : keys) {
+            std::string first(key.first.begin(), key.first.end());
+            std::string second(key.second.begin(), key.second.end());
+            mKeyMap.insert(
+                    std::pair<std::vector<uint8_t>, std::vector<uint8_t>>(key.first, key.second));
+        }
+        return clearkeydrm::OK;
+    } else {
+        return clearkeydrm::ERROR_UNKNOWN;
+    }
+}
+
+CdmResponseType Session::decrypt(const KeyId keyId, const Iv iv,
+                                 const uint8_t* srcPtr, uint8_t* destPtr,
+                                 const std::vector<int32_t>& clearDataLengths,
+                                 const std::vector<int32_t>& encryptedDataLengths,
+                                 size_t* bytesDecryptedOut) {
+    Mutex::Autolock lock(mMapLock);
+
+    if (getMockError() != clearkeydrm::OK) {
+        return getMockError();
+    }
+
+    std::vector<uint8_t> keyIdVector;
+    keyIdVector.clear();
+    keyIdVector.insert(keyIdVector.end(), keyId, keyId + kBlockSize);
+    std::map<std::vector<uint8_t>, std::vector<uint8_t>>::iterator itr;
+    itr = mKeyMap.find(keyIdVector);
+    if (itr == mKeyMap.end()) {
+        return clearkeydrm::ERROR_NO_LICENSE;
+    }
+
+    clearkeydrm::AesCtrDecryptor decryptor;
+    auto status = decryptor.decrypt(itr->second /*key*/, iv, srcPtr, destPtr,
+                                    clearDataLengths,
+                                    encryptedDataLengths,
+                                    bytesDecryptedOut);
+    return status;
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/SessionLibrary.cpp b/drm/mediadrm/plugins/clearkey/common/SessionLibrary.cpp
new file mode 100644
index 0000000..6b2ff38
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/SessionLibrary.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "clearkey-SessionLibrary"
+
+#include <utils/Log.h>
+
+#include "SessionLibrary.h"
+
+namespace clearkeydrm {
+
+using ::android::Mutex;
+using ::android::sp;
+
+Mutex SessionLibrary::sSingletonLock;
+SessionLibrary* SessionLibrary::sSingleton = nullptr;
+
+SessionLibrary* SessionLibrary::get() {
+    Mutex::Autolock lock(sSingletonLock);
+
+    if (sSingleton == nullptr) {
+        ALOGD("Instantiating Session Library Singleton.");
+        sSingleton = new SessionLibrary();
+    }
+
+    return sSingleton;
+}
+
+sp<Session> SessionLibrary::createSession() {
+    Mutex::Autolock lock(mSessionsLock);
+
+    char sessionIdRaw[16] = {};
+    snprintf(sessionIdRaw, sizeof(sessionIdRaw), "%u", mNextSessionId);
+
+    mNextSessionId += 1;
+
+    std::vector<uint8_t> sessionId;
+    sessionId.insert(sessionId.end(), sessionIdRaw,
+                     sessionIdRaw + sizeof(sessionIdRaw) / sizeof(uint8_t));
+
+    mSessions.insert(
+            std::pair<std::vector<uint8_t>, sp<Session>>(sessionId, new Session(sessionId)));
+    std::map<std::vector<uint8_t>, sp<Session>>::iterator itr = mSessions.find(sessionId);
+    if (itr != mSessions.end()) {
+        return itr->second;
+    } else {
+        return nullptr;
+    }
+}
+
+sp<Session> SessionLibrary::findSession(const std::vector<uint8_t>& sessionId) {
+    Mutex::Autolock lock(mSessionsLock);
+    std::map<std::vector<uint8_t>, sp<Session>>::iterator itr = mSessions.find(sessionId);
+    if (itr != mSessions.end()) {
+        return itr->second;
+    } else {
+        return nullptr;
+    }
+}
+
+void SessionLibrary::destroySession(const sp<Session>& session) {
+    Mutex::Autolock lock(mSessionsLock);
+    mSessions.erase(session->sessionId());
+}
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/ClearKeyUUID.h b/drm/mediadrm/plugins/clearkey/common/include/ClearKeyUUID.h
index fe10fba..8911024 100644
--- a/drm/mediadrm/plugins/clearkey/common/include/ClearKeyUUID.h
+++ b/drm/mediadrm/plugins/clearkey/common/include/ClearKeyUUID.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,9 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#ifndef CLEARKEY_UUID_H_
-#define CLEARKEY_UUID_H_
+#pragma once
 
 #include <array>
 #include <cstdint>
@@ -27,6 +25,4 @@
 
 std::vector<std::array<uint8_t, 16>> getSupportedCryptoSchemes();
 
-} // namespace clearkeydrm
-
-#endif // CLEARKEY_UUID_H_
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/AesCtrDecryptor.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/AesCtrDecryptor.h
new file mode 100644
index 0000000..dbf3098
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/AesCtrDecryptor.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <cstdint>
+
+#include "ClearKeyTypes.h"
+
+namespace clearkeydrm {
+
+class AesCtrDecryptor {
+  public:
+    AesCtrDecryptor() {}
+
+    CdmResponseType decrypt(const std::vector<uint8_t>& key, const Iv iv, const uint8_t* source,
+                            uint8_t* destination,
+                            const std::vector<int32_t>& clearDataLengths,
+                            const std::vector<int32_t>& encryptedDataLengths,
+                            size_t* bytesDecryptedOut);
+
+  private:
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(AesCtrDecryptor);
+};
+
+}  // namespace clearkeydrm
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Base64.h
similarity index 66%
copy from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
copy to drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Base64.h
index b08a604..075d247 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Base64.h
@@ -13,17 +13,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+#pragma once
 
-package android.media;
+#include "Buffer.h"
 
-/**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationType {
-    NONE     = 0,
-    IEC61937 = 1,
-}
\ No newline at end of file
+namespace clearkeydrm {
+
+struct Buffer;
+
+::android::sp<Buffer> decodeBase64(const std::string& s);
+
+void encodeBase64(const void* data, size_t size, std::string* out);
+
+void encodeBase64Url(const void* data, size_t size, std::string* out);
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Buffer.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Buffer.h
new file mode 100644
index 0000000..d41c4f3
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Buffer.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <utils/RefBase.h>
+
+#include "ClearKeyTypes.h"
+
+namespace clearkeydrm {
+struct Buffer : public ::android::RefBase {
+    explicit Buffer(size_t capacity);
+
+    uint8_t* base() { return reinterpret_cast<uint8_t*>(mData); }
+    uint8_t* data() { return reinterpret_cast<uint8_t*>(mData) + mRangeOffset; }
+    size_t capacity() const { return mCapacity; }
+    size_t size() const { return mRangeLength; }
+    size_t offset() const { return mRangeOffset; }
+
+  protected:
+    virtual ~Buffer();
+
+  private:
+    void* mData;
+    size_t mCapacity;
+    size_t mRangeOffset;
+    size_t mRangeLength;
+
+    bool mOwnsData;
+
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(Buffer);
+};
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h
new file mode 100644
index 0000000..9a22633
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <string>
+
+namespace clearkeydrm {
+static const std::string kVendorKey("vendor");
+static const std::string kVendorValue("Google");
+static const std::string kVersionKey("version");
+static const std::string kVersionValue("1.2");
+static const std::string kPluginDescriptionKey("description");
+static const std::string kPluginDescriptionValue("ClearKey CDM");
+static const std::string kAlgorithmsKey("algorithms");
+static const std::string kAlgorithmsValue("");
+static const std::string kListenerTestSupportKey("listenerTestSupport");
+static const std::string kListenerTestSupportValue("true");
+static const std::string kDrmErrorTestKey("drmErrorTest");
+static const std::string kDrmErrorTestValue("");
+static const std::string kResourceContentionValue("resourceContention");
+static const std::string kLostStateValue("lostState");
+static const std::string kFrameTooLargeValue("frameTooLarge");
+static const std::string kInvalidStateValue("invalidState");
+
+static const std::string kDeviceIdKey("deviceId");
+static const uint8_t kTestDeviceIdData[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+                                            0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+
+// settable byte array property
+static const std::string kClientIdKey("clientId");
+
+// TODO stub out metrics for nw
+static const std::string kMetricsKey("metrics");
+static const uint8_t kMetricsData[] = {0};
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyTypes.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyTypes.h
new file mode 100644
index 0000000..0cc9511
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyTypes.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+#include <map>
+#include <vector>
+
+namespace clearkeydrm {
+
+const uint8_t kBlockSize = 16;  // AES_BLOCK_SIZE;
+typedef uint8_t KeyId[kBlockSize];
+typedef uint8_t Iv[kBlockSize];
+
+typedef std::map<std::vector<uint8_t>, std::vector<uint8_t>> KeyMap;
+
+#define CLEARKEY_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+    TypeName(const TypeName&) = delete;             \
+    void operator=(const TypeName&) = delete;
+
+#define CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(TypeName) \
+    TypeName() = delete;                                    \
+    TypeName(const TypeName&) = delete;                     \
+    void operator=(const TypeName&) = delete;
+
+enum CdmResponseType : int32_t {
+    OK = 0,
+    ERROR_NO_LICENSE = 1,
+    ERROR_SESSION_NOT_OPENED = 3,
+    ERROR_CANNOT_HANDLE = 4,
+    ERROR_INVALID_STATE = 5,
+    BAD_VALUE = 6,
+    ERROR_DECRYPT = 11,
+    ERROR_UNKNOWN = 12,
+    ERROR_INSUFFICIENT_SECURITY = 13,
+    ERROR_FRAME_TOO_LARGE = 14,
+    ERROR_SESSION_LOST_STATE = 15,
+    ERROR_RESOURCE_CONTENTION = 16,
+};
+
+enum CdmKeyType : int32_t {
+    KEY_TYPE_OFFLINE = 0,
+    KEY_TYPE_STREAMING = 1,
+    KEY_TYPE_RELEASE = 2,
+};
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/DeviceFiles.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/DeviceFiles.h
new file mode 100644
index 0000000..5698441
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/DeviceFiles.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "ClearKeyTypes.h"
+#include "MemoryFileSystem.h"
+
+namespace clearkeydrm {
+class OfflineFile;
+class DeviceFiles {
+  public:
+    typedef enum {
+        kLicenseStateUnknown,
+        kLicenseStateActive,
+        kLicenseStateReleasing,
+    } LicenseState;
+
+    DeviceFiles(){};
+    virtual ~DeviceFiles(){};
+
+    virtual bool StoreLicense(const std::string& keySetId, LicenseState state,
+                              const std::string& keyResponse);
+
+    virtual bool RetrieveLicense(const std::string& keySetId, LicenseState* state,
+                                 std::string* offlineLicense);
+
+    virtual bool LicenseExists(const std::string& keySetId);
+
+    virtual std::vector<std::string> ListLicenses() const;
+
+    virtual bool DeleteLicense(const std::string& keySetId);
+
+    virtual bool DeleteAllLicenses();
+
+  private:
+    bool FileExists(const std::string& path) const;
+    ssize_t GetFileSize(const std::string& fileName) const;
+    bool RemoveFile(const std::string& fileName);
+
+    bool RetrieveHashedFile(
+            const std::string& fileName,
+            OfflineFile* deSerializedFile);
+    bool StoreFileRaw(const std::string& fileName, const std::string& serializedFile);
+    bool StoreFileWithHash(const std::string& fileName, const std::string& serializedFile);
+
+    MemoryFileSystem mFileHandle;
+
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(DeviceFiles);
+};
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/InitDataParser.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/InitDataParser.h
new file mode 100644
index 0000000..8ecc8e3
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/InitDataParser.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include "ClearKeyTypes.h"
+
+namespace clearkeydrm {
+
+class InitDataParser {
+  public:
+    InitDataParser() {}
+
+    CdmResponseType parse(const std::vector<uint8_t>& initData, const std::string& mimeType,
+                          CdmKeyType keyType, std::vector<uint8_t>* licenseRequest);
+
+  private:
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(InitDataParser);
+
+    CdmResponseType parsePssh(const std::vector<uint8_t>& initData,
+                              std::vector<const uint8_t*>* keyIds);
+
+    std::string generateRequest(CdmKeyType keyType, const std::vector<const uint8_t*>& keyIds);
+};
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/JsonWebKey.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/JsonWebKey.h
new file mode 100644
index 0000000..6681553
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/JsonWebKey.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "jsmn.h"
+#include "ClearKeyTypes.h"
+
+namespace clearkeydrm {
+
+class JsonWebKey {
+  public:
+    JsonWebKey();
+    virtual ~JsonWebKey();
+
+    bool extractKeysFromJsonWebKeySet(const std::string& jsonWebKeySet, KeyMap* keys);
+
+  private:
+    std::vector<jsmntok_t> mJsmnTokens;
+    std::vector<std::string> mJsonObjects;
+    std::vector<std::string> mTokens;
+
+    bool decodeBase64String(const std::string& encodedText, std::vector<uint8_t>* decodedText);
+    bool findKey(const std::string& jsonObject, std::string* keyId, std::string* encodedKey);
+    void findValue(const std::string& key, std::string* value);
+    bool isJsonWebKeySet(const std::string& jsonObject) const;
+    bool parseJsonObject(const std::string& jsonObject, std::vector<std::string>* tokens);
+    bool parseJsonWebKeySet(const std::string& jsonWebKeySet,
+                            std::vector<std::string>* jsonObjects);
+
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(JsonWebKey);
+};
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/MemoryFileSystem.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/MemoryFileSystem.h
new file mode 100644
index 0000000..5642a0f
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/MemoryFileSystem.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "ClearKeyTypes.h"
+
+namespace clearkeydrm {
+
+// Using the Android file system would require the clearkey plugin to update
+// its sepolicy, but we are unable to update the sepolicy for older vendor
+// partitions. To provide backward compatibility, the clearkey plugin
+// implements a very simple in-memory file system. This memory file system
+// does not support a directory structure.
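+//
+// Illustrative usage, mirroring DeviceFiles::StoreFileRaw(); "serialized" and
+// "keySetId.ext" are placeholders:
+//   MemoryFileSystem fs;
+//   MemoryFileSystem::MemoryFile file;
+//   file.setFileName("keySetId.ext");
+//   file.setContent(serialized);
+//   file.setFileSize(serialized.size());
+//   fs.Write("keySetId.ext", file);
+//   std::string buffer;
+//   fs.Read("keySetId.ext", &buffer);  // buffer now holds the stored content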
+class MemoryFileSystem {
+  public:
+    struct MemoryFile {
+        std::string fileName;  // excludes path
+        std::string content;
+        size_t fileSize;
+
+        std::string getContent() const { return content; }
+        size_t getFileSize() const { return fileSize; }
+        void setContent(const std::string& file) { content = file; }
+        void setFileName(const std::string& name) { fileName = name; }
+        void setFileSize(size_t size) {
+            content.resize(size);
+            fileSize = size;
+        }
+    };
+
+    MemoryFileSystem(){};
+    virtual ~MemoryFileSystem(){};
+
+    bool FileExists(const std::string& fileName) const;
+    ssize_t GetFileSize(const std::string& fileName) const;
+    std::vector<std::string> ListFiles() const;
+    size_t Read(const std::string& pathName, std::string* buffer);
+    bool RemoveAllFiles();
+    bool RemoveFile(const std::string& fileName);
+    size_t Write(const std::string& pathName, const MemoryFile& memoryFile);
+
+  private:
+    // License file name is made up of a unique keySetId, therefore,
+    // the filename can be used as the key to locate licenses in the
+    // memory file system.
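+    // For example, DeviceFiles stores a license under the name
+    // keySetId + kLicenseFileNameExt, and that full name is the map key.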
+    std::map<std::string, MemoryFile> mMemoryFileSystem;
+
+    std::string GetFileName(const std::string& path);
+
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(MemoryFileSystem);
+};
+
+}  // namespace clearkeydrm
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/MimeTypeStdStr.h
similarity index 62%
copy from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
copy to drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/MimeTypeStdStr.h
index b08a604..dea2974 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/MimeTypeStdStr.h
@@ -13,17 +13,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+#pragma once
 
-package android.media;
+#include <string>
 
-/**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationType {
-    NONE     = 0,
-    IEC61937 = 1,
-}
\ No newline at end of file
+namespace {
+const std::string kCencInitDataFormat("cenc");
+const std::string kIsoBmffAudioMimeType("audio/mp4");
+const std::string kIsoBmffVideoMimeType("video/mp4");
+const std::string kWebmInitDataFormat("webm");
+const std::string kWebmAudioMimeType("audio/webm");
+const std::string kWebmVideoMimeType("video/webm");
+}  // namespace
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Session.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Session.h
new file mode 100644
index 0000000..e2d4e32
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/Session.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+#include <cstdint>
+#include <vector>
+
+#include "ClearKeyTypes.h"
+
+namespace clearkeydrm {
+
+class Session : public ::android::RefBase {
+  public:
+    explicit Session(const std::vector<uint8_t>& sessionId)
+        : mSessionId(sessionId), mMockError(clearkeydrm::OK) {}
+    virtual ~Session() {}
+
+    const std::vector<uint8_t>& sessionId() const { return mSessionId; }
+
+    CdmResponseType getKeyRequest(const std::vector<uint8_t>& initDataType,
+                                  const std::string& mimeType,
+                                  CdmKeyType keyType,
+                                  std::vector<uint8_t>* keyRequest) const;
+
+    CdmResponseType provideKeyResponse(const std::vector<uint8_t>& response);
+
+    CdmResponseType decrypt(const KeyId keyId, const Iv iv, const uint8_t* srcPtr, uint8_t* dstPtr,
+                            const std::vector<int32_t>& clearDataLengths,
+                            const std::vector<int32_t>& encryptedDataLengths,
+                            size_t* bytesDecryptedOut);
+
+    void setMockError(CdmResponseType error) { mMockError = error; }
+    CdmResponseType getMockError() const { return mMockError; }
+
+  private:
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(Session);
+
+    const std::vector<uint8_t> mSessionId;
+    KeyMap mKeyMap;
+    ::android::Mutex mMapLock;
+
+    // For mocking error return scenarios
+    CdmResponseType mMockError;
+};
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/SessionLibrary.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/SessionLibrary.h
new file mode 100644
index 0000000..987e328
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/SessionLibrary.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+#include "ClearKeyTypes.h"
+#include "Session.h"
+
+namespace clearkeydrm {
+
+class SessionLibrary : public ::android::RefBase {
+  public:
+    static SessionLibrary* get();
+
+    ::android::sp<Session> createSession();
+
+    ::android::sp<Session> findSession(const std::vector<uint8_t>& sessionId);
+
+    void destroySession(const ::android::sp<Session>& session);
+
+    size_t numOpenSessions() const { return mSessions.size(); }
+
+  private:
+    CLEARKEY_DISALLOW_COPY_AND_ASSIGN(SessionLibrary);
+
+    SessionLibrary() : mNextSessionId(1) {}
+
+    static ::android::Mutex sSingletonLock;
+    static SessionLibrary* sSingleton;
+
+    ::android::Mutex mSessionsLock;
+    uint32_t mNextSessionId;
+    std::map<std::vector<uint8_t>, ::android::sp<Session>> mSessions;
+};
+
+}  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/protos/DeviceFiles.proto b/drm/mediadrm/plugins/clearkey/common/protos/DeviceFiles.proto
new file mode 100644
index 0000000..2d98656
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/protos/DeviceFiles.proto
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+
+package clearkeydrm;
+
+// need this if we are using libprotobuf-cpp-2.3.0-lite
+option optimize_for = LITE_RUNTIME;
+
+message License {
+  enum LicenseState {
+    ACTIVE = 1;
+    RELEASING = 2;
+  }
+
+  optional LicenseState state = 1;
+  optional bytes license = 2;
+}
+
+message OfflineFile {
+  enum FileType {
+    LICENSE = 1;
+  }
+
+  enum FileVersion {
+    VERSION_1 = 1;
+  }
+
+  optional FileType type = 1;
+  optional FileVersion version = 2 [default = VERSION_1];
+  optional License license = 3;
+
+}
+
+message HashedFile {
+  optional bytes file = 1;
+  // A raw (not hex-encoded) SHA256, taken over the bytes of 'file'.
+  optional bytes hash = 2;
+}
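+
+// Layout note: DeviceFiles serializes an OfflineFile, wraps those bytes in a
+// HashedFile (file = serialized OfflineFile, hash = its SHA256), and writes
+// the serialized HashedFile to the memory file system.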
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
index 6ac3510..089eb1c 100644
--- a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
@@ -207,6 +207,7 @@
     }
 
     infoMap.clear();
+    android::Mutex::Autolock lock(mPlayPolicyLock);
     for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
         infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
     }
diff --git a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
index aa9b59d..95f15ca 100644
--- a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
@@ -262,7 +262,7 @@
     void initProperties();
     void setPlayPolicy();
 
-    android::Mutex mPlayPolicyLock;
+    mutable android::Mutex mPlayPolicyLock;
     android::KeyedVector<String8, String8> mPlayPolicy;
     android::KeyedVector<String8, String8> mStringProperties;
     android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index 6c68532..02ac943 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -93,6 +93,11 @@
     srcs: ["protos/DeviceFiles.proto"],
 }
 
+cc_library {
+    name: "libclearkeyhidl",
+    defaults: ["clearkey_service_defaults"],
+}
+
 cc_binary {
     name: "android.hardware.drm@1.2-service.clearkey",
     defaults: ["clearkey_service_defaults"],
@@ -126,3 +131,37 @@
     init_rc: ["android.hardware.drm@1.4-service-lazy.clearkey.rc"],
     vintf_fragments: ["manifest_android.hardware.drm@1.4-service.clearkey.xml"],
 }
+
+cc_fuzz {
+    name: "clearkeyV1.4_fuzzer",
+    vendor: true,
+    srcs: [
+        "fuzzer/clearkeyV1.4_fuzzer.cpp",
+    ],
+    static_libs: [
+        "libclearkeyhidl",
+        "libclearkeycommon",
+        "libclearkeydevicefiles-protos",
+        "libjsmn",
+        "libprotobuf-cpp-lite",
+        "libutils",
+    ],
+    shared_libs: [
+        "android.hidl.allocator@1.0",
+        "android.hardware.drm@1.0",
+        "android.hardware.drm@1.1",
+        "android.hardware.drm@1.2",
+        "android.hardware.drm@1.3",
+        "android.hardware.drm@1.4",
+        "libcrypto",
+        "libhidlbase",
+        "libhidlmemory",
+        "liblog",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index bc7c3f2..32d7723 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -187,7 +187,7 @@
         return Status_V1_2::ERROR_DRM_CANNOT_HANDLE;
     }
 
-    *defaultUrl = "";
+    *defaultUrl = "https://default.url";
     *keyRequestType = KeyRequestType_V1_1::UNKNOWN;
     *request = std::vector<uint8_t>();
 
@@ -221,7 +221,6 @@
         if (requestString.find(kOfflineLicense) != std::string::npos) {
             std::string emptyResponse;
             std::string keySetIdString(keySetId.begin(), keySetId.end());
-            Mutex::Autolock lock(mFileHandleLock);
             if (!mFileHandle.StoreLicense(keySetIdString,
                     DeviceFiles::kLicenseStateReleasing,
                     emptyResponse)) {
@@ -337,7 +336,6 @@
         }
         *keySetId = kKeySetIdPrefix + ByteArrayToHexString(
                 reinterpret_cast<const uint8_t*>(randomData.data()), randomData.size());
-        Mutex::Autolock lock(mFileHandleLock);
         if (mFileHandle.LicenseExists(*keySetId)) {
             // collision, regenerate
             ALOGV("Retry generating KeySetId");
@@ -395,7 +393,6 @@
     if (status == Status::OK) {
         if (isOfflineLicense) {
             if (isRelease) {
-                Mutex::Autolock lock(mFileHandleLock);
                 mFileHandle.DeleteLicense(keySetId);
                 mSessionLibrary->destroySession(session);
             } else {
@@ -404,7 +401,6 @@
                     return Void();
                 }
 
-                Mutex::Autolock lock(mFileHandleLock);
                 bool ok = mFileHandle.StoreLicense(
                         keySetId,
                         DeviceFiles::kLicenseStateActive,
@@ -459,7 +455,6 @@
         DeviceFiles::LicenseState licenseState;
         std::string offlineLicense;
         Status status = Status::OK;
-        Mutex::Autolock lock(mFileHandleLock);
         if (!mFileHandle.RetrieveLicense(std::string(keySetId.begin(), keySetId.end()),
                 &licenseState, &offlineLicense)) {
             ALOGE("Failed to restore offline license");
@@ -582,7 +577,6 @@
 Return<void> DrmPlugin::queryKeyStatus(
         const hidl_vec<uint8_t>& sessionId,
         queryKeyStatus_cb _hidl_cb) {
-
     if (sessionId.size() == 0) {
         // Returns empty key status KeyValue pair
         _hidl_cb(Status::BAD_VALUE, hidl_vec<KeyValue>());
@@ -592,12 +586,14 @@
     std::vector<KeyValue> infoMapVec;
     infoMapVec.clear();
 
+    mPlayPolicyLock.lock();
     KeyValue keyValuePair;
     for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
         keyValuePair.key = mPlayPolicy[i].key;
         keyValuePair.value = mPlayPolicy[i].value;
         infoMapVec.push_back(keyValuePair);
     }
+    mPlayPolicyLock.unlock();
     _hidl_cb(Status::OK, toHidlVec(infoMapVec));
     return Void();
 }
@@ -768,8 +764,6 @@
 }
 
 Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
-    Mutex::Autolock lock(mFileHandleLock);
-
     std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
     std::vector<KeySetId> keySetIds;
     if (mMockError != Status_V1_2::OK) {
@@ -790,7 +784,6 @@
         return toStatus_1_0(mMockError);
     }
     std::string licenseName(keySetId.begin(), keySetId.end());
-    Mutex::Autolock lock(mFileHandleLock);
     if (mFileHandle.DeleteLicense(licenseName)) {
         return Status::OK;
     }
@@ -799,8 +792,6 @@
 
 Return<void> DrmPlugin::getOfflineLicenseState(const KeySetId& keySetId,
         getOfflineLicenseState_cb _hidl_cb) {
-    Mutex::Autolock lock(mFileHandleLock);
-
     std::string licenseName(keySetId.begin(), keySetId.end());
     DeviceFiles::LicenseState state;
     std::string license;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
index e61db3f..56910be 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
@@ -24,13 +24,11 @@
 }
 
 bool MemoryFileSystem::FileExists(const std::string& fileName) const {
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     return result != mMemoryFileSystem.end();
 }
 
 ssize_t MemoryFileSystem::GetFileSize(const std::string& fileName) const {
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     if (result != mMemoryFileSystem.end()) {
         return static_cast<ssize_t>(result->second.getFileSize());
@@ -42,7 +40,6 @@
 
 std::vector<std::string> MemoryFileSystem::ListFiles() const {
     std::vector<std::string> list;
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     for (const auto& filename : mMemoryFileSystem) {
         list.push_back(filename.first);
     }
@@ -51,7 +48,6 @@
 
 size_t MemoryFileSystem::Read(const std::string& path, std::string* buffer) {
     std::string key = GetFileName(path);
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(key);
     if (result != mMemoryFileSystem.end()) {
         std::string serializedHashFile = result->second.getContent();
@@ -65,7 +61,6 @@
 
 size_t MemoryFileSystem::Write(const std::string& path, const MemoryFile& memoryFile) {
     std::string key = GetFileName(path);
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(key);
     if (result != mMemoryFileSystem.end()) {
         mMemoryFileSystem.erase(key);
@@ -75,7 +70,6 @@
 }
 
 bool MemoryFileSystem::RemoveFile(const std::string& fileName) {
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     if (result != mMemoryFileSystem.end()) {
         mMemoryFileSystem.erase(result);
@@ -87,7 +81,6 @@
 }
 
 bool MemoryFileSystem::RemoveAllFiles() {
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     mMemoryFileSystem.clear();
     return mMemoryFileSystem.empty();
 }
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
index 9afd3d7..ec4517d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
@@ -11,4 +11,4 @@
     user media
     group media mediadrm
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
index c1abe7f..3b48cf2 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
@@ -10,4 +10,4 @@
     user media
     group media mediadrm
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc
index 1e0d431..6e64978 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc
@@ -13,4 +13,4 @@
     user media
     group media mediadrm
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc
index 8130511..e302e1b 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc
@@ -11,4 +11,4 @@
     user media
     group media mediadrm
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service-lazy.clearkey.rc
index 46aba88..84a63a1 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service-lazy.clearkey.rc
@@ -15,4 +15,4 @@
     user media
     group media mediadrm
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service.clearkey.rc
index 8186933..649599e 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service.clearkey.rc
@@ -13,4 +13,4 @@
     user media
     group media mediadrm
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md
new file mode 100644
index 0000000..cb45460
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md
@@ -0,0 +1,52 @@
+# Fuzzer for android.hardware.drm@1.4-service.clearkey
+
+## Plugin Design Considerations
+The fuzzer plugin for android.hardware.drm@1.4-service.clearkey is designed based on an understanding of the
+source code and aims to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+android.hardware.drm@1.4-service.clearkey supports the following parameters:
+1. Security Level (parameter name: `securityLevel`)
+2. Mime Type (parameter name: `mimeType`)
+3. Key Type (parameter name: `keyType`)
+4. Crypto Mode (parameter name: `cryptoMode`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `securityLevel` | 0.`SecurityLevel::UNKNOWN` 1.`SecurityLevel::SW_SECURE_CRYPTO` 2.`SecurityLevel::SW_SECURE_DECODE` 3.`SecurityLevel::HW_SECURE_CRYPTO`  4.`SecurityLevel::HW_SECURE_DECODE` 5.`SecurityLevel::HW_SECURE_ALL`| Value obtained from FuzzedDataProvider in the range 0 to 5|
+| `mimeType` | 0.`video/mp4` 1.`video/mpeg` 2.`video/x-flv` 3.`video/mj2` 4.`video/3gp2` 5.`video/3gpp` 6.`video/3gpp2` 7.`audio/mp4` 8.`audio/mpeg` 9.`audio/aac` 10.`audio/3gp2` 11.`audio/3gpp` 12.`audio/3gpp2` 13.`audio/webm` 14.`video/webm` 15.`webm` 16.`cenc` 17.`video/unknown` 18.`audio/unknown`| Value obtained from FuzzedDataProvider in the range 0 to 18|
+| `keyType` | 0.`KeyType::OFFLINE` 1.`KeyType::STREAMING` 2.`KeyType::RELEASE` | Value obtained from FuzzedDataProvider in the range 0 to 2|
+| `cryptoMode` | 0.`Mode::UNENCRYPTED` 1.`Mode::AES_CTR` 2.`Mode::AES_CBC_CTS` 3.`Mode::AES_CBC` | Value obtained from FuzzedDataProvider in the range 0 to 3|
+
+This also ensures that the plugin is always deterministic for any given input.
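+
+A minimal sketch of how one such parameter is derived from the fuzzed input,
+mirroring the `getValueFromArray` helper defined in `clearkeyV1.4_fuzzer.cpp`
+(shown here for illustration only):
+```
+template <typename T, size_t size>
+T getValueFromArray(FuzzedDataProvider *fdp, const T (&arr)[size]) {
+  // Pick an index in [0, size - 1] from the fuzzed input stream.
+  return arr[fdp->ConsumeIntegralInRange<int32_t>(0, size - 1)];
+}
+
+// e.g. SecurityLevel securityLevel = getValueFromArray(&fdp, kSecurityLevel);
+```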
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
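+
+The entry point below (from `clearkeyV1.4_fuzzer.cpp`) shows how the whole
+input buffer is forwarded to the DRM and crypto plugin routines; inputs shorter
+than one AES block are rejected because the fuzzer uses at least
+`kAESBlockSize` bytes for the AES key id and IV:
+```
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  if (size < kAESBlockSize) {
+    return 0;
+  }
+  ClearKeyFuzzer clearKeyFuzzer;
+  if (clearKeyFuzzer.init()) {
+    clearKeyFuzzer.process(data, size);  // entire input is handed to the plugin APIs
+  }
+  return 0;
+}
+```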
+
+## Build
+
+This describes the steps to build the clearkeyV1.4_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer:
+```
+  $ mm -j$(nproc) clearkeyV1.4_fuzzer
+```
+#### Steps to run
+To run on device:
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/${TARGET_ARCH}/clearkeyV1.4_fuzzer/vendor/hw/clearkeyV1.4_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp
new file mode 100644
index 0000000..afe0e6c
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp
@@ -0,0 +1,719 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <include/CreatePluginFactories.h>
+
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <hidlmemory/mapping.h>
+#include <include/ClearKeyDrmProperties.h>
+#include <include/CryptoFactory.h>
+#include <include/CryptoPlugin.h>
+#include <include/DrmPlugin.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+namespace drm = ::android::hardware::drm;
+using namespace std;
+using namespace android;
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hidl::allocator::V1_0::IAllocator;
+using ::android::hidl::memory::V1_0::IMemory;
+using drm::V1_0::BufferType;
+using drm::V1_0::DestinationBuffer;
+using drm::V1_0::EventType;
+using drm::V1_0::ICryptoPlugin;
+using drm::V1_0::IDrmPlugin;
+using drm::V1_0::IDrmPluginListener;
+using drm::V1_0::KeyedVector;
+using drm::V1_0::KeyStatus;
+using drm::V1_0::KeyStatusType;
+using drm::V1_0::KeyType;
+using drm::V1_0::Mode;
+using drm::V1_0::Pattern;
+using drm::V1_0::SecureStop;
+using drm::V1_0::SharedBuffer;
+using drm::V1_0::Status;
+using drm::V1_0::SubSample;
+using drm::V1_1::DrmMetricGroup;
+using drm::V1_1::HdcpLevel;
+using drm::V1_1::SecureStopRelease;
+using drm::V1_1::SecurityLevel;
+using drm::V1_2::KeySetId;
+using drm::V1_2::OfflineLicenseState;
+using drm::V1_4::clearkey::ICryptoFactory;
+using drm::V1_4::clearkey::IDrmFactory;
+using drm::V1_4::clearkey::kAlgorithmsKey;
+using drm::V1_4::clearkey::kClientIdKey;
+using drm::V1_4::clearkey::kDeviceIdKey;
+using drm::V1_4::clearkey::kDrmErrorTestKey;
+using drm::V1_4::clearkey::kListenerTestSupportKey;
+using drm::V1_4::clearkey::kMetricsKey;
+using drm::V1_4::clearkey::kPluginDescriptionKey;
+using drm::V1_4::clearkey::kVendorKey;
+using drm::V1_4::clearkey::kVersionKey;
+
+typedef ::android::hardware::hidl_vec<uint8_t> SessionId;
+typedef ::android::hardware::hidl_vec<uint8_t> SecureStopId;
+
+static const uint8_t kInvalidUUID[] = {0x10, 0x20, 0x30, 0x40, 0x50, 0x60,
+                                       0x70, 0x80, 0x10, 0x20, 0x30, 0x40,
+                                       0x50, 0x60, 0x70, 0x80};
+
+static const uint8_t kClearKeyUUID[] = {0xE2, 0x71, 0x9D, 0x58, 0xA9, 0x85,
+                                        0xB3, 0xC9, 0x78, 0x1A, 0xB0, 0x30,
+                                        0xAF, 0x78, 0xD3, 0x0E};
+
+const SecurityLevel kSecurityLevel[] = {
+    SecurityLevel::UNKNOWN,          SecurityLevel::SW_SECURE_CRYPTO,
+    SecurityLevel::SW_SECURE_DECODE, SecurityLevel::HW_SECURE_CRYPTO,
+    SecurityLevel::HW_SECURE_DECODE, SecurityLevel::HW_SECURE_ALL};
+
+const char *kMimeType[] = {
+    "video/mp4",  "video/mpeg",  "video/x-flv",   "video/mj2",    "video/3gp2",
+    "video/3gpp", "video/3gpp2", "audio/mp4",     "audio/mpeg",   "audio/aac",
+    "audio/3gp2", "audio/3gpp",  "audio/3gpp2",   "audio/webm",   "video/webm",
+    "webm",       "cenc",        "video/unknown", "audio/unknown"};
+
+const char *kCipherAlgorithm[] = {"AES/CBC/NoPadding", ""};
+
+const char *kMacAlgorithm[] = {"HmacSHA256", ""};
+
+const char *kRSAAlgorithm[] = {"RSASSA-PSS-SHA1", ""};
+
+const std::string kProperty[] = {kVendorKey,
+                                 kVersionKey,
+                                 kPluginDescriptionKey,
+                                 kAlgorithmsKey,
+                                 kListenerTestSupportKey,
+                                 kDrmErrorTestKey,
+                                 kDeviceIdKey,
+                                 kClientIdKey,
+                                 kMetricsKey,
+                                 "placeholder"};
+
+const KeyType kKeyType[] = {KeyType::OFFLINE, KeyType::STREAMING,
+                            KeyType::RELEASE};
+
+const Mode kCryptoMode[] = {Mode::UNENCRYPTED, Mode::AES_CTR, Mode::AES_CBC_CTS,
+                            Mode::AES_CBC};
+
+const hidl_vec<uint8_t> validInitData = {
+    // BMFF box header (4 bytes size + 'pssh')
+    0x00, 0x00, 0x00, 0x34, 0x70, 0x73, 0x73, 0x68,
+    // full box header (version = 1 flags = 0)
+    0x01, 0x00, 0x00, 0x00,
+    // system id
+    0x10, 0x77, 0xef, 0xec, 0xc0, 0xb2, 0x4d, 0x02, 0xac, 0xe3, 0x3c, 0x1e,
+    0x52, 0xe2, 0xfb, 0x4b,
+    // number of key ids
+    0x00, 0x00, 0x00, 0x01,
+    // key id
+    0x60, 0x06, 0x1e, 0x01, 0x7e, 0x47, 0x7e, 0x87, 0x7e, 0x57, 0xd0, 0x0d,
+    0x1e, 0xd0, 0x0d, 0x1e,
+    // size of data, must be zero
+    0x00, 0x00, 0x00, 0x00};
+
+const hidl_vec<uint8_t> validKeyResponse = {
+    0x7b, 0x22, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x3a, 0x5b, 0x7b, 0x22,
+    0x6b, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x6f, 0x63, 0x74, 0x22, 0x2c,
+    0x22, 0x6b, 0x69, 0x64, 0x22, 0x3a, 0x22, 0x59, 0x41, 0x59, 0x65,
+    0x41, 0x58, 0x35, 0x48, 0x66, 0x6f, 0x64, 0x2d, 0x56, 0x39, 0x41,
+    0x4e, 0x48, 0x74, 0x41, 0x4e, 0x48, 0x67, 0x22, 0x2c, 0x22, 0x6b,
+    0x22, 0x3a, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x54, 0x65,
+    0x73, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x73, 0x65, 0x36, 0x34,
+    0x67, 0x67, 0x67, 0x22, 0x7d, 0x5d, 0x7d, 0x0a};
+
+const size_t kAESBlockSize = 16;
+const size_t kMaxStringLength = 100;
+const size_t kMaxSubSamples = 10;
+const size_t kMaxNumBytes = 1000;
+const size_t kSegmentIndex = 0;
+
+template <typename T, size_t size>
+T getValueFromArray(FuzzedDataProvider *fdp, const T (&arr)[size]) {
+  return arr[fdp->ConsumeIntegralInRange<int32_t>(0, size - 1)];
+}
+
+class TestDrmPluginListener : public IDrmPluginListener {
+public:
+  TestDrmPluginListener() {}
+  virtual ~TestDrmPluginListener() {}
+
+  virtual Return<void> sendEvent(EventType /*eventType*/,
+                                 const hidl_vec<uint8_t> & /*sessionId*/,
+                                 const hidl_vec<uint8_t> & /*data*/) override {
+    return Return<void>();
+  }
+
+  virtual Return<void>
+  sendExpirationUpdate(const hidl_vec<uint8_t> & /*sessionId*/,
+                       int64_t /*expiryTimeInMS*/) override {
+    return Return<void>();
+  }
+
+  virtual Return<void>
+  sendKeysChange(const hidl_vec<uint8_t> & /*sessionId*/,
+                 const hidl_vec<KeyStatus> & /*keyStatusList*/,
+                 bool /*hasNewUsableKey*/) override {
+    return Return<void>();
+  }
+};
+
+class ClearKeyFuzzer {
+public:
+  ~ClearKeyFuzzer() { deInit(); }
+  bool init();
+  void process(const uint8_t *data, size_t size);
+
+private:
+  void deInit();
+  void invokeDrmPlugin(const uint8_t *data, size_t size);
+  void invokeCryptoPlugin(const uint8_t *data);
+  void invokeDrm(const uint8_t *data, size_t size);
+  void invokeCrypto(const uint8_t *data);
+  void invokeDrmDecryptEncryptAPI(const uint8_t *data, size_t size);
+  bool invokeDrmFactory();
+  bool invokeCryptoFactory();
+  void invokeDrmV1_4API();
+  void invokeDrmSetAlgorithmAPI();
+  void invokeDrmPropertyAPI();
+  void invokeDrmSecureStopAPI();
+  void invokeDrmOfflineLicenseAPI(const uint8_t *data, size_t size);
+  SessionId getSessionId();
+  SecureStopRelease makeSecureRelease(const SecureStop &stop);
+  sp<IDrmFactory> mDrmFactory = nullptr;
+  sp<ICryptoFactory> mCryptoFactory = nullptr;
+  sp<IDrmPlugin> mDrmPlugin = nullptr;
+  sp<drm::V1_1::IDrmPlugin> mDrmPluginV1_1 = nullptr;
+  sp<drm::V1_2::IDrmPlugin> mDrmPluginV1_2 = nullptr;
+  sp<drm::V1_4::IDrmPlugin> mDrmPluginV1_4 = nullptr;
+  sp<drm::V1_4::ICryptoPlugin> mCryptoPluginV1_4 = nullptr;
+  sp<ICryptoPlugin> mCryptoPlugin = nullptr;
+  FuzzedDataProvider *mFDP = nullptr;
+  SessionId mSessionId = {};
+  SessionId mSessionIdV1 = {};
+};
+
+void ClearKeyFuzzer::deInit() {
+  if (mDrmPluginV1_1) {
+    mDrmPluginV1_1->closeSession(mSessionIdV1);
+  }
+  if (mDrmPluginV1_2) {
+    mDrmPluginV1_2->closeSession(mSessionId);
+  }
+  mDrmFactory.clear();
+  mCryptoFactory.clear();
+  mDrmPlugin.clear();
+  mDrmPluginV1_1.clear();
+  mDrmPluginV1_2.clear();
+  mDrmPluginV1_4.clear();
+  mCryptoPlugin.clear();
+  mCryptoPluginV1_4.clear();
+  mSessionId = {};
+  mSessionIdV1 = {};
+}
+
+void ClearKeyFuzzer::invokeDrmV1_4API() {
+  mDrmPluginV1_4->requiresSecureDecoderDefault(
+      getValueFromArray(mFDP, kMimeType));
+  mDrmPluginV1_4->requiresSecureDecoder(
+      getValueFromArray(mFDP, kMimeType),
+      getValueFromArray(mFDP, kSecurityLevel));
+  mDrmPluginV1_4->setPlaybackId(
+      mSessionId, mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str());
+  drm::V1_4::IDrmPlugin::getLogMessages_cb cb =
+      [&]([[maybe_unused]] drm::V1_4::Status status,
+          [[maybe_unused]] hidl_vec<drm::V1_4::LogMessage> logs) {};
+  mDrmPluginV1_4->getLogMessages(cb);
+}
+
+void ClearKeyFuzzer::invokeDrmSetAlgorithmAPI() {
+  const hidl_string cipherAlgo =
+      mFDP->ConsumeBool()
+          ? mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str()
+          : hidl_string(kCipherAlgorithm[mFDP->ConsumeBool()]);
+  mDrmPluginV1_2->setCipherAlgorithm(mSessionId, cipherAlgo);
+
+  const hidl_string macAlgo =
+      mFDP->ConsumeBool()
+          ? mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str()
+          : hidl_string(kMacAlgorithm[mFDP->ConsumeBool()]);
+  mDrmPluginV1_2->setMacAlgorithm(mSessionId, macAlgo);
+}
+
+void ClearKeyFuzzer::invokeDrmPropertyAPI() {
+  mDrmPluginV1_2->setPropertyString(
+      hidl_string(getValueFromArray(mFDP, kProperty)), hidl_string("value"));
+
+  hidl_string stringValue;
+  mDrmPluginV1_2->getPropertyString(
+      getValueFromArray(mFDP, kProperty),
+      [&](Status status, const hidl_string &hValue) {
+        if (status == Status::OK) {
+          stringValue = hValue;
+        }
+      });
+
+  hidl_vec<uint8_t> value = {};
+  mDrmPluginV1_2->setPropertyByteArray(
+      hidl_string(getValueFromArray(mFDP, kProperty)), value);
+
+  hidl_vec<uint8_t> byteValue;
+  mDrmPluginV1_2->getPropertyByteArray(
+      getValueFromArray(mFDP, kProperty),
+      [&](Status status, const hidl_vec<uint8_t> &hValue) {
+        if (status == Status::OK) {
+          byteValue = hValue;
+        }
+      });
+}
+
+SessionId ClearKeyFuzzer::getSessionId() {
+  SessionId emptySessionId = {};
+  return mFDP->ConsumeBool() ? mSessionId : emptySessionId;
+}
+
+void ClearKeyFuzzer::invokeDrmDecryptEncryptAPI(const uint8_t *data,
+                                                size_t size) {
+  uint32_t currSessions, maximumSessions;
+  mDrmPluginV1_2->getNumberOfSessions(
+      [&](Status status, uint32_t hCurrentSessions, uint32_t hMaxSessions) {
+        if (status == Status::OK) {
+          currSessions = hCurrentSessions;
+          maximumSessions = hMaxSessions;
+        }
+      });
+
+  HdcpLevel connected, maximum;
+  mDrmPluginV1_2->getHdcpLevels([&](Status status,
+                                    const HdcpLevel &hConnectedLevel,
+                                    const HdcpLevel &hMaxLevel) {
+    if (status == Status::OK) {
+      connected = hConnectedLevel;
+      maximum = hMaxLevel;
+    }
+  });
+
+  drm::V1_2::HdcpLevel connectedV1_2, maximumV1_2;
+  mDrmPluginV1_2->getHdcpLevels_1_2(
+      [&](drm::V1_2::Status status, const drm::V1_2::HdcpLevel &connectedLevel,
+          const drm::V1_2::HdcpLevel &maxLevel) {
+        if (status == drm::V1_2::Status::OK) {
+          connectedV1_2 = connectedLevel;
+          maximumV1_2 = maxLevel;
+        }
+      });
+
+  SecurityLevel securityLevel;
+  mDrmPluginV1_2->getSecurityLevel(mSessionId,
+                                   [&](Status status, SecurityLevel hLevel) {
+                                     if (status == Status::OK) {
+                                       securityLevel = hLevel;
+                                     }
+                                   });
+
+  hidl_vec<DrmMetricGroup> metrics;
+  mDrmPluginV1_2->getMetrics(
+      [&](Status status, hidl_vec<DrmMetricGroup> hMetricGroups) {
+        if (status == Status::OK) {
+          metrics = hMetricGroups;
+        }
+      });
+
+  hidl_string certificateType;
+  hidl_string certificateAuthority;
+  mDrmPluginV1_2->getProvisionRequest(certificateType, certificateAuthority,
+                                      [&]([[maybe_unused]] Status status,
+                                          const hidl_vec<uint8_t> &,
+                                          const hidl_string &) {});
+
+  mDrmPluginV1_2->getProvisionRequest_1_2(
+      certificateType, certificateAuthority,
+      [&]([[maybe_unused]] drm::V1_2::Status status, const hidl_vec<uint8_t> &,
+          const hidl_string &) {});
+
+  hidl_vec<uint8_t> response;
+  mDrmPluginV1_2->provideProvisionResponse(
+      response, [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &,
+                    const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> initData = {};
+  if (mFDP->ConsumeBool()) {
+    initData = validInitData;
+  } else {
+    initData.setToExternal(const_cast<uint8_t *>(data), kAESBlockSize);
+  }
+  hidl_string mimeType = getValueFromArray(mFDP, kMimeType);
+  KeyType keyType = mFDP->ConsumeBool()
+                        ? static_cast<KeyType>(mFDP->ConsumeIntegral<size_t>())
+                        : getValueFromArray(mFDP, kKeyType);
+  KeyedVector optionalParameters;
+  mDrmPluginV1_2->getKeyRequest_1_2(
+      mSessionId, initData, mimeType, keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_2::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_1::KeyRequestType, const hidl_string &) {});
+  mDrmPluginV1_1->getKeyRequest_1_1(
+      mSessionIdV1, initData, mimeType, keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_0::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_1::KeyRequestType, const hidl_string &) {});
+  hidl_vec<uint8_t> emptyInitData = {};
+  mDrmPlugin->getKeyRequest(
+      mSessionId, mFDP->ConsumeBool() ? initData : emptyInitData, mimeType,
+      keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_0::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_0::KeyRequestType, const hidl_string &) {});
+
+  hidl_vec<uint8_t> keyResponse = {};
+  if (mFDP->ConsumeBool()) {
+    keyResponse = validKeyResponse;
+  } else {
+    keyResponse.setToExternal(const_cast<uint8_t *>(data), size);
+  }
+  hidl_vec<uint8_t> keySetId;
+  hidl_vec<uint8_t> emptyKeyResponse = {};
+  mDrmPluginV1_2->provideKeyResponse(
+      getSessionId(), mFDP->ConsumeBool() ? keyResponse : emptyKeyResponse,
+      [&](Status status, const hidl_vec<uint8_t> &hKeySetId) {
+        if (status == Status::OK) {
+          keySetId = hKeySetId;
+        }
+      });
+
+  mDrmPluginV1_2->restoreKeys(getSessionId(), keySetId);
+
+  mDrmPluginV1_2->queryKeyStatus(
+      getSessionId(),
+      [&]([[maybe_unused]] Status status, KeyedVector /* info */) {});
+
+  hidl_vec<uint8_t> keyId, input, iv;
+  keyId.setToExternal(const_cast<uint8_t *>(data), size);
+  input.setToExternal(const_cast<uint8_t *>(data), size);
+  iv.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->encrypt(
+      getSessionId(), keyId, input, iv,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  mDrmPluginV1_2->decrypt(
+      getSessionId(), keyId, input, iv,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> message;
+  message.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->sign(
+      getSessionId(), keyId, message,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> signature;
+  signature.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->verify(getSessionId(), keyId, message, signature,
+                         [&]([[maybe_unused]] Status status, bool) {});
+
+  hidl_vec<uint8_t> wrappedKey;
+  signature.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->signRSA(
+      getSessionId(), kRSAAlgorithm[mFDP->ConsumeBool()], message, wrappedKey,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  mDrmPluginV1_2->removeKeys(getSessionId());
+}
+
+/**
+ * Helper function to create a secure release message for
+ * a secure stop. The clearkey secure stop release format
+ * is just a count followed by the secure stop opaque data.
+ */
+SecureStopRelease ClearKeyFuzzer::makeSecureRelease(const SecureStop &stop) {
+  std::vector<uint8_t> stopData = stop.opaqueData;
+  std::vector<uint8_t> buffer;
+  std::string count = "0001";
+
+  auto it = buffer.insert(buffer.begin(), count.begin(), count.end());
+  buffer.insert(it + count.size(), stopData.begin(), stopData.end());
+  SecureStopRelease release = {.opaqueData = hidl_vec<uint8_t>(buffer)};
+  return release;
+}
+
+void ClearKeyFuzzer::invokeDrmSecureStopAPI() {
+  SecureStopId ssid;
+  mDrmPluginV1_2->getSecureStop(
+      ssid, [&]([[maybe_unused]] Status status, const SecureStop &) {});
+
+  mDrmPluginV1_2->getSecureStopIds(
+      [&]([[maybe_unused]] Status status,
+          [[maybe_unused]] const hidl_vec<SecureStopId> &secureStopIds) {});
+
+  SecureStopRelease release;
+  mDrmPluginV1_2->getSecureStops(
+      [&]([[maybe_unused]] Status status, const hidl_vec<SecureStop> &stops) {
+        if (stops.size() > 0) {
+          release = makeSecureRelease(
+              stops[mFDP->ConsumeIntegralInRange<size_t>(0, stops.size() - 1)]);
+        }
+      });
+
+  mDrmPluginV1_2->releaseSecureStops(release);
+
+  mDrmPluginV1_2->removeSecureStop(ssid);
+
+  mDrmPluginV1_2->removeAllSecureStops();
+
+  mDrmPluginV1_2->releaseSecureStop(ssid);
+
+  mDrmPluginV1_2->releaseAllSecureStops();
+}
+
+void ClearKeyFuzzer::invokeDrmOfflineLicenseAPI(const uint8_t *data,
+                                                size_t size) {
+  hidl_vec<KeySetId> keySetIds = {};
+  mDrmPluginV1_2->getOfflineLicenseKeySetIds(
+      [&](Status status, const hidl_vec<KeySetId> &hKeySetIds) {
+        if (status == Status::OK) {
+          keySetIds = hKeySetIds;
+        }
+      });
+
+  OfflineLicenseState licenseState;
+  KeySetId keySetId = {};
+  if (keySetIds.size() > 0) {
+    keySetId = keySetIds[mFDP->ConsumeIntegralInRange<size_t>(
+        0, keySetIds.size() - 1)];
+  } else {
+    keySetId.setToExternal(const_cast<uint8_t *>(data), size);
+  }
+  mDrmPluginV1_2->getOfflineLicenseState(
+      keySetId, [&](Status status, OfflineLicenseState hLicenseState) {
+        if (status == Status::OK) {
+          licenseState = hLicenseState;
+        }
+      });
+
+  mDrmPluginV1_2->removeOfflineLicense(keySetId);
+}
+
+void ClearKeyFuzzer::invokeDrmPlugin(const uint8_t *data, size_t size) {
+  SecurityLevel secLevel =
+      mFDP->ConsumeBool()
+          ? getValueFromArray(mFDP, kSecurityLevel)
+          : static_cast<SecurityLevel>(mFDP->ConsumeIntegral<uint32_t>());
+  mDrmPluginV1_1->openSession_1_1(
+      secLevel, [&]([[maybe_unused]] Status status, const SessionId &id) {
+        mSessionIdV1 = id;
+      });
+  mDrmPluginV1_2->openSession([&]([[maybe_unused]] Status status,
+                                  const SessionId &id) { mSessionId = id; });
+
+  sp<TestDrmPluginListener> listener = new TestDrmPluginListener();
+  mDrmPluginV1_2->setListener(listener);
+  const hidl_vec<KeyStatus> keyStatusList = {
+      {{1}, KeyStatusType::USABLE},
+      {{2}, KeyStatusType::EXPIRED},
+      {{3}, KeyStatusType::OUTPUTNOTALLOWED},
+      {{4}, KeyStatusType::STATUSPENDING},
+      {{5}, KeyStatusType::INTERNALERROR},
+  };
+  mDrmPluginV1_2->sendKeysChange(mSessionId, keyStatusList, true);
+
+  invokeDrmV1_4API();
+  invokeDrmSetAlgorithmAPI();
+  invokeDrmPropertyAPI();
+  invokeDrmDecryptEncryptAPI(data, size);
+  invokeDrmSecureStopAPI();
+  invokeDrmOfflineLicenseAPI(data, size);
+}
+
+void ClearKeyFuzzer::invokeCryptoPlugin(const uint8_t *data) {
+  mCryptoPlugin->requiresSecureDecoderComponent(
+      getValueFromArray(mFDP, kMimeType));
+
+  const uint32_t width = mFDP->ConsumeIntegral<uint32_t>();
+  const uint32_t height = mFDP->ConsumeIntegral<uint32_t>();
+  mCryptoPlugin->notifyResolution(width, height);
+
+  mCryptoPlugin->setMediaDrmSession(mSessionId);
+
+  size_t totalSize = 0;
+  const size_t numSubSamples =
+      mFDP->ConsumeIntegralInRange<size_t>(1, kMaxSubSamples);
+
+  const Pattern pattern = {0, 0};
+  hidl_vec<SubSample> subSamples;
+  subSamples.resize(numSubSamples);
+
+  for (size_t i = 0; i < numSubSamples; ++i) {
+    const uint32_t clearBytes =
+        mFDP->ConsumeIntegralInRange<uint32_t>(0, kMaxNumBytes);
+    const uint32_t encryptedBytes =
+        mFDP->ConsumeIntegralInRange<uint32_t>(0, kMaxNumBytes);
+    subSamples[i].numBytesOfClearData = clearBytes;
+    subSamples[i].numBytesOfEncryptedData = encryptedBytes;
+    totalSize += subSamples[i].numBytesOfClearData;
+    totalSize += subSamples[i].numBytesOfEncryptedData;
+  }
+
+  // The first totalSize bytes of shared memory are the encrypted
+  // input; the next totalSize bytes are the decrypted output.
+  size_t memoryBytes = totalSize * 2;
+
+  sp<IAllocator> ashmemAllocator = IAllocator::getService("ashmem");
+  if (!ashmemAllocator.get()) {
+    return;
+  }
+
+  hidl_memory hidlMemory;
+  ashmemAllocator->allocate(memoryBytes, [&]([[maybe_unused]] bool success,
+                                             const hidl_memory &memory) {
+    mCryptoPlugin->setSharedBufferBase(memory, kSegmentIndex);
+    hidlMemory = memory;
+  });
+
+  sp<IMemory> mappedMemory = mapMemory(hidlMemory);
+  if (!mappedMemory.get()) {
+    return;
+  }
+  mCryptoPlugin->setSharedBufferBase(hidlMemory, kSegmentIndex);
+
+  uint32_t srcBufferId =
+      mFDP->ConsumeBool() ? kSegmentIndex : mFDP->ConsumeIntegral<uint32_t>();
+  const SharedBuffer sourceBuffer = {
+      .bufferId = srcBufferId, .offset = 0, .size = totalSize};
+
+  BufferType type = mFDP->ConsumeBool() ? BufferType::SHARED_MEMORY
+                                        : BufferType::NATIVE_HANDLE;
+  uint32_t destBufferId =
+      mFDP->ConsumeBool() ? kSegmentIndex : mFDP->ConsumeIntegral<uint32_t>();
+  const DestinationBuffer destBuffer = {
+      .type = type,
+      {.bufferId = destBufferId, .offset = totalSize, .size = totalSize},
+      .secureMemory = nullptr};
+
+  const uint64_t offset = 0;
+  uint32_t bytesWritten = 0;
+  hidl_array<uint8_t, kAESBlockSize> keyId =
+      hidl_array<uint8_t, kAESBlockSize>(data);
+  hidl_array<uint8_t, kAESBlockSize> iv =
+      hidl_array<uint8_t, kAESBlockSize>(data);
+  Mode mode = getValueFromArray(mFDP, kCryptoMode);
+  mCryptoPlugin->decrypt(
+      mFDP->ConsumeBool(), keyId, iv, mode, pattern, subSamples, sourceBuffer,
+      offset, destBuffer,
+      [&]([[maybe_unused]] Status status, uint32_t count,
+          [[maybe_unused]] string detailedError) { bytesWritten = count; });
+  drm::V1_4::IDrmPlugin::getLogMessages_cb cb =
+      [&]([[maybe_unused]] drm::V1_4::Status status,
+          [[maybe_unused]] hidl_vec<drm::V1_4::LogMessage> logs) {};
+  mCryptoPluginV1_4->getLogMessages(cb);
+}
+
+bool ClearKeyFuzzer::invokeDrmFactory() {
+  hidl_string packageName(
+      mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str());
+  hidl_string mimeType(getValueFromArray(mFDP, kMimeType));
+  SecurityLevel securityLevel =
+      mFDP->ConsumeBool()
+          ? getValueFromArray(mFDP, kSecurityLevel)
+          : static_cast<SecurityLevel>(mFDP->ConsumeIntegral<uint32_t>());
+  const hidl_array<uint8_t, 16> uuid =
+      mFDP->ConsumeBool() ? kClearKeyUUID : kInvalidUUID;
+  mDrmFactory->isCryptoSchemeSupported_1_2(uuid, mimeType, securityLevel);
+  mDrmFactory->createPlugin(
+      uuid, packageName, [&](Status status, const sp<IDrmPlugin> &plugin) {
+        if (status == Status::OK) {
+          mDrmPlugin = plugin.get();
+          mDrmPluginV1_1 = drm::V1_1::IDrmPlugin::castFrom(mDrmPlugin);
+          mDrmPluginV1_2 = drm::V1_2::IDrmPlugin::castFrom(mDrmPlugin);
+          mDrmPluginV1_4 = drm::V1_4::IDrmPlugin::castFrom(mDrmPlugin);
+        }
+      });
+
+  std::vector<hidl_array<uint8_t, 16>> supportedSchemes;
+  mDrmFactory->getSupportedCryptoSchemes(
+      [&](const hidl_vec<hidl_array<uint8_t, 16>> &schemes) {
+        for (const auto &scheme : schemes) {
+          supportedSchemes.push_back(scheme);
+        }
+      });
+
+  if (!(mDrmPlugin && mDrmPluginV1_1 && mDrmPluginV1_2 && mDrmPluginV1_4)) {
+    return false;
+  }
+  return true;
+}
+
+bool ClearKeyFuzzer::invokeCryptoFactory() {
+  const hidl_array<uint8_t, 16> uuid =
+      mFDP->ConsumeBool() ? kClearKeyUUID : kInvalidUUID;
+  mCryptoFactory->createPlugin(
+      uuid, mSessionId, [this](Status status, const sp<ICryptoPlugin> &plugin) {
+        if (status == Status::OK) {
+          mCryptoPlugin = plugin;
+          mCryptoPluginV1_4 = drm::V1_4::ICryptoPlugin::castFrom(mCryptoPlugin);
+        }
+      });
+
+  if (!mCryptoPlugin && !mCryptoPluginV1_4) {
+    return false;
+  }
+  return true;
+}
+
+void ClearKeyFuzzer::invokeDrm(const uint8_t *data, size_t size) {
+  if (!invokeDrmFactory()) {
+    return;
+  }
+  invokeDrmPlugin(data, size);
+}
+
+void ClearKeyFuzzer::invokeCrypto(const uint8_t *data) {
+  if (!invokeCryptoFactory()) {
+    return;
+  }
+  invokeCryptoPlugin(data);
+}
+
+void ClearKeyFuzzer::process(const uint8_t *data, size_t size) {
+  mFDP = new FuzzedDataProvider(data, size);
+  invokeDrm(data, size);
+  invokeCrypto(data);
+  delete mFDP;
+}
+
+bool ClearKeyFuzzer::init() {
+  mCryptoFactory =
+      android::hardware::drm::V1_4::clearkey::createCryptoFactory();
+  mDrmFactory = android::hardware::drm::V1_4::clearkey::createDrmFactory();
+  if (!mDrmFactory && !mCryptoFactory) {
+    return false;
+  }
+  return true;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  if (size < kAESBlockSize) {
+    return 0;
+  }
+  ClearKeyFuzzer clearKeyFuzzer;
+  if (clearKeyFuzzer.init()) {
+    clearKeyFuzzer.process(data, size);
+  }
+  return 0;
+}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index 5d6e3da..cb5c9fe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -432,8 +432,7 @@
         mMockError = Status_V1_2::OK;
     }
 
-    DeviceFiles mFileHandle GUARDED_BY(mFileHandleLock);
-    Mutex mFileHandleLock;
+    DeviceFiles mFileHandle;
     Mutex mSecureStopLock;
 
     CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
index a90d818..1d98860 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
@@ -5,9 +5,7 @@
 #ifndef CLEARKEY_MEMORY_FILE_SYSTEM_H_
 #define CLEARKEY_MEMORY_FILE_SYSTEM_H_
 
-#include <android-base/thread_annotations.h>
 #include <map>
-#include <mutex>
 #include <string>
 
 #include "ClearKeyTypes.h"
@@ -51,12 +49,10 @@
     size_t Write(const std::string& pathName, const MemoryFile& memoryFile);
 
  private:
-    mutable std::mutex mMemoryFileSystemLock;
-
     // License file name is made up of a unique keySetId, therefore,
     // the filename can be used as the key to locate licenses in the
     // memory file system.
-    std::map<std::string, MemoryFile> mMemoryFileSystem GUARDED_BY(mMemoryFileSystemLock);
+    std::map<std::string, MemoryFile> mMemoryFileSystem;
 
     std::string GetFileName(const std::string& path);
 
diff --git a/include/OWNERS b/include/OWNERS
index d6bd998..88de595 100644
--- a/include/OWNERS
+++ b/include/OWNERS
@@ -1,6 +1,5 @@
 elaurent@google.com
-gkasten@google.com
 hunga@google.com
 jtinker@google.com
 lajos@google.com
-marcone@google.com
+essick@google.com
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 200e92d..a1e1702 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -53,6 +53,84 @@
 //EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
 #define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 // assuming up to a maximum of 20 seconds of offloaded
 
+// for audio_track_cblk_t::mState, to match TrackBase.h
+static inline constexpr int CBLK_STATE_IDLE = 0;
+static inline constexpr int CBLK_STATE_ACTIVE = 6;
+static inline constexpr int CBLK_STATE_PAUSING = 7;
+
+/**
+ * MirroredVariable is a local variable which simultaneously updates
+ * a mirrored storage location.  This is useful for server-side variables
+ * where a local copy is kept, but a client-visible copy is offered through shared memory.
+ *
+ * We use std::atomic as the default container class to access this memory.
+ */
+template <typename T, template <typename> class Container = std::atomic>
+class MirroredVariable {
+    template <typename C>
+    struct Constraints {
+        // If setMirror is called with a different type U != T, then as a general
+        // rule the Container must issue a memcpy (or its equivalent) to read or
+        // write, to avoid possible strict aliasing issues.
+        // The memcpy also avoids gaps in structs and alignment issues with different types.
+        static constexpr bool ok_ = false;  // Containers must specify constraints.
+    };
+    template <typename X>
+    struct Constraints<std::atomic<X>> {
+        // Atomics force read and write to memory.
+        static constexpr bool ok = std::is_same_v<X, T> ||
+                (std::atomic<X>::is_always_lock_free                   // no additional locking
+                && sizeof(std::atomic<X>) == sizeof(X)                 // layout identical to X.
+                && (std::is_arithmetic_v<X> || std::is_enum_v<X>));    // No gaps in the layout.
+    };
+
+static_assert(Constraints<Container<T>>::ok);
+public:
+    explicit MirroredVariable(const T& t) : t_{t} {}
+
+    // implicit conversion operator
+    operator T() const {
+        return t_;
+    }
+
+    MirroredVariable& operator=(const T& t) {
+        t_ = t;
+        if (mirror_ != nullptr) {
+            *mirror_ = t;
+        }
+        return *this;
+    }
+
+    template <typename U>
+    void setMirror(Container<U> *other_mirror) {
+        // Much of the concern is with T != U, however there are additional concerns
+        // when storage uses shared memory between processes.  For atomics, it must be
+        // lock free.
+        static_assert(sizeof(U) == sizeof(T));
+        static_assert(alignof(U) == alignof(T));
+        static_assert(Constraints<Container<U>>::ok);
+        static_assert(sizeof(Container<U>) == sizeof(Container<T>));
+        static_assert(alignof(Container<U>) == alignof(Container<T>));
+        auto mirror = reinterpret_cast<Container<T>*>(other_mirror);
+        if (mirror_ != mirror) {
+            mirror_ = mirror;
+            if (mirror != nullptr) {
+                *mirror = t_;
+            }
+        }
+    }
+
+    void clear() {
+        mirror_ = nullptr;
+    }
+
+    MirroredVariable& operator&() const = delete;
+
+protected:
+    T t_{};
+    Container<T>* mirror_ = nullptr;
+};
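+
+// Usage sketch (illustrative only, not part of the API): a server-side value
+// mirrored into a client-visible shared-memory cell.
+//
+//   std::atomic<int32_t> sharedState{CBLK_STATE_IDLE};  // e.g. lives in the cblk
+//   MirroredVariable<int32_t> state{CBLK_STATE_IDLE};   // server-local copy
+//   state.setMirror(&sharedState);   // subsequent writes are mirrored
+//   state = CBLK_STATE_ACTIVE;       // updates both the local and mirrored copy
+//   state.clear();                   // detach the mirror; the local copy remains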
+
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
     // in continuously incrementing frame units, take modulo buffer size, which must be a power of 2
@@ -188,6 +266,8 @@
 
     volatile    int32_t     mFlags;         // combinations of CBLK_*
 
+                std::atomic<int32_t>  mState; // current TrackBase state.
+
 public:
                 union {
                     AudioTrackSharedStreaming   mStreaming;
@@ -198,6 +278,9 @@
                 // Cache line boundary (32 bytes)
 };
 
+// TODO: ensure standard layout.
+// static_assert(std::is_standard_layout_v<audio_track_cblk_t>);
+
 // ----------------------------------------------------------------------------
 
 // Proxy for shared memory control block, to isolate callers from needing to know the details.
@@ -323,6 +406,7 @@
         return mEpoch;
     }
 
+    int32_t getState() const { return mCblk->mState; }
     uint32_t      getBufferSizeInFrames() const { return mBufferSizeInFrames; }
     // See documentation for AudioTrack::setBufferSizeInFrames()
     uint32_t      setBufferSizeInFrames(uint32_t requestedSize);
diff --git a/media/OWNERS b/media/OWNERS
index 4cf4870..4a25b68 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -1,7 +1,6 @@
 # Bug component: 1344
 elaurent@google.com
 essick@google.com
-hkuang@google.com
 hunga@google.com
 jiabin@google.com
 jmtrivi@google.com
@@ -15,6 +14,7 @@
 robertshih@google.com
 taklee@google.com
 wonsik@google.com
+ytai@google.com
 
 # go/android-fwk-media-solutions for info on areas of ownership.
 include platform/frameworks/av:/media/janitors/media_solutions_OWNERS
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index 5bc7262..41fe080 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -3,18 +3,18 @@
     "presubmit-large": [
         // runs whenever we change something in this tree
         {
-            "name": "CtsMediaTestCases",
+            "name": "CtsMediaCodecTestCases",
             "options": [
                 {
-                    "include-filter": "android.media.cts.EncodeDecodeTest"
+                    "include-filter": "android.media.codec.cts.EncodeDecodeTest"
                 }
             ]
         },
         {
-            "name": "CtsMediaTestCases",
+            "name": "CtsMediaCodecTestCases",
             "options": [
                 {
-                    "include-filter": "android.media.cts.DecodeEditEncodeTest"
+                    "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
                 }
             ]
         }
@@ -46,18 +46,18 @@
         // runs regularly, independent of changes in this tree.
         // signals if changes elsewhere break media functionality
         {
-            "name": "CtsMediaTestCases",
+            "name": "CtsMediaCodecTestCases",
             "options": [
                 {
-                    "include-filter": "android.media.cts.EncodeDecodeTest"
+                    "include-filter": "android.media.codec.cts.EncodeDecodeTest"
                 }
             ]
         },
         {
-            "name": "CtsMediaTestCases",
+            "name": "CtsMediaCodecTestCases",
             "options": [
                 {
-                    "include-filter": "android.media.cts.DecodeEditEncodeTest"
+                    "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
                 }
             ]
         }
diff --git a/media/audioserver/Android.bp b/media/audioserver/Android.bp
index be25ffb..0b44700 100644
--- a/media/audioserver/Android.bp
+++ b/media/audioserver/Android.bp
@@ -25,7 +25,9 @@
     ],
 
     shared_libs: [
+        "packagemanager_aidl-cpp",
         "libaaudioservice",
+        "libaudioclient",
         "libaudioflinger",
         "libaudiopolicyservice",
         "libaudioprocessing",
@@ -41,7 +43,6 @@
         "libpowermanager",
         "libutils",
         "libvibrator",
-
     ],
 
     // TODO check if we still need all of these include directories
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 8ee1efb..e3db5b4 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -17,11 +17,17 @@
 #define LOG_TAG "audioserver"
 //#define LOG_NDEBUG 0
 
+#include <algorithm>
+
 #include <fcntl.h>
 #include <sys/prctl.h>
 #include <sys/wait.h>
 #include <cutils/properties.h>
 
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <android/media/IAudioFlingerService.h>
 #include <binder/IPCThreadState.h>
 #include <binder/ProcessState.h>
 #include <binder/IServiceManager.h>
@@ -30,7 +36,6 @@
 #include <utils/Log.h>
 
 // from include_dirs
-#include "aaudio/AAudioTesting.h" // aaudio_policy_t, AAUDIO_PROP_MMAP_POLICY, AAUDIO_POLICY_*
 #include "AudioFlinger.h"
 #include "AudioPolicyService.h"
 #include "AAudioService.h"
@@ -39,6 +44,10 @@
 
 using namespace android;
 
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
 int main(int argc __unused, char **argv)
 {
     // TODO: update with refined parameters
@@ -73,10 +82,8 @@
         IPCThreadState::self()->joinThreadPool();
         for (;;) {
             siginfo_t info;
-            int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
-            if (ret == EINTR) {
-                continue;
-            }
+            int ret = TEMP_FAILURE_RETRY(waitid(P_PID, childPid, &info,
+                                                WEXITED | WSTOPPED | WCONTINUED));
             if (ret < 0) {
                 break;
             }
@@ -146,10 +153,24 @@
         // AAudioService should only be used in OC-MR1 and later.
         // And only enable the AAudioService if the system MMAP policy explicitly allows it.
         // This prevents a client from misusing AAudioService when it is not supported.
-        aaudio_policy_t mmapPolicy = property_get_int32(AAUDIO_PROP_MMAP_POLICY,
-                                                        AAUDIO_POLICY_NEVER);
-        if (mmapPolicy == AAUDIO_POLICY_AUTO || mmapPolicy == AAUDIO_POLICY_ALWAYS) {
+        // If we cannot get audio flinger here, something is seriously wrong. In that case,
+        // attempting to call audio flinger on a null pointer could make the process crash
+        // and attract attention.
+        sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+        std::vector<AudioMMapPolicyInfo> policyInfos;
+        status_t status = af->getMmapPolicyInfos(
+                AudioMMapPolicyType::DEFAULT, &policyInfos);
+        // Initialize the AAudio service when querying the MMAP policy succeeds and
+        // any of the policies supports MMAP.
+        if (status == NO_ERROR &&
+            std::any_of(policyInfos.begin(), policyInfos.end(), [](const auto& info) {
+                    return info.mmapPolicy == AudioMMapPolicy::AUTO ||
+                           info.mmapPolicy == AudioMMapPolicy::ALWAYS;
+            })) {
             AAudioService::instantiate();
+        } else {
+            ALOGD("Do not init aaudio service, status %d, policy info size %zu",
+                  status, policyInfos.size());
         }
 
         ProcessState::self()->startThreadPool();
diff --git a/media/bufferpool/1.0/vts/OWNERS b/media/bufferpool/1.0/vts/OWNERS
index 6733e0c..db54d45 100644
--- a/media/bufferpool/1.0/vts/OWNERS
+++ b/media/bufferpool/1.0/vts/OWNERS
@@ -1,6 +1,5 @@
 # Media team
 lajos@google.com
-pawin@google.com
 taklee@google.com
 wonsik@google.com
 
diff --git a/media/bufferpool/2.0/Android.bp b/media/bufferpool/2.0/Android.bp
index 0d1fe27..930b026 100644
--- a/media/bufferpool/2.0/Android.bp
+++ b/media/bufferpool/2.0/Android.bp
@@ -40,6 +40,12 @@
     defaults: ["libstagefright_bufferpool@2.0-default"],
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+        "test_com.android.media.swcodec",
+    ],
+
     // TODO: b/147147992
     double_loadable: true,
     cflags: [
diff --git a/media/bufferpool/2.0/tests/OWNERS b/media/bufferpool/2.0/tests/OWNERS
index 6733e0c..db54d45 100644
--- a/media/bufferpool/2.0/tests/OWNERS
+++ b/media/bufferpool/2.0/tests/OWNERS
@@ -1,6 +1,5 @@
 # Media team
 lajos@google.com
-pawin@google.com
 taklee@google.com
 wonsik@google.com
 
diff --git a/media/codec2/OWNERS b/media/codec2/OWNERS
index 46a9fca..7d40041 100644
--- a/media/codec2/OWNERS
+++ b/media/codec2/OWNERS
@@ -1,5 +1,4 @@
 set noparent
 wonsik@google.com
 lajos@google.com
-pawin@google.com
 taklee@google.com
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index 6ac4210..f477f1c 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -15,13 +15,68 @@
         },
         {
           "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaAudioTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
         },
         // TODO: b/149314419
         {
-          "exclude-filter": "android.media.cts.AudioPlaybackCaptureTest"
+          "exclude-filter": "android.media.audio.cts.AudioPlaybackCaptureTest"
         },
         {
-          "exclude-filter": "android.media.cts.AudioRecordTest"
+          "exclude-filter": "android.media.audio.cts.AudioRecordTest"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaDecoderTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaEncoderTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaCodecTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaPlayerTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
         }
       ]
     }
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index c08cd59..c7985ca 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -505,124 +505,6 @@
     }
 }
 
-static void copyOutputBufferToYuvPlanarFrame(
-        uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
-        const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
-        size_t srcYStride, size_t srcUStride, size_t srcVStride,
-        size_t dstYStride, size_t dstUVStride,
-        uint32_t width, uint32_t height) {
-
-    for (size_t i = 0; i < height; ++i) {
-        memcpy(dstY, srcY, width);
-        srcY += srcYStride;
-        dstY += dstYStride;
-    }
-
-    for (size_t i = 0; i < height / 2; ++i) {
-        memcpy(dstV, srcV, width / 2);
-        srcV += srcVStride;
-        dstV += dstUVStride;
-    }
-
-    for (size_t i = 0; i < height / 2; ++i) {
-        memcpy(dstU, srcU, width / 2);
-        srcU += srcUStride;
-        dstU += dstUVStride;
-    }
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst,
-        const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
-        size_t srcYStride, size_t srcUStride, size_t srcVStride,
-        size_t dstStride, size_t width, size_t height) {
-
-    // Converting two lines at a time, slightly faster
-    for (size_t y = 0; y < height; y += 2) {
-        uint32_t *dstTop = (uint32_t *) dst;
-        uint32_t *dstBot = (uint32_t *) (dst + dstStride);
-        uint16_t *ySrcTop = (uint16_t*) srcY;
-        uint16_t *ySrcBot = (uint16_t*) (srcY + srcYStride);
-        uint16_t *uSrc = (uint16_t*) srcU;
-        uint16_t *vSrc = (uint16_t*) srcV;
-
-        uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
-        size_t x = 0;
-        for (; x < width - 3; x += 4) {
-
-            u01 = *((uint32_t*)uSrc); uSrc += 2;
-            v01 = *((uint32_t*)vSrc); vSrc += 2;
-
-            y01 = *((uint32_t*)ySrcTop); ySrcTop += 2;
-            y23 = *((uint32_t*)ySrcTop); ySrcTop += 2;
-            y45 = *((uint32_t*)ySrcBot); ySrcBot += 2;
-            y67 = *((uint32_t*)ySrcBot); ySrcBot += 2;
-
-            uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
-            uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
-            *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
-            *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
-            *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
-            *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
-            *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
-            *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
-            *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
-            *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
-        }
-
-        // There should be at most 2 more pixels to process. Note that we don't
-        // need to consider odd case as the buffer is always aligned to even.
-        if (x < width) {
-            u01 = *uSrc;
-            v01 = *vSrc;
-            y01 = *((uint32_t*)ySrcTop);
-            y45 = *((uint32_t*)ySrcBot);
-            uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
-            *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
-            *dstTop++ = ((y01 >> 16) << 10) | uv0;
-            *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
-            *dstBot++ = ((y45 >> 16) << 10) | uv0;
-        }
-
-        srcY += srcYStride * 2;
-        srcU += srcUStride;
-        srcV += srcVStride;
-        dst += dstStride * 2;
-    }
-
-    return;
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
-        uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
-        const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
-        size_t srcYStride, size_t srcUStride, size_t srcVStride,
-        size_t dstYStride, size_t dstUVStride,
-        size_t width, size_t height) {
-
-    for (size_t y = 0; y < height; ++y) {
-        for (size_t x = 0; x < width; ++x) {
-            dstY[x] = (uint8_t)(srcY[x] >> 2);
-        }
-
-        srcY += srcYStride;
-        dstY += dstYStride;
-    }
-
-    for (size_t y = 0; y < (height + 1) / 2; ++y) {
-        for (size_t x = 0; x < (width + 1) / 2; ++x) {
-            dstU[x] = (uint8_t)(srcU[x] >> 2);
-            dstV[x] = (uint8_t)(srcV[x] >> 2);
-        }
-
-        srcU += srcUStride;
-        srcV += srcVStride;
-        dstU += dstUVStride;
-        dstV += dstUVStride;
-    }
-    return;
-}
 bool C2SoftAomDec::outputBuffer(
         const std::shared_ptr<C2BlockPool> &pool,
         const std::unique_ptr<C2Work> &work)
@@ -711,21 +593,16 @@
                                     dstYStride / sizeof(uint32_t),
                                     mWidth, mHeight);
         } else {
-            convertYUV420Planar16ToYUV420Planar(dstY, dstU, dstV,
-                                    srcY, srcU, srcV,
-                                    srcYStride / 2, srcUStride / 2, srcVStride / 2,
-                                    dstYStride, dstUVStride,
-                                    mWidth, mHeight);
+            convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+                                        srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
+                                        mWidth, mHeight);
         }
     } else {
         const uint8_t *srcY = (const uint8_t *)img->planes[AOM_PLANE_Y];
         const uint8_t *srcU = (const uint8_t *)img->planes[AOM_PLANE_U];
         const uint8_t *srcV = (const uint8_t *)img->planes[AOM_PLANE_V];
-        copyOutputBufferToYuvPlanarFrame(
-                dstY, dstU, dstV, srcY, srcU, srcV,
-                srcYStride, srcUStride, srcVStride,
-                dstYStride, dstUVStride,
-                mWidth, mHeight);
+        convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+                                   srcVStride, dstYStride, dstUVStride, mWidth, mHeight);
     }
     finishWork(*(int64_t*)img->user_priv, work, std::move(block));
     block = nullptr;
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 6c4b7d9..99ff450 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -29,7 +29,179 @@
 #include <SimpleC2Component.h>
 
 namespace android {
+constexpr uint8_t kNeutralUVBitDepth8 = 128;
+constexpr uint16_t kNeutralUVBitDepth10 = 512;
 
+void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
+                                const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
+                                size_t srcUStride, size_t srcVStride, size_t dstYStride,
+                                size_t dstUVStride, uint32_t width, uint32_t height,
+                                bool isMonochrome) {
+    for (size_t i = 0; i < height; ++i) {
+        memcpy(dstY, srcY, width);
+        srcY += srcYStride;
+        dstY += dstYStride;
+    }
+
+    if (isMonochrome) {
+        // Fill with neutral U/V values.
+        for (size_t i = 0; i < height / 2; ++i) {
+            memset(dstV, kNeutralUVBitDepth8, width / 2);
+            memset(dstU, kNeutralUVBitDepth8, width / 2);
+            dstV += dstUVStride;
+            dstU += dstUVStride;
+        }
+        return;
+    }
+
+    for (size_t i = 0; i < height / 2; ++i) {
+        memcpy(dstV, srcV, width / 2);
+        srcV += srcVStride;
+        dstV += dstUVStride;
+    }
+
+    for (size_t i = 0; i < height / 2; ++i) {
+        memcpy(dstU, srcU, width / 2);
+        srcU += srcUStride;
+        dstU += dstUVStride;
+    }
+}
+
+void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+                                 const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+                                 size_t srcVStride, size_t dstStride, size_t width, size_t height) {
+    // Converting two lines at a time, slightly faster
+    for (size_t y = 0; y < height; y += 2) {
+        uint32_t *dstTop = (uint32_t *)dst;
+        uint32_t *dstBot = (uint32_t *)(dst + dstStride);
+        uint16_t *ySrcTop = (uint16_t *)srcY;
+        uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
+        uint16_t *uSrc = (uint16_t *)srcU;
+        uint16_t *vSrc = (uint16_t *)srcV;
+
+        uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
+        size_t x = 0;
+        for (; x < width - 3; x += 4) {
+            u01 = *((uint32_t *)uSrc);
+            uSrc += 2;
+            v01 = *((uint32_t *)vSrc);
+            vSrc += 2;
+
+            y01 = *((uint32_t *)ySrcTop);
+            ySrcTop += 2;
+            y23 = *((uint32_t *)ySrcTop);
+            ySrcTop += 2;
+            y45 = *((uint32_t *)ySrcBot);
+            ySrcBot += 2;
+            y67 = *((uint32_t *)ySrcBot);
+            ySrcBot += 2;
+
+            uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+            uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
+
+            *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
+            *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
+            *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
+            *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
+
+            *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
+            *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
+            *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
+            *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
+        }
+
+        // There should be at most 2 more pixels to process. Note that we don't
+        // need to consider the odd case, as the buffer is always aligned to an even width.
+        if (x < width) {
+            u01 = *uSrc;
+            v01 = *vSrc;
+            y01 = *((uint32_t *)ySrcTop);
+            y45 = *((uint32_t *)ySrcBot);
+            uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+            *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
+            *dstTop++ = ((y01 >> 16) << 10) | uv0;
+            *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
+            *dstBot++ = ((y45 >> 16) << 10) | uv0;
+        }
+
+        srcY += srcYStride * 2;
+        srcU += srcUStride;
+        srcV += srcVStride;
+        dst += dstStride * 2;
+    }
+}
+
+void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
+                                 const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+                                 size_t srcUStride, size_t srcVStride, size_t dstYStride,
+                                 size_t dstUVStride, size_t width, size_t height,
+                                 bool isMonochrome) {
+    for (size_t y = 0; y < height; ++y) {
+        for (size_t x = 0; x < width; ++x) {
+            dstY[x] = (uint8_t)(srcY[x] >> 2);
+        }
+        srcY += srcYStride;
+        dstY += dstYStride;
+    }
+
+    if (isMonochrome) {
+        // Fill with neutral U/V values.
+        for (size_t y = 0; y < (height + 1) / 2; ++y) {
+            memset(dstV, kNeutralUVBitDepth8, (width + 1) / 2);
+            memset(dstU, kNeutralUVBitDepth8, (width + 1) / 2);
+            dstV += dstUVStride;
+            dstU += dstUVStride;
+        }
+        return;
+    }
+
+    for (size_t y = 0; y < (height + 1) / 2; ++y) {
+        for (size_t x = 0; x < (width + 1) / 2; ++x) {
+            dstU[x] = (uint8_t)(srcU[x] >> 2);
+            dstV[x] = (uint8_t)(srcV[x] >> 2);
+        }
+        srcU += srcUStride;
+        srcV += srcVStride;
+        dstU += dstUVStride;
+        dstV += dstUVStride;
+    }
+}
+
+void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
+                                 const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+                                 size_t srcUStride, size_t srcVStride, size_t dstYStride,
+                                 size_t dstUVStride, size_t width, size_t height,
+                                 bool isMonochrome) {
+    for (size_t y = 0; y < height; ++y) {
+        for (size_t x = 0; x < width; ++x) {
+            dstY[x] = srcY[x] << 6;
+        }
+        srcY += srcYStride;
+        dstY += dstYStride;
+    }
+
+    if (isMonochrome) {
+        // Fill with neutral U/V values.
+        for (size_t y = 0; y < (height + 1) / 2; ++y) {
+            for (size_t x = 0; x < (width + 1) / 2; ++x) {
+                dstUV[2 * x] = kNeutralUVBitDepth10 << 6;
+                dstUV[2 * x + 1] = kNeutralUVBitDepth10 << 6;
+            }
+            dstUV += dstUVStride;
+        }
+        return;
+    }
+
+    for (size_t y = 0; y < (height + 1) / 2; ++y) {
+        for (size_t x = 0; x < (width + 1) / 2; ++x) {
+            dstUV[2 * x] = srcU[x] << 6;
+            dstUV[2 * x + 1] = srcV[x] << 6;
+        }
+        srcU += srcUStride;
+        srcV += srcVStride;
+        dstUV += dstUVStride;
+    }
+}
 std::unique_ptr<C2Work> SimpleC2Component::WorkQueue::pop_front() {
     std::unique_ptr<C2Work> work = std::move(mQueue.front().work);
     mQueue.pop_front();
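
The helpers added above (convertYUV420Planar8ToYV12, convertYUV420Planar16ToYV12, convertYUV420Planar16ToY410 and convertYUV420Planar16ToP010) centralize the per-decoder copy loops that the aom, gav1 and vpx components had been duplicating. A minimal standalone sketch of the bit-depth handling they implement, using hypothetical sample values rather than real decoder output:

// Standalone sketch (not part of the patch): mirrors the narrowing/widening done by
// convertYUV420Planar16ToYV12 (>> 2) and convertYUV420Planar16ToP010 (<< 6) above.
#include <cstdint>
#include <cstdio>

int main() {
    const uint16_t src10[] = {0, 512, 940, 1023};  // hypothetical 10-bit samples
    for (uint16_t s : src10) {
        uint8_t yv12 = (uint8_t)(s >> 2);    // drop the two low bits -> 8-bit YV12
        uint16_t p010 = (uint16_t)(s << 6);  // 10 bits in the MSBs -> 16-bit P010
        printf("10-bit %4u -> YV12 %3u, P010 0x%04X\n",
               (unsigned)s, (unsigned)yv12, (unsigned)p010);
    }
    // Neutral chroma used on the monochrome paths, matching the constants above:
    printf("neutral UV: 8-bit %u, P010 0x%04X\n", (unsigned)128, (unsigned)(512 << 6));
    return 0;
}

The neutral-chroma constants explain the monochrome branches: 128 is mid-range for 8-bit chroma, and 512 << 6 = 0x8000 is the equivalent in the 10-bit-in-16-bit P010 layout.
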
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index e5e16d8..3b4e212 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -28,6 +28,24 @@
 
 namespace android {
 
+void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
+                                const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
+                                size_t srcUStride, size_t srcVStride, size_t dstYStride,
+                                size_t dstUVStride, uint32_t width, uint32_t height,
+                                bool isMonochrome = false);
+void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+                                 const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+                                 size_t srcVStride, size_t dstStride, size_t width, size_t height);
+void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
+                                 const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+                                 size_t srcUStride, size_t srcVStride, size_t dstYStride,
+                                 size_t dstUVStride, size_t width, size_t height,
+                                 bool isMonochrome = false);
+void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
+                                 const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+                                 size_t srcUStride, size_t srcVStride, size_t dstYStride,
+                                 size_t dstUVStride, size_t width, size_t height,
+                                 bool isMonochrome = false);
 class SimpleC2Component
         : public C2Component, public std::enable_shared_from_this<SimpleC2Component> {
 public:
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 386e097..ffe72dc 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -20,6 +20,7 @@
 
 #include <C2Debug.h>
 #include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
 #include <Codec2Mapper.h>
 #include <SimpleC2Interface.h>
 #include <log/log.h>
@@ -27,11 +28,6 @@
 #include <media/stagefright/foundation/MediaDefs.h>
 
 namespace android {
-namespace {
-
-constexpr uint8_t NEUTRAL_UV_VALUE = 128;
-
-}  // namespace
 
 // codecname set and passed in as a compile flag from Android.bp
 constexpr char COMPONENT_NAME[] = CODECNAME;
@@ -339,6 +335,7 @@
           std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
       mIntf(intfImpl),
       mCodecCtx(nullptr) {
+  mIsFormatR10G10B10A2Supported = IsFormatR10G10B10A2SupportedForLegacyRendering();
   gettimeofday(&mTimeStart, nullptr);
   gettimeofday(&mTimeEnd, nullptr);
 }
@@ -544,150 +541,6 @@
   }
 }
 
-static void copyOutputBufferToYV12Frame(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
-                                        const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
-                                        size_t srcYStride, size_t srcUStride, size_t srcVStride,
-                                        size_t dstYStride, size_t dstUVStride,
-                                        uint32_t width, uint32_t height,
-                                        bool isMonochrome) {
-
-  for (size_t i = 0; i < height; ++i) {
-    memcpy(dstY, srcY, width);
-    srcY += srcYStride;
-    dstY += dstYStride;
-  }
-
-  if (isMonochrome) {
-    // Fill with neutral U/V values.
-    for (size_t i = 0; i < height / 2; ++i) {
-      memset(dstV, NEUTRAL_UV_VALUE, width / 2);
-      memset(dstU, NEUTRAL_UV_VALUE, width / 2);
-      dstV += dstUVStride;
-      dstU += dstUVStride;
-    }
-    return;
-  }
-
-  for (size_t i = 0; i < height / 2; ++i) {
-    memcpy(dstV, srcV, width / 2);
-    srcV += srcVStride;
-    dstV += dstUVStride;
-  }
-
-  for (size_t i = 0; i < height / 2; ++i) {
-    memcpy(dstU, srcU, width / 2);
-    srcU += srcUStride;
-    dstU += dstUVStride;
-  }
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY,
-                                        const uint16_t *srcU,
-                                        const uint16_t *srcV, size_t srcYStride,
-                                        size_t srcUStride, size_t srcVStride,
-                                        size_t dstStride, size_t width,
-                                        size_t height) {
-  // Converting two lines at a time, slightly faster
-  for (size_t y = 0; y < height; y += 2) {
-    uint32_t *dstTop = (uint32_t *)dst;
-    uint32_t *dstBot = (uint32_t *)(dst + dstStride);
-    uint16_t *ySrcTop = (uint16_t *)srcY;
-    uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
-    uint16_t *uSrc = (uint16_t *)srcU;
-    uint16_t *vSrc = (uint16_t *)srcV;
-
-    uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
-    size_t x = 0;
-    for (; x < width - 3; x += 4) {
-      u01 = *((uint32_t *)uSrc);
-      uSrc += 2;
-      v01 = *((uint32_t *)vSrc);
-      vSrc += 2;
-
-      y01 = *((uint32_t *)ySrcTop);
-      ySrcTop += 2;
-      y23 = *((uint32_t *)ySrcTop);
-      ySrcTop += 2;
-      y45 = *((uint32_t *)ySrcBot);
-      ySrcBot += 2;
-      y67 = *((uint32_t *)ySrcBot);
-      ySrcBot += 2;
-
-      uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
-      uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
-      *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
-      *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
-      *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
-      *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
-      *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
-      *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
-      *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
-      *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
-    }
-
-    // There should be at most 2 more pixels to process. Note that we don't
-    // need to consider odd case as the buffer is always aligned to even.
-    if (x < width) {
-      u01 = *uSrc;
-      v01 = *vSrc;
-      y01 = *((uint32_t *)ySrcTop);
-      y45 = *((uint32_t *)ySrcBot);
-      uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
-      *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
-      *dstTop++ = ((y01 >> 16) << 10) | uv0;
-      *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
-      *dstBot++ = ((y45 >> 16) << 10) | uv0;
-    }
-
-    srcY += srcYStride * 2;
-    srcU += srcUStride;
-    srcV += srcVStride;
-    dst += dstStride * 2;
-  }
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
-    uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
-    const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
-    size_t srcYStride, size_t srcUStride, size_t srcVStride,
-    size_t dstYStride, size_t dstUVStride,
-    size_t width, size_t height, bool isMonochrome) {
-
-  for (size_t y = 0; y < height; ++y) {
-    for (size_t x = 0; x < width; ++x) {
-      dstY[x] = (uint8_t)(srcY[x] >> 2);
-    }
-
-    srcY += srcYStride;
-    dstY += dstYStride;
-  }
-
-  if (isMonochrome) {
-    // Fill with neutral U/V values.
-    for (size_t y = 0; y < (height + 1) / 2; ++y) {
-      memset(dstV, NEUTRAL_UV_VALUE, (width + 1) / 2);
-      memset(dstU, NEUTRAL_UV_VALUE, (width + 1) / 2);
-      dstV += dstUVStride;
-      dstU += dstUVStride;
-    }
-    return;
-  }
-
-  for (size_t y = 0; y < (height + 1) / 2; ++y) {
-    for (size_t x = 0; x < (width + 1) / 2; ++x) {
-      dstU[x] = (uint8_t)(srcU[x] >> 2);
-      dstV[x] = (uint8_t)(srcV[x] >> 2);
-    }
-
-    srcU += srcUStride;
-    srcV += srcVStride;
-    dstU += dstUVStride;
-    dstV += dstUVStride;
-  }
-}
-
 void C2SoftGav1Dec::getVuiParams(const libgav1::DecoderBuffer *buffer) {
     VuiColorAspects vuiColorAspects;
     vuiColorAspects.primaries = buffer->color_primary;
@@ -791,7 +644,14 @@
         work->workletsProcessed = 1u;
         return false;
       }
-      format = HAL_PIXEL_FORMAT_RGBA_1010102;
+      // TODO (b/201787956) For devices that do not support HAL_PIXEL_FORMAT_RGBA_1010102,
+      // HAL_PIXEL_FORMAT_YV12 is used as a temporary workaround.
+      if (!mIsFormatR10G10B10A2Supported) {
+        ALOGE("HAL_PIXEL_FORMAT_RGBA_1010102 isn't supported");
+        format = HAL_PIXEL_FORMAT_YV12;
+      } else {
+        format = HAL_PIXEL_FORMAT_RGBA_1010102;
+      }
     }
   }
   C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
@@ -833,22 +693,24 @@
     const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
 
     if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
-      convertYUV420Planar16ToY410(
-          (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
-          srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight);
+        convertYUV420Planar16ToY410((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
+                                    srcUStride / 2, srcVStride / 2, dstYStride / sizeof(uint32_t),
+                                    mWidth, mHeight);
+    } else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
+        convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
+                                    srcYStride / 2, srcUStride / 2, srcVStride / 2, dstYStride / 2,
+                                    dstUVStride / 2, mWidth, mHeight, isMonochrome);
     } else {
-      convertYUV420Planar16ToYUV420Planar(
-          dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
-          srcVStride / 2, dstYStride, dstUVStride, mWidth, mHeight,
-          isMonochrome);
+        convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+                                    srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride, mWidth,
+                                    mHeight, isMonochrome);
     }
   } else {
     const uint8_t *srcY = (const uint8_t *)buffer->plane[0];
     const uint8_t *srcU = (const uint8_t *)buffer->plane[1];
     const uint8_t *srcV = (const uint8_t *)buffer->plane[2];
-    copyOutputBufferToYV12Frame(
-        dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride,
-        dstYStride, dstUVStride, mWidth, mHeight, isMonochrome);
+    convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+                               srcVStride, dstYStride, dstUVStride, mWidth, mHeight, isMonochrome);
   }
   finishWork(buffer->user_private_data, work, std::move(block));
   block = nullptr;
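
In the conversion hunk above, the strides handed to the 16-bit converters are divided by 2 (and by sizeof(uint32_t) for Y410) because C2PlanarLayout row increments and libgav1 plane strides are in bytes, while the converters index uint16_t or packed uint32_t elements. A standalone sketch of that unit conversion with illustrative values:

// Standalone sketch (not decoder code): byte strides vs element strides for the
// 16-bit converters called above.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
    const size_t dstYStrideBytes = 4096;  // illustrative graphic-block row increment
    // convertYUV420Planar16ToP010 walks uint16_t rows, so it takes elements per row:
    const size_t dstStrideU16 = dstYStrideBytes / sizeof(uint16_t);  // dstYStride / 2
    // convertYUV420Planar16ToY410 writes packed 32-bit pixels:
    const size_t dstStrideU32 = dstYStrideBytes / sizeof(uint32_t);  // dstYStride / sizeof(uint32_t)
    printf("bytes %zu -> uint16 elements %zu, uint32 pixels %zu\n",
           dstYStrideBytes, dstStrideU16, dstStrideU32);
    return 0;
}
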
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index 134fa0d..f82992d 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -82,6 +82,7 @@
 
   struct timeval mTimeStart;  // Time at the start of decode()
   struct timeval mTimeEnd;    // Time at the end of decode()
+  bool mIsFormatR10G10B10A2Supported;
 
   bool initDecoder();
   void getVuiParams(const libgav1::DecoderBuffer *buffer);
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index 4bc1777..b7a5686 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -245,6 +245,19 @@
                 })
                 .withSetter(CodedColorAspectsSetter, mColorAspects)
                 .build());
+
+        addParameter(
+                DefineParam(mPictureQuantization, C2_PARAMKEY_PICTURE_QUANTIZATION)
+                .withDefault(C2StreamPictureQuantizationTuning::output::AllocShared(
+                        0 /* flexCount */, 0u /* stream */))
+                .withFields({C2F(mPictureQuantization, m.values[0].type_).oneOf(
+                                {C2Config::picture_type_t(I_FRAME),
+                                  C2Config::picture_type_t(P_FRAME),
+                                  C2Config::picture_type_t(B_FRAME)}),
+                             C2F(mPictureQuantization, m.values[0].min).any(),
+                             C2F(mPictureQuantization, m.values[0].max).any()})
+                .withSetter(PictureQuantizationSetter)
+                .build());
     }
 
     static C2R InputDelaySetter(
@@ -464,9 +477,69 @@
         me.set().matrix = coded.v.matrix;
         return C2R::Ok();
     }
+    static C2R PictureQuantizationSetter(bool mayBlock,
+                                         C2P<C2StreamPictureQuantizationTuning::output> &me) {
+        (void)mayBlock;
+
+        // these are the ones we're going to set, so we want them to default
+        // to the DEFAULT values for the codec
+        int32_t iMin = HEVC_QP_MIN, pMin = HEVC_QP_MIN, bMin = HEVC_QP_MIN;
+        int32_t iMax = HEVC_QP_MAX, pMax = HEVC_QP_MAX, bMax = HEVC_QP_MAX;
+
+        for (size_t i = 0; i < me.v.flexCount(); ++i) {
+            const C2PictureQuantizationStruct &layer = me.v.m.values[i];
+
+            // layerMin is clamped to [HEVC_QP_MIN, layerMax] to avoid error
+            // cases where layer.min > layer.max
+            int32_t layerMax = std::clamp(layer.max, HEVC_QP_MIN, HEVC_QP_MAX);
+            int32_t layerMin = std::clamp(layer.min, HEVC_QP_MIN, layerMax);
+            if (layer.type_ == C2Config::picture_type_t(I_FRAME)) {
+                iMax = layerMax;
+                iMin = layerMin;
+                ALOGV("iMin %d iMax %d", iMin, iMax);
+            } else if (layer.type_ == C2Config::picture_type_t(P_FRAME)) {
+                pMax = layerMax;
+                pMin = layerMin;
+                ALOGV("pMin %d pMax %d", pMin, pMax);
+            } else if (layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+                bMax = layerMax;
+                bMin = layerMin;
+                ALOGV("bMin %d bMax %d", bMin, bMax);
+            }
+        }
+
+        ALOGV("PictureQuantizationSetter(entry): i %d-%d p %d-%d b %d-%d",
+              iMin, iMax, pMin, pMax, bMin, bMax);
+
+        int32_t maxFrameQP = std::min(std::min(iMax, pMax), bMax);
+        int32_t minFrameQP = std::max(std::max(iMin, pMin), bMin);
+        if (minFrameQP > maxFrameQP) {
+            minFrameQP = maxFrameQP;
+        }
+
+        // put them back into the structure
+        for (size_t i = 0; i < me.v.flexCount(); ++i) {
+            const C2PictureQuantizationStruct &layer = me.v.m.values[i];
+
+            if (layer.type_ == C2Config::picture_type_t(I_FRAME) ||
+                layer.type_ == C2Config::picture_type_t(P_FRAME) ||
+                layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+                me.set().m.values[i].max = maxFrameQP;
+                me.set().m.values[i].min = minFrameQP;
+            }
+        }
+
+        ALOGV("PictureQuantizationSetter(exit): i = p = b = %d-%d",
+              minFrameQP, maxFrameQP);
+
+        return C2R::Ok();
+    }
     std::shared_ptr<C2StreamColorAspectsInfo::output> getCodedColorAspects_l() {
         return mCodedColorAspects;
     }
+    std::shared_ptr<C2StreamPictureQuantizationTuning::output> getPictureQuantization_l() const {
+        return mPictureQuantization;
+    }
 
    private:
     std::shared_ptr<C2StreamUsageTuning::input> mUsage;
@@ -482,6 +555,7 @@
     std::shared_ptr<C2StreamGopTuning::output> mGop;
     std::shared_ptr<C2StreamColorAspectsInfo::input> mColorAspects;
     std::shared_ptr<C2StreamColorAspectsInfo::output> mCodedColorAspects;
+    std::shared_ptr<C2StreamPictureQuantizationTuning::output> mPictureQuantization;
 };
 
 static size_t GetCPUCoreCount() {
@@ -654,12 +728,41 @@
         mEncParams.s_coding_tools_prms.i4_max_temporal_layers = 3;
     }
 
-    switch (mBitrateMode->value) {
-        case C2Config::BITRATE_IGNORE:
-            mEncParams.s_config_prms.i4_rate_control_mode = 3;
-            mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
-                getQpFromQuality(mQuality->value);
+    // we resolved out-of-bounds and unspecified values in PictureQuantizationSetter()
+    // so we can start with defaults that are overridden as needed.
+    int32_t maxFrameQP = mEncParams.s_config_prms.i4_max_frame_qp;
+    int32_t minFrameQP = mEncParams.s_config_prms.i4_min_frame_qp;
+
+    for (size_t i = 0; i < mQpBounds->flexCount(); ++i) {
+        const C2PictureQuantizationStruct &layer = mQpBounds->m.values[i];
+
+        // no need to loop: the hevc library takes the same range for I/P/B picture types
+        if (layer.type_ == C2Config::picture_type_t(I_FRAME) ||
+            layer.type_ == C2Config::picture_type_t(P_FRAME) ||
+            layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+
+            maxFrameQP = layer.max;
+            minFrameQP = layer.min;
             break;
+        }
+    }
+    mEncParams.s_config_prms.i4_max_frame_qp = maxFrameQP;
+    mEncParams.s_config_prms.i4_min_frame_qp = minFrameQP;
+
+    ALOGV("MaxFrameQp: %d MinFrameQp: %d", maxFrameQP, minFrameQP);
+
+    mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
+        std::clamp(kDefaultInitQP, minFrameQP, maxFrameQP);
+
+    switch (mBitrateMode->value) {
+        case C2Config::BITRATE_IGNORE: {
+            mEncParams.s_config_prms.i4_rate_control_mode = 3;
+            // ensure initial qp values are within our newly configured bounds
+            int32_t frameQp = getQpFromQuality(mQuality->value);
+            mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
+                std::clamp(frameQp, minFrameQP, maxFrameQP);
+            break;
+        }
         case C2Config::BITRATE_CONST:
             mEncParams.s_config_prms.i4_rate_control_mode = 5;
             break;
@@ -723,6 +826,7 @@
         mGop = mIntf->getGop_l();
         mRequestSync = mIntf->getRequestSync_l();
         mColorAspects = mIntf->getCodedColorAspects_l();
+        mQpBounds = mIntf->getPictureQuantization_l();
     }
 
     c2_status_t status = initEncParams();
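
PictureQuantizationSetter above collapses the per-picture-type QP bounds into a single frame-level range because the HEVC encoder library accepts only one i4_min_frame_qp/i4_max_frame_qp pair. A simplified standalone sketch of that resolution under the same 1..51 limits (Layer and resolveFrameQpBounds are illustrative names, and the per-type bookkeeping is folded into one pass):

// Simplified standalone sketch of the bound resolution in PictureQuantizationSetter
// above; Layer stands in for C2PictureQuantizationStruct.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

constexpr int kQpMin = 1;   // HEVC_QP_MIN
constexpr int kQpMax = 51;  // HEVC_QP_MAX

struct Layer { int min; int max; };

// Returns one {min, max} pair that is valid for I, P and B frames at once.
std::pair<int, int> resolveFrameQpBounds(const std::vector<Layer>& layers) {
    int minFrameQP = kQpMin;
    int maxFrameQP = kQpMax;
    for (const Layer& l : layers) {
        // Clamp each layer the same way the setter does, so layerMin <= layerMax.
        int layerMax = std::clamp(l.max, kQpMin, kQpMax);
        int layerMin = std::clamp(l.min, kQpMin, layerMax);
        maxFrameQP = std::min(maxFrameQP, layerMax);
        minFrameQP = std::max(minFrameQP, layerMin);
    }
    if (minFrameQP > maxFrameQP) minFrameQP = maxFrameQP;
    return {minFrameQP, maxFrameQP};
}

int main() {
    auto [mn, mx] = resolveFrameQpBounds({{10, 40}, {12, 45}, {0, 60}});
    printf("frame QP range: %d..%d\n", mn, mx);  // 12..40
    return 0;
}
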
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
index 9dbf682..4217a8b 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.h
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -42,6 +42,11 @@
 #define DEFAULT_B_FRAMES     0
 #define DEFAULT_RC_LOOKAHEAD 0
 
+#define HEVC_QP_MIN 1
+#define HEVC_QP_MAX 51
+
+constexpr int32_t kDefaultInitQP = 32;
+
 struct C2SoftHevcEnc : public SimpleC2Component {
     class IntfImpl;
 
@@ -90,6 +95,7 @@
     std::shared_ptr<C2StreamGopTuning::output> mGop;
     std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
     std::shared_ptr<C2StreamColorAspectsInfo::output> mColorAspects;
+    std::shared_ptr<C2StreamPictureQuantizationTuning::output> mQpBounds;
 #ifdef FILE_DUMP_ENABLE
     char mInFile[200];
     char mOutFile[200];
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 81f4679..54a1d0e 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -419,40 +419,6 @@
     return resChanged;
 }
 
-/* TODO: can remove temporary copy after library supports writing to display
- * buffer Y, U and V plane pointers using stride info. */
-static void copyOutputBufferToYuvPlanarFrame(
-        uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, uint8_t *src,
-        size_t dstYStride, size_t dstUVStride,
-        size_t srcYStride, uint32_t width,
-        uint32_t height) {
-    size_t srcUVStride = srcYStride / 2;
-    uint8_t *srcStart = src;
-
-    size_t vStride = align(height, 16);
-    for (size_t i = 0; i < height; ++i) {
-         memcpy(dstY, src, width);
-         src += srcYStride;
-         dstY += dstYStride;
-    }
-
-    /* U buffer */
-    src = srcStart + vStride * srcYStride;
-    for (size_t i = 0; i < height / 2; ++i) {
-         memcpy(dstU, src, width / 2);
-         src += srcUVStride;
-         dstU += dstUVStride;
-    }
-
-    /* V buffer */
-    src = srcStart + vStride * srcYStride * 5 / 4;
-    for (size_t i = 0; i < height / 2; ++i) {
-         memcpy(dstV, src, width / 2);
-         src += srcUVStride;
-         dstV += dstUVStride;
-    }
-}
-
 void C2SoftMpeg4Dec::process(
         const std::unique_ptr<C2Work> &work,
         const std::shared_ptr<C2BlockPool> &pool) {
@@ -636,11 +602,17 @@
         C2PlanarLayout layout = wView.layout();
         size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
         size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
-        (void)copyOutputBufferToYuvPlanarFrame(
-                outputBufferY, outputBufferU, outputBufferV,
-                mOutputBuffer[mNumSamplesOutput & 1],
-                dstYStride, dstUVStride,
-                align(mWidth, 16), mWidth, mHeight);
+        size_t srcYStride = align(mWidth, 16);
+        size_t srcUStride = srcYStride / 2;
+        size_t srcVStride = srcYStride / 2;
+        size_t vStride = align(mHeight, 16);
+        const uint8_t *srcY = (const uint8_t *)mOutputBuffer[mNumSamplesOutput & 1];
+        const uint8_t *srcU = (const uint8_t *)srcY + vStride * srcYStride;
+        const uint8_t *srcV = (const uint8_t *)srcY + vStride * srcYStride * 5 / 4;
+
+        convertYUV420Planar8ToYV12(outputBufferY, outputBufferU, outputBufferV, srcY, srcU, srcV,
+                                   srcYStride, srcUStride, srcVStride, dstYStride, dstUVStride,
+                                   mWidth, mHeight);
 
         inPos += inSize - (size_t)tmpInSize;
         finishWork(workIndex, work);
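
The rewritten hunk above computes the decoder's source plane pointers directly from its 16-aligned working buffer instead of going through the removed copy helper: the U plane starts after the aligned Y plane, and the V plane after Y plus a quarter-size U plane. A standalone sketch of that offset arithmetic with illustrative QCIF dimensions:

// Standalone sketch (not decoder code) of the source-plane offsets computed above
// for the 16-aligned MPEG4/H.263 working buffer.
#include <cstddef>
#include <cstdio>

static size_t align16(size_t v) { return (v + 15) & ~static_cast<size_t>(15); }

int main() {
    const size_t width = 176, height = 144;     // illustrative QCIF frame
    const size_t srcYStride = align16(width);   // luma row stride
    const size_t vStride = align16(height);     // aligned plane height
    const size_t uOffset = vStride * srcYStride;           // U starts after Y
    const size_t vOffset = vStride * srcYStride * 5 / 4;   // V starts after Y + U
    printf("Y stride %zu, U offset %zu, V offset %zu, chroma stride %zu\n",
           srcYStride, uOffset, vOffset, srcYStride / 2);
    return 0;
}
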
diff --git a/media/codec2/components/tests/Android.bp b/media/codec2/components/tests/Android.bp
index 3c68eee..be2abf2 100644
--- a/media/codec2/components/tests/Android.bp
+++ b/media/codec2/components/tests/Android.bp
@@ -9,44 +9,13 @@
 
 cc_defaults {
     name: "C2SoftCodecTest-defaults",
+    defaults: [ "libcodec2-static-defaults" ],
     gtest: true,
     host_supported: false,
     srcs: [
         "C2SoftCodecTest.cpp",
     ],
 
-    static_libs: [
-        "liblog",
-        "libion",
-        "libfmq",
-        "libbase",
-        "libutils",
-        "libcutils",
-        "libcodec2",
-        "libhidlbase",
-        "libdmabufheap",
-        "libcodec2_vndk",
-        "libnativewindow",
-        "libcodec2_soft_common",
-        "libsfplugin_ccodec_utils",
-        "libstagefright_foundation",
-        "libstagefright_bufferpool@2.0.1",
-        "android.hardware.graphics.mapper@2.0",
-        "android.hardware.graphics.mapper@3.0",
-        "android.hardware.media.bufferpool@2.0",
-        "android.hardware.graphics.allocator@2.0",
-        "android.hardware.graphics.allocator@3.0",
-        "android.hardware.graphics.bufferqueue@2.0",
-    ],
-
-    shared_libs: [
-        "libui",
-        "libdl",
-        "libhardware",
-        "libvndksupport",
-        "libprocessgroup",
-    ],
-
     cflags: [
         "-Wall",
         "-Werror",
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 45e2ca8..0a27821 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -25,6 +25,7 @@
 
 #include <C2Debug.h>
 #include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
 #include <SimpleC2Interface.h>
 
 #include "C2SoftVpxDec.h"
@@ -351,6 +352,7 @@
       mCodecCtx(nullptr),
       mCoreCount(1),
       mQueue(new Mutexed<ConversionQueue>) {
+      mIsFormatR10G10B10A2Supported = IsFormatR10G10B10A2SupportedForLegacyRendering();
 }
 
 C2SoftVpxDec::~C2SoftVpxDec() {
@@ -638,125 +640,6 @@
     }
 }
 
-static void copyOutputBufferToYuvPlanarFrame(
-        uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
-        const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
-        size_t srcYStride, size_t srcUStride, size_t srcVStride,
-        size_t dstYStride, size_t dstUVStride,
-        uint32_t width, uint32_t height) {
-
-    for (size_t i = 0; i < height; ++i) {
-         memcpy(dstY, srcY, width);
-         srcY += srcYStride;
-         dstY += dstYStride;
-    }
-
-    for (size_t i = 0; i < height / 2; ++i) {
-         memcpy(dstV, srcV, width / 2);
-         srcV += srcVStride;
-         dstV += dstUVStride;
-    }
-
-    for (size_t i = 0; i < height / 2; ++i) {
-         memcpy(dstU, srcU, width / 2);
-         srcU += srcUStride;
-         dstU += dstUVStride;
-    }
-
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst,
-        const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
-        size_t srcYStride, size_t srcUStride, size_t srcVStride,
-        size_t dstStride, size_t width, size_t height) {
-
-    // Converting two lines at a time, slightly faster
-    for (size_t y = 0; y < height; y += 2) {
-        uint32_t *dstTop = (uint32_t *) dst;
-        uint32_t *dstBot = (uint32_t *) (dst + dstStride);
-        uint16_t *ySrcTop = (uint16_t*) srcY;
-        uint16_t *ySrcBot = (uint16_t*) (srcY + srcYStride);
-        uint16_t *uSrc = (uint16_t*) srcU;
-        uint16_t *vSrc = (uint16_t*) srcV;
-
-        uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
-        size_t x = 0;
-        for (; x < width - 3; x += 4) {
-
-            u01 = *((uint32_t*)uSrc); uSrc += 2;
-            v01 = *((uint32_t*)vSrc); vSrc += 2;
-
-            y01 = *((uint32_t*)ySrcTop); ySrcTop += 2;
-            y23 = *((uint32_t*)ySrcTop); ySrcTop += 2;
-            y45 = *((uint32_t*)ySrcBot); ySrcBot += 2;
-            y67 = *((uint32_t*)ySrcBot); ySrcBot += 2;
-
-            uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
-            uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
-            *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
-            *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
-            *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
-            *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
-            *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
-            *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
-            *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
-            *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
-        }
-
-        // There should be at most 2 more pixels to process. Note that we don't
-        // need to consider odd case as the buffer is always aligned to even.
-        if (x < width) {
-            u01 = *uSrc;
-            v01 = *vSrc;
-            y01 = *((uint32_t*)ySrcTop);
-            y45 = *((uint32_t*)ySrcBot);
-            uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
-            *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
-            *dstTop++ = ((y01 >> 16) << 10) | uv0;
-            *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
-            *dstBot++ = ((y45 >> 16) << 10) | uv0;
-        }
-
-        srcY += srcYStride * 2;
-        srcU += srcUStride;
-        srcV += srcVStride;
-        dst += dstStride * 2;
-    }
-
-    return;
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
-        uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
-        const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
-        size_t srcYStride, size_t srcUStride, size_t srcVStride,
-        size_t dstYStride, size_t dstUVStride,
-        size_t width, size_t height) {
-
-    for (size_t y = 0; y < height; ++y) {
-        for (size_t x = 0; x < width; ++x) {
-            dstY[x] = (uint8_t)(srcY[x] >> 2);
-        }
-
-        srcY += srcYStride;
-        dstY += dstYStride;
-    }
-
-    for (size_t y = 0; y < (height + 1) / 2; ++y) {
-        for (size_t x = 0; x < (width + 1) / 2; ++x) {
-            dstU[x] = (uint8_t)(srcU[x] >> 2);
-            dstV[x] = (uint8_t)(srcV[x] >> 2);
-        }
-
-        srcU += srcUStride;
-        srcV += srcVStride;
-        dstU += dstUVStride;
-        dstV += dstUVStride;
-    }
-    return;
-}
 status_t C2SoftVpxDec::outputBuffer(
         const std::shared_ptr<C2BlockPool> &pool,
         const std::unique_ptr<C2Work> &work)
@@ -804,7 +687,14 @@
         if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
             defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
             defaultColorAspects->transfer == C2Color::TRANSFER_ST2084) {
-            format = HAL_PIXEL_FORMAT_RGBA_1010102;
+            // TODO (b/201787956) For devices that do not support HAL_PIXEL_FORMAT_RGBA_1010102,
+            // HAL_PIXEL_FORMAT_YV12 is used as a temporary workaround.
+            if (!mIsFormatR10G10B10A2Supported) {
+                ALOGE("HAL_PIXEL_FORMAT_RGBA_1010102 isn't supported");
+                format = HAL_PIXEL_FORMAT_YV12;
+            } else {
+                format = HAL_PIXEL_FORMAT_RGBA_1010102;
+            }
         }
     }
     C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
@@ -867,24 +757,22 @@
                 queue->cond.signal();
                 queue.waitForCondition(queue->cond);
             }
+        } else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
+            convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
+                                        srcYStride / 2, srcUStride / 2, srcVStride / 2,
+                                        dstYStride / 2, dstUVStride / 2, mWidth, mHeight);
         } else {
-            convertYUV420Planar16ToYUV420Planar(dstY, dstU, dstV,
-                                                srcY, srcU, srcV,
-                                                srcYStride / 2, srcUStride / 2, srcVStride / 2,
-                                                dstYStride, dstUVStride,
-                                                mWidth, mHeight);
+            convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+                                        srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
+                                        mWidth, mHeight);
         }
     } else {
         const uint8_t *srcY = (const uint8_t *)img->planes[VPX_PLANE_Y];
         const uint8_t *srcU = (const uint8_t *)img->planes[VPX_PLANE_U];
         const uint8_t *srcV = (const uint8_t *)img->planes[VPX_PLANE_V];
 
-        copyOutputBufferToYuvPlanarFrame(
-                dstY, dstU, dstV,
-                srcY, srcU, srcV,
-                srcYStride, srcUStride, srcVStride,
-                dstYStride, dstUVStride,
-                mWidth, mHeight);
+        convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+                                   srcVStride, dstYStride, dstUVStride, mWidth, mHeight);
     }
     finishWork(((c2_cntr64_t *)img->user_priv)->peekull(), work, std::move(block));
     return OK;
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.h b/media/codec2/components/vpx/C2SoftVpxDec.h
index 2065165..ade162d 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.h
+++ b/media/codec2/components/vpx/C2SoftVpxDec.h
@@ -80,7 +80,7 @@
     };
     std::shared_ptr<Mutexed<ConversionQueue>> mQueue;
     std::vector<sp<ConverterThread>> mConverterThreads;
-
+    bool mIsFormatR10G10B10A2Supported;
     status_t initDecoder();
     status_t destroyDecoder();
     void finishWork(uint64_t index, const std::unique_ptr<C2Work> &work,
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 2cc7ab7..70e742c 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -59,6 +59,7 @@
     enum drc_compression_mode_t : int32_t;  ///< DRC compression mode
     enum drc_effect_type_t : int32_t;       ///< DRC effect type
     enum drc_album_mode_t : int32_t;        ///< DRC album mode
+    enum hdr_dynamic_metadata_type_t : uint32_t;  ///< HDR dynamic metadata type
     enum intra_refresh_mode_t : uint32_t;   ///< intra refresh modes
     enum level_t : uint32_t;                ///< coding level
     enum ordinal_key_t : uint32_t;          ///< work ordering keys
@@ -189,10 +190,13 @@
 
     kParamIndexPictureTypeMask,
     kParamIndexPictureType,
+    // deprecated
     kParamIndexHdr10PlusMetadata,
 
     kParamIndexPictureQuantization,
 
+    kParamIndexHdrDynamicMetadata,
+
     /* ------------------------------------ video components ------------------------------------ */
 
     kParamIndexFrameRate = C2_PARAM_INDEX_VIDEO_PARAM_START,
@@ -270,6 +274,9 @@
 
     // encoding quality requirements
     kParamIndexEncodingQualityLevel, // encoders, enum
+
+    // encoding statistics, average block qp of a frame
+    kParamIndexAverageBlockQuantization, // int32
 };
 
 }
@@ -680,6 +687,9 @@
     LEVEL_DV_MAIN_UHD_30,                       ///< Dolby Vision main tier uhd30
     LEVEL_DV_MAIN_UHD_48,                       ///< Dolby Vision main tier uhd48
     LEVEL_DV_MAIN_UHD_60,                       ///< Dolby Vision main tier uhd60
+    LEVEL_DV_MAIN_UHD_120,                      ///< Dolby Vision main tier uhd120
+    LEVEL_DV_MAIN_8K_30,                        ///< Dolby Vision main tier 8k30
+    LEVEL_DV_MAIN_8K_60,                        ///< Dolby Vision main tier 8k60
 
     LEVEL_DV_HIGH_HD_24 = _C2_PL_DV_BASE + 0x100,  ///< Dolby Vision high tier hd24
     LEVEL_DV_HIGH_HD_30,                        ///< Dolby Vision high tier hd30
@@ -690,6 +700,9 @@
     LEVEL_DV_HIGH_UHD_30,                       ///< Dolby Vision high tier uhd30
     LEVEL_DV_HIGH_UHD_48,                       ///< Dolby Vision high tier uhd48
     LEVEL_DV_HIGH_UHD_60,                       ///< Dolby Vision high tier uhd60
+    LEVEL_DV_HIGH_UHD_120,                      ///< Dolby Vision high tier uhd120
+    LEVEL_DV_HIGH_8K_30,                        ///< Dolby Vision high tier 8k30
+    LEVEL_DV_HIGH_8K_60,                        ///< Dolby Vision high tier 8k60
 
     // AV1 levels
     LEVEL_AV1_2    = _C2_PL_AV1_BASE ,          ///< AV1 Level 2
@@ -1602,16 +1615,54 @@
     C2FIELD(maxFall, "max-fall")
 };
 typedef C2StreamParam<C2Info, C2HdrStaticMetadataStruct, kParamIndexHdrStaticMetadata>
-        C2StreamHdrStaticInfo;
+        C2StreamHdrStaticMetadataInfo;
+typedef C2StreamParam<C2Info, C2HdrStaticMetadataStruct, kParamIndexHdrStaticMetadata>
+        C2StreamHdrStaticInfo;  // deprecated
 constexpr char C2_PARAMKEY_HDR_STATIC_INFO[] = "raw.hdr-static-info";
 
 /**
  * HDR10+ Metadata Info.
+ *
+ * Deprecated. Use C2StreamHdrDynamicMetadataInfo with
+ * HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40
  */
 typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexHdr10PlusMetadata>
-        C2StreamHdr10PlusInfo;
-constexpr char C2_PARAMKEY_INPUT_HDR10_PLUS_INFO[] = "input.hdr10-plus-info";
-constexpr char C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO[] = "output.hdr10-plus-info";
+        C2StreamHdr10PlusInfo;  // deprecated
+constexpr char C2_PARAMKEY_INPUT_HDR10_PLUS_INFO[] = "input.hdr10-plus-info";  // deprecated
+constexpr char C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO[] = "output.hdr10-plus-info";  // deprecated
+
+/**
+ * HDR dynamic metadata types
+ */
+C2ENUM(C2Config::hdr_dynamic_metadata_type_t, uint32_t,
+    HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_10,  ///< SMPTE ST 2094-10
+    HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40,  ///< SMPTE ST 2094-40
+)
+
+struct C2HdrDynamicMetadataStruct {
+    inline C2HdrDynamicMetadataStruct() { memset(this, 0, sizeof(*this)); }
+
+    inline C2HdrDynamicMetadataStruct(
+            size_t flexCount, C2Config::hdr_dynamic_metadata_type_t type)
+        : type_(type) {
+        memset(data, 0, flexCount);
+    }
+
+    C2Config::hdr_dynamic_metadata_type_t type_;
+    uint8_t data[];
+
+    DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(HdrDynamicMetadata, data)
+    C2FIELD(type_, "type")
+    C2FIELD(data, "data")
+};
+
+/**
+ * Dynamic HDR Metadata Info.
+ */
+typedef C2StreamParam<C2Info, C2HdrDynamicMetadataStruct, kParamIndexHdrDynamicMetadata>
+        C2StreamHdrDynamicMetadataInfo;
+constexpr char C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO[] = "input.hdr-dynamic-info";
+constexpr char C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO[] = "output.hdr-dynamic-info";
 
 /* ------------------------------------ block-based coding ----------------------------------- */
 
@@ -1673,7 +1724,7 @@
     SYNC_FRAME = (1 << 0),  ///< sync frame, e.g. IDR
     I_FRAME    = (1 << 1),  ///< intra frame that is completely encoded
     P_FRAME    = (1 << 2),  ///< inter predicted frame from previous frames
-    B_FRAME    = (1 << 3),  ///< backward predicted (out-of-order) frame
+    B_FRAME    = (1 << 3),  ///< bidirectional predicted (out-of-order) frame
 )
 
 /**
@@ -2411,6 +2462,17 @@
     S_HANDHELD = 1              // corresponds to VMAF=70
 );
 
+/**
+ * Video Encoding Statistics Export
+ */
+
+/**
+ * Average block QP exported from video encoder.
+ */
+typedef C2StreamParam<C2Info, C2SimpleValueStruct<int32_t>, kParamIndexAverageBlockQuantization>
+        C2AndroidStreamAverageBlockQuantizationInfo;
+constexpr char C2_PARAMKEY_AVERAGE_QP[] = "coded.average-qp";
+
 /// @}
 
 #endif  // C2CONFIG_H_
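
C2StreamHdrDynamicMetadataInfo is a flex param whose payload lives in the trailing data[] array. A hedged sketch of how a client might allocate and fill one, assuming the flex-param AllocShared(flexCount, stream, ...) pattern used for C2StreamPictureQuantizationTuning elsewhere in this change also applies here (makeHdrDynamicParam is a hypothetical helper, and the payload bytes stand in for an SMPTE ST 2094-40 blob):

// Hedged sketch only: assumes the Codec2 headers from this tree are on the include
// path and that the flex-param AllocShared(flexCount, stream, ...) pattern shown
// above applies to this param as well.
#include <C2Config.h>

#include <cstring>
#include <memory>
#include <vector>

std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> makeHdrDynamicParam(
        const std::vector<uint8_t>& payload) {
    auto info = C2StreamHdrDynamicMetadataInfo::input::AllocShared(
            payload.size(), 0u /* stream */,
            C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
    std::memcpy(info->m.data, payload.data(), payload.size());
    return info;
}
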
diff --git a/media/codec2/fuzzer/Android.bp b/media/codec2/fuzzer/Android.bp
index bd1fac6..3adc212 100644
--- a/media/codec2/fuzzer/Android.bp
+++ b/media/codec2/fuzzer/Android.bp
@@ -28,43 +28,12 @@
 cc_defaults {
     name: "C2Fuzzer-defaults",
 
+    defaults: [ "libcodec2-static-defaults" ],
+
     srcs: [
         "C2Fuzzer.cpp",
     ],
 
-    static_libs: [
-        "liblog",
-        "libion",
-        "libfmq",
-        "libbase",
-        "libutils",
-        "libcutils",
-        "libcodec2",
-        "libhidlbase",
-        "libdmabufheap",
-        "libcodec2_vndk",
-        "libnativewindow",
-        "libcodec2_soft_common",
-        "libsfplugin_ccodec_utils",
-        "libstagefright_foundation",
-        "libstagefright_bufferpool@2.0.1",
-        "android.hardware.graphics.mapper@2.0",
-        "android.hardware.graphics.mapper@3.0",
-        "android.hardware.media.bufferpool@2.0",
-        "android.hardware.graphics.allocator@2.0",
-        "android.hardware.graphics.allocator@3.0",
-        "android.hardware.graphics.bufferqueue@2.0",
-    ],
-
-    shared_libs: [
-        "libui",
-        "libdl",
-        "libbinder",
-        "libhardware",
-        "libvndksupport",
-        "libprocessgroup",
-    ],
-
     cflags: [
         "-Wall",
         "-Werror",
diff --git a/media/codec2/fuzzer/C2Fuzzer.cpp b/media/codec2/fuzzer/C2Fuzzer.cpp
index 51e1013..e469d8b 100644
--- a/media/codec2/fuzzer/C2Fuzzer.cpp
+++ b/media/codec2/fuzzer/C2Fuzzer.cpp
@@ -194,12 +194,12 @@
   }
 
   std::vector<C2Param*> configParams;
+  C2StreamPictureSizeInfo::input inputSize(0u, kWidthOfVideo, kHeightOfVideo);
+  C2StreamSampleRateInfo::output sampleRateInfo(0u, kSamplingRateOfAudio);
+  C2StreamChannelCountInfo::output channelCountInfo(0u, kChannelsOfAudio);
   if (domain.value == DOMAIN_VIDEO) {
-    C2StreamPictureSizeInfo::input inputSize(0u, kWidthOfVideo, kHeightOfVideo);
     configParams.push_back(&inputSize);
   } else if (domain.value == DOMAIN_AUDIO) {
-    C2StreamSampleRateInfo::output sampleRateInfo(0u, kSamplingRateOfAudio);
-    C2StreamChannelCountInfo::output channelCountInfo(0u, kChannelsOfAudio);
     configParams.push_back(&sampleRateInfo);
     configParams.push_back(&channelCountInfo);
   }
@@ -239,17 +239,17 @@
 }
 
 void Codec2Fuzzer::decodeFrames(const uint8_t* data, size_t size) {
-  mBufferSource = new BufferSource(data, size);
-  if (!mBufferSource) {
+  std::unique_ptr<BufferSource> bufferSource = std::make_unique<BufferSource>(data, size);
+  if (!bufferSource) {
     return;
   }
-  mBufferSource->parse();
+  bufferSource->parse();
   c2_status_t status = C2_OK;
   size_t numFrames = 0;
-  while (!mBufferSource->isEos()) {
+  while (!bufferSource->isEos()) {
     uint8_t* frame = nullptr;
     size_t frameSize = 0;
-    FrameData frameData = mBufferSource->getFrame();
+    FrameData frameData = bufferSource->getFrame();
     frame = std::get<0>(frameData);
     frameSize = std::get<1>(frameData);
 
@@ -298,7 +298,6 @@
   mConditionalVariable.wait_for(waitForDecodeComplete, kC2FuzzerTimeOut, [this] { return mEos; });
   std::list<std::unique_ptr<C2Work>> c2flushedWorks;
   mComponent->flush_sm(C2Component::FLUSH_COMPONENT, &c2flushedWorks);
-  delete mBufferSource;
 }
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
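
decodeFrames above now owns its BufferSource through a scoped std::unique_ptr instead of the removed mBufferSource member, so the object is released on every exit path, including early returns. A minimal standalone illustration of the idiom (Resource and process are hypothetical names):

// Standalone illustration (not fuzzer code): scoped ownership releases the object on
// every return path, which is what the switch to std::unique_ptr above buys.
#include <memory>

struct Resource {
    explicit Resource(int size) : size(size) {}
    int size;
};

void process(int size) {
    auto resource = std::make_unique<Resource>(size);
    if (resource->size == 0) {
        return;  // destroyed here too; no manual delete on the early return
    }
    // ... use *resource ...
}  // destroyed here on the normal path
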
diff --git a/media/codec2/fuzzer/C2Fuzzer.h b/media/codec2/fuzzer/C2Fuzzer.h
index d5ac81a..da76885 100644
--- a/media/codec2/fuzzer/C2Fuzzer.h
+++ b/media/codec2/fuzzer/C2Fuzzer.h
@@ -104,7 +104,6 @@
     static constexpr size_t kMarkerSuffixSize = 3;
   };
 
-  BufferSource* mBufferSource;
   bool mEos = false;
   C2BlockPool::local_id_t mBlockPoolId;
 
diff --git a/media/codec2/hidl/1.0/vts/.clang-format b/media/codec2/hidl/1.0/vts/.clang-format
deleted file mode 120000
index 136279c..0000000
--- a/media/codec2/hidl/1.0/vts/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/OWNERS b/media/codec2/hidl/1.0/vts/OWNERS
index dbe89cf..32b11b8 100644
--- a/media/codec2/hidl/1.0/vts/OWNERS
+++ b/media/codec2/hidl/1.0/vts/OWNERS
@@ -1,8 +1,5 @@
+# Bug component: 25690
 # Media team
 lajos@google.com
-pawin@google.com
 taklee@google.com
 wonsik@google.com
-
-# VTS team
-dshi@google.com
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
index 9e3a823..d47ef67 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
@@ -465,6 +465,11 @@
     if (mMime.find("raw") != std::string::npos) {
         bitStreamInfo[0] = 8000;
         bitStreamInfo[1] = 1;
+    } else if ((mMime.find("g711-alaw") != std::string::npos) ||
+               (mMime.find("g711-mlaw") != std::string::npos)) {
+        // g711 test data is all 1-channel and has no embedded config info.
+        bitStreamInfo[0] = 8000;
+        bitStreamInfo[1] = 1;
     } else {
         ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
     }
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index ffec897..275a721 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -61,6 +61,7 @@
   public:
     virtual void SetUp() override {
         getParams();
+        mDisableTest = false;
         mEos = false;
         mClient = android::Codec2Client::CreateFromService(mInstanceName.c_str());
         ASSERT_NE(mClient, nullptr);
@@ -73,6 +74,14 @@
         for (int i = 0; i < MAX_INPUT_BUFFERS; ++i) {
             mWorkQueue.emplace_back(new C2Work);
         }
+
+        C2SecureModeTuning secureModeTuning{};
+        mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
+        if (secureModeTuning.value != C2Config::SM_UNPROTECTED) {
+            mDisableTest = true;
+        }
+
+        if (mDisableTest) std::cout << "[   WARN   ] Test Disabled \n";
     }
 
     virtual void TearDown() override {
@@ -105,6 +114,7 @@
     std::string mInstanceName;
     std::string mComponentName;
     bool mEos;
+    bool mDisableTest;
     std::mutex mQueueLock;
     std::condition_variable mQueueCondition;
     std::list<std::unique_ptr<C2Work>> mWorkQueue;
@@ -324,6 +334,7 @@
 };
 
 TEST_P(Codec2ComponentInputTests, InputBufferTest) {
+    if (mDisableTest) GTEST_SKIP() << "Test is disabled";
     description("Tests for different inputs");
 
     uint32_t flags = std::get<2>(GetParam());
diff --git a/media/codec2/hidl/plugin/FilterWrapper.cpp b/media/codec2/hidl/plugin/FilterWrapper.cpp
index 70c63f2..d5124fd 100644
--- a/media/codec2/hidl/plugin/FilterWrapper.cpp
+++ b/media/codec2/hidl/plugin/FilterWrapper.cpp
@@ -49,6 +49,11 @@
             std::weak_ptr<FilterWrapper> filterWrapper)
         : mIntf(intf), mFilterWrapper(filterWrapper) {
         takeFilters(std::move(filters));
+        for (size_t i = 0; i < mFilters.size(); ++i) {
+            mControlParamTypes.insert(
+                    mFilters[i].desc.controlParams.begin(),
+                    mFilters[i].desc.controlParams.end());
+        }
     }
 
     ~WrappedDecoderInterface() override = default;
@@ -187,7 +192,12 @@
         }
 
         std::vector<C2Param *> stackParamsForIntf;
-        std::copy_n(stackParamsList.begin(), stackParamsList.size(), stackParamsForIntf.begin());
+        for (C2Param *param : stackParamsList) {
+            if (mControlParamTypes.count(param->type()) != 0) {
+                continue;
+            }
+            stackParamsForIntf.push_back(param);
+        }
 
         // Gather heap params that did not get queried from the filter interfaces above.
         // These need to be queried from the decoder interface.
@@ -197,6 +207,9 @@
             if (mTypeToIndexForQuery.find(type) != mTypeToIndexForQuery.end()) {
                 continue;
             }
+            if (mControlParamTypes.count(type) != 0) {
+                continue;
+            }
             heapParamIndicesForIntf.push_back(heapParamIndices[j]);
         }
 
@@ -251,11 +264,14 @@
             std::vector<C2Param *> paramsForFilter;
             for (C2Param* param : params) {
                 auto it = mTypeToIndexForConfig.find(param->type().type());
-                if (it != mTypeToIndexForConfig.end() && it->second != i) {
+                if (it == mTypeToIndexForConfig.end() || it->second != i) {
                     continue;
                 }
                 paramsForFilter.push_back(param);
             }
+            if (paramsForFilter.empty()) {
+                continue;
+            }
             c2_status_t err = filter->config_vb(paramsForFilter, mayBlock, &filterFailures);
             if (err != C2_OK) {
                 LOG(err == C2_BAD_INDEX ? VERBOSE : WARNING)
@@ -356,6 +372,7 @@
     std::weak_ptr<FilterWrapper> mFilterWrapper;
     std::map<uint32_t, size_t> mTypeToIndexForQuery;
     std::map<uint32_t, size_t> mTypeToIndexForConfig;
+    std::set<C2Param::Type> mControlParamTypes;
 
     c2_status_t transferParams_l(
             const std::shared_ptr<C2ComponentInterface> &curr,
@@ -430,6 +447,10 @@
             LOG(DEBUG) << "WrappedDecoderInterface: FilterWrapper not found";
             return C2_OK;
         }
+        if (!filterWrapper->isFilteringEnabled(next)) {
+            LOG(VERBOSE) << "WrappedDecoderInterface: filtering not enabled";
+            return C2_OK;
+        }
         std::vector<std::unique_ptr<C2Param>> params;
         c2_status_t err = filterWrapper->queryParamsForPreviousComponent(next, &params);
         if (err != C2_OK) {
@@ -594,6 +615,8 @@
             }
         }
         mRunningFilters.clear();
+        std::vector<FilterWrapper::Component> filters(mFilters);
+        mIntf->takeFilters(std::move(filters));
         return result;
     }
 
diff --git a/media/codec2/hidl/services/Android.bp b/media/codec2/hidl/services/Android.bp
index bb9f51f..b36e80a 100644
--- a/media/codec2/hidl/services/Android.bp
+++ b/media/codec2/hidl/services/Android.bp
@@ -52,7 +52,7 @@
 
     // minijail is used to protect against unexpected system calls.
     shared_libs: [
-        "libavservices_minijail_vendor",
+        "libavservices_minijail",
         "libbinder",
     ],
     required: ["android.hardware.media.c2@1.2-default-seccomp_policy"],
diff --git a/media/codec2/hidl/services/android.hardware.media.c2@1.2-default-service.rc b/media/codec2/hidl/services/android.hardware.media.c2@1.2-default-service.rc
index 03f6e3d..12da593 100644
--- a/media/codec2/hidl/services/android.hardware.media.c2@1.2-default-service.rc
+++ b/media/codec2/hidl/services/android.hardware.media.c2@1.2-default-service.rc
@@ -3,5 +3,5 @@
     user mediacodec
     group camera mediadrm drmrpc
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
 
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 2bc748f..feeddb5 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -42,12 +42,14 @@
         "android.hardware.drm@1.0",
         "android.hardware.media.c2@1.0",
         "android.hardware.media.omx@1.0",
+        "android.hardware.graphics.mapper@4.0",
         "libbase",
         "libbinder",
         "libcodec2",
         "libcodec2_client",
         "libcodec2_vndk",
         "libcutils",
+        "libgralloctypes",
         "libgui",
         "libhidlallocatorutils",
         "libhidlbase",
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index c049187..ed7d69c 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -42,6 +42,7 @@
 
 #include "utils/Codec2Mapper.h"
 #include "C2OMXNode.h"
+#include "Codec2Buffer.h"
 
 namespace android {
 
@@ -466,6 +467,18 @@
                 new Buffer2D(block->share(
                         C2Rect(block->width(), block->height()), ::C2Fence())));
         work->input.buffers.push_back(c2Buffer);
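+        // Propagate HDR static/dynamic metadata carried by the gralloc handle to
+        // the codec by attaching the corresponding C2 infos to the input buffer.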
+        std::shared_ptr<C2StreamHdrStaticInfo::input> staticInfo;
+        std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> dynamicInfo;
+        GetHdrMetadataFromGralloc4Handle(
+                block->handle(),
+                &staticInfo,
+                &dynamicInfo);
+        if (staticInfo && *staticInfo) {
+            c2Buffer->setInfo(staticInfo);
+        }
+        if (dynamicInfo && *dynamicInfo) {
+            c2Buffer->setInfo(dynamicInfo);
+        }
     }
     work->worklets.clear();
     work->worklets.emplace_back(new C2Worklet);
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 82460c9..5df28f0 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1018,29 +1018,31 @@
             } else {
                 pixelFormatInfo = nullptr;
             }
-            std::optional<uint32_t> flexPixelFormat{};
-            std::optional<uint32_t> flexPlanarPixelFormat{};
-            std::optional<uint32_t> flexSemiPlanarPixelFormat{};
+            // bit depth -> format
+            std::map<uint32_t, uint32_t> flexPixelFormat;
+            std::map<uint32_t, uint32_t> flexPlanarPixelFormat;
+            std::map<uint32_t, uint32_t> flexSemiPlanarPixelFormat;
             if (pixelFormatInfo && *pixelFormatInfo) {
                 for (size_t i = 0; i < pixelFormatInfo->flexCount(); ++i) {
                     const C2FlexiblePixelFormatDescriptorStruct &desc =
                         pixelFormatInfo->m.values[i];
-                    if (desc.bitDepth != 8
-                            || desc.subsampling != C2Color::YUV_420
+                    if (desc.subsampling != C2Color::YUV_420
                             // TODO(b/180076105): some device report wrong layout
                             // || desc.layout == C2Color::INTERLEAVED_PACKED
                             // || desc.layout == C2Color::INTERLEAVED_ALIGNED
                             || desc.layout == C2Color::UNKNOWN_LAYOUT) {
                         continue;
                     }
-                    if (!flexPixelFormat) {
-                        flexPixelFormat = desc.pixelFormat;
+                    if (flexPixelFormat.count(desc.bitDepth) == 0) {
+                        flexPixelFormat.emplace(desc.bitDepth, desc.pixelFormat);
                     }
-                    if (desc.layout == C2Color::PLANAR_PACKED && !flexPlanarPixelFormat) {
-                        flexPlanarPixelFormat = desc.pixelFormat;
+                    if (desc.layout == C2Color::PLANAR_PACKED
+                            && flexPlanarPixelFormat.count(desc.bitDepth) == 0) {
+                        flexPlanarPixelFormat.emplace(desc.bitDepth, desc.pixelFormat);
                     }
-                    if (desc.layout == C2Color::SEMIPLANAR_PACKED && !flexSemiPlanarPixelFormat) {
-                        flexSemiPlanarPixelFormat = desc.pixelFormat;
+                    if (desc.layout == C2Color::SEMIPLANAR_PACKED
+                            && flexSemiPlanarPixelFormat.count(desc.bitDepth) == 0) {
+                        flexSemiPlanarPixelFormat.emplace(desc.bitDepth, desc.pixelFormat);
                     }
                 }
             }
@@ -1050,7 +1052,7 @@
                 if (!(config->mDomain & Config::IS_ENCODER)) {
                     if (surface == nullptr) {
                         const char *prefix = "";
-                        if (flexSemiPlanarPixelFormat) {
+                        if (flexSemiPlanarPixelFormat.count(8) != 0) {
                             format = COLOR_FormatYUV420SemiPlanar;
                             prefix = "semi-";
                         } else {
@@ -1067,17 +1069,34 @@
                 if ((config->mDomain & Config::IS_ENCODER) || !surface) {
                     switch (format) {
                         case COLOR_FormatYUV420Flexible:
-                            format = flexPixelFormat.value_or(COLOR_FormatYUV420Planar);
+                            format = COLOR_FormatYUV420Planar;
+                            if (flexPixelFormat.count(8) != 0) {
+                                format = flexPixelFormat[8];
+                            }
                             break;
                         case COLOR_FormatYUV420Planar:
                         case COLOR_FormatYUV420PackedPlanar:
-                            format = flexPlanarPixelFormat.value_or(
-                                    flexPixelFormat.value_or(format));
+                            if (flexPlanarPixelFormat.count(8) != 0) {
+                                format = flexPlanarPixelFormat[8];
+                            } else if (flexPixelFormat.count(8) != 0) {
+                                format = flexPixelFormat[8];
+                            }
                             break;
                         case COLOR_FormatYUV420SemiPlanar:
                         case COLOR_FormatYUV420PackedSemiPlanar:
-                            format = flexSemiPlanarPixelFormat.value_or(
-                                    flexPixelFormat.value_or(format));
+                            if (flexSemiPlanarPixelFormat.count(8) != 0) {
+                                format = flexSemiPlanarPixelFormat[8];
+                            } else if (flexPixelFormat.count(8) != 0) {
+                                format = flexPixelFormat[8];
+                            }
+                            break;
+                        case COLOR_FormatYUVP010:
+                            format = COLOR_FormatYUVP010;
+                            if (flexSemiPlanarPixelFormat.count(10) != 0) {
+                                format = flexSemiPlanarPixelFormat[10];
+                            } else if (flexPixelFormat.count(10) != 0) {
+                                format = flexPixelFormat[10];
+                            }
                             break;
                         default:
                             // No-op
@@ -1213,11 +1232,25 @@
         std::initializer_list<C2Param::Index> indices {
             colorAspectsRequestIndex.withStream(0u),
         };
-        c2_status_t c2err = comp->query(
-                { &usage, &maxInputSize, &prepend },
-                indices,
-                C2_DONT_BLOCK,
-                &params);
+        int32_t colorTransferRequest = 0;
+        if (config->mDomain & (Config::IS_IMAGE | Config::IS_VIDEO)
+                && !sdkParams->findInt32("color-transfer-request", &colorTransferRequest)) {
+            colorTransferRequest = 0;
+        }
+        c2_status_t c2err = C2_OK;
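+        // Query the optional color-transfer-request index only when the client
+        // actually requested a color transfer.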
+        if (colorTransferRequest != 0) {
+            c2err = comp->query(
+                    { &usage, &maxInputSize, &prepend },
+                    indices,
+                    C2_DONT_BLOCK,
+                    &params);
+        } else {
+            c2err = comp->query(
+                    { &usage, &maxInputSize, &prepend },
+                    {},
+                    C2_DONT_BLOCK,
+                    &params);
+        }
         if (c2err != C2_OK && c2err != C2_BAD_INDEX) {
             ALOGE("Failed to query component interface: %d", c2err);
             return UNKNOWN_ERROR;
@@ -1332,8 +1365,8 @@
             }
         }
 
-        // set channel-mask
         if (config->mDomain & Config::IS_AUDIO) {
+            // set channel-mask
             int32_t mask;
             if (msg->findInt32(KEY_CHANNEL_MASK, &mask)) {
                 if (config->mDomain & Config::IS_ENCODER) {
@@ -1342,6 +1375,15 @@
                     config->mOutputFormat->setInt32(KEY_CHANNEL_MASK, mask);
                 }
             }
+
+            // set PCM encoding
+            int32_t pcmEncoding = kAudioEncodingPcm16bit;
+            msg->findInt32(KEY_PCM_ENCODING, &pcmEncoding);
+            if (encoder) {
+                config->mInputFormat->setInt32("android._config-pcm-encoding", pcmEncoding);
+            } else {
+                config->mOutputFormat->setInt32("android._config-pcm-encoding", pcmEncoding);
+            }
         }
 
         std::unique_ptr<C2Param> colorTransferRequestParam;
@@ -1351,11 +1393,6 @@
                 colorTransferRequestParam = std::move(param);
             }
         }
-        int32_t colorTransferRequest = 0;
-        if (config->mDomain & (Config::IS_IMAGE | Config::IS_VIDEO)
-                && !sdkParams->findInt32("color-transfer-request", &colorTransferRequest)) {
-            colorTransferRequest = 0;
-        }
 
         if (colorTransferRequest != 0) {
             if (colorTransferRequestParam && *colorTransferRequestParam) {
@@ -1421,6 +1458,31 @@
             }
         }
 
+        if (config->mTunneled) {
+            config->mOutputFormat->setInt32("android._tunneled", 1);
+        }
+
+        // Convert an encoding statistics level to the corresponding encoding
+        // statistics kinds.
+        int32_t encodingStatisticsLevel = VIDEO_ENCODING_STATISTICS_LEVEL_NONE;
+        if ((config->mDomain & Config::IS_ENCODER)
+            && (config->mDomain & Config::IS_VIDEO)
+            && msg->findInt32(KEY_VIDEO_ENCODING_STATISTICS_LEVEL, &encodingStatisticsLevel)) {
+            // Higher levels include all the encoding statistics that belong to lower levels.
+            switch (encodingStatisticsLevel) {
+                // case VIDEO_ENCODING_STATISTICS_LEVEL_2:
+                //     Reserved for a future level 2 with more encoding statistics
+                //     kinds; extended statistics for level 2 should be added here.
+                case VIDEO_ENCODING_STATISTICS_LEVEL_1:
+                    config->subscribeToConfigUpdate(comp,
+                        {kParamIndexAverageBlockQuantization, kParamIndexPictureType});
+                    break;
+                case VIDEO_ENCODING_STATISTICS_LEVEL_NONE:
+                    break;
+            }
+        }
+        ALOGD("encoding statistics level = %d", encodingStatisticsLevel);
+
         ALOGD("setup formats input: %s",
                 config->mInputFormat->debugString().c_str());
         ALOGD("setup formats output: %s",
@@ -1896,9 +1958,11 @@
     {
         Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
         const std::unique_ptr<Config> &config = *configLocked;
+        sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
+        status_t err = OK;
+
         if (config->mTunneled && config->mSidebandHandle != nullptr) {
-            sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
-            status_t err = native_window_set_sideband_stream(
+            err = native_window_set_sideband_stream(
                     nativeWindow.get(),
                     const_cast<native_handle_t *>(config->mSidebandHandle->handle()));
             if (err != OK) {
@@ -1906,6 +1970,15 @@
                         nativeWindow.get(), config->mSidebandHandle->handle(), err);
                 return err;
             }
+        } else {
+            // Explicitly reset the sideband handle of the window for
+            // non-tunneled video in case the window was previously used
+            // for tunneled video playback.
+            err = native_window_set_sideband_stream(nativeWindow.get(), nullptr);
+            if (err != OK) {
+                ALOGE("native_window_set_sideband_stream(nullptr) failed! (err %d).", err);
+                return err;
+            }
         }
     }
     return mChannel->setSurface(surface);
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index e9adfc9..99aa593 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include <algorithm>
+#include <atomic>
 #include <list>
 #include <numeric>
 
@@ -155,6 +156,7 @@
         input->pipelineDelay = 0u;
         input->numSlots = kSmoothnessFactor;
         input->numExtraSlots = 0u;
+        input->lastFlushIndex = 0u;
     }
     {
         Mutexed<Output>::Locked output(mOutput);
@@ -252,7 +254,7 @@
                 bool released = input->buffers->releaseBuffer(buffer, nullptr, true);
                 ALOGV("[%s] queueInputBuffer: buffer copied; %sreleased",
                       mName, released ? "" : "not ");
-                buffer.clear();
+                buffer = copy;
             } else {
                 ALOGW("[%s] queueInputBuffer: failed to copy a buffer; this may cause input "
                       "buffer starvation on component.", mName);
@@ -280,6 +282,12 @@
             }
         }
     } else if (eos) {
+        Mutexed<Input>::Locked input(mInput);
+        if (input->frameReassembler) {
+            usesFrameReassembler = true;
+            // drain any pending items with eos
+            input->frameReassembler.process(buffer, &items);
+        }
         flags |= C2FrameData::FLAG_END_OF_STREAM;
     }
     if (usesFrameReassembler) {
@@ -339,10 +347,10 @@
     } else {
         Mutexed<Input>::Locked input(mInput);
         bool released = false;
-        if (buffer) {
-            released = input->buffers->releaseBuffer(buffer, nullptr, true);
-        } else if (copy) {
+        if (copy) {
             released = input->extraBuffers.releaseSlot(copy, nullptr, true);
+        } else if (buffer) {
+            released = input->buffers->releaseBuffer(buffer, nullptr, true);
         }
         ALOGV("[%s] queueInputBuffer: buffer%s %sreleased",
               mName, (buffer == nullptr) ? "(copy)" : "", released ? "" : "not ");
@@ -832,6 +840,35 @@
         hdr10PlusInfo.reset();
     }
 
+    // HDR dynamic info
+    std::shared_ptr<const C2StreamHdrDynamicMetadataInfo::output> hdrDynamicInfo =
+        std::static_pointer_cast<const C2StreamHdrDynamicMetadataInfo::output>(
+                c2Buffer->getInfo(C2StreamHdrDynamicMetadataInfo::output::PARAM_TYPE));
+    // TODO: make this sticky & enable unset
+    if (hdrDynamicInfo && hdrDynamicInfo->flexCount() == 0) {
+        hdrDynamicInfo.reset();
+    }
+
+    if (hdr10PlusInfo) {
+        // C2StreamHdr10PlusInfo is deprecated; components should use
+        // C2StreamHdrDynamicMetadataInfo
+        // TODO: #metric
+        if (hdrDynamicInfo) {
+            // It is unexpected that both C2StreamHdr10PlusInfo and
+            // C2StreamHdrDynamicMetadataInfo are present.
+            // C2StreamHdrDynamicMetadataInfo takes priority.
+            // TODO: #metric
+        } else {
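+            // Repackage the deprecated HDR10+ payload as SMPTE 2094-40 dynamic
+            // metadata so the rest of the path only handles the new representation.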
+            std::shared_ptr<C2StreamHdrDynamicMetadataInfo::output> info =
+                    C2StreamHdrDynamicMetadataInfo::output::AllocShared(
+                            hdr10PlusInfo->flexCount(),
+                            0u,
+                            C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
+            memcpy(info->m.data, hdr10PlusInfo->m.value, hdr10PlusInfo->flexCount());
+            hdrDynamicInfo = info;
+        }
+    }
+
     std::vector<C2ConstGraphicBlock> blocks = c2Buffer->data().graphicBlocks();
     if (blocks.size() != 1u) {
         ALOGD("[%s] expected 1 graphic block, but got %zu", mName, blocks.size());
@@ -851,7 +888,7 @@
             videoScalingMode,
             transform,
             Fence::NO_FENCE, 0);
-    if (hdrStaticInfo || hdr10PlusInfo) {
+    if (hdrStaticInfo || hdrDynamicInfo) {
         HdrMetadata hdr;
         if (hdrStaticInfo) {
             // If mastering max and min luminance fields are 0, do not use them.
@@ -888,13 +925,16 @@
                 hdr.cta8613 = cta861_meta;
             }
         }
-        if (hdr10PlusInfo) {
+        if (hdrDynamicInfo
+                && hdrDynamicInfo->m.type_ == C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40) {
             hdr.validTypes |= HdrMetadata::HDR10PLUS;
             hdr.hdr10plus.assign(
-                    hdr10PlusInfo->m.value,
-                    hdr10PlusInfo->m.value + hdr10PlusInfo->flexCount());
+                    hdrDynamicInfo->m.data,
+                    hdrDynamicInfo->m.data + hdrDynamicInfo->flexCount());
         }
         qbi.setHdrMetadata(hdr);
+
+        SetHdrMetadataToGralloc4Handle(hdrStaticInfo, hdrDynamicInfo, block.handle());
     }
     // we don't have dirty regions
     qbi.setSurfaceDamage(Region::INVALID_REGION);
@@ -1116,6 +1156,7 @@
         input->numSlots = numInputSlots;
         input->extraBuffers.flush();
         input->numExtraSlots = 0u;
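+        // Remember the current frame index so that input-buffer-done callbacks
+        // for frames queued before this point can be ignored (see onInputBufferDone).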
+        input->lastFlushIndex = mFrameIndex.load(std::memory_order_relaxed);
         if (audioEncoder && encoderFrameSize && sampleRate && channelCount) {
             input->frameReassembler.init(
                     pool,
@@ -1379,6 +1420,12 @@
                 }
             }
         }
+
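+        // Cache whether this session is tunneled; onWorkDone uses this to keep
+        // reporting dropped frames that carry a format change.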
+        int32_t tunneled = 0;
+        if (!outputFormat->findInt32("android._tunneled", &tunneled)) {
+            tunneled = 0;
+        }
+        mTunneled = (tunneled != 0);
     }
 
     // Set up pipeline control. This has to be done after mInputBuffers and
@@ -1452,6 +1499,16 @@
     std::list<std::unique_ptr<C2Work>> flushedConfigs;
     mFlushedConfigs.lock()->swap(flushedConfigs);
     if (!flushedConfigs.empty()) {
+        {
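+            // Register the re-queued config works with the pipeline watcher so
+            // their buffers are tracked until the corresponding work completes.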
+            Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+            PipelineWatcher::Clock::time_point now = PipelineWatcher::Clock::now();
+            for (const std::unique_ptr<C2Work> &work : flushedConfigs) {
+                watcher->onWorkQueued(
+                        work->input.ordinal.frameIndex.peeku(),
+                        std::vector(work->input.buffers),
+                        now);
+            }
+        }
         err = mComponent->queue(&flushedConfigs);
         if (err != C2_OK) {
             ALOGW("[%s] Error while queueing a flushed config", mName);
@@ -1518,40 +1575,45 @@
     setDescrambler(nullptr);
 }
 
-
 void CCodecBufferChannel::flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) {
     ALOGV("[%s] flush", mName);
-    std::vector<uint64_t> indices;
     std::list<std::unique_ptr<C2Work>> configs;
-    for (const std::unique_ptr<C2Work> &work : flushedWork) {
-        indices.push_back(work->input.ordinal.frameIndex.peeku());
-        if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
-            continue;
+    mInput.lock()->lastFlushIndex = mFrameIndex.load(std::memory_order_relaxed);
+    {
+        Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+        for (const std::unique_ptr<C2Work> &work : flushedWork) {
+            uint64_t frameIndex = work->input.ordinal.frameIndex.peeku();
+            if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
+                watcher->onWorkDone(frameIndex);
+                continue;
+            }
+            if (work->input.buffers.empty()
+                    || work->input.buffers.front() == nullptr
+                    || work->input.buffers.front()->data().linearBlocks().empty()) {
+                ALOGD("[%s] no linear codec config data found", mName);
+                watcher->onWorkDone(frameIndex);
+                continue;
+            }
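+            // Stash a copy of the codec config work to be re-queued after the
+            // flush; FLAG_DROP_FRAME ensures it produces no output when replayed.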
+            std::unique_ptr<C2Work> copy(new C2Work);
+            copy->input.flags = C2FrameData::flags_t(
+                    work->input.flags | C2FrameData::FLAG_DROP_FRAME);
+            copy->input.ordinal = work->input.ordinal;
+            copy->input.ordinal.frameIndex = mFrameIndex++;
+            for (size_t i = 0; i < work->input.buffers.size(); ++i) {
+                copy->input.buffers.push_back(watcher->onInputBufferReleased(frameIndex, i));
+            }
+            for (const std::unique_ptr<C2Param> &param : work->input.configUpdate) {
+                copy->input.configUpdate.push_back(C2Param::Copy(*param));
+            }
+            copy->input.infoBuffers.insert(
+                    copy->input.infoBuffers.begin(),
+                    work->input.infoBuffers.begin(),
+                    work->input.infoBuffers.end());
+            copy->worklets.emplace_back(new C2Worklet);
+            configs.push_back(std::move(copy));
+            watcher->onWorkDone(frameIndex);
+            ALOGV("[%s] stashed flushed codec config data", mName);
         }
-        if (work->input.buffers.empty()
-                || work->input.buffers.front() == nullptr
-                || work->input.buffers.front()->data().linearBlocks().empty()) {
-            ALOGD("[%s] no linear codec config data found", mName);
-            continue;
-        }
-        std::unique_ptr<C2Work> copy(new C2Work);
-        copy->input.flags = C2FrameData::flags_t(work->input.flags | C2FrameData::FLAG_DROP_FRAME);
-        copy->input.ordinal = work->input.ordinal;
-        copy->input.ordinal.frameIndex = mFrameIndex++;
-        copy->input.buffers.insert(
-                copy->input.buffers.begin(),
-                work->input.buffers.begin(),
-                work->input.buffers.end());
-        for (const std::unique_ptr<C2Param> &param : work->input.configUpdate) {
-            copy->input.configUpdate.push_back(C2Param::Copy(*param));
-        }
-        copy->input.infoBuffers.insert(
-                copy->input.infoBuffers.begin(),
-                work->input.infoBuffers.begin(),
-                work->input.infoBuffers.end());
-        copy->worklets.emplace_back(new C2Worklet);
-        configs.push_back(std::move(copy));
-        ALOGV("[%s] stashed flushed codec config data", mName);
     }
     mFlushedConfigs.lock()->swap(configs);
     {
@@ -1566,12 +1628,6 @@
             output->buffers->flushStash();
         }
     }
-    {
-        Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
-        for (uint64_t index : indices) {
-            watcher->onWorkDone(index);
-        }
-    }
 }
 
 void CCodecBufferChannel::onWorkDone(
@@ -1589,12 +1645,18 @@
     }
     std::shared_ptr<C2Buffer> buffer =
             mPipelineWatcher.lock()->onInputBufferReleased(frameIndex, arrayIndex);
-    bool newInputSlotAvailable;
+    bool newInputSlotAvailable = false;
     {
         Mutexed<Input>::Locked input(mInput);
-        newInputSlotAvailable = input->buffers->expireComponentBuffer(buffer);
-        if (!newInputSlotAvailable) {
-            (void)input->extraBuffers.expireComponentBuffer(buffer);
+        if (input->lastFlushIndex >= frameIndex) {
+            ALOGD("[%s] Ignoring stale input buffer done callback: "
+                  "last flush index = %lld, frameIndex = %lld",
+                  mName, input->lastFlushIndex.peekll(), (long long)frameIndex);
+        } else {
+            newInputSlotAvailable = input->buffers->expireComponentBuffer(buffer);
+            if (!newInputSlotAvailable) {
+                (void)input->extraBuffers.expireComponentBuffer(buffer);
+            }
         }
     }
     if (newInputSlotAvailable) {
@@ -1881,10 +1943,21 @@
         }
     }
 
+    bool drop = false;
+    if (worklet->output.flags & C2FrameData::FLAG_DROP_FRAME) {
+        ALOGV("[%s] onWorkDone: drop buffer but keep metadata", mName);
+        drop = true;
+    }
+
     if (notifyClient && !buffer && !flags) {
-        ALOGV("[%s] onWorkDone: Not reporting output buffer (%lld)",
-              mName, work->input.ordinal.frameIndex.peekull());
-        notifyClient = false;
+        if (mTunneled && drop && outputFormat) {
+            ALOGV("[%s] onWorkDone: Keep tunneled, drop frame with format change (%lld)",
+                  mName, work->input.ordinal.frameIndex.peekull());
+        } else {
+            ALOGV("[%s] onWorkDone: Not reporting output buffer (%lld)",
+                  mName, work->input.ordinal.frameIndex.peekull());
+            notifyClient = false;
+        }
     }
 
     if (buffer) {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 5a2aca2..26eef30 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -273,6 +273,7 @@
         size_t numExtraSlots;
         uint32_t inputDelay;
         uint32_t pipelineDelay;
+        c2_cntr64_t lastFlushIndex;
 
         FrameReassembler frameReassembler;
     };
@@ -323,6 +324,8 @@
         return mCrypto != nullptr || mDescrambler != nullptr;
     }
     std::atomic_bool mSendEncryptedInfoBuffer;
+
+    std::atomic_bool mTunneled;
 };
 
 // Conversion of a c2_status_t value to a status_t value may depend on the
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 333a2ca..20f2ecf 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -21,6 +21,7 @@
 #include <C2PlatformSupport.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/MediaDefs.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecConstants.h>
 #include <media/stagefright/SkipCutBuffer.h>
@@ -33,7 +34,7 @@
 
 namespace {
 
-sp<GraphicBlockBuffer> AllocateGraphicBuffer(
+sp<GraphicBlockBuffer> AllocateInputGraphicBuffer(
         const std::shared_ptr<C2BlockPool> &pool,
         const sp<AMessage> &format,
         uint32_t pixelFormat,
@@ -45,9 +46,13 @@
         return nullptr;
     }
 
+    int64_t usageValue = 0;
+    (void)format->findInt64("android._C2MemoryUsage", &usageValue);
+    C2MemoryUsage fullUsage{usageValue | usage.expected};
+
     std::shared_ptr<C2GraphicBlock> block;
     c2_status_t err = pool->fetchGraphicBlock(
-            width, height, pixelFormat, usage, &block);
+            width, height, pixelFormat, fullUsage, &block);
     if (err != C2_OK) {
         ALOGD("fetch graphic block failed: %d", err);
         return nullptr;
@@ -132,6 +137,7 @@
     if (!copy->copy(c2buffer)) {
         return nullptr;
     }
+    copy->meta()->extend(buffer->meta());
     return copy;
 }
 
@@ -199,6 +205,56 @@
     mSkipCutBuffer = new SkipCutBuffer(skip, cut, mChannelCount);
 }
 
+bool OutputBuffers::convert(
+        const std::shared_ptr<C2Buffer> &src, sp<Codec2Buffer> *dst) {
+    if (!src || src->data().type() != C2BufferData::LINEAR) {
+        return false;
+    }
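+    // (Re)create the AudioConverter only when the codec or configured PCM
+    // encoding changes; without a converter no conversion is performed.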
+    int32_t configEncoding = kAudioEncodingPcm16bit;
+    int32_t codecEncoding = kAudioEncodingPcm16bit;
+    if (mFormat->findInt32("android._codec-pcm-encoding", &codecEncoding)
+            && mFormat->findInt32("android._config-pcm-encoding", &configEncoding)) {
+        if (mSrcEncoding != codecEncoding || mDstEncoding != configEncoding) {
+            if (codecEncoding != configEncoding) {
+                mDataConverter = AudioConverter::Create(
+                        (AudioEncoding)codecEncoding, (AudioEncoding)configEncoding);
+                ALOGD_IF(mDataConverter, "[%s] Converter created from %d to %d",
+                         mName, codecEncoding, configEncoding);
+                mFormatWithConverter = mFormat->dup();
+                mFormatWithConverter->setInt32(KEY_PCM_ENCODING, configEncoding);
+            } else {
+                mDataConverter = nullptr;
+                mFormatWithConverter = nullptr;
+            }
+            mSrcEncoding = codecEncoding;
+            mDstEncoding = configEncoding;
+        }
+        if (int encoding; !mFormat->findInt32(KEY_PCM_ENCODING, &encoding)
+                || encoding != mDstEncoding) {
+        }
+    }
+    if (!mDataConverter) {
+        return false;
+    }
+    sp<MediaCodecBuffer> srcBuffer = ConstLinearBlockBuffer::Allocate(mFormat, src);
+    if (!srcBuffer) {
+        return false;
+    }
+    if (!*dst) {
+        *dst = new Codec2Buffer(
+                mFormat,
+                new ABuffer(mDataConverter->targetSize(srcBuffer->size())));
+    }
+    sp<MediaCodecBuffer> dstBuffer = *dst;
+    status_t err = mDataConverter->convert(srcBuffer, dstBuffer);
+    if (err != OK) {
+        ALOGD("[%s] buffer conversion failed: %d", mName, err);
+        return false;
+    }
+    dstBuffer->setFormat(mFormatWithConverter);
+    return true;
+}
+
 void OutputBuffers::clearStash() {
     mPending.clear();
     mReorderStash.clear();
@@ -887,6 +943,10 @@
         return nullptr;
     }
 
+    int64_t usageValue = 0;
+    (void)format->findInt64("android._C2MemoryUsage", &usageValue);
+    usage = C2MemoryUsage(usage.expected | usageValue);
+
     std::shared_ptr<C2LinearBlock> block;
     c2_status_t err = pool->fetchLinearBlock(capacity, usage, &block);
     if (err != C2_OK || block == nullptr) {
@@ -1031,7 +1091,7 @@
             [pool = mPool, format = mFormat, lbp = mLocalBufferPool, pixelFormat]()
                     -> sp<Codec2Buffer> {
                 C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
-                return AllocateGraphicBuffer(
+                return AllocateInputGraphicBuffer(
                         pool, format, pixelFormat, usage, lbp);
             });
     return std::move(array);
@@ -1042,10 +1102,8 @@
 }
 
 sp<Codec2Buffer> GraphicInputBuffers::createNewBuffer() {
-    int64_t usageValue = 0;
-    (void)mFormat->findInt64("android._C2MemoryUsage", &usageValue);
-    C2MemoryUsage usage{usageValue | C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE};
-    return AllocateGraphicBuffer(
+    C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+    return AllocateInputGraphicBuffer(
             mPool, mFormat, extractPixelFormat(mFormat), usage, mLocalBufferPool);
 }
 
@@ -1078,7 +1136,7 @@
         return err;
     }
     c2Buffer->setFormat(mFormat);
-    if (!c2Buffer->copy(buffer)) {
+    if (!convert(buffer, &c2Buffer) && !c2Buffer->copy(buffer)) {
         ALOGD("[%s] copy buffer failed", mName);
         return WOULD_BLOCK;
     }
@@ -1194,9 +1252,12 @@
         const std::shared_ptr<C2Buffer> &buffer,
         size_t *index,
         sp<MediaCodecBuffer> *clientBuffer) {
-    sp<Codec2Buffer> newBuffer = wrap(buffer);
-    if (newBuffer == nullptr) {
-        return NO_MEMORY;
+    sp<Codec2Buffer> newBuffer;
+    if (!convert(buffer, &newBuffer)) {
+        newBuffer = wrap(buffer);
+        if (newBuffer == nullptr) {
+            return NO_MEMORY;
+        }
     }
     newBuffer->setFormat(mFormat);
     *index = mImpl.assignSlot(newBuffer);
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index 995d3a4..c8e9930 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -18,9 +18,11 @@
 
 #define CCODEC_BUFFERS_H_
 
+#include <optional>
 #include <string>
 
 #include <C2Config.h>
+#include <DataConverter.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/MediaCodecBuffer.h>
 
@@ -382,6 +384,14 @@
      */
     void submit(const sp<MediaCodecBuffer> &buffer);
 
+    /**
+     * Apply DataConverter from |src| to |*dst| if needed. If |*dst| is nullptr,
+     * a new buffer is allocated.
+     *
+     * Returns true if conversion was needed and executed; false otherwise.
+     */
+    bool convert(const std::shared_ptr<C2Buffer> &src, sp<Codec2Buffer> *dst);
+
 private:
     // SkipCutBuffer
     int32_t mDelay;
@@ -391,6 +401,12 @@
 
     void setSkipCutBuffer(int32_t skip, int32_t cut);
 
+    // DataConverter
+    sp<DataConverter> mDataConverter;
+    sp<AMessage> mFormatWithConverter;
+    std::optional<int32_t> mSrcEncoding;
+    std::optional<int32_t> mDstEncoding;
+
     // Output stash
 
     // Struct for an entry in the output stash (mPending and mReorderStash)
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index c275187..dd37c4b 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -657,24 +657,29 @@
     add(ConfigMapper(KEY_SAMPLE_RATE,   C2_PARAMKEY_CODED_SAMPLE_RATE,  "value")
         .limitTo(D::AUDIO & D::CODED));
 
-    add(ConfigMapper(KEY_PCM_ENCODING,  C2_PARAMKEY_PCM_ENCODING,       "value")
+    auto pcmEncodingMapper = [](C2Value v) -> C2Value {
+        int32_t value;
+        C2Config::pcm_encoding_t to;
+        if (v.get(&value) && C2Mapper::map(value, &to)) {
+            return to;
+        }
+        return C2Value();
+    };
+    auto pcmEncodingReverse = [](C2Value v) -> C2Value {
+        C2Config::pcm_encoding_t value;
+        int32_t to;
+        using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(value)>::type;
+        if (v.get((C2ValueType*)&value) && C2Mapper::map(value, &to)) {
+            return to;
+        }
+        return C2Value();
+    };
+    add(ConfigMapper(KEY_PCM_ENCODING,              C2_PARAMKEY_PCM_ENCODING, "value")
         .limitTo(D::AUDIO)
-        .withMappers([](C2Value v) -> C2Value {
-            int32_t value;
-            C2Config::pcm_encoding_t to;
-            if (v.get(&value) && C2Mapper::map(value, &to)) {
-                return to;
-            }
-            return C2Value();
-        }, [](C2Value v) -> C2Value {
-            C2Config::pcm_encoding_t value;
-            int32_t to;
-            using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(value)>::type;
-            if (v.get((C2ValueType*)&value) && C2Mapper::map(value, &to)) {
-                return to;
-            }
-            return C2Value();
-        }));
+        .withMappers(pcmEncodingMapper, pcmEncodingReverse));
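+    // Internal read-only key exposing the PCM encoding actually produced by the
+    // codec; CCodecBuffers compares it against "android._config-pcm-encoding"
+    // to decide whether a PCM conversion is needed.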
+    add(ConfigMapper("android._codec-pcm-encoding", C2_PARAMKEY_PCM_ENCODING, "value")
+        .limitTo(D::AUDIO & D::READ)
+        .withMappers(pcmEncodingMapper, pcmEncodingReverse));
 
     add(ConfigMapper(KEY_IS_ADTS, C2_PARAMKEY_AAC_PACKAGING, "value")
         .limitTo(D::AUDIO & D::CODED)
@@ -894,6 +899,9 @@
     add(ConfigMapper(KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT, C2_PARAMKEY_MAX_CHANNEL_COUNT, "value")
         .limitTo(D::AUDIO & (D::CONFIG | D::PARAM | D::READ)));
 
+    add(ConfigMapper(KEY_MAX_OUTPUT_CHANNEL_COUNT, C2_PARAMKEY_MAX_CHANNEL_COUNT, "value")
+        .limitTo(D::AUDIO & (D::CONFIG | D::PARAM | D::READ)));
+
     add(ConfigMapper(KEY_AAC_SBR_MODE, C2_PARAMKEY_AAC_SBR_MODE, "value")
         .limitTo(D::AUDIO & D::ENCODER & (D::CONFIG | D::PARAM | D::READ))
         .withMapper([](C2Value v) -> C2Value {
@@ -948,6 +956,12 @@
             return value == 0 ? C2_FALSE : C2_TRUE;
         }));
 
+    add(ConfigMapper(KEY_VIDEO_QP_AVERAGE, C2_PARAMKEY_AVERAGE_QP, "value")
+        .limitTo(D::ENCODER & D::VIDEO & D::READ));
+
+    add(ConfigMapper(KEY_PICTURE_TYPE, C2_PARAMKEY_PICTURE_TYPE, "value")
+        .limitTo(D::ENCODER & D::VIDEO & D::READ));
+
     /* still to do
     constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
 
diff --git a/media/codec2/sfplugin/CCodecConfig.h b/media/codec2/sfplugin/CCodecConfig.h
index 417b773..88e6239 100644
--- a/media/codec2/sfplugin/CCodecConfig.h
+++ b/media/codec2/sfplugin/CCodecConfig.h
@@ -363,11 +363,6 @@
             const std::vector<std::string> &names,
             c2_blocking_t blocking = C2_DONT_BLOCK);
 
-private:
-
-    /// initializes the standard MediaCodec to Codec 2.0 params mapping
-    void initializeStandardParams();
-
     /// Adds indices to the subscribed indices, and updated subscription to component
     /// \param blocking blocking mode to use with the component
     status_t subscribeToConfigUpdate(
@@ -375,6 +370,11 @@
             const std::vector<C2Param::Index> &indices,
             c2_blocking_t blocking = C2_DONT_BLOCK);
 
+private:
+
+    /// initializes the standard MediaCodec to Codec 2.0 params mapping
+    void initializeStandardParams();
+
     /// Gets SDK format from codec 2.0 reflected configuration
     /// \param domain input/output bitmask
     sp<AMessage> getFormatForDomain(
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 4070478..2d3c70a 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -18,9 +18,14 @@
 #define LOG_TAG "Codec2Buffer"
 #include <utils/Log.h>
 
+#include <aidl/android/hardware/graphics/common/Cta861_3.h>
+#include <aidl/android/hardware/graphics/common/Smpte2086.h>
 #include <android-base/properties.h>
 #include <android/hardware/cas/native/1.0/types.h>
 #include <android/hardware/drm/1.0/types.h>
+#include <android/hardware/graphics/common/1.2/types.h>
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
+#include <gralloctypes/Gralloc4.h>
 #include <hidlmemory/FrameworkUtils.h>
 #include <media/hardware/HardwareAPI.h>
 #include <media/stagefright/CodecBase.h>
@@ -358,21 +363,22 @@
                         break;
 
                     case COLOR_FormatYUVP010:
+                        // stride is in bytes
                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
                         mediaImage->mPlane[mediaImage->Y].mColInc = 2;
-                        mediaImage->mPlane[mediaImage->Y].mRowInc = stride * 2;
+                        mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
 
-                        mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride * 2;
+                        mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
                         mediaImage->mPlane[mediaImage->U].mColInc = 4;
-                        mediaImage->mPlane[mediaImage->U].mRowInc = stride * 2;
+                        mediaImage->mPlane[mediaImage->U].mRowInc = stride;
                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
 
-                        mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride * 2 + 2;
+                        mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 2;
                         mediaImage->mPlane[mediaImage->V].mColInc = 4;
-                        mediaImage->mPlane[mediaImage->V].mRowInc = stride * 2;
+                        mediaImage->mPlane[mediaImage->V].mRowInc = stride;
                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
                         if (tryWrapping) {
@@ -533,8 +539,8 @@
                 mInitCheck = BAD_VALUE;
                 return;
             }
-            bufferSize += stride * vStride
-                    / plane.rowSampling / plane.colSampling * divUp(mAllocatedDepth, 8u);
+            // stride is in bytes
+            bufferSize += stride * vStride / plane.rowSampling / plane.colSampling;
         }
 
         mBackBufferSize = bufferSize;
@@ -787,8 +793,14 @@
         ALOGD("format had no width / height");
         return nullptr;
     }
-    // NOTE: we currently only support YUV420 formats for byte-buffer mode.
-    sp<ABuffer> aBuffer(alloc(align(width, 16) * align(height, 16) * 3 / 2));
+    int32_t colorFormat = COLOR_FormatYUV420Flexible;
+    int32_t bpp = 12;  // 8(Y) + 2(U) + 2(V)
+    if (format->findInt32(KEY_COLOR_FORMAT, &colorFormat)) {
+        if (colorFormat == COLOR_FormatYUVP010) {
+            bpp = 24;  // 16(Y) + 4(U) + 4(V)
+        }
+    }
+    sp<ABuffer> aBuffer(alloc(align(width, 16) * align(height, 16) * bpp / 8));
     return new ConstGraphicBlockBuffer(
             format,
             aBuffer,
@@ -941,4 +953,218 @@
     return const_cast<native_handle_t *>(mBlock->handle());
 }
 
+using ::aidl::android::hardware::graphics::common::Cta861_3;
+using ::aidl::android::hardware::graphics::common::Smpte2086;
+
+using ::android::gralloc4::MetadataType_Cta861_3;
+using ::android::gralloc4::MetadataType_Smpte2086;
+using ::android::gralloc4::MetadataType_Smpte2094_40;
+
+using ::android::hardware::Return;
+using ::android::hardware::hidl_vec;
+
+using Error4 = ::android::hardware::graphics::mapper::V4_0::Error;
+using IMapper4 = ::android::hardware::graphics::mapper::V4_0::IMapper;
+
+namespace {
+
+sp<IMapper4> GetMapper4() {
+    static sp<IMapper4> sMapper = IMapper4::getService();
+    return sMapper;
+}
+
+class NativeHandleDeleter {
+public:
+    explicit NativeHandleDeleter(native_handle_t *handle) : mHandle(handle) {}
+    ~NativeHandleDeleter() {
+        if (mHandle) {
+            native_handle_delete(mHandle);
+        }
+    }
+private:
+    native_handle_t *mHandle;
+};
+
+}  // namespace
+
+c2_status_t GetHdrMetadataFromGralloc4Handle(
+        const C2Handle *const handle,
+        std::shared_ptr<C2StreamHdrStaticMetadataInfo::input> *staticInfo,
+        std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> *dynamicInfo) {
+    c2_status_t err = C2_OK;
+    native_handle_t *nativeHandle = UnwrapNativeCodec2GrallocHandle(handle);
+    if (nativeHandle == nullptr) {
+        // Nothing to do
+        return err;
+    }
+    // TRICKY: UnwrapNativeCodec2GrallocHandle creates a new handle but
+    //         does not clone the fds. Thus we need to delete the handle
+    //         without closing it when going out of scope.
+    //         NativeHandle cannot solve this problem, as it would both close
+    //         and delete the handle, whereas we only need to delete it.
+    NativeHandleDeleter nhd(nativeHandle);
+    sp<IMapper4> mapper = GetMapper4();
+    if (!mapper) {
+        // Gralloc4 not supported; nothing to do
+        return err;
+    }
+    Error4 mapperErr = Error4::NONE;
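+    // Fill the static HDR info from the buffer's SMPTE 2086 mastering display
+    // metadata and CTA 861.3 content light level, if present.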
+    if (staticInfo) {
+        staticInfo->reset(new C2StreamHdrStaticMetadataInfo::input(0u));
+        memset(&(*staticInfo)->mastering, 0, sizeof((*staticInfo)->mastering));
+        (*staticInfo)->maxCll = 0;
+        (*staticInfo)->maxFall = 0;
+        IMapper4::get_cb cb = [&mapperErr, staticInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
+            mapperErr = err;
+            if (err != Error4::NONE) {
+                return;
+            }
+
+            std::optional<Smpte2086> smpte2086;
+            gralloc4::decodeSmpte2086(vec, &smpte2086);
+            if (smpte2086) {
+                (*staticInfo)->mastering.red.x    = smpte2086->primaryRed.x;
+                (*staticInfo)->mastering.red.y    = smpte2086->primaryRed.y;
+                (*staticInfo)->mastering.green.x  = smpte2086->primaryGreen.x;
+                (*staticInfo)->mastering.green.y  = smpte2086->primaryGreen.y;
+                (*staticInfo)->mastering.blue.x   = smpte2086->primaryBlue.x;
+                (*staticInfo)->mastering.blue.y   = smpte2086->primaryBlue.y;
+                (*staticInfo)->mastering.white.x  = smpte2086->whitePoint.x;
+                (*staticInfo)->mastering.white.y  = smpte2086->whitePoint.y;
+
+                (*staticInfo)->mastering.maxLuminance = smpte2086->maxLuminance;
+                (*staticInfo)->mastering.minLuminance = smpte2086->minLuminance;
+            } else {
+                mapperErr = Error4::BAD_VALUE;
+            }
+        };
+        Return<void> ret = mapper->get(nativeHandle, MetadataType_Smpte2086, cb);
+        if (!ret.isOk()) {
+            err = C2_REFUSED;
+        } else if (mapperErr != Error4::NONE) {
+            err = C2_CORRUPTED;
+        }
+        cb = [&mapperErr, staticInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
+            mapperErr = err;
+            if (err != Error4::NONE) {
+                return;
+            }
+
+            std::optional<Cta861_3> cta861_3;
+            gralloc4::decodeCta861_3(vec, &cta861_3);
+            if (cta861_3) {
+                (*staticInfo)->maxCll   = cta861_3->maxContentLightLevel;
+                (*staticInfo)->maxFall  = cta861_3->maxFrameAverageLightLevel;
+            } else {
+                mapperErr = Error4::BAD_VALUE;
+            }
+        };
+        ret = mapper->get(nativeHandle, MetadataType_Cta861_3, cb);
+        if (!ret.isOk()) {
+            err = C2_REFUSED;
+        } else if (mapperErr != Error4::NONE) {
+            err = C2_CORRUPTED;
+        }
+    }
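+    // Fill the dynamic HDR info from SMPTE ST 2094-40 (HDR10+) metadata, if the
+    // buffer carries any.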
+    if (dynamicInfo) {
+        dynamicInfo->reset();
+        IMapper4::get_cb cb = [&mapperErr, dynamicInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
+            mapperErr = err;
+            if (err != Error4::NONE) {
+                return;
+            }
+            if (!dynamicInfo) {
+                return;
+            }
+            *dynamicInfo = C2StreamHdrDynamicMetadataInfo::input::AllocShared(
+                    vec.size(), 0u, C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
+            memcpy((*dynamicInfo)->m.data, vec.data(), vec.size());
+        };
+        Return<void> ret = mapper->get(nativeHandle, MetadataType_Smpte2094_40, cb);
+        if (!ret.isOk() || mapperErr != Error4::NONE) {
+            dynamicInfo->reset();
+        }
+    }
+
+    return err;
+}
+
+c2_status_t SetHdrMetadataToGralloc4Handle(
+        const std::shared_ptr<const C2StreamHdrStaticMetadataInfo::output> &staticInfo,
+        const std::shared_ptr<const C2StreamHdrDynamicMetadataInfo::output> &dynamicInfo,
+        const C2Handle *const handle) {
+    c2_status_t err = C2_OK;
+    native_handle_t *nativeHandle = UnwrapNativeCodec2GrallocHandle(handle);
+    if (nativeHandle == nullptr) {
+        // Nothing to do
+        return err;
+    }
+    // TRICKY: UnwrapNativeCodec2GrallocHandle creates a new handle but
+    //         does not clone the fds. Thus we need to delete the handle
+    //         without closing it when going out of scope.
+    NativeHandleDeleter nhd(nativeHandle);
+    sp<IMapper4> mapper = GetMapper4();
+    if (!mapper) {
+        // Gralloc4 not supported; nothing to do
+        return err;
+    }
+    if (staticInfo && *staticInfo) {
+        std::optional<Smpte2086> smpte2086 = Smpte2086{
+            {staticInfo->mastering.red.x, staticInfo->mastering.red.y},
+            {staticInfo->mastering.green.x, staticInfo->mastering.green.y},
+            {staticInfo->mastering.blue.x, staticInfo->mastering.blue.y},
+            {staticInfo->mastering.white.x, staticInfo->mastering.white.y},
+            staticInfo->mastering.maxLuminance,
+            staticInfo->mastering.minLuminance,
+        };
+        hidl_vec<uint8_t> vec;
+        if (gralloc4::encodeSmpte2086(smpte2086, &vec) == OK) {
+            Return<Error4> ret = mapper->set(nativeHandle, MetadataType_Smpte2086, vec);
+            if (!ret.isOk()) {
+                err = C2_REFUSED;
+            } else if (ret != Error4::NONE) {
+                err = C2_CORRUPTED;
+            }
+        }
+        std::optional<Cta861_3> cta861_3 = Cta861_3{
+            staticInfo->maxCll,
+            staticInfo->maxFall,
+        };
+        if (gralloc4::encodeCta861_3(cta861_3, &vec) == OK) {
+            Return<Error4> ret = mapper->set(nativeHandle, MetadataType_Cta861_3, vec);
+            if (!ret.isOk()) {
+                err = C2_REFUSED;
+            } else if (ret != Error4::NONE) {
+                err = C2_CORRUPTED;
+            }
+        }
+    }
+    if (dynamicInfo && *dynamicInfo) {
+        hidl_vec<uint8_t> vec;
+        vec.resize(dynamicInfo->flexCount());
+        memcpy(vec.data(), dynamicInfo->m.data, dynamicInfo->flexCount());
+        std::optional<IMapper4::MetadataType> metadataType;
+        switch (dynamicInfo->m.type_) {
+        case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_10:
+            // TODO
+            break;
+        case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40:
+            metadataType = MetadataType_Smpte2094_40;
+            break;
+        }
+        if (metadataType) {
+            Return<Error4> ret = mapper->set(nativeHandle, *metadataType, vec);
+            if (!ret.isOk()) {
+                err = C2_REFUSED;
+            } else if (ret != Error4::NONE) {
+                err = C2_CORRUPTED;
+            }
+        } else {
+            err = C2_BAD_VALUE;
+        }
+    }
+
+    return err;
+}
+
 }  // namespace android
diff --git a/media/codec2/sfplugin/Codec2Buffer.h b/media/codec2/sfplugin/Codec2Buffer.h
index dc788cd..b02b042 100644
--- a/media/codec2/sfplugin/Codec2Buffer.h
+++ b/media/codec2/sfplugin/Codec2Buffer.h
@@ -19,6 +19,7 @@
 #define CODEC2_BUFFER_H_
 
 #include <C2Buffer.h>
+#include <C2Config.h>
 
 #include <binder/IMemory.h>
 #include <media/hardware/VideoAPI.h>
@@ -391,6 +392,36 @@
     int32_t mHeapSeqNum;
 };
 
+/**
+ * Get HDR metadata from Gralloc4 handle.
+ *
+ * \param[in]   handle      handle of the allocation
+ * \param[out]  staticInfo  HDR static info to be filled. Ignored if null;
+ *                          if |handle| is invalid or does not contain the metadata,
+ *                          the shared_ptr is reset.
+ * \param[out]  dynamicInfo HDR dynamic info to be filled. Ignored if null;
+ *                          if |handle| is invalid or does not contain the metadata,
+ *                          the shared_ptr is reset.
+ * \return C2_OK if successful
+ */
+c2_status_t GetHdrMetadataFromGralloc4Handle(
+        const C2Handle *const handle,
+        std::shared_ptr<C2StreamHdrStaticMetadataInfo::input> *staticInfo,
+        std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> *dynamicInfo);
+
+/**
+ * Set HDR metadata to Gralloc4 handle.
+ *
+ * \param[in]   staticInfo  HDR static info to set. Ignored if null or invalid.
+ * \param[in]   dynamicInfo HDR dynamic info to set. Ignored if null or invalid.
+ * \param[out]  handle      handle of the allocation.
+ * \return C2_OK if successful
+ */
+c2_status_t SetHdrMetadataToGralloc4Handle(
+        const std::shared_ptr<const C2StreamHdrStaticMetadataInfo::output> &staticInfo,
+        const std::shared_ptr<const C2StreamHdrDynamicMetadataInfo::output> &dynamicInfo,
+        const C2Handle *const handle);
+
 }  // namespace android
 
 #endif  // CODEC2_BUFFER_H_
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 7c4bfb6..63bd64b 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -54,6 +54,9 @@
 
 using Traits = C2Component::Traits;
 
+// HAL pixel format -> framework color format
+typedef std::map<uint32_t, int32_t> PixelFormatMap;
+
 namespace /* unnamed */ {
 
 bool hasPrefix(const std::string& s, const char* prefix) {
@@ -67,6 +70,26 @@
             s.compare(s.size() - suffixLen, suffixLen, suffix) == 0;
 }
 
+std::optional<int32_t> findFrameworkColorFormat(
+        const C2FlexiblePixelFormatDescriptorStruct &desc) {
+    switch (desc.bitDepth) {
+        case 8u:
+            if (desc.layout == C2Color::PLANAR_PACKED
+                    || desc.layout == C2Color::SEMIPLANAR_PACKED) {
+                return COLOR_FormatYUV420Flexible;
+            }
+            break;
+        case 10u:
+            if (desc.layout == C2Color::SEMIPLANAR_PACKED) {
+                return COLOR_FormatYUVP010;
+            }
+            break;
+        default:
+            break;
+    }
+    return std::nullopt;
+}
+
 // returns true if component advertised supported profile level(s)
 bool addSupportedProfileLevels(
         std::shared_ptr<Codec2Client::Interface> intf,
@@ -96,9 +119,12 @@
         return false;
     }
 
-    // determine if codec supports HDR
+    // determine if codec supports HDR; this implies 10-bit support
     bool supportsHdr = false;
+    // determine if codec supports HDR10Plus; this implies 10-bit support
     bool supportsHdr10Plus = false;
+    // determine if codec supports 10-bit format
+    bool supports10Bit = false;
 
     std::vector<std::shared_ptr<C2ParamDescriptor>> paramDescs;
     c2_status_t err1 = intf->querySupportedParams(&paramDescs);
@@ -126,6 +152,10 @@
     supportsHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
     supportsHdr |= (mediaType == MIMETYPE_VIDEO_AV1);
 
+    // HDR support implies 10-bit support.
+    // TODO: directly check this from the component interface
+    supports10Bit = (supportsHdr || supportsHdr10Plus);
+
     bool added = false;
 
     for (C2Value::Primitive profile : profileQuery[0].values.values) {
@@ -165,6 +195,12 @@
                     }
                 }
             }
+            if (supports10Bit) {
+                auto bitnessMapper = C2Mapper::GetBitDepthProfileLevelMapper(trait.mediaType, 10);
+                if (bitnessMapper && bitnessMapper->mapProfile(pl.profile, &sdkProfile)) {
+                    caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+                }
+            }
         } else if (!mapper) {
             caps->addProfileLevel(pl.profile, pl.level);
         }
@@ -198,27 +234,69 @@
 void addSupportedColorFormats(
         std::shared_ptr<Codec2Client::Interface> intf,
         MediaCodecInfo::CapabilitiesWriter *caps,
-        const Traits& trait, const std::string &mediaType) {
-    (void)intf;
-
+        const Traits& trait, const std::string &mediaType,
+        const PixelFormatMap &pixelFormatMap) {
     // TODO: get this from intf() as well, but how do we map them to
     // MediaCodec color formats?
     bool encoder = trait.kind == C2Component::KIND_ENCODER;
     if (mediaType.find("video") != std::string::npos
             || mediaType.find("image") != std::string::npos) {
+
+        std::vector<C2FieldSupportedValuesQuery> query;
+        if (encoder) {
+            C2StreamPixelFormatInfo::input pixelFormat;
+            query.push_back(C2FieldSupportedValuesQuery::Possible(
+                    C2ParamField::Make(pixelFormat, pixelFormat.value)));
+        } else {
+            C2StreamPixelFormatInfo::output pixelFormat;
+            query.push_back(C2FieldSupportedValuesQuery::Possible(
+                    C2ParamField::Make(pixelFormat, pixelFormat.value)));
+        }
+        std::list<int32_t> supportedColorFormats;
+        if (intf->querySupportedValues(query, C2_DONT_BLOCK) == C2_OK) {
+            if (query[0].status == C2_OK) {
+                const C2FieldSupportedValues &fsv = query[0].values;
+                if (fsv.type == C2FieldSupportedValues::VALUES) {
+                    for (C2Value::Primitive value : fsv.values) {
+                        auto it = pixelFormatMap.find(value.u32);
+                        if (it != pixelFormatMap.end()) {
+                            auto it2 = std::find(
+                                    supportedColorFormats.begin(),
+                                    supportedColorFormats.end(),
+                                    it->second);
+                            if (it2 == supportedColorFormats.end()) {
+                                supportedColorFormats.push_back(it->second);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        auto addDefaultColorFormat = [caps, &supportedColorFormats](int32_t colorFormat) {
+            caps->addColorFormat(colorFormat);
+            auto it = std::find(
+                    supportedColorFormats.begin(), supportedColorFormats.end(), colorFormat);
+            if (it != supportedColorFormats.end()) {
+                supportedColorFormats.erase(it);
+            }
+        };
+
         // vendor video codecs prefer opaque format
         if (trait.name.find("android") == std::string::npos) {
-            caps->addColorFormat(COLOR_FormatSurface);
+            addDefaultColorFormat(COLOR_FormatSurface);
         }
-        caps->addColorFormat(COLOR_FormatYUV420Flexible);
-        caps->addColorFormat(COLOR_FormatYUV420Planar);
-        caps->addColorFormat(COLOR_FormatYUV420SemiPlanar);
-        caps->addColorFormat(COLOR_FormatYUV420PackedPlanar);
-        caps->addColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
+        addDefaultColorFormat(COLOR_FormatYUV420Flexible);
+        addDefaultColorFormat(COLOR_FormatYUV420Planar);
+        addDefaultColorFormat(COLOR_FormatYUV420SemiPlanar);
+        addDefaultColorFormat(COLOR_FormatYUV420PackedPlanar);
+        addDefaultColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
         // framework video encoders must support surface format, though it is unclear
         // that they will be able to map it if it is opaque
         if (encoder && trait.name.find("android") != std::string::npos) {
-            caps->addColorFormat(COLOR_FormatSurface);
+            addDefaultColorFormat(COLOR_FormatSurface);
+        }
+        for (int32_t colorFormat : supportedColorFormats) {
+            caps->addColorFormat(colorFormat);
         }
     }
 }
@@ -410,6 +488,7 @@
         }
     }
 
+    std::map<std::string, PixelFormatMap> nameToPixelFormatMap;
     for (const Traits& trait : traits) {
         C2Component::rank_t rank = trait.rank;
 
@@ -423,8 +502,9 @@
         nameAndAliases.insert(nameAndAliases.begin(), trait.name);
         for (const std::string &nameOrAlias : nameAndAliases) {
             bool isAlias = trait.name != nameOrAlias;
+            std::shared_ptr<Codec2Client> client;
             std::shared_ptr<Codec2Client::Interface> intf =
-                Codec2Client::CreateInterfaceByName(nameOrAlias.c_str());
+                Codec2Client::CreateInterfaceByName(nameOrAlias.c_str(), &client);
             if (!intf) {
                 ALOGD("could not create interface for %s'%s'",
                         isAlias ? "alias " : "",
@@ -618,7 +698,40 @@
                         caps->addProfileLevel(VP8ProfileMain, VP8Level_Version0);
                     }
                 }
-                addSupportedColorFormats(intf, caps.get(), trait, mediaType);
+
+                auto it = nameToPixelFormatMap.find(client->getServiceName());
+                if (it == nameToPixelFormatMap.end()) {
+                    it = nameToPixelFormatMap.try_emplace(client->getServiceName()).first;
+                    PixelFormatMap &pixelFormatMap = it->second;
+                    pixelFormatMap[HAL_PIXEL_FORMAT_YCBCR_420_888] = COLOR_FormatYUV420Flexible;
+                    pixelFormatMap[HAL_PIXEL_FORMAT_YCBCR_P010]    = COLOR_FormatYUVP010;
+                    pixelFormatMap[HAL_PIXEL_FORMAT_RGBA_1010102]  = COLOR_Format32bitABGR2101010;
+                    pixelFormatMap[HAL_PIXEL_FORMAT_RGBA_FP16]     = COLOR_Format64bitABGRFloat;
+
+                    std::shared_ptr<C2StoreFlexiblePixelFormatDescriptorsInfo> pixelFormatInfo;
+                    std::vector<std::unique_ptr<C2Param>> heapParams;
+                    if (client->query(
+                                {},
+                                {C2StoreFlexiblePixelFormatDescriptorsInfo::PARAM_TYPE},
+                                C2_MAY_BLOCK,
+                                &heapParams) == C2_OK
+                            && heapParams.size() == 1u) {
+                        pixelFormatInfo.reset(C2StoreFlexiblePixelFormatDescriptorsInfo::From(
+                                heapParams[0].release()));
+                    }
+                    if (pixelFormatInfo && *pixelFormatInfo) {
+                        for (size_t i = 0; i < pixelFormatInfo->flexCount(); ++i) {
+                            C2FlexiblePixelFormatDescriptorStruct &desc =
+                                pixelFormatInfo->m.values[i];
+                            std::optional<int32_t> colorFormat = findFrameworkColorFormat(desc);
+                            if (colorFormat) {
+                                pixelFormatMap[desc.pixelFormat] = *colorFormat;
+                            }
+                        }
+                    }
+                }
+                addSupportedColorFormats(
+                        intf, caps.get(), trait, mediaType, it->second);
             }
         }
     }
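
The builder change above seeds a per-service PixelFormatMap, queries each component for the pixel formats it supports, and has addSupportedColorFormats publish the usual default color formats first, followed by any additional mapped formats, without duplicates. A self-contained sketch of that ordering and dedup step, with plain integers standing in for the HAL_PIXEL_FORMAT_* and COLOR_Format* constants (values are illustrative assumptions):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <list>
    #include <map>

    int main() {
        // HAL pixel format -> framework color format, as the seeded PixelFormatMap would hold.
        std::map<uint32_t, int32_t> pixelFormatMap = {
            {0x23 /* YCBCR_420_888 */, 2135033992 /* COLOR_FormatYUV420Flexible */},
            {0x36 /* YCBCR_P010 */,    54         /* COLOR_FormatYUVP010, assumed value */},
        };

        // Formats the component reported via querySupportedValues().
        const uint32_t queried[] = {0x23, 0x36};

        std::list<int32_t> supported;
        for (uint32_t halFormat : queried) {
            auto it = pixelFormatMap.find(halFormat);
            if (it != pixelFormatMap.end()
                    && std::find(supported.begin(), supported.end(), it->second)
                            == supported.end()) {
                supported.push_back(it->second);
            }
        }

        // Defaults go first and are dropped from the queried list so they are not repeated.
        std::list<int32_t> published;
        auto addDefault = [&](int32_t colorFormat) {
            published.push_back(colorFormat);
            supported.remove(colorFormat);
        };
        addDefault(2135033992);  // COLOR_FormatYUV420Flexible
        published.insert(published.end(), supported.begin(), supported.end());

        for (int32_t colorFormat : published) {
            std::cout << colorFormat << "\n";  // prints the flexible default, then P010
        }
        return 0;
    }
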
diff --git a/media/codec2/sfplugin/FrameReassembler.cpp b/media/codec2/sfplugin/FrameReassembler.cpp
index af054c7..cb8b6ab 100644
--- a/media/codec2/sfplugin/FrameReassembler.cpp
+++ b/media/codec2/sfplugin/FrameReassembler.cpp
@@ -88,8 +88,7 @@
         const sp<MediaCodecBuffer> &buffer,
         std::list<std::unique_ptr<C2Work>> *items) {
     int64_t timeUs;
-    if (buffer->size() == 0u
-            || !buffer->meta()->findInt64("timeUs", &timeUs)) {
+    if (!buffer->meta()->findInt64("timeUs", &timeUs)) {
         return C2_BAD_VALUE;
     }
 
diff --git a/media/codec2/sfplugin/tests/CCodecConfig_test.cpp b/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
index 7c660dc..3615289 100644
--- a/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
+++ b/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
@@ -224,6 +224,17 @@
                                 Copy<C2StreamBitrateInfo::output, C2StreamBitrateInfo::input>,
                                 mInputBitrate)
                             .build());
+
+                    addParameter(
+                            DefineParam(mOutputProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                            .withDefault(new C2StreamProfileLevelInfo::output(
+                                    0u, PROFILE_UNUSED, LEVEL_UNUSED))
+                            .withFields({
+                                C2F(mOutputProfileLevel, profile).any(),
+                                C2F(mOutputProfileLevel, level).any(),
+                            })
+                            .withSetter(Setter<C2StreamProfileLevelInfo::output>)
+                            .build());
                 }
 
                 // TODO: more SDK params
@@ -241,6 +252,8 @@
             std::shared_ptr<C2StreamPixelAspectRatioInfo::output> mPixelAspectRatio;
             std::shared_ptr<C2StreamBitrateInfo::input> mInputBitrate;
             std::shared_ptr<C2StreamBitrateInfo::output> mOutputBitrate;
+            std::shared_ptr<C2StreamProfileLevelInfo::input> mInputProfileLevel;
+            std::shared_ptr<C2StreamProfileLevelInfo::output> mOutputProfileLevel;
 
             template<typename T>
             static C2R Setter(bool, C2P<T> &) {
@@ -576,4 +589,51 @@
             << "mOutputFormat = " << mConfig.mOutputFormat->debugString().c_str();
 }
 
+typedef std::tuple<std::string, C2Config::profile_t, int32_t> HdrProfilesParams;
+
+class HdrProfilesTest
+    : public CCodecConfigTest,
+      public ::testing::WithParamInterface<HdrProfilesParams> {
+};
+
+TEST_P(HdrProfilesTest, SetFromSdk) {
+    HdrProfilesParams params = GetParam();
+    std::string mediaType = std::get<0>(params);
+    C2Config::profile_t c2Profile = std::get<1>(params);
+    int32_t sdkProfile = std::get<2>(params);
+
+    init(C2Component::DOMAIN_VIDEO, C2Component::KIND_ENCODER, mediaType.c_str());
+
+    ASSERT_EQ(OK, mConfig.initialize(mReflector, mConfigurable));
+
+    sp<AMessage> format{new AMessage};
+    format->setInt32(KEY_PROFILE, sdkProfile);
+
+    std::vector<std::unique_ptr<C2Param>> configUpdate;
+    ASSERT_EQ(OK, mConfig.getConfigUpdateFromSdkParams(
+            mConfigurable, format, D::ALL, C2_MAY_BLOCK, &configUpdate));
+
+    ASSERT_EQ(1u, configUpdate.size());
+    C2StreamProfileLevelInfo::input *pl =
+        FindParam<std::remove_pointer<decltype(pl)>::type>(configUpdate);
+    ASSERT_NE(nullptr, pl);
+    ASSERT_EQ(c2Profile, pl->profile);
+}
+
+HdrProfilesParams kHdrProfilesParams[] = {
+    std::make_tuple(MIMETYPE_VIDEO_HEVC, PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10),
+    std::make_tuple(MIMETYPE_VIDEO_HEVC, PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10Plus),
+    std::make_tuple(MIMETYPE_VIDEO_VP9,  PROFILE_VP9_2,        VP9Profile2HDR),
+    std::make_tuple(MIMETYPE_VIDEO_VP9,  PROFILE_VP9_2,        VP9Profile2HDR10Plus),
+    std::make_tuple(MIMETYPE_VIDEO_VP9,  PROFILE_VP9_3,        VP9Profile3HDR),
+    std::make_tuple(MIMETYPE_VIDEO_VP9,  PROFILE_VP9_3,        VP9Profile3HDR10Plus),
+    std::make_tuple(MIMETYPE_VIDEO_AV1,  PROFILE_AV1_0,        AV1ProfileMain10HDR10),
+    std::make_tuple(MIMETYPE_VIDEO_AV1,  PROFILE_AV1_0,        AV1ProfileMain10HDR10Plus),
+};
+
+INSTANTIATE_TEST_SUITE_P(
+        CCodecConfig,
+        HdrProfilesTest,
+        ::testing::ValuesIn(kHdrProfilesParams));
+
 } // namespace android
diff --git a/media/codec2/sfplugin/tests/FrameReassembler_test.cpp b/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
index 6738ee7..0be934a 100644
--- a/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
+++ b/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
@@ -53,7 +53,8 @@
             C2Config::pcm_encoding_t encoding,
             size_t inputFrameSizeInBytes,
             size_t count,
-            size_t expectedOutputSize) {
+            size_t expectedOutputSize,
+            bool separateEos) {
         FrameReassembler frameReassembler;
         frameReassembler.init(
                 mPool,
@@ -67,7 +68,7 @@
 
         size_t inputIndex = 0, outputIndex = 0;
         size_t expectCount = 0;
-        for (size_t i = 0; i < count; ++i) {
+        for (size_t i = 0; i < count + (separateEos ? 1 : 0); ++i) {
             sp<MediaCodecBuffer> buffer = new MediaCodecBuffer(
                     new AMessage, new ABuffer(inputFrameSizeInBytes));
             buffer->setRange(0, inputFrameSizeInBytes);
@@ -77,8 +78,12 @@
             if (i == count - 1) {
                 buffer->meta()->setInt32("eos", 1);
             }
-            for (size_t j = 0; j < inputFrameSizeInBytes; ++j, ++inputIndex) {
-                buffer->base()[j] = (inputIndex & 0xFF);
+            if (i == count && separateEos) {
+                buffer->setRange(0, 0);
+            } else {
+                for (size_t j = 0; j < inputFrameSizeInBytes; ++j, ++inputIndex) {
+                    buffer->base()[j] = (inputIndex & 0xFF);
+                }
             }
             std::list<std::unique_ptr<C2Work>> items;
             ASSERT_EQ(C2_OK, frameReassembler.process(buffer, &items));
@@ -105,7 +110,8 @@
                 ASSERT_EQ(encoderFrameSize * BytesPerSample(encoding), view.capacity());
                 for (size_t j = 0; j < view.capacity(); ++j, ++outputIndex) {
                     ASSERT_TRUE(outputIndex < inputIndex
-                             || inputIndex == inputFrameSizeInBytes * count);
+                             || inputIndex == inputFrameSizeInBytes * count)
+                        << "inputIndex = " << inputIndex << " outputIndex = " << outputIndex;
                     uint8_t expected = outputIndex < inputIndex ? (outputIndex & 0xFF) : 0;
                     if (expectCount < 10) {
                         ++expectCount;
@@ -137,204 +143,239 @@
 // Push frames with exactly the same size as the encoder requested.
 TEST_F(FrameReassemblerTest, PushExactFrameSize) {
     ASSERT_EQ(OK, initStatus());
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_8,
-            1024 /* input frame size in bytes = 1024 samples * 1 channel * 1 bytes/sample */,
-            10 /* count */,
-            10240 /* expected output size = 10 * 1024 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_16,
-            2048 /* input frame size in bytes = 1024 samples * 1 channel * 2 bytes/sample */,
-            10 /* count */,
-            20480 /* expected output size = 10 * 2048 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_FLOAT,
-            4096 /* input frame size in bytes = 1024 samples * 1 channel * 4 bytes/sample */,
-            10 /* count */,
-            40960 /* expected output size = 10 * 4096 bytes/frame */);
+    for (bool separateEos : {false, true}) {
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_8,
+                1024 /* input frame size in bytes = 1024 samples * 1 channel * 1 bytes/sample */,
+                10 /* count */,
+                10240 /* expected output size = 10 * 1024 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_16,
+                2048 /* input frame size in bytes = 1024 samples * 1 channel * 2 bytes/sample */,
+                10 /* count */,
+                20480 /* expected output size = 10 * 2048 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_FLOAT,
+                4096 /* input frame size in bytes = 1024 samples * 1 channel * 4 bytes/sample */,
+                10 /* count */,
+                40960 /* expected output size = 10 * 4096 bytes/frame */,
+                separateEos);
+    }
 }
 
 // Push frames with half the size that the encoder requested.
 TEST_F(FrameReassemblerTest, PushHalfFrameSize) {
     ASSERT_EQ(OK, initStatus());
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_8,
-            512 /* input frame size in bytes = 512 samples * 1 channel * 1 bytes per sample */,
-            10 /* count */,
-            5120 /* expected output size = 5 * 1024 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_16,
-            1024 /* input frame size in bytes = 512 samples * 1 channel * 2 bytes per sample */,
-            10 /* count */,
-            10240 /* expected output size = 5 * 2048 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_FLOAT,
-            2048 /* input frame size in bytes = 512 samples * 1 channel * 4 bytes per sample */,
-            10 /* count */,
-            20480 /* expected output size = 5 * 4096 bytes/frame */);
+    for (bool separateEos : {false, true}) {
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_8,
+                512 /* input frame size in bytes = 512 samples * 1 channel * 1 bytes/sample */,
+                10 /* count */,
+                5120 /* expected output size = 5 * 1024 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_16,
+                1024 /* input frame size in bytes = 512 samples * 1 channel * 2 bytes/sample */,
+                10 /* count */,
+                10240 /* expected output size = 5 * 2048 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_FLOAT,
+                2048 /* input frame size in bytes = 512 samples * 1 channel * 4 bytes/sample */,
+                10 /* count */,
+                20480 /* expected output size = 5 * 4096 bytes/frame */,
+                separateEos);
+    }
 }
 
 // Push frames with twice the size that the encoder requested.
 TEST_F(FrameReassemblerTest, PushDoubleFrameSize) {
     ASSERT_EQ(OK, initStatus());
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_8,
-            2048 /* input frame size in bytes = 2048 samples * 1 channel * 1 bytes per sample */,
-            10 /* count */,
-            20480 /* expected output size = 20 * 1024 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_16,
-            4096 /* input frame size in bytes = 2048 samples * 1 channel * 2 bytes per sample */,
-            10 /* count */,
-            40960 /* expected output size = 20 * 2048 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_FLOAT,
-            8192 /* input frame size in bytes = 2048 samples * 1 channel * 4 bytes per sample */,
-            10 /* count */,
-            81920 /* expected output size = 20 * 4096 bytes/frame */);
+    for (bool separateEos : {false, true}) {
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_8,
+                2048 /* input frame size in bytes = 2048 samples * 1 channel * 1 bytes/sample */,
+                10 /* count */,
+                20480 /* expected output size = 20 * 1024 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_16,
+                4096 /* input frame size in bytes = 2048 samples * 1 channel * 2 bytes/sample */,
+                10 /* count */,
+                40960 /* expected output size = 20 * 2048 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_FLOAT,
+                8192 /* input frame size in bytes = 2048 samples * 1 channel * 4 bytes/sample */,
+                10 /* count */,
+                81920 /* expected output size = 20 * 4096 bytes/frame */,
+                separateEos);
+    }
 }
 
 // Push frames with a little bit larger (+5 samples) than the requested size.
 TEST_F(FrameReassemblerTest, PushLittleLargerFrameSize) {
     ASSERT_EQ(OK, initStatus());
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_8,
-            1029 /* input frame size in bytes = 1029 samples * 1 channel * 1 bytes per sample */,
-            10 /* count */,
-            11264 /* expected output size = 11 * 1024 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_16,
-            2058 /* input frame size in bytes = 1029 samples * 1 channel * 2 bytes per sample */,
-            10 /* count */,
-            22528 /* expected output size = 11 * 2048 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_FLOAT,
-            4116 /* input frame size in bytes = 1029 samples * 1 channel * 4 bytes per sample */,
-            10 /* count */,
-            45056 /* expected output size = 11 * 4096 bytes/frame */);
+    for (bool separateEos : {false, true}) {
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_8,
+                1029 /* input frame size in bytes = 1029 samples * 1 channel * 1 bytes/sample */,
+                10 /* count */,
+                11264 /* expected output size = 11 * 1024 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_16,
+                2058 /* input frame size in bytes = 1029 samples * 1 channel * 2 bytes/sample */,
+                10 /* count */,
+                22528 /* expected output size = 11 * 2048 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_FLOAT,
+                4116 /* input frame size in bytes = 1029 samples * 1 channel * 4 bytes/sample */,
+                10 /* count */,
+                45056 /* expected output size = 11 * 4096 bytes/frame */,
+                separateEos);
+    }
 }
 
 // Push frames with a little bit smaller (-5 samples) than the requested size.
 TEST_F(FrameReassemblerTest, PushLittleSmallerFrameSize) {
     ASSERT_EQ(OK, initStatus());
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_8,
-            1019 /* input frame size in bytes = 1019 samples * 1 channel * 1 bytes per sample */,
-            10 /* count */,
-            10240 /* expected output size = 10 * 1024 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_16,
-            2038 /* input frame size in bytes = 1019 samples * 1 channel * 2 bytes per sample */,
-            10 /* count */,
-            20480 /* expected output size = 10 * 2048 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_FLOAT,
-            4076 /* input frame size in bytes = 1019 samples * 1 channel * 4 bytes per sample */,
-            10 /* count */,
-            40960 /* expected output size = 10 * 4096 bytes/frame */);
+    for (bool separateEos : {false, true}) {
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_8,
+                1019 /* input frame size in bytes = 1019 samples * 1 channel * 1 bytes/sample */,
+                10 /* count */,
+                10240 /* expected output size = 10 * 1024 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_16,
+                2038 /* input frame size in bytes = 1019 samples * 1 channel * 2 bytes/sample */,
+                10 /* count */,
+                20480 /* expected output size = 10 * 2048 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_FLOAT,
+                4076 /* input frame size in bytes = 1019 samples * 1 channel * 4 bytes/sample */,
+                10 /* count */,
+                40960 /* expected output size = 10 * 4096 bytes/frame */,
+                separateEos);
+    }
 }
 
 // Push single-byte frames
 TEST_F(FrameReassemblerTest, PushSingleByte) {
     ASSERT_EQ(OK, initStatus());
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_8,
-            1 /* input frame size in bytes */,
-            100000 /* count */,
-            100352 /* expected output size = 98 * 1024 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_16,
-            1 /* input frame size in bytes */,
-            100000 /* count */,
-            100352 /* expected output size = 49 * 2048 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_FLOAT,
-            1 /* input frame size in bytes */,
-            100000 /* count */,
-            102400 /* expected output size = 25 * 4096 bytes/frame */);
+    for (bool separateEos : {false, true}) {
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_8,
+                1 /* input frame size in bytes */,
+                100000 /* count */,
+                100352 /* expected output size = 98 * 1024 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_16,
+                1 /* input frame size in bytes */,
+                100000 /* count */,
+                100352 /* expected output size = 49 * 2048 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_FLOAT,
+                1 /* input frame size in bytes */,
+                100000 /* count */,
+                102400 /* expected output size = 25 * 4096 bytes/frame */,
+                separateEos);
+    }
 }
 
 // Push one big chunk.
 TEST_F(FrameReassemblerTest, PushBigChunk) {
     ASSERT_EQ(OK, initStatus());
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_8,
-            100000 /* input frame size in bytes */,
-            1 /* count */,
-            100352 /* expected output size = 98 * 1024 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_16,
-            100000 /* input frame size in bytes */,
-            1 /* count */,
-            100352 /* expected output size = 49 * 2048 bytes/frame */);
-    testPushSameSize(
-            1024 /* frame size in samples */,
-            48000 /* sample rate */,
-            1 /* channel count */,
-            PCM_FLOAT,
-            100000 /* input frame size in bytes */,
-            1 /* count */,
-            102400 /* expected output size = 25 * 4096 bytes/frame */);
+    for (bool separateEos : {false, true}) {
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_8,
+                100000 /* input frame size in bytes */,
+                1 /* count */,
+                100352 /* expected output size = 98 * 1024 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_16,
+                100000 /* input frame size in bytes */,
+                1 /* count */,
+                100352 /* expected output size = 49 * 2048 bytes/frame */,
+                separateEos);
+        testPushSameSize(
+                1024 /* frame size in samples */,
+                48000 /* sample rate */,
+                1 /* channel count */,
+                PCM_FLOAT,
+                100000 /* input frame size in bytes */,
+                1 /* count */,
+                102400 /* expected output size = 25 * 4096 bytes/frame */,
+                separateEos);
+    }
 }
 
 } // namespace android
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index 5f87c66..b761c35 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -88,7 +88,7 @@
         uint32_t planeW = img->mWidth / plane.colSampling;
         uint32_t planeH = img->mHeight / plane.rowSampling;
 
-        bool canCopyByRow = (plane.colInc == 1) && (img->mPlane[i].mColInc == 1);
+        bool canCopyByRow = (plane.colInc == bpp) && (img->mPlane[i].mColInc == bpp);
         bool canCopyByPlane = canCopyByRow && (plane.rowInc == img->mPlane[i].mRowInc);
         if (canCopyByPlane) {
             MemCopier<ToMediaImage, 0>::copy(imgRow, viewRow, plane.rowInc * planeH);
@@ -118,6 +118,22 @@
 
 }  // namespace
 
+bool IsFormatR10G10B10A2SupportedForLegacyRendering() {
+    const AHardwareBuffer_Desc desc = {
+        .width = 320,
+        .height = 240,
+        .format = AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,
+        .layers = 1,
+        .usage = AHARDWAREBUFFER_USAGE_CPU_READ_RARELY | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
+                 AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE,
+        .stride = 0,
+        .rfu0 = 0,
+        .rfu1 = 0,
+    };
+
+    return AHardwareBuffer_isSupported(&desc);
+}
+
 status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view) {
     if (view.crop().width != img->mWidth || view.crop().height != img->mHeight) {
         return BAD_VALUE;
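
The first hunk above makes the by-row fast path bytes-per-sample aware: in a packed plane the column increment equals bpp, which is 2 for 16-bit formats such as P010, so the old test (colInc == 1) silently disabled the fast path for 10-bit content. A small illustration with placeholder types, not the real C2PlaneInfo/MediaImage2 structures:

    #include <cstdint>
    #include <iostream>

    struct PlaneDesc { int32_t colInc; int32_t rowInc; };

    // Mirrors the corrected check: both column increments must equal bytes-per-sample.
    static bool canCopyByRow(const PlaneDesc &src, const PlaneDesc &dst, uint32_t bpp) {
        return src.colInc == static_cast<int32_t>(bpp)
                && dst.colInc == static_cast<int32_t>(bpp);
    }

    int main() {
        const uint32_t bppP010 = 2;                    // 16-bit container per sample
        PlaneDesc src{2, 2 * 1920}, dst{2, 2 * 1920};  // packed 1920-wide luma rows
        std::cout << "old 8-bit-only check: " << (src.colInc == 1 && dst.colInc == 1) << "\n";  // 0
        std::cout << "bpp-aware check:      " << canCopyByRow(src, dst, bppP010) << "\n";       // 1
        return 0;
    }
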
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
index 9fa642d..c4651a4 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.h
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -27,6 +27,11 @@
 namespace android {
 
 /**
+ * Check if R10G10B10A2 is supported in legacy rendering path that involves GPU
+ */
+bool IsFormatR10G10B10A2SupportedForLegacyRendering();
+
+/**
  * Converts an RGB view to planar YUV 420 media image.
  *
  * \param dstY       pointer to media image buffer
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 4d939fa..93f29ca 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -18,6 +18,9 @@
 #define LOG_TAG "Codec2Mapper"
 #include <utils/Log.h>
 
+#include <map>
+#include <optional>
+
 #include <media/stagefright/MediaCodecConstants.h>
 #include <media/stagefright/SurfaceUtils.h>
 #include <media/stagefright/foundation/ALookup.h>
@@ -167,6 +170,9 @@
     { C2Config::LEVEL_DV_MAIN_UHD_30, DolbyVisionLevelUhd30 },
     { C2Config::LEVEL_DV_MAIN_UHD_48, DolbyVisionLevelUhd48 },
     { C2Config::LEVEL_DV_MAIN_UHD_60, DolbyVisionLevelUhd60 },
+    { C2Config::LEVEL_DV_MAIN_UHD_120, DolbyVisionLevelUhd120 },
+    { C2Config::LEVEL_DV_MAIN_8K_30,  DolbyVisionLevel8k30 },
+    { C2Config::LEVEL_DV_MAIN_8K_60,  DolbyVisionLevel8k60 },
 
     // high tiers are not yet supported on android, for now map them to main tier
     { C2Config::LEVEL_DV_HIGH_HD_24,  DolbyVisionLevelHd24 },
@@ -178,6 +184,9 @@
     { C2Config::LEVEL_DV_HIGH_UHD_30, DolbyVisionLevelUhd30 },
     { C2Config::LEVEL_DV_HIGH_UHD_48, DolbyVisionLevelUhd48 },
     { C2Config::LEVEL_DV_HIGH_UHD_60, DolbyVisionLevelUhd60 },
+    { C2Config::LEVEL_DV_HIGH_UHD_120, DolbyVisionLevelUhd120 },
+    { C2Config::LEVEL_DV_HIGH_8K_30,  DolbyVisionLevel8k30 },
+    { C2Config::LEVEL_DV_HIGH_8K_60,  DolbyVisionLevel8k60 },
 };
 
 ALookup<C2Config::profile_t, int32_t> sDolbyVisionProfiles = {
@@ -255,6 +264,8 @@
     { C2Config::PROFILE_HEVC_MAIN_STILL, HEVCProfileMainStill },
     { C2Config::PROFILE_HEVC_MAIN_INTRA, HEVCProfileMain },
     { C2Config::PROFILE_HEVC_MAIN_10_INTRA, HEVCProfileMain10 },
+    { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10 },
+    { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10Plus },
 };
 
 ALookup<C2Config::profile_t, int32_t> sHevcHdrProfiles = {
@@ -381,15 +392,17 @@
     { C2Config::LEVEL_AV1_7_3,  AV1Level73 },
 };
 
-
 ALookup<C2Config::profile_t, int32_t> sAv1Profiles = {
-    // TODO: will need to disambiguate between Main8 and Main10
     { C2Config::PROFILE_AV1_0, AV1ProfileMain8 },
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10 },
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10 },
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10Plus },
 };
 
+ALookup<C2Config::profile_t, int32_t> sAv1TenbitProfiles = {
+    { C2Config::PROFILE_AV1_0, AV1ProfileMain10 },
+};
+
 ALookup<C2Config::profile_t, int32_t> sAv1HdrProfiles = {
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10 },
 };
@@ -398,6 +411,30 @@
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10Plus },
 };
 
+// HAL_PIXEL_FORMAT_* -> COLOR_Format*
+ALookup<uint32_t, int32_t> sPixelFormats = {
+    { HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, COLOR_FormatSurface },
+
+    // YCBCR_420_888 maps to YUV420Flexible and vice versa
+    { HAL_PIXEL_FORMAT_YCBCR_420_888,          COLOR_FormatYUV420Flexible },
+
+    // Other framework YUV420 formats that fall back to YCBCR_420_888
+    { HAL_PIXEL_FORMAT_YCBCR_420_888,          COLOR_FormatYUV420Planar },
+    { HAL_PIXEL_FORMAT_YCBCR_420_888,          COLOR_FormatYUV420SemiPlanar },
+    { HAL_PIXEL_FORMAT_YCBCR_420_888,          COLOR_FormatYUV420PackedPlanar },
+    { HAL_PIXEL_FORMAT_YCBCR_420_888,          COLOR_FormatYUV420PackedSemiPlanar },
+
+    // Other HAL YUV420 formats that fall back to YUV420Flexible
+    { HAL_PIXEL_FORMAT_YCRCB_420_SP,           COLOR_FormatYUV420Flexible },
+    { HAL_PIXEL_FORMAT_YV12,                   COLOR_FormatYUV420Flexible },
+
+    { HAL_PIXEL_FORMAT_YCBCR_422_SP,           COLOR_FormatYUV422PackedSemiPlanar },
+    { HAL_PIXEL_FORMAT_YCBCR_422_I,            COLOR_FormatYUV422PackedPlanar },
+    { HAL_PIXEL_FORMAT_YCBCR_P010,             COLOR_FormatYUVP010 },
+    { HAL_PIXEL_FORMAT_RGBA_1010102,           COLOR_Format32bitABGR2101010 },
+    { HAL_PIXEL_FORMAT_RGBA_FP16,              COLOR_Format64bitABGRFloat },
+};
+
 /**
  * A helper that passes through vendor extension profile and level values.
  */
@@ -603,9 +640,9 @@
 };
 
 struct Av1ProfileLevelMapper : ProfileLevelMapperHelper {
-    Av1ProfileLevelMapper(bool isHdr = false, bool isHdr10Plus = false) :
+    Av1ProfileLevelMapper(bool isHdr = false, bool isHdr10Plus = false, int32_t bitDepth = 8) :
         ProfileLevelMapperHelper(),
-        mIsHdr(isHdr), mIsHdr10Plus(isHdr10Plus) {}
+        mIsHdr(isHdr), mIsHdr10Plus(isHdr10Plus), mBitDepth(bitDepth) {}
 
     virtual bool simpleMap(C2Config::level_t from, int32_t *to) {
         return sAv1Levels.map(from, to);
@@ -614,19 +651,22 @@
         return sAv1Levels.map(from, to);
     }
     virtual bool simpleMap(C2Config::profile_t from, int32_t *to) {
-        return mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
-                     mIsHdr ? sAv1HdrProfiles.map(from, to) :
-                              sAv1Profiles.map(from, to);
+        return (mBitDepth == 10) ? sAv1TenbitProfiles.map(from, to) :
+                    mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
+                          mIsHdr ? sAv1HdrProfiles.map(from, to) :
+                                   sAv1Profiles.map(from, to);
     }
     virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
-        return mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
-                     mIsHdr ? sAv1HdrProfiles.map(from, to) :
-                              sAv1Profiles.map(from, to);
+        return (mBitDepth == 10) ? sAv1TenbitProfiles.map(from, to) :
+                    mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
+                          mIsHdr ? sAv1HdrProfiles.map(from, to) :
+                                   sAv1Profiles.map(from, to);
     }
 
 private:
     bool mIsHdr;
     bool mIsHdr10Plus;
+    int32_t mBitDepth;
 };
 
 } // namespace
@@ -674,6 +714,18 @@
 }
 
 // static
+std::shared_ptr<C2Mapper::ProfileLevelMapper>
+C2Mapper::GetBitDepthProfileLevelMapper(std::string mediaType, int32_t bitDepth) {
+    std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
+    if (bitDepth == 8) {
+        return GetProfileLevelMapper(mediaType);
+    } else if (mediaType == MIMETYPE_VIDEO_AV1 && bitDepth == 10) {
+        return std::make_shared<Av1ProfileLevelMapper>(false, false, bitDepth);
+    }
+    return nullptr;
+}
+
+// static
 bool C2Mapper::map(C2Config::bitrate_mode_t from, int32_t *to) {
     return sBitrateModes.map(from, to);
 }
@@ -956,41 +1008,19 @@
 // static
 bool C2Mapper::mapPixelFormatFrameworkToCodec(
         int32_t frameworkValue, uint32_t *c2Value) {
-    switch (frameworkValue) {
-        case COLOR_FormatSurface:
-            *c2Value = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
-            return true;
-        case COLOR_FormatYUV420Flexible:
-        case COLOR_FormatYUV420Planar:
-        case COLOR_FormatYUV420SemiPlanar:
-        case COLOR_FormatYUV420PackedPlanar:
-        case COLOR_FormatYUV420PackedSemiPlanar:
-            *c2Value = HAL_PIXEL_FORMAT_YCBCR_420_888;
-            return true;
-        default:
-            // Passthrough
-            *c2Value = uint32_t(frameworkValue);
-            return true;
+    if (!sPixelFormats.map(frameworkValue, c2Value)) {
+        // passthrough if not mapped
+        *c2Value = uint32_t(frameworkValue);
     }
+    return true;
 }
 
 // static
 bool C2Mapper::mapPixelFormatCodecToFramework(
         uint32_t c2Value, int32_t *frameworkValue) {
-    switch (c2Value) {
-        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
-            *frameworkValue = COLOR_FormatSurface;
-            return true;
-        case HAL_PIXEL_FORMAT_YCBCR_422_SP:
-        case HAL_PIXEL_FORMAT_YCRCB_420_SP:
-        case HAL_PIXEL_FORMAT_YCBCR_422_I:
-        case HAL_PIXEL_FORMAT_YCBCR_420_888:
-        case HAL_PIXEL_FORMAT_YV12:
-            *frameworkValue = COLOR_FormatYUV420Flexible;
-            return true;
-        default:
-            // Passthrough
-            *frameworkValue = int32_t(c2Value);
-            return true;
+    if (!sPixelFormats.map(c2Value, frameworkValue)) {
+        // passthrough if not mapped
+        *frameworkValue = int32_t(c2Value);
     }
+    return true;
 }
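
With the two switch statements replaced by the sPixelFormats table, both mapping directions share a single lookup and values absent from the table still pass through unchanged. A usage sketch (the include paths are assumptions; both mappers return true either way):

    #include <media/stagefright/MediaCodecConstants.h>
    #include "Codec2Mapper.h"  // assumed include path via the sfplugin utils headers

    void demoPixelFormatMapping() {
        uint32_t halFormat = 0;
        // Table-driven: COLOR_FormatYUVP010 -> HAL_PIXEL_FORMAT_YCBCR_P010.
        android::C2Mapper::mapPixelFormatFrameworkToCodec(COLOR_FormatYUVP010, &halFormat);

        int32_t frameworkFormat = 0;
        // Unknown values fall back to a plain cast, as before.
        android::C2Mapper::mapPixelFormatCodecToFramework(halFormat, &frameworkFormat);
    }
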
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.h b/media/codec2/sfplugin/utils/Codec2Mapper.h
index 797c8a8..33d305e 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.h
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.h
@@ -43,6 +43,9 @@
         static std::shared_ptr<ProfileLevelMapper>
         GetHdrProfileLevelMapper(std::string mediaType, bool isHdr10Plus = false);
 
+        static std::shared_ptr<ProfileLevelMapper>
+        GetBitDepthProfileLevelMapper(std::string mediaType, int32_t bitDepth = 8);
+
         // convert between bitrates
         static bool map(C2Config::bitrate_mode_t, int32_t*);
         static bool map(int32_t, C2Config::bitrate_mode_t*);
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index be81c84..27cd1f8 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -73,11 +73,12 @@
         "libbase",
         "libcutils",
         "libdl",
+        "libdmabufheap",
+        "libfmq",
+        "libgralloctypes",
         "libhardware",
         "libhidlbase",
         "libion",
-        "libdmabufheap",
-        "libfmq",
         "liblog",
         "libnativewindow",
         "libstagefright_foundation",
@@ -92,6 +93,44 @@
     ],
 }
 
+// public dependency for statically linking to libcodec2_vndk for unit tests
+cc_defaults {
+    name: "libcodec2-static-defaults",
+
+    static_libs: [
+        "liblog",
+        "libion",
+        "libfmq",
+        "libbase",
+        "libutils",
+        "libcutils",
+        "libcodec2",
+        "libhidlbase",
+        "libdmabufheap",
+        "libcodec2_vndk",
+        "libnativewindow",
+        "libcodec2_soft_common",
+        "libsfplugin_ccodec_utils",
+        "libstagefright_foundation",
+        "libstagefright_bufferpool@2.0.1",
+        "libgralloctypes",
+        "android.hardware.graphics.mapper@2.0",
+        "android.hardware.graphics.mapper@3.0",
+        "android.hardware.media.bufferpool@2.0",
+        "android.hardware.graphics.allocator@2.0",
+        "android.hardware.graphics.allocator@3.0",
+        "android.hardware.graphics.bufferqueue@2.0",
+    ],
+
+    shared_libs: [
+        "libui",
+        "libdl",
+        "libhardware",
+        "libvndksupport",
+        "libprocessgroup",
+    ],
+}
+
 // public dependency for implementing Codec 2 components
 cc_defaults {
     name: "libcodec2-impl-defaults",
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 6a7f19c..b5200a5 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -20,8 +20,10 @@
 
 #include <mutex>
 
+#include <aidl/android/hardware/graphics/common/PlaneLayoutComponentType.h>
 #include <android/hardware/graphics/common/1.2/types.h>
 #include <cutils/native_handle.h>
+#include <gralloctypes/Gralloc4.h>
 #include <hardware/gralloc.h>
 #include <ui/GraphicBufferAllocator.h>
 #include <ui/GraphicBufferMapper.h>
@@ -29,6 +31,7 @@
 
 #include <C2AllocatorGralloc.h>
 #include <C2Buffer.h>
+#include <C2Debug.h>
 #include <C2PlatformSupport.h>
 
 using ::android::hardware::hidl_handle;
@@ -230,8 +233,89 @@
     }
 };
 
+static
+c2_status_t Gralloc4Mapper_lock(native_handle_t *handle, uint64_t usage, const Rect& bounds,
+        C2PlanarLayout *layout, uint8_t **addr) {
+    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
+
+    std::vector<ui::PlaneLayout> planes;
+    // this method is only supported on Gralloc 4 or later
+    status_t err = mapper.getPlaneLayouts(handle, &planes);
+    if (err != NO_ERROR || planes.empty()) {
+        return C2_CANNOT_DO;
+    }
+
+    uint8_t *pointer = nullptr;
+    err = mapper.lock(handle, usage, bounds, (void **)&pointer, nullptr, nullptr);
+    if (err != NO_ERROR || pointer == nullptr) {
+        return C2_CORRUPTED;
+    }
+
+    using aidl::android::hardware::graphics::common::PlaneLayoutComponentType;
+    using aidl::android::hardware::graphics::common::PlaneLayoutComponent;
+
+    layout->type = C2PlanarLayout::TYPE_YUV;
+    layout->numPlanes = 0;
+    layout->rootPlanes = 0;
+
+    for (const ui::PlaneLayout &plane : planes) {
+        layout->rootPlanes++;
+        uint32_t lastOffsetInBits = 0;
+        uint32_t rootIx = 0;
+
+        for (const PlaneLayoutComponent &component : plane.components) {
+            if (!gralloc4::isStandardPlaneLayoutComponentType(component.type)) {
+                return C2_CANNOT_DO;
+            }
+
+            uint32_t rightShiftBits = component.offsetInBits - lastOffsetInBits;
+            uint32_t allocatedDepthInBits = component.sizeInBits + rightShiftBits;
+            C2PlanarLayout::plane_index_t planeId;
+            C2PlaneInfo::channel_t channel;
+
+            switch (static_cast<PlaneLayoutComponentType>(component.type.value)) {
+                case PlaneLayoutComponentType::Y:
+                    planeId = C2PlanarLayout::PLANE_Y;
+                    channel = C2PlaneInfo::CHANNEL_Y;
+                    break;
+                case PlaneLayoutComponentType::CB:
+                    planeId = C2PlanarLayout::PLANE_U;
+                    channel = C2PlaneInfo::CHANNEL_CB;
+                    break;
+                case PlaneLayoutComponentType::CR:
+                    planeId = C2PlanarLayout::PLANE_V;
+                    channel = C2PlaneInfo::CHANNEL_CR;
+                    break;
+                default:
+                    return C2_CORRUPTED;
+            }
+
+            addr[planeId] = pointer + plane.offsetInBytes + (component.offsetInBits / 8);
+            layout->planes[planeId] = {
+                channel,                                                // channel
+                static_cast<int32_t>(plane.sampleIncrementInBits / 8),  // colInc
+                static_cast<int32_t>(plane.strideInBytes),              // rowInc
+                static_cast<uint32_t>(plane.horizontalSubsampling),     // mColSampling
+                static_cast<uint32_t>(plane.verticalSubsampling),       // mRowSampling
+                allocatedDepthInBits,                                   // allocatedDepth (bits)
+                static_cast<uint32_t>(component.sizeInBits),            // bitDepth (bits)
+                rightShiftBits,                                         // rightShift (bits)
+                C2PlaneInfo::NATIVE,                                    // endianness
+                rootIx,                                                 // rootIx
+                static_cast<uint32_t>(component.offsetInBits / 8),      // offset (bytes)
+            };
+
+            layout->numPlanes++;
+            lastOffsetInBits = component.offsetInBits + component.sizeInBits;
+            rootIx++;
+        }
+    }
+    return C2_OK;
+}
+
 } // unnamed namespace
 
+
 native_handle_t *UnwrapNativeCodec2GrallocHandle(const C2Handle *const handle) {
     return C2HandleGralloc::UnwrapNativeHandle(handle);
 }
@@ -385,6 +469,10 @@
                 mBuffer, mWidth, mHeight, mFormat, mGrallocUsage,
                 mStride, generation, igbp_id, igbp_slot);
     }
+
+    // 'NATIVE' on Android means LITTLE_ENDIAN
+    constexpr C2PlaneInfo::endianness_t kEndianness = C2PlaneInfo::NATIVE;
+
     switch (mFormat) {
         case static_cast<uint32_t>(PixelFormat4::RGBA_1010102): {
             // TRICKY: this is used for media as YUV444 in the case when it is queued directly to a
@@ -646,7 +734,7 @@
                 16,                             // allocatedDepth
                 10,                             // bitDepth
                 6,                              // rightShift
-                C2PlaneInfo::LITTLE_END,        // endianness
+                kEndianness,                    // endianness
                 C2PlanarLayout::PLANE_Y,        // rootIx
                 0,                              // offset
             };
@@ -659,7 +747,7 @@
                 16,                             // allocatedDepth
                 10,                             // bitDepth
                 6,                              // rightShift
-                C2PlaneInfo::LITTLE_END,        // endianness
+                kEndianness,                    // endianness
                 C2PlanarLayout::PLANE_U,        // rootIx
                 0,                              // offset
             };
@@ -672,7 +760,7 @@
                 16,                             // allocatedDepth
                 10,                             // bitDepth
                 6,                              // rightShift
-                C2PlaneInfo::LITTLE_END,        // endianness
+                kEndianness,                    // endianness
                 C2PlanarLayout::PLANE_U,        // rootIx
                 2,                              // offset
             };
@@ -680,9 +768,15 @@
         }
 
         default: {
-            // We don't know what it is, but let's try to lock it.
+            // We don't know what it is; try to lock it with Gralloc4 first
             android_ycbcr ycbcrLayout;
+            c2_status_t status = Gralloc4Mapper_lock(
+                    const_cast<native_handle_t*>(mBuffer), grallocUsage, rect, layout, addr);
+            if (status == C2_OK) {
+                break;
+            }
 
+            // fall back to lockYCbCr
             status_t err = GraphicBufferMapper::get().lockYCbCr(
                     const_cast<native_handle_t*>(mBuffer), grallocUsage, rect, &ycbcrLayout);
             if (err == OK && ycbcrLayout.y && ycbcrLayout.cb && ycbcrLayout.cr
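
Gralloc4Mapper_lock above derives each C2PlaneInfo from the Gralloc4 plane-layout components: the gap between a component's offsetInBits and the end of the previous component becomes rightShift, and sizeInBits plus that gap becomes allocatedDepth. Worked with values assumed to reproduce the explicit P010 luma layout earlier in this file (bitDepth 10, rightShift 6, allocatedDepth 16):

    #include <cstdint>
    #include <iostream>

    int main() {
        uint32_t lastOffsetInBits = 0;  // first component in the plane
        uint32_t offsetInBits = 6;      // sample occupies bits [6..15] of a 16-bit word
        uint32_t sizeInBits = 10;       // 10 significant bits

        uint32_t rightShiftBits = offsetInBits - lastOffsetInBits;    // 6
        uint32_t allocatedDepthInBits = sizeInBits + rightShiftBits;  // 16

        std::cout << "rightShift=" << rightShiftBits
                  << " allocatedDepth=" << allocatedDepthInBits << "\n";
        return 0;
    }
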
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index 1660c38..dfdd84d 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -301,13 +301,21 @@
         std::lock_guard<std::mutex> lock(_mComponentStoreReadLock);
         _mComponentStore = store;
     }
-    std::shared_ptr<C2AllocatorIon> allocator;
+    std::shared_ptr<C2AllocatorIon> ionAllocator;
     {
         std::lock_guard<std::mutex> lock(gIonAllocatorMutex);
-        allocator = gIonAllocator.lock();
+        ionAllocator = gIonAllocator.lock();
     }
-    if (allocator) {
-        UseComponentStoreForIonAllocator(allocator, store);
+    if (ionAllocator) {
+        UseComponentStoreForIonAllocator(ionAllocator, store);
+    }
+    std::shared_ptr<C2DmaBufAllocator> dmaAllocator;
+    {
+        std::lock_guard<std::mutex> lock(gDmaBufAllocatorMutex);
+        dmaAllocator = gDmaBufAllocator.lock();
+    }
+    if (dmaAllocator) {
+        UseComponentStoreForDmaBufAllocator(dmaAllocator, store);
     }
 }
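
The C2Store change extends to the dmabuf allocator the same pattern already used for the ion allocator: a process-wide weak_ptr guarded by a mutex, promoted to a shared_ptr under the lock and used only if the allocator is still alive. A generic sketch of that pattern (Allocator, gAllocator, and attachStore are placeholders):

    #include <memory>
    #include <mutex>

    struct Allocator {
        void attachStore(int storeId) { (void)storeId; /* rebind to the new store */ }
    };

    static std::mutex gAllocatorMutex;
    static std::weak_ptr<Allocator> gAllocator;

    void attachStoreIfAlive(int storeId) {
        std::shared_ptr<Allocator> alloc;
        {
            std::lock_guard<std::mutex> lock(gAllocatorMutex);
            alloc = gAllocator.lock();  // empty if the allocator has been destroyed
        }
        if (alloc) {
            alloc->attachStore(storeId);
        }
    }
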
 
diff --git a/media/codecs/amrnb/common/Android.bp b/media/codecs/amrnb/common/Android.bp
index bae65f3..0bc6ed2 100644
--- a/media/codecs/amrnb/common/Android.bp
+++ b/media/codecs/amrnb/common/Android.bp
@@ -22,6 +22,10 @@
     vendor_available: true,
     host_supported: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
     srcs: [
         "src/add.cpp",
diff --git a/media/codecs/amrnb/dec/Android.bp b/media/codecs/amrnb/dec/Android.bp
index 1083b82..70741d2 100644
--- a/media/codecs/amrnb/dec/Android.bp
+++ b/media/codecs/amrnb/dec/Android.bp
@@ -35,6 +35,10 @@
     vendor_available: true,
     host_supported: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
     srcs: [
         "src/a_refl.cpp",
diff --git a/media/codecs/amrnb/enc/Android.bp b/media/codecs/amrnb/enc/Android.bp
index 9e947e9..3c6566e 100644
--- a/media/codecs/amrnb/enc/Android.bp
+++ b/media/codecs/amrnb/enc/Android.bp
@@ -34,6 +34,10 @@
     name: "libstagefright_amrnbenc",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
     srcs: [
         "src/amrencode.cpp",
diff --git a/media/codecs/amrwb/dec/Android.bp b/media/codecs/amrwb/dec/Android.bp
index 228ea80..f16b0fe 100644
--- a/media/codecs/amrwb/dec/Android.bp
+++ b/media/codecs/amrwb/dec/Android.bp
@@ -35,6 +35,10 @@
     vendor_available: true,
     host_supported: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
     srcs: [
         "src/agc2_amr_wb.cpp",
diff --git a/media/codecs/amrwb/enc/Android.bp b/media/codecs/amrwb/enc/Android.bp
index d945531..8780136 100644
--- a/media/codecs/amrwb/enc/Android.bp
+++ b/media/codecs/amrwb/enc/Android.bp
@@ -21,6 +21,10 @@
     name: "libstagefright_amrwbenc",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
     srcs: [
         "src/autocorr.c",
diff --git a/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp b/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
index 00b2ab6..b295258 100644
--- a/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
+++ b/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
@@ -501,13 +501,16 @@
     /* check frame rate */
     for (i = 0; i < encParams->nLayers; i++)
     {
+        if (encOption->encFrameRate[i] <= 0. || encOption->encFrameRate[i] > 120)
+        {
+            goto CLEAN_UP;
+        }
         encParams->LayerFrameRate[i] = encOption->encFrameRate[i];
     }
 
     if (encParams->nLayers > 1)
     {
-        if (encOption->encFrameRate[0] == encOption->encFrameRate[1] ||
-                encOption->encFrameRate[0] == 0. || encOption->encFrameRate[1] == 0.) /* 7/31/03 */
+        if (encOption->encFrameRate[0] == encOption->encFrameRate[1])
             goto CLEAN_UP;
     }
     /* set max frame rate */
diff --git a/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp b/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
index 912c821..5e613d9 100644
--- a/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
+++ b/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
@@ -50,7 +50,7 @@
 
  private:
   tagvideoDecControls *mDecHandle = nullptr;
-  uint8_t *mOutputBuffer[kNumOutputBuffers];
+  uint8_t *mOutputBuffer[kNumOutputBuffers] = {};
   bool mInitialized = false;
   bool mFramesConfigured = false;
 #ifdef MPEG4
diff --git a/media/codecs/mp3dec/Android.bp b/media/codecs/mp3dec/Android.bp
index 1ab0511..6659ea5 100644
--- a/media/codecs/mp3dec/Android.bp
+++ b/media/codecs/mp3dec/Android.bp
@@ -47,6 +47,10 @@
     name: "libstagefright_mp3dec",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
     host_supported:true,
     srcs: [
diff --git a/media/extractors/Android.bp b/media/extractors/Android.bp
index 7513cb1..f654ecd 100644
--- a/media/extractors/Android.bp
+++ b/media/extractors/Android.bp
@@ -24,21 +24,19 @@
 cc_defaults {
     name: "extractor-defaults",
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/include",
-    ],
-
     shared_libs: [
         "liblog",
     ],
 
-    // extractors are supposed to work on Q(29)
+    // extractors are expected to run on Q(29)
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+    ],
 
     relative_install_path: "extractors",
 
-    compile_multilib: "first",
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/extractors/TEST_MAPPING b/media/extractors/TEST_MAPPING
index 4984b8f..a7c2cfe 100644
--- a/media/extractors/TEST_MAPPING
+++ b/media/extractors/TEST_MAPPING
@@ -1,6 +1,9 @@
 {
   "presubmit": [
 
+        {
+            "name": "CtsMediaTranscodingTestCases"
+        }
     // TODO(b/153661591) enable test once the bug is fixed
     // This tests the extractor path
     // {
diff --git a/media/extractors/aac/Android.bp b/media/extractors/aac/Android.bp
index 7bf3a13..a926422 100644
--- a/media/extractors/aac/Android.bp
+++ b/media/extractors/aac/Android.bp
@@ -21,6 +21,10 @@
 
     srcs: ["AACExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_foundation",
         "libstagefright_metadatautils",
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/include/AACExtractor.h
similarity index 100%
rename from media/extractors/aac/AACExtractor.h
rename to media/extractors/aac/include/AACExtractor.h
diff --git a/media/extractors/amr/Android.bp b/media/extractors/amr/Android.bp
index 712360d..121b7a3 100644
--- a/media/extractors/amr/Android.bp
+++ b/media/extractors/amr/Android.bp
@@ -21,6 +21,10 @@
 
     srcs: ["AMRExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_foundation",
     ],
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/include/AMRExtractor.h
similarity index 100%
rename from media/extractors/amr/AMRExtractor.h
rename to media/extractors/amr/include/AMRExtractor.h
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index 9a2a76b..834f4ad 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -19,12 +19,12 @@
 
 cc_library {
     name: "libflacextractor",
-    defaults: ["extractor-defaults", "libbinder_ndk_host_user"],
+    defaults: ["extractor-defaults"],
 
     srcs: ["FLACExtractor.cpp"],
 
-    include_dirs: [
-        "external/flac/include",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/include/FLACExtractor.h
similarity index 100%
rename from media/extractors/flac/FLACExtractor.h
rename to media/extractors/flac/include/FLACExtractor.h
diff --git a/media/extractors/fuzzers/Android.bp b/media/extractors/fuzzers/Android.bp
index 0e54b58..490e195 100644
--- a/media/extractors/fuzzers/Android.bp
+++ b/media/extractors/fuzzers/Android.bp
@@ -80,11 +80,6 @@
     defaults: ["extractor-fuzzer-defaults"],
     host_supported: true,
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mpeg2",
-        "frameworks/av/media/libstagefright",
-    ],
-
     static_libs: [
         "libstagefright_foundation_without_imemory",
         "libstagefright_mpeg2support",
@@ -124,14 +119,6 @@
         "mp4_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mp4",
-    ],
-
-    header_libs: [
-        "libaudioclient_headers",
-    ],
-
     static_libs: [
         "libstagefright_id3",
         "libstagefright_esds",
@@ -150,10 +137,6 @@
         "wav_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/wav",
-    ],
-
     static_libs: [
         "libfifo",
         "libwavextractor",
@@ -173,10 +156,6 @@
         "amr_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/amr",
-    ],
-
     static_libs: [
         "libamrextractor",
     ],
@@ -193,10 +172,6 @@
         "mkv_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mkv",
-    ],
-
     static_libs: [
         "libwebm",
         "libstagefright_flacdec",
@@ -217,9 +192,6 @@
         "ogg_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/ogg",
-    ],
 
     static_libs: [
         "libstagefright_metadatautils",
@@ -265,10 +237,6 @@
         "mp3_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mp3",
-    ],
-
     static_libs: [
         "libfifo",
         "libmp3extractor",
@@ -285,10 +253,6 @@
         "aac_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/aac",
-    ],
-
     static_libs: [
         "libaacextractor",
         "libstagefright_metadatautils",
@@ -304,10 +268,6 @@
         "flac_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/flac",
-    ],
-
     static_libs: [
         "libstagefright_metadatautils",
         "libFLAC",
@@ -329,10 +289,6 @@
         "midi_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/midi",
-    ],
-
     static_libs: [
         "libsonivox",
         "libmedia_midiiowrapper",
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index 08a6fa0..feabf9e 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -23,6 +23,10 @@
 
     srcs: ["MidiExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     header_libs: [
         "libmedia_datasource_headers",
     ],
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/include/MidiExtractor.h
similarity index 100%
rename from media/extractors/midi/MidiExtractor.h
rename to media/extractors/midi/include/MidiExtractor.h
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index 54c5b27..98ce305 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -21,10 +21,8 @@
 
     srcs: ["MatroskaExtractor.cpp"],
 
-    include_dirs: [
-        "external/flac/include",
-        "external/libvpx/libwebm",
-        "frameworks/av/media/libstagefright/flac/dec",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/include/MatroskaExtractor.h
similarity index 100%
rename from media/extractors/mkv/MatroskaExtractor.h
rename to media/extractors/mkv/include/MatroskaExtractor.h
diff --git a/media/extractors/mp3/Android.bp b/media/extractors/mp3/Android.bp
index 75b9b7b..396a13a 100644
--- a/media/extractors/mp3/Android.bp
+++ b/media/extractors/mp3/Android.bp
@@ -16,6 +16,10 @@
             "XINGSeeker.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libutils",
         "libstagefright_id3",
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/include/MP3Extractor.h
similarity index 100%
rename from media/extractors/mp3/MP3Extractor.h
rename to media/extractors/mp3/include/MP3Extractor.h
diff --git a/media/extractors/mp3/MP3Seeker.h b/media/extractors/mp3/include/MP3Seeker.h
similarity index 100%
rename from media/extractors/mp3/MP3Seeker.h
rename to media/extractors/mp3/include/MP3Seeker.h
diff --git a/media/extractors/mp3/VBRISeeker.h b/media/extractors/mp3/include/VBRISeeker.h
similarity index 100%
rename from media/extractors/mp3/VBRISeeker.h
rename to media/extractors/mp3/include/VBRISeeker.h
diff --git a/media/extractors/mp3/XINGSeeker.h b/media/extractors/mp3/include/XINGSeeker.h
similarity index 100%
rename from media/extractors/mp3/XINGSeeker.h
rename to media/extractors/mp3/include/XINGSeeker.h
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index 7fa6bfd..540d75d 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -15,6 +15,15 @@
     ],
 }
 
+cc_library_headers {
+    name: "libmp4extractor_headers",
+    host_supported: true,
+
+    export_include_dirs: [
+        "include",
+    ],
+}
+
 cc_library {
     name: "libmp4extractor",
     defaults: ["extractor-defaults"],
@@ -27,6 +36,10 @@
         "SampleTable.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_esds",
         "libstagefright_foundation",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 8836c47..fb935b6 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -1127,10 +1127,10 @@
                     void *data;
                     size_t size;
 
-                    if (AMediaFormat_getBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_2,
+                    if (AMediaFormat_getBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0,
                                                &data, &size)
-                        && size >= 5) {
-                        const uint8_t *ptr = (const uint8_t *)data;
+                        && size >= 24) {
+                        const uint8_t *ptr = (const uint8_t *)data + (size - 24);
                         const uint8_t profile = ptr[2] >> 1;
                         const uint8_t bl_compatibility_id = (ptr[4]) >> 4;
                         bool create_two_tracks = false;
@@ -1147,13 +1147,15 @@
 
                             track_b->timescale = mLastTrack->timescale;
                             track_b->sampleTable = mLastTrack->sampleTable;
-                            track_b->includes_expensive_metadata = mLastTrack->includes_expensive_metadata;
+                            track_b->includes_expensive_metadata =
+                                mLastTrack->includes_expensive_metadata;
                             track_b->skipTrack = mLastTrack->skipTrack;
                             track_b->elst_needs_processing = mLastTrack->elst_needs_processing;
                             track_b->elst_media_time = mLastTrack->elst_media_time;
                             track_b->elst_segment_duration = mLastTrack->elst_segment_duration;
                             track_b->elst_shift_start_ticks = mLastTrack->elst_shift_start_ticks;
-                            track_b->elst_initial_empty_edit_ticks = mLastTrack->elst_initial_empty_edit_ticks;
+                            track_b->elst_initial_empty_edit_ticks =
+                                mLastTrack->elst_initial_empty_edit_ticks;
                             track_b->subsample_encryption = mLastTrack->subsample_encryption;
 
                             track_b->mTx3gBuffer = mLastTrack->mTx3gBuffer;
@@ -1166,11 +1168,11 @@
                             mLastTrack->next = track_b;
                             track_b->next = NULL;
 
-                            // we want to remove the csd-2 key from the metadata, but
+                            // we want to remove the csd-0 key from the metadata, but
                             // don't have an AMediaFormat_* function to do so. Settle
-                            // for replacing this csd-2 with an empty csd-2.
+                            // for replacing this csd-0 with an empty csd-0.
                             uint8_t emptybuffer[8] = {};
-                            AMediaFormat_setBuffer(track_b->meta, AMEDIAFORMAT_KEY_CSD_2,
+                            AMediaFormat_setBuffer(track_b->meta, AMEDIAFORMAT_KEY_CSD_0,
                                                    emptybuffer, 0);
 
                             if (4 == profile || 7 == profile || 8 == profile ) {
@@ -1182,6 +1184,8 @@
                             } else if (10 == profile) {
                                 AMediaFormat_setString(track_b->meta,
                                         AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_AV1);
+                                AMediaFormat_setBuffer(track_b->meta, AMEDIAFORMAT_KEY_CSD_0,
+                                    data, size - 24);
                             } // Should never get to else part
 
                             mLastTrack = track_b;
@@ -2591,9 +2595,11 @@
             *offset += chunk_size;
             break;
         }
-        case FOURCC("dvcC"):
-        case FOURCC("dvvC"): {
 
+        case FOURCC("dvcC"):
+        case FOURCC("dvvC"):
+        case FOURCC("dvwC"):
+        {
             if (chunk_data_size != 24) {
                 return ERROR_MALFORMED;
             }
@@ -2612,14 +2618,29 @@
             if (mLastTrack == NULL)
                 return ERROR_MALFORMED;
 
-            AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_2,
-                                   buffer.get(), chunk_data_size);
+            void *data = nullptr;
+            size_t size = 0;
+            if (AMediaFormat_getBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
+                // if csd-0 is already present, append the dvcc data to it
+                auto csd0_dvcc = heapbuffer<uint8_t>(size + chunk_data_size);
+
+                memcpy(csd0_dvcc.get(), data, size);
+                memcpy(csd0_dvcc.get() + size, buffer.get(), chunk_data_size);
+
+                AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0,
+                                    csd0_dvcc.get(), size + chunk_data_size);
+            } else {
+                // otherwise, set csd-0 directly
+                AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0,
+                                    buffer.get(), chunk_data_size);
+            }
             AMediaFormat_setString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME,
                                    MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
 
             *offset += chunk_size;
             break;
         }
+
         case FOURCC("d263"):
         {
             *offset += chunk_size;
@@ -4458,7 +4479,6 @@
     if (!AMediaFormat_getString(track->meta, AMEDIAFORMAT_KEY_MIME, &mime)) {
         return NULL;
     }
-
     sp<ItemTable> itemTable;
     if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
         void *data;
@@ -4491,14 +4511,14 @@
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
         void *data;
         size_t size;
-        if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)) {
+        if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)
+                || size < 24) {
             return NULL;
         }
 
-        const uint8_t *ptr = (const uint8_t *)data;
-
+        const uint8_t *ptr = (const uint8_t *)data + (size - 24);
         // dv_major.dv_minor Should be 1.0 or 2.1
-        if (size != 24 || ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1))) {
+        if ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1)) {
             return NULL;
         }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)
@@ -4576,7 +4596,7 @@
             return ERROR_MALFORMED;
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
-        if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)) {
+        if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
             return ERROR_MALFORMED;
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
@@ -5152,11 +5172,11 @@
         ALOGV("%s DolbyVision stream detected", __FUNCTION__);
         void *data;
         size_t size;
-        CHECK(AMediaFormat_getBuffer(format, AMEDIAFORMAT_KEY_CSD_2, &data, &size));
+        CHECK(AMediaFormat_getBuffer(format, AMEDIAFORMAT_KEY_CSD_0, &data, &size));
 
-        const uint8_t *ptr = (const uint8_t *)data;
+        const uint8_t *ptr = (const uint8_t *)data + (size - 24);
 
-        CHECK(size == 24);
+        CHECK(size >= 24);
 
         // dv_major.dv_minor Should be 1.0 or 2.1
         CHECK(!((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1)));
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/include/AC4Parser.h
similarity index 100%
rename from media/extractors/mp4/AC4Parser.h
rename to media/extractors/mp4/include/AC4Parser.h
diff --git a/media/extractors/mp4/ItemTable.h b/media/extractors/mp4/include/ItemTable.h
similarity index 100%
rename from media/extractors/mp4/ItemTable.h
rename to media/extractors/mp4/include/ItemTable.h
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/include/MPEG4Extractor.h
similarity index 100%
rename from media/extractors/mp4/MPEG4Extractor.h
rename to media/extractors/mp4/include/MPEG4Extractor.h
diff --git a/media/extractors/mp4/SampleIterator.h b/media/extractors/mp4/include/SampleIterator.h
similarity index 100%
rename from media/extractors/mp4/SampleIterator.h
rename to media/extractors/mp4/include/SampleIterator.h
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/include/SampleTable.h
similarity index 100%
rename from media/extractors/mp4/SampleTable.h
rename to media/extractors/mp4/include/SampleTable.h
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 7e6247b..8faecae 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -38,6 +38,10 @@
         "MPEG2TSExtractor.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     shared_libs: [
         "libbase",
         "libcgrouprc#29",
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.cpp b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
index d431b05..afd28ef 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
@@ -20,9 +20,6 @@
 
 #include "MPEG2PSExtractor.h"
 
-#include <AnotherPacketSource.h>
-#include <ESQueue.h>
-
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -33,6 +30,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
+#include <mpeg2ts/ESQueue.h>
 #include <utils/String8.h>
 
 #include <inttypes.h>
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index 2e68809..9a3cd92 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -35,10 +35,9 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 #include <utils/String8.h>
 
-#include <AnotherPacketSource.h>
-
 #include <hidl/HybridInterface.h>
 #include <android/hardware/cas/1.0/ICas.h>
 
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.h b/media/extractors/mpeg2/include/MPEG2PSExtractor.h
similarity index 100%
rename from media/extractors/mpeg2/MPEG2PSExtractor.h
rename to media/extractors/mpeg2/include/MPEG2PSExtractor.h
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/include/MPEG2TSExtractor.h
similarity index 98%
rename from media/extractors/mpeg2/MPEG2TSExtractor.h
rename to media/extractors/mpeg2/include/MPEG2TSExtractor.h
index fd77b08..0e3e484 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.h
+++ b/media/extractors/mpeg2/include/MPEG2TSExtractor.h
@@ -23,12 +23,11 @@
 #include <media/MediaExtractorPluginApi.h>
 #include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
+#include <mpeg2ts/ATSParser.h>
 #include <utils/threads.h>
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
 
-#include <ATSParser.h>
-
 namespace android {
 
 struct AMessage;
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index d7540c4..dc3c25c 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -22,8 +22,8 @@
 
     srcs: ["OggExtractor.cpp"],
 
-    include_dirs: [
-        "external/tremolo",
+    export_include_dirs: [
+        "include",
     ],
 
     header_libs: [
diff --git a/media/extractors/ogg/OggExtractor.h b/media/extractors/ogg/include/OggExtractor.h
similarity index 100%
rename from media/extractors/ogg/OggExtractor.h
rename to media/extractors/ogg/include/OggExtractor.h
diff --git a/media/extractors/tests/Android.bp b/media/extractors/tests/Android.bp
index 23c74f7..3c3bbdc 100644
--- a/media/extractors/tests/Android.bp
+++ b/media/extractors/tests/Android.bp
@@ -79,11 +79,6 @@
         "libbase",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/",
-        "frameworks/av/media/libstagefright/",
-    ],
-
     compile_multilib: "first",
 
     cflags: [
diff --git a/media/extractors/tests/ExtractorUnitTest.cpp b/media/extractors/tests/ExtractorUnitTest.cpp
index 84ec1f2..2bd9c6a 100644
--- a/media/extractors/tests/ExtractorUnitTest.cpp
+++ b/media/extractors/tests/ExtractorUnitTest.cpp
@@ -27,18 +27,18 @@
 #include <media/stagefright/MetaDataUtils.h>
 #include <media/stagefright/foundation/OpusHeader.h>
 
-#include "aac/AACExtractor.h"
-#include "amr/AMRExtractor.h"
-#include "flac/FLACExtractor.h"
-#include "midi/MidiExtractor.h"
-#include "mkv/MatroskaExtractor.h"
-#include "mp3/MP3Extractor.h"
-#include "mp4/MPEG4Extractor.h"
-#include "mp4/SampleTable.h"
-#include "mpeg2/MPEG2PSExtractor.h"
-#include "mpeg2/MPEG2TSExtractor.h"
-#include "ogg/OggExtractor.h"
-#include "wav/WAVExtractor.h"
+#include <AACExtractor.h>
+#include <AMRExtractor.h>
+#include <FLACExtractor.h>
+#include <MidiExtractor.h>
+#include <MatroskaExtractor.h>
+#include <MP3Extractor.h>
+#include <MPEG4Extractor.h>
+#include <SampleTable.h>
+#include <MPEG2PSExtractor.h>
+#include <MPEG2TSExtractor.h>
+#include <OggExtractor.h>
+#include <WAVExtractor.h>
 
 #include "ExtractorUnitTestEnvironment.h"
 
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index cc5e1c7..cdf587c 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -18,12 +18,12 @@
 cc_library {
     name: "libwavextractor",
 
-    defaults: ["extractor-defaults", "libbinder_ndk_host_user"],
+    defaults: ["extractor-defaults"],
 
     srcs: ["WAVExtractor.cpp"],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/include",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/include/WAVExtractor.h
similarity index 100%
rename from media/extractors/wav/WAVExtractor.h
rename to media/extractors/wav/include/WAVExtractor.h
diff --git a/media/janitors/codec_OWNERS b/media/janitors/codec_OWNERS
index e201399..d4ee51b 100644
--- a/media/janitors/codec_OWNERS
+++ b/media/janitors/codec_OWNERS
@@ -2,4 +2,4 @@
 # differentiated from plugins connecting those codecs to either omx or codec2 infrastructure
 essick@google.com
 lajos@google.com
-marcone@google.com
+wonsik@google.com
diff --git a/media/janitors/reliability_mainline_OWNERS b/media/janitors/reliability_mainline_OWNERS
index e4c4fc2..cced19c 100644
--- a/media/janitors/reliability_mainline_OWNERS
+++ b/media/janitors/reliability_mainline_OWNERS
@@ -1,5 +1,5 @@
 # Bug component: 1051309
-# go/android-media-relaibility
+# go/android-media-reliability
 
 essick@google.com
 nchalko@google.com
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 7daac20..956b3cd 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -40,6 +40,31 @@
     int64_t nanoseconds;
 } Timestamp;
 
+static constexpr int32_t   kWorkloadScaler = 500;
+
+// Linear congruential random number generator.
+static uint32_t s_random16() {
+    static uint32_t seed = 1234;
+    seed = ((seed * 31421) + 6927) & 0x0FFFF;
+    return seed;
+}
+
+/**
+ * The random number generator is good for burning CPU because the compiler cannot
+ * easily optimize away the computation.
+ * @param workload number of times to execute the loop
+ * @return a white noise value between -1.0 and +1.0
+ */
+static float s_burnCPU(int32_t workload) {
+    uint32_t random = 0;
+    for (int32_t i = 0; i < workload; i++) {
+        for (int32_t j = 0; j < 10; j++) {
+            random = random ^ s_random16();
+        }
+    }
+    return ((int32_t) random - 32768) * (1.0 / 32768);
+}
+
 /**
  * Simple wrapper for AAudio that opens an output stream either in callback or blocking write mode.
  */
@@ -268,11 +293,13 @@
     int32_t            timestampCount = 0; // in timestamps
     int32_t            sampleRate = 48000;
     int32_t            prefixToneFrames = 0;
+    double             workload = 0.0;
     bool               sweepSetup = false;
 
     int                scheduler = 0;
     bool               schedulerChecked = false;
     int32_t            hangTimeMSec = 0;
+    int                cpuAffinity = -1;
 
     AAudioSimplePlayer simplePlayer;
     int32_t            callbackCount = 0;
@@ -304,6 +331,14 @@
 
 } SineThreadedData_t;
 
+int setCpuAffinity(int cpuIndex) {
+    cpu_set_t cpu_set;
+    CPU_ZERO(&cpu_set);
+    CPU_SET(cpuIndex, &cpu_set);
+    int err = sched_setaffinity((pid_t) 0, sizeof(cpu_set_t), &cpu_set);
+    return err == 0 ? 0 : -errno;
+}
+
 // Callback function that fills the audio output buffer.
 aaudio_data_callback_result_t SimplePlayerDataCallbackProc(
         AAudioStream *stream,
@@ -319,6 +354,10 @@
     }
     SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
 
+    if (sineData->cpuAffinity >= 0) {
+        setCpuAffinity(sineData->cpuAffinity);
+        sineData->cpuAffinity = -1;
+    }
     // Play an initial high tone so we can tell whether the beginning was truncated.
     if (!sineData->sweepSetup && sineData->framesTotal >= sineData->prefixToneFrames) {
         sineData->setupSineSweeps();
@@ -398,6 +437,8 @@
             return AAUDIO_CALLBACK_RESULT_STOP;
     }
 
+    s_burnCPU((int32_t)(sineData->workload * kWorkloadScaler * numFrames));
+
     sineData->callbackCount++;
     sineData->framesTotal += numFrames;
     return AAUDIO_CALLBACK_RESULT_CONTINUE;
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index cdc987b..400fc7c 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -31,10 +31,10 @@
 #include "AAudioSimplePlayer.h"
 #include "AAudioArgsParser.h"
 
-#define APP_VERSION  "0.1.8"
+#define APP_VERSION  "0.2.1"
 
-constexpr int32_t kDefaultHangTimeMSec = 10;
-
+static constexpr int32_t kDefaultHangTimeMSec = 10;
+static constexpr int32_t kWorkPeriodSeconds = 6;
 /**
  * Open stream, play some sine waves, then close the stream.
  *
@@ -44,7 +44,11 @@
 static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser,
                                          int32_t loopCount,
                                          int32_t prefixToneMsec,
-                                         int32_t hangTimeMSec)
+                                         int32_t hangTimeMSec,
+                                         int     cpuAffinity,
+                                         double  lowWorkLoad,
+                                         double  highWorkLoad,
+                                         int32_t workPeriodSeconds)
 {
     SineThreadedData_t myData;
     AAudioSimplePlayer &player = myData.simplePlayer;
@@ -57,6 +61,7 @@
     myData.schedulerChecked = false;
     myData.callbackCount = 0;
     myData.hangTimeMSec = hangTimeMSec; // test AAudioStream_getXRunCount()
+    myData.cpuAffinity = cpuAffinity;
 
     result = player.open(argParser,
                          SimplePlayerDataCallbackProc,
@@ -111,8 +116,8 @@
         }
 
         // Play a sine wave in the background.
-        printf("Sleep for %d seconds while audio plays in a callback thread. %d of %d\n",
-               argParser.getDurationSeconds(), (loopIndex + 1), loopCount);
+        printf("Monitor for %d seconds while audio plays in a callback thread. %d of %d, %d\n",
+               argParser.getDurationSeconds(), (loopIndex + 1), loopCount, workPeriodSeconds);
         startedAtNanos = getNanoseconds(CLOCK_MONOTONIC);
         for (int second = 0; second < durationSeconds; second++) {
             // Sleep a while. Wake up early if there is an error, for example a DISCONNECT.
@@ -123,13 +128,17 @@
             const int32_t framesWritten = (int32_t) AAudioStream_getFramesWritten(player.getStream());
             const int32_t framesRead = (int32_t) AAudioStream_getFramesRead(player.getStream());
             const int32_t xruns = AAudioStream_getXRunCount(player.getStream());
+            myData.workload = ((second % (2 * workPeriodSeconds)) < workPeriodSeconds)
+                    ? lowWorkLoad : highWorkLoad;
             printf(" waker result = %d, at %6d millis"
-                           ", second = %3d, frames written %8d - read %8d = %8d, underruns = %d\n",
+                   ", second = %3d, frames written %8d - read %8d = %8d"
+                   ", work = %5.1f, underruns = %d\n",
                    result, (int) millis,
                    second,
                    framesWritten,
                    framesRead,
                    framesWritten - framesRead,
+                   myData.workload,
                    xruns);
             if (result != AAUDIO_OK) {
                 disconnected = (result == AAUDIO_ERROR_DISCONNECTED);
@@ -220,6 +229,11 @@
     AAudioArgsParser::usage();
     printf("      -l{count} loopCount start/stop, every other one is silent\n");
     printf("      -t{msec}  play a high pitched tone at the beginning\n");
+    printf("      -w{workload}  set base workload, default 0.0\n");
+    printf("      -W{workload}  alternate between this higher workload and base workload\n");
+    printf("      -Z{duration}  number of seconds to spend at each workload, default = %d\n",
+           kWorkPeriodSeconds);
+    printf("      -a{cpu}   set CPU affinity, default none\n");
     printf("      -h{msec}  force periodic underruns by hanging in callback\n");
     printf("                If no value specified then %d used.\n",
             kDefaultHangTimeMSec);
@@ -232,6 +246,10 @@
     int32_t            loopCount = 1;
     int32_t            prefixToneMsec = 0;
     int32_t            hangTimeMSec = 0;
+    int                cpuAffinity = -1;
+    double             lowWorkLoad = 0.0;
+    double             highWorkLoad = -1.0;
+    int32_t            workPeriodSeconds = kWorkPeriodSeconds;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
@@ -247,6 +265,9 @@
             if (arg[0] == '-') {
                 char option = arg[1];
                 switch (option) {
+                    case 'a':
+                        cpuAffinity = atoi(&arg[2]);
+                        break;
                     case 'l':
                         loopCount = atoi(&arg[2]);
                         break;
@@ -258,6 +279,15 @@
                                 ? atoi(&arg[2])
                                 : kDefaultHangTimeMSec;
                         break;
+                    case 'w':
+                        lowWorkLoad = atof(&arg[2]);
+                        break;
+                    case 'W':
+                        highWorkLoad = atof(&arg[2]);
+                        break;
+                    case 'Z':
+                        workPeriodSeconds = atoi(&arg[2]);
+                        break;
                     default:
                         usage();
                         exit(EXIT_FAILURE);
@@ -271,9 +301,21 @@
         }
     }
 
+    if (highWorkLoad > 0) {
+        if (highWorkLoad < lowWorkLoad) {
+            printf("ERROR - -W%f workload lower than -w%f workload", highWorkLoad, lowWorkLoad);
+            return EXIT_FAILURE;
+        }
+    } else {
+        highWorkLoad = lowWorkLoad; // high not specified so use low
+    }
+
     // Keep looping until we can complete the test without disconnecting.
     while((result = testOpenPlayClose(argParser, loopCount,
-            prefixToneMsec, hangTimeMSec))
+            prefixToneMsec, hangTimeMSec,
+            cpuAffinity,
+            lowWorkLoad, highWorkLoad,
+            workPeriodSeconds))
             == AAUDIO_ERROR_DISCONNECTED);
 
     return (result) ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/media/libaaudio/fuzzer/Android.bp b/media/libaaudio/fuzzer/Android.bp
new file mode 100644
index 0000000..e2eec7a
--- /dev/null
+++ b/media/libaaudio/fuzzer/Android.bp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_fuzz {
+    name: "libaaudio_fuzzer",
+    srcs: [
+        "libaaudio_fuzzer.cpp",
+    ],
+    header_libs: [
+        "libaaudio_headers",
+    ],
+    shared_libs: [
+        "libbinder",
+        "libaudiomanager",
+        "libaudiopolicy",
+        "libaudioclient_aidl_conversion",
+    ],
+    static_libs: [
+        "android.media.audio.common.types-V1-cpp",
+        "liblog",
+        "libutils",
+        "libcutils",
+        "libaaudio",
+        "libjsoncpp",
+        "libbase_ndk",
+        "libcgrouprc",
+        "libaudioutils",
+        "libaudioclient",
+        "aaudio-aidl-cpp",
+        "libmedia_helper",
+        "libmediametrics",
+        "libprocessgroup",
+        "av-types-aidl-cpp",
+        "libaaudio_internal",
+        "libcgrouprc_format",
+        "audiopolicy-aidl-cpp",
+        "audioflinger-aidl-cpp",
+        "audiopolicy-types-aidl-cpp",
+        "audioclient-types-aidl-cpp",
+        "shared-file-region-aidl-cpp",
+        "framework-permission-aidl-cpp",
+        "mediametricsservice-aidl-cpp",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/media/libaaudio/fuzzer/README.md b/media/libaaudio/fuzzer/README.md
new file mode 100644
index 0000000..4ba15c5
--- /dev/null
+++ b/media/libaaudio/fuzzer/README.md
@@ -0,0 +1,77 @@
+# Fuzzer for libaaudio
+
+## Plugin Design Considerations
+The fuzzer plugin for `libaaudio` is designed based on an understanding of the
+source code and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+The fuzzer assigns values to the following parameters and passes them on to libaaudio:
+1. Device Id (parameter name: `deviceId`)
+2. Sampling Rate (parameter name: `sampleRate`)
+3. Number of channels (parameter name: `channelCount`)
+4. Audio Travel Direction (parameter name: `direction`)
+5. Audio Format (parameter name: `format`)
+6. Audio Sharing Mode (parameter name: `sharingMode`)
+7. Audio Usage (parameter name: `usage`)
+8. Audio Content type (parameter name: `contentType`)
+9. Audio Input Preset (parameter name: `inputPreset`)
+10. Audio Privacy Sensitivity (parameter name: `privacySensitive`)
+11. Buffer Capacity In Frames (parameter name: `frames`)
+12. Performance Mode (parameter name: `mode`)
+13. Allowed Capture Policy (parameter name: `allowedCapturePolicy`)
+14. Session Id (parameter name: `sessionId`)
+15. Frames per Data Callback (parameter name: `framesPerDataCallback`)
+16. MMap Policy (parameter name: `policy`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `deviceId` | Any value of type `int32_t`  | Value obtained from FuzzedDataProvider |
+| `sampleRate` | Any value of type `int32_t`  | Value obtained from FuzzedDataProvider |
+| `channelCount` |  Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `direction` | 0. `AAUDIO_DIRECTION_OUTPUT` 1. `AAUDIO_DIRECTION_INPUT` | Value obtained from FuzzedDataProvider |
+| `format` | 0. `AAUDIO_FORMAT_INVALID` 1. `AAUDIO_FORMAT_UNSPECIFIED` 2. `AAUDIO_FORMAT_PCM_I16` 3. `AAUDIO_FORMAT_PCM_FLOAT` | Value obtained from FuzzedDataProvider |
+| `sharingMode` | 0. `AAUDIO_SHARING_MODE_EXCLUSIVE` 1. `AAUDIO_SHARING_MODE_SHARED` | Value obtained from FuzzedDataProvider |
+| `usage` | 0. `AAUDIO_USAGE_MEDIA` 1. `AAUDIO_USAGE_VOICE_COMMUNICATION` 2. `AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING` 3. `AAUDIO_USAGE_ALARM` 4. `AAUDIO_USAGE_NOTIFICATION` 5. `AAUDIO_USAGE_NOTIFICATION_RINGTONE` 6. `AAUDIO_USAGE_NOTIFICATION_EVENT` 7. `AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY` 8. `AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE` 9. `AAUDIO_USAGE_ASSISTANCE_SONIFICATION` 10. `AAUDIO_USAGE_GAME` 11. `AAUDIO_USAGE_ASSISTANT` 12. `AAUDIO_SYSTEM_USAGE_EMERGENCY` 13. `AAUDIO_SYSTEM_USAGE_SAFETY` 14. `AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS` 15. `AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT` | Value obtained from FuzzedDataProvider |
+| `contentType` | 0. `AAUDIO_CONTENT_TYPE_SPEECH` 1. `AAUDIO_CONTENT_TYPE_MUSIC` 2. `AAUDIO_CONTENT_TYPE_MOVIE` 3. `AAUDIO_CONTENT_TYPE_SONIFICATION` | Value obtained from FuzzedDataProvider |
+| `inputPreset` | 0. `AAUDIO_INPUT_PRESET_GENERIC` 1. `AAUDIO_INPUT_PRESET_CAMCORDER` 2. `AAUDIO_INPUT_PRESET_VOICE_RECOGNITION` 3. `AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION` 4. `AAUDIO_INPUT_PRESET_UNPROCESSED` 5. `AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE` | Value obtained from FuzzedDataProvider |
+| `privacySensitive` | 0. `true` 1. `false` | Value obtained from FuzzedDataProvider |
+| `frames` | Any value of type `int32_t`  | Value obtained from FuzzedDataProvider |
+| `mode` | 0. `AAUDIO_PERFORMANCE_MODE_NONE` 1. `AAUDIO_PERFORMANCE_MODE_POWER_SAVING` 2. `AAUDIO_PERFORMANCE_MODE_LOW_LATENCY` | Value obtained from FuzzedDataProvider |
+| `allowedCapturePolicy` | 0. `AAUDIO_ALLOW_CAPTURE_BY_ALL` 1. `AAUDIO_ALLOW_CAPTURE_BY_SYSTEM` 2. `AAUDIO_ALLOW_CAPTURE_BY_NONE` | Value obtained from FuzzedDataProvider |
+| `sessionId` | 0. `AAUDIO_SESSION_ID_NONE` 1. `AAUDIO_SESSION_ID_ALLOCATE` | Value obtained from FuzzedDataProvider |
+| `framesPerDataCallback` | Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `policy` | 0. `AAUDIO_POLICY_NEVER` 1. `AAUDIO_POLICY_AUTO` 2. `AAUDIO_POLICY_ALWAYS` | Value obtained from FuzzedDataProvider |
+
+This also ensures that the plugin is always deterministic for any given input.
+
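+A minimal sketch of this selection pattern (mirroring the `FuzzedDataProvider` calls made
+in `libaaudio_fuzzer.cpp` in this change; the helper name is only illustrative):
+
+```
+#include <aaudio/AAudio.h>
+#include <fuzzer/FuzzedDataProvider.h>
+
+// Illustrative helper: return either a known-valid performance mode or an
+// arbitrary int32_t derived from the fuzzer input, so both valid and invalid
+// values are exercised while the choice stays deterministic per input.
+static aaudio_performance_mode_t pickPerformanceMode(FuzzedDataProvider &fdp) {
+    constexpr aaudio_performance_mode_t kValidModes[] = {
+        AAUDIO_PERFORMANCE_MODE_NONE,
+        AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
+        AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+    };
+    return fdp.PickValueInArray({fdp.PickValueInArray(kValidModes),
+                                 fdp.ConsumeIntegral<int32_t>()});
+}
+```
+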
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) without calling `exit()`, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the libaaudio_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) libaaudio_fuzzer
+```
+#### Steps to run
+
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/libaaudio_fuzzer/libaaudio_fuzzer
+```
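+
+Because the binary is a standard libFuzzer target, a corpus directory can also be passed
+as an argument to seed and collect test cases (`CORPUS_DIR` below is only a placeholder):
+```
+  $ adb shell /data/fuzz/arm64/libaaudio_fuzzer/libaaudio_fuzzer CORPUS_DIR
+```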
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp b/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp
new file mode 100644
index 0000000..1167bb0
--- /dev/null
+++ b/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "aaudio/AAudio.h"
+#include "aaudio/AAudioTesting.h"
+#include <fuzzer/FuzzedDataProvider.h>
+
+constexpr int32_t kRandomStringLength = 256;
+
+constexpr int64_t kNanosPerMillisecond = 1000 * 1000;
+
+constexpr aaudio_direction_t kDirections[] = {
+    AAUDIO_DIRECTION_OUTPUT, AAUDIO_DIRECTION_INPUT, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_performance_mode_t kPerformanceModes[] = {
+    AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
+    AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_format_t kFormats[] = {
+    AAUDIO_FORMAT_INVALID,        AAUDIO_FORMAT_UNSPECIFIED,
+    AAUDIO_FORMAT_PCM_I16,        AAUDIO_FORMAT_PCM_FLOAT,
+    AAUDIO_FORMAT_PCM_I24_PACKED, AAUDIO_FORMAT_PCM_I32};
+
+constexpr aaudio_sharing_mode_t kSharingModes[] = {
+    AAUDIO_SHARING_MODE_EXCLUSIVE, AAUDIO_SHARING_MODE_SHARED};
+
+constexpr int32_t kSampleRates[] = {AAUDIO_UNSPECIFIED,
+                                    8000,
+                                    11025,
+                                    16000,
+                                    22050,
+                                    32000,
+                                    44100,
+                                    48000,
+                                    88200,
+                                    96000};
+
+constexpr aaudio_usage_t kUsages[] = {
+    AAUDIO_USAGE_MEDIA,
+    AAUDIO_USAGE_VOICE_COMMUNICATION,
+    AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+    AAUDIO_USAGE_ALARM,
+    AAUDIO_USAGE_NOTIFICATION,
+    AAUDIO_USAGE_NOTIFICATION_RINGTONE,
+    AAUDIO_USAGE_NOTIFICATION_EVENT,
+    AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+    AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+    AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
+    AAUDIO_USAGE_GAME,
+    AAUDIO_USAGE_ASSISTANT,
+    AAUDIO_SYSTEM_USAGE_EMERGENCY,
+    AAUDIO_SYSTEM_USAGE_SAFETY,
+    AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS,
+    AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT,
+    AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_content_type_t kContentTypes[] = {
+    AAUDIO_CONTENT_TYPE_SPEECH, AAUDIO_CONTENT_TYPE_MUSIC,
+    AAUDIO_CONTENT_TYPE_MOVIE, AAUDIO_CONTENT_TYPE_SONIFICATION,
+    AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_input_preset_t kInputPresets[] = {
+    AAUDIO_INPUT_PRESET_GENERIC,
+    AAUDIO_INPUT_PRESET_CAMCORDER,
+    AAUDIO_INPUT_PRESET_VOICE_RECOGNITION,
+    AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION,
+    AAUDIO_INPUT_PRESET_UNPROCESSED,
+    AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE,
+    AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_allowed_capture_policy_t kAllowedCapturePolicies[] = {
+    AAUDIO_ALLOW_CAPTURE_BY_ALL, AAUDIO_ALLOW_CAPTURE_BY_SYSTEM,
+    AAUDIO_ALLOW_CAPTURE_BY_NONE, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_session_id_t kSessionIds[] = {
+    AAUDIO_SESSION_ID_NONE, AAUDIO_SESSION_ID_ALLOCATE, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_policy_t kPolicies[] = {
+    AAUDIO_POLICY_NEVER, AAUDIO_POLICY_AUTO, AAUDIO_POLICY_ALWAYS,
+    AAUDIO_UNSPECIFIED};
+
+class LibAaudioFuzzer {
+public:
+  ~LibAaudioFuzzer() { deInit(); }
+  bool init();
+  void process(const uint8_t *data, size_t size);
+  void deInit();
+
+private:
+  AAudioStreamBuilder *mAaudioBuilder = nullptr;
+  AAudioStream *mAaudioStream = nullptr;
+};
+
+bool LibAaudioFuzzer::init() {
+  aaudio_result_t result = AAudio_createStreamBuilder(&mAaudioBuilder);
+  if ((result != AAUDIO_OK) || (!mAaudioBuilder)) {
+    return false;
+  }
+  return true;
+}
+
+void LibAaudioFuzzer::process(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp(data, size);
+  aaudio_performance_mode_t mode =
+      fdp.PickValueInArray({fdp.PickValueInArray(kPerformanceModes),
+                            fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setPerformanceMode(mAaudioBuilder, mode);
+
+  int32_t deviceId = fdp.PickValueInArray(
+      {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setDeviceId(mAaudioBuilder, deviceId);
+
+  std::string packageName = fdp.PickValueInArray<std::string>(
+      {"android.nativemedia.aaudio", "android.app.appops.cts",
+       fdp.ConsumeRandomLengthString(kRandomStringLength)});
+  AAudioStreamBuilder_setPackageName(mAaudioBuilder, packageName.c_str());
+
+  std::string attributionTag =
+      fdp.ConsumeRandomLengthString(kRandomStringLength);
+  AAudioStreamBuilder_setAttributionTag(mAaudioBuilder, attributionTag.c_str());
+
+  int32_t sampleRate = fdp.PickValueInArray(kSampleRates);
+  AAudioStreamBuilder_setSampleRate(mAaudioBuilder, sampleRate);
+
+  int32_t channelCount = fdp.PickValueInArray(
+      {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setChannelCount(mAaudioBuilder, channelCount);
+
+  aaudio_direction_t direction = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kDirections), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setDirection(mAaudioBuilder, direction);
+
+  aaudio_format_t format = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kFormats), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setFormat(mAaudioBuilder, format);
+
+  aaudio_sharing_mode_t sharingMode = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kSharingModes), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setSharingMode(mAaudioBuilder, sharingMode);
+
+  aaudio_usage_t usage = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kUsages), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setUsage(mAaudioBuilder, usage);
+
+  aaudio_content_type_t contentType = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kContentTypes), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setContentType(mAaudioBuilder, contentType);
+
+  aaudio_input_preset_t inputPreset = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kInputPresets), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setInputPreset(mAaudioBuilder, inputPreset);
+
+  bool privacySensitive = fdp.ConsumeBool();
+  AAudioStreamBuilder_setPrivacySensitive(mAaudioBuilder, privacySensitive);
+
+  int32_t frames = fdp.PickValueInArray(
+      {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setBufferCapacityInFrames(mAaudioBuilder, frames);
+
+  aaudio_allowed_capture_policy_t allowedCapturePolicy =
+      fdp.PickValueInArray({fdp.PickValueInArray(kAllowedCapturePolicies),
+                            fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setAllowedCapturePolicy(mAaudioBuilder,
+                                              allowedCapturePolicy);
+
+  aaudio_session_id_t sessionId = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kSessionIds), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setSessionId(mAaudioBuilder, sessionId);
+
+  AAudioStreamBuilder_setDataCallback(mAaudioBuilder, nullptr, nullptr);
+  AAudioStreamBuilder_setErrorCallback(mAaudioBuilder, nullptr, nullptr);
+
+  int32_t framesPerDataCallback = fdp.PickValueInArray(
+      {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setFramesPerDataCallback(mAaudioBuilder,
+                                               framesPerDataCallback);
+
+  aaudio_policy_t policy = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kPolicies), fdp.ConsumeIntegral<int32_t>()});
+  AAudio_setMMapPolicy(policy);
+  (void)AAudio_getMMapPolicy();
+
+  aaudio_result_t result =
+      AAudioStreamBuilder_openStream(mAaudioBuilder, &mAaudioStream);
+  if ((result != AAUDIO_OK) || (!mAaudioStream)) {
+    return;
+  }
+
+  int32_t framesPerBurst = AAudioStream_getFramesPerBurst(mAaudioStream);
+  uint8_t numberOfBursts = fdp.ConsumeIntegral<uint8_t>();
+  int32_t maxInputFrames = numberOfBursts * framesPerBurst;
+  int32_t requestedBufferSize =
+      fdp.ConsumeIntegral<uint16_t>() * framesPerBurst;
+  AAudioStream_setBufferSizeInFrames(mAaudioStream, requestedBufferSize);
+
+  int64_t position = 0, nanoseconds = 0;
+  AAudioStream_getTimestamp(mAaudioStream, CLOCK_MONOTONIC, &position,
+                            &nanoseconds);
+
+  AAudioStream_requestStart(mAaudioStream);
+
+  aaudio_format_t actualFormat = AAudioStream_getFormat(mAaudioStream);
+  int32_t actualChannelCount = AAudioStream_getChannelCount(mAaudioStream);
+
+  int32_t count = fdp.ConsumeIntegral<int32_t>();
+  direction = AAudioStream_getDirection(mAaudioStream);
+  framesPerDataCallback = AAudioStream_getFramesPerDataCallback(mAaudioStream);
+
+  if (actualFormat == AAUDIO_FORMAT_PCM_I16) {
+    std::vector<int16_t> inputShortData(maxInputFrames * actualChannelCount,
+                                        0x0);
+    if (direction == AAUDIO_DIRECTION_INPUT) {
+      AAudioStream_read(mAaudioStream, inputShortData.data(),
+                        framesPerDataCallback, count * kNanosPerMillisecond);
+    } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
+      AAudioStream_write(mAaudioStream, inputShortData.data(),
+                         framesPerDataCallback, count * kNanosPerMillisecond);
+    }
+  } else if (actualFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+    std::vector<float> inputFloatData(maxInputFrames * actualChannelCount, 0x0);
+    if (direction == AAUDIO_DIRECTION_INPUT) {
+      AAudioStream_read(mAaudioStream, inputFloatData.data(),
+                        framesPerDataCallback, count * kNanosPerMillisecond);
+    } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
+      AAudioStream_write(mAaudioStream, inputFloatData.data(),
+                         framesPerDataCallback, count * kNanosPerMillisecond);
+    }
+  }
+
+  aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+  AAudioStream_waitForStateChange(mAaudioStream, AAUDIO_STREAM_STATE_UNKNOWN,
+                                  &state, count * kNanosPerMillisecond);
+  (void)AAudio_convertStreamStateToText(state);
+
+  (void)AAudioStream_getUsage(mAaudioStream);
+  (void)AAudioStream_getSampleRate(mAaudioStream);
+  (void)AAudioStream_getState(mAaudioStream);
+  (void)AAudioStream_getSamplesPerFrame(mAaudioStream);
+  (void)AAudioStream_getContentType(mAaudioStream);
+  (void)AAudioStream_getInputPreset(mAaudioStream);
+  (void)AAudioStream_isPrivacySensitive(mAaudioStream);
+  (void)AAudioStream_getAllowedCapturePolicy(mAaudioStream);
+  (void)AAudioStream_getPerformanceMode(mAaudioStream);
+  (void)AAudioStream_getDeviceId(mAaudioStream);
+  (void)AAudioStream_getSharingMode(mAaudioStream);
+  (void)AAudioStream_getSessionId(mAaudioStream);
+  (void)AAudioStream_getFramesRead(mAaudioStream);
+  (void)AAudioStream_getFramesWritten(mAaudioStream);
+  (void)AAudioStream_getXRunCount(mAaudioStream);
+  (void)AAudioStream_getBufferCapacityInFrames(mAaudioStream);
+  (void)AAudioStream_getBufferSizeInFrames(mAaudioStream);
+  (void)AAudioStream_isMMapUsed(mAaudioStream);
+
+  AAudioStream_requestPause(mAaudioStream);
+  AAudioStream_requestFlush(mAaudioStream);
+  AAudioStream_release(mAaudioStream);
+  AAudioStream_requestStop(mAaudioStream);
+}
+
+void LibAaudioFuzzer::deInit() {
+  if (mAaudioBuilder) {
+    AAudioStreamBuilder_delete(mAaudioBuilder);
+  }
+  if (mAaudioStream) {
+    AAudioStream_close(mAaudioStream);
+  }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  LibAaudioFuzzer libAaudioFuzzer;
+  if (libAaudioFuzzer.init()) {
+    libAaudioFuzzer.process(data, size);
+  }
+  return 0;
+}
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 4b08295..efa9941 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -20,7 +20,7 @@
  */
 
 /**
- * @file AAudio.h
+ * @file aaudio/AAudio.h
  */
 
 /**
@@ -444,6 +444,22 @@
 };
 typedef int32_t aaudio_content_type_t;
 
+enum {
+
+    /**
+     * Constant indicating the audio content associated with these attributes will follow the
+     * default platform behavior with regards to which content will be spatialized or not.
+     */
+    AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO = 1,
+
+    /**
+     * Constant indicating the audio content associated with these attributes should never
+     * be spatialized.
+     */
+    AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER = 2,
+};
+typedef int32_t aaudio_spatialization_behavior_t;
+
 /**
  * Defines the audio source.
  * An audio source defines both a default physical source of audio signal, and a recording
@@ -565,6 +581,145 @@
 };
 typedef int32_t aaudio_session_id_t;
 
+/**
+ * Defines the audio channel mask.
+ * Channel masks are used to describe the samples and their
+ * arrangement in the audio frame. They are also used in the endpoint
+ * (e.g. a USB audio interface, a DAC connected to headphones) to
+ * specify allowable configurations of a particular device.
+ *
+ * Added in API level 32.
+ */
+enum {
+    /**
+     * Invalid channel mask
+     */
+    AAUDIO_CHANNEL_INVALID = -1,
+
+    /**
+     * Output audio channel mask
+     */
+    AAUDIO_CHANNEL_FRONT_LEFT = 1 << 0,
+    AAUDIO_CHANNEL_FRONT_RIGHT = 1 << 1,
+    AAUDIO_CHANNEL_FRONT_CENTER = 1 << 2,
+    AAUDIO_CHANNEL_LOW_FREQUENCY = 1 << 3,
+    AAUDIO_CHANNEL_BACK_LEFT = 1 << 4,
+    AAUDIO_CHANNEL_BACK_RIGHT = 1 << 5,
+    AAUDIO_CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6,
+    AAUDIO_CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7,
+    AAUDIO_CHANNEL_BACK_CENTER = 1 << 8,
+    AAUDIO_CHANNEL_SIDE_LEFT = 1 << 9,
+    AAUDIO_CHANNEL_SIDE_RIGHT = 1 << 10,
+    AAUDIO_CHANNEL_TOP_CENTER = 1 << 11,
+    AAUDIO_CHANNEL_TOP_FRONT_LEFT = 1 << 12,
+    AAUDIO_CHANNEL_TOP_FRONT_CENTER = 1 << 13,
+    AAUDIO_CHANNEL_TOP_FRONT_RIGHT = 1 << 14,
+    AAUDIO_CHANNEL_TOP_BACK_LEFT = 1 << 15,
+    AAUDIO_CHANNEL_TOP_BACK_CENTER = 1 << 16,
+    AAUDIO_CHANNEL_TOP_BACK_RIGHT = 1 << 17,
+    AAUDIO_CHANNEL_TOP_SIDE_LEFT = 1 << 18,
+    AAUDIO_CHANNEL_TOP_SIDE_RIGHT = 1 << 19,
+    AAUDIO_CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20,
+    AAUDIO_CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21,
+    AAUDIO_CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22,
+    AAUDIO_CHANNEL_LOW_FREQUENCY_2 = 1 << 23,
+    AAUDIO_CHANNEL_FRONT_WIDE_LEFT = 1 << 24,
+    AAUDIO_CHANNEL_FRONT_WIDE_RIGHT = 1 << 25,
+
+    AAUDIO_CHANNEL_MONO = AAUDIO_CHANNEL_FRONT_LEFT,
+    AAUDIO_CHANNEL_STEREO = AAUDIO_CHANNEL_FRONT_LEFT |
+                            AAUDIO_CHANNEL_FRONT_RIGHT,
+    AAUDIO_CHANNEL_2POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+                             AAUDIO_CHANNEL_FRONT_RIGHT |
+                             AAUDIO_CHANNEL_LOW_FREQUENCY,
+    AAUDIO_CHANNEL_TRI = AAUDIO_CHANNEL_FRONT_LEFT |
+                         AAUDIO_CHANNEL_FRONT_RIGHT |
+                         AAUDIO_CHANNEL_FRONT_CENTER,
+    AAUDIO_CHANNEL_TRI_BACK = AAUDIO_CHANNEL_FRONT_LEFT |
+                              AAUDIO_CHANNEL_FRONT_RIGHT |
+                              AAUDIO_CHANNEL_BACK_CENTER,
+    AAUDIO_CHANNEL_3POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+                             AAUDIO_CHANNEL_FRONT_RIGHT |
+                             AAUDIO_CHANNEL_FRONT_CENTER |
+                             AAUDIO_CHANNEL_LOW_FREQUENCY,
+    AAUDIO_CHANNEL_2POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
+                                   AAUDIO_CHANNEL_FRONT_RIGHT |
+                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+    AAUDIO_CHANNEL_2POINT1POINT2 = AAUDIO_CHANNEL_2POINT0POINT2 |
+                                   AAUDIO_CHANNEL_LOW_FREQUENCY,
+    AAUDIO_CHANNEL_3POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
+                                   AAUDIO_CHANNEL_FRONT_RIGHT |
+                                   AAUDIO_CHANNEL_FRONT_CENTER |
+                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+    AAUDIO_CHANNEL_3POINT1POINT2 = AAUDIO_CHANNEL_3POINT0POINT2 |
+                                   AAUDIO_CHANNEL_LOW_FREQUENCY,
+    AAUDIO_CHANNEL_QUAD = AAUDIO_CHANNEL_FRONT_LEFT |
+                          AAUDIO_CHANNEL_FRONT_RIGHT |
+                          AAUDIO_CHANNEL_BACK_LEFT |
+                          AAUDIO_CHANNEL_BACK_RIGHT,
+    AAUDIO_CHANNEL_QUAD_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
+                               AAUDIO_CHANNEL_FRONT_RIGHT |
+                               AAUDIO_CHANNEL_SIDE_LEFT |
+                               AAUDIO_CHANNEL_SIDE_RIGHT,
+    AAUDIO_CHANNEL_SURROUND = AAUDIO_CHANNEL_FRONT_LEFT |
+                              AAUDIO_CHANNEL_FRONT_RIGHT |
+                              AAUDIO_CHANNEL_FRONT_CENTER |
+                              AAUDIO_CHANNEL_BACK_CENTER,
+    AAUDIO_CHANNEL_PENTA = AAUDIO_CHANNEL_QUAD |
+                           AAUDIO_CHANNEL_FRONT_CENTER,
+    // aka 5POINT1_BACK
+    AAUDIO_CHANNEL_5POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+                             AAUDIO_CHANNEL_FRONT_RIGHT |
+                             AAUDIO_CHANNEL_FRONT_CENTER |
+                             AAUDIO_CHANNEL_LOW_FREQUENCY |
+                             AAUDIO_CHANNEL_BACK_LEFT |
+                             AAUDIO_CHANNEL_BACK_RIGHT,
+    AAUDIO_CHANNEL_5POINT1_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
+                                  AAUDIO_CHANNEL_FRONT_RIGHT |
+                                  AAUDIO_CHANNEL_FRONT_CENTER |
+                                  AAUDIO_CHANNEL_LOW_FREQUENCY |
+                                  AAUDIO_CHANNEL_SIDE_LEFT |
+                                  AAUDIO_CHANNEL_SIDE_RIGHT,
+    AAUDIO_CHANNEL_6POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+                             AAUDIO_CHANNEL_FRONT_RIGHT |
+                             AAUDIO_CHANNEL_FRONT_CENTER |
+                             AAUDIO_CHANNEL_LOW_FREQUENCY |
+                             AAUDIO_CHANNEL_BACK_LEFT |
+                             AAUDIO_CHANNEL_BACK_RIGHT |
+                             AAUDIO_CHANNEL_BACK_CENTER,
+    AAUDIO_CHANNEL_7POINT1 = AAUDIO_CHANNEL_5POINT1 |
+                             AAUDIO_CHANNEL_SIDE_LEFT |
+                             AAUDIO_CHANNEL_SIDE_RIGHT,
+    AAUDIO_CHANNEL_5POINT1POINT2 = AAUDIO_CHANNEL_5POINT1 |
+                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+    AAUDIO_CHANNEL_5POINT1POINT4 = AAUDIO_CHANNEL_5POINT1 |
+                                   AAUDIO_CHANNEL_TOP_FRONT_LEFT |
+                                   AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
+                                   AAUDIO_CHANNEL_TOP_BACK_LEFT |
+                                   AAUDIO_CHANNEL_TOP_BACK_RIGHT,
+    AAUDIO_CHANNEL_7POINT1POINT2 = AAUDIO_CHANNEL_7POINT1 |
+                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+    AAUDIO_CHANNEL_7POINT1POINT4 = AAUDIO_CHANNEL_7POINT1 |
+                                   AAUDIO_CHANNEL_TOP_FRONT_LEFT |
+                                   AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
+                                   AAUDIO_CHANNEL_TOP_BACK_LEFT |
+                                   AAUDIO_CHANNEL_TOP_BACK_RIGHT,
+    AAUDIO_CHANNEL_9POINT1POINT4 = AAUDIO_CHANNEL_7POINT1POINT4 |
+                                   AAUDIO_CHANNEL_FRONT_WIDE_LEFT |
+                                   AAUDIO_CHANNEL_FRONT_WIDE_RIGHT,
+    AAUDIO_CHANNEL_9POINT1POINT6 = AAUDIO_CHANNEL_9POINT1POINT4 |
+                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+
+    AAUDIO_CHANNEL_FRONT_BACK = AAUDIO_CHANNEL_FRONT_CENTER |
+                                AAUDIO_CHANNEL_BACK_CENTER,
+};
+typedef uint32_t aaudio_channel_mask_t;
+
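For illustration only (not part of this change): each channel constant above occupies its own bit, so masks can be combined and inspected with ordinary bit operations. A minimal sketch, assuming an NDK toolchain where these API 32 constants are available:

    // Sketch only: combine per-channel bits into a mask and count the channels.
    #include <aaudio/AAudio.h>
    #include <stdio.h>

    int main() {
        aaudio_channel_mask_t mask = AAUDIO_CHANNEL_FRONT_LEFT
                                   | AAUDIO_CHANNEL_FRONT_RIGHT
                                   | AAUDIO_CHANNEL_LOW_FREQUENCY; // same bits as AAUDIO_CHANNEL_2POINT1
        int channelCount = __builtin_popcount(mask);               // 3 samples per frame
        printf("mask = 0x%08x, channels = %d\n", mask, channelCount);
        return 0;
    }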
 typedef struct AAudioStreamStruct         AAudioStream;
 typedef struct AAudioStreamBuilderStruct  AAudioStreamBuilder;
 
@@ -643,8 +798,11 @@
  * This is usually {@code Context#getPackageName()}.
  *
  * The default, if you do not call this function, is a random package in the calling uid.
- * The vast majority of apps have only one package per calling UID. If the package
- * name does not match the calling UID, then requests will be rejected.
+ * The vast majority of apps have only one package per calling UID.
+ * If an invalid package name is set, input streams may not be given permission to
+ * record when started.
+ *
+ * The package name is usually the applicationId in your app's build.gradle file.
  *
  * Available since API level 31.
  *
@@ -699,6 +857,11 @@
  * If an exact value is specified then an opened stream will use that value.
  * If a stream cannot be opened with the specified value then the open will fail.
  *
+ * The channel count set by this function may differ from the channel count implied by a
+ * channel mask set with {@link AAudioStreamBuilder_setChannelMask}. If both this function
+ * and {@link AAudioStreamBuilder_setChannelMask} are called, whichever was called last
+ * takes precedence.
+ *
  * Available since API level 26.
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
@@ -714,6 +877,8 @@
  *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param samplesPerFrame Number of samples in a frame.
+ *
+ * @deprecated use {@link AAudioStreamBuilder_setChannelCount}
  */
 AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
                                                        int32_t samplesPerFrame) __INTRODUCED_IN(26);
@@ -836,6 +1001,37 @@
         aaudio_content_type_t contentType) __INTRODUCED_IN(28);
 
 /**
+ * Sets the behavior affecting whether spatialization will be used.
+ *
+ * The AAudio system will use this information to select whether the stream will go
+ * through a spatializer effect or not when the effect is supported and enabled.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param spatializationBehavior the desired behavior with regard to spatialization, e.g.
+ *     {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
+ */
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+        aaudio_spatialization_behavior_t spatializationBehavior) __INTRODUCED_IN(32);
+
+/**
+ * Specifies whether the audio data of this output stream has already been processed for
+ * spatialization.
+ *
+ * If the stream's content has already been processed for spatialization, setting this to
+ * true prevents issues such as double processing on platforms that would otherwise
+ * spatialize the audio data.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param isSpatialized true if the content is already processed for binaural or transaural spatial
+ *     rendering, false otherwise.
+ */
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+        bool isSpatialized) __INTRODUCED_IN(32);
+
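For illustration only (not part of this change): a minimal sketch of how an app that renders its own binaural mix might use these two setters together, assuming a builder obtained from AAudio_createStreamBuilder():

    // Sketch only: the content was pre-rendered for binaural playback, so mark it as
    // spatialized to avoid double processing by a platform spatializer.
    AAudioStreamBuilder *builder = nullptr;
    AAudio_createStreamBuilder(&builder);
    AAudioStreamBuilder_setSpatializationBehavior(builder, AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO);
    AAudioStreamBuilder_setIsContentSpatialized(builder, true);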
+/**
  * Set the input (capture) preset for the stream.
  *
  * The AAudio system will use this information to optimize the
@@ -1136,6 +1332,32 @@
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
     __INTRODUCED_IN(26);
 
+/**
+ * Set audio channel mask for the stream.
+ *
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
+ * If neither the channel mask nor the channel count is set, stereo will be chosen when
+ * the stream is opened.
+ * After opening a stream with an unspecified value, the application must query for the
+ * actual value, which may vary by device.
+ *
+ * If an exact value is specified then an opened stream will use that value.
+ * If a stream cannot be opened with the specified value then the open will fail.
+ *
+ * The channel count implied by the mask provided here may differ from the channel count
+ * set with {@link AAudioStreamBuilder_setChannelCount} or
+ * {@link AAudioStreamBuilder_setSamplesPerFrame}. If this function is called together with
+ * either of those functions, whichever was called last takes precedence.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param channelMask Audio channel mask desired.
+ */
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+        aaudio_channel_mask_t channelMask) __INTRODUCED_IN(32);
+
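For illustration only (not part of this change): a minimal sketch of the precedence rule described above, assuming a builder created with AAudio_createStreamBuilder():

    // Sketch only: the later call wins, so the stream is requested with a 5.1 layout
    // (6 channels), not the 2 channels asked for first.
    AAudioStreamBuilder_setChannelCount(builder, 2);
    AAudioStreamBuilder_setChannelMask(builder, AAUDIO_CHANNEL_5POINT1);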
 // ============================================================
 // Stream Control
 // ============================================================
@@ -1616,6 +1838,31 @@
         __INTRODUCED_IN(28);
 
 /**
+ * Return the spatialization behavior for the stream.
+ *
+ * If none was explicitly set, it will return the default
+ * {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO} behavior.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return spatialization behavior, for example {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
+ */
+AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
+        AAudioStream* stream) __INTRODUCED_IN(32);
+
+/**
+ * Return whether the content of the stream is spatialized.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return true if the content is spatialized
+ */
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream) __INTRODUCED_IN(32);
+
+
+/**
  * Return the input preset for the stream.
  *
  * Available since API level 28.
@@ -1652,6 +1899,18 @@
 AAUDIO_API bool AAudioStream_isPrivacySensitive(AAudioStream* stream)
         __INTRODUCED_IN(30);
 
+/**
+ * Return the channel mask for the stream. This will be the mask set with
+ * {@link #AAudioStreamBuilder_setChannelMask}, or {@link #AAUDIO_UNSPECIFIED} if no mask
+ * was set.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual channel mask
+ */
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+        __INTRODUCED_IN(32);
+
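For illustration only (not part of this change): a sketch of reading the mask back from an open stream, assuming a configured builder and with error handling trimmed:

    // Sketch only: open the stream, then query the channel mask and count reported for it.
    AAudioStream *stream = nullptr;
    aaudio_result_t result = AAudioStreamBuilder_openStream(builder, &stream);
    if (result == AAUDIO_OK) {
        aaudio_channel_mask_t actualMask = AAudioStream_getChannelMask(stream);
        int32_t actualChannels = AAudioStream_getChannelCount(stream);
        // ... use the stream, then AAudioStream_close(stream);
    }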
 #ifdef __cplusplus
 }
 #endif
diff --git a/media/libaaudio/include/aaudio/AAudioTesting.h b/media/libaaudio/include/aaudio/AAudioTesting.h
index 02ec411..0f2d7a2 100644
--- a/media/libaaudio/include/aaudio/AAudioTesting.h
+++ b/media/libaaudio/include/aaudio/AAudioTesting.h
@@ -49,6 +49,12 @@
 };
 typedef int32_t aaudio_policy_t;
 
+// Internal error codes. Only used by the framework.
+enum {
+    AAUDIO_INTERNAL_ERROR_BASE = -1000,
+    AAUDIO_ERROR_STANDBY,
+};
+
 /**
  * Control whether AAudioStreamBuilder_openStream() will use the new MMAP data path
  * or the older "Legacy" data path.
diff --git a/media/libaaudio/scripts/measure_device_power.py b/media/libaaudio/scripts/measure_device_power.py
new file mode 100755
index 0000000..9603f88
--- /dev/null
+++ b/media/libaaudio/scripts/measure_device_power.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python3
+"""
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+"""
+
+'''
+Measure CPU-related power on Pixel 6 or later devices using ODPM,
+the On Device Power Measurement tool.
+Generate a CSV report suitable for pasting into a spreadsheet.
+'''
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+import time
+
+# defaults
+PRE_DELAY_SECONDS = 0.5 # time to sleep before command to avoid adb unroot error
+DEFAULT_NUM_ITERATIONS = 5
+DEFAULT_FILE_NAME = 'energy_commands.txt'
+
+'''
+Default rail assignments
+philburk-macbookpro3:expt philburk$ adb shell cat /sys/bus/iio/devices/iio\:device0/energy_value
+t=349894
+CH0(T=349894)[S10M_VDD_TPU], 5578756
+CH1(T=349894)[VSYS_PWR_MODEM], 29110940
+CH2(T=349894)[VSYS_PWR_RFFE], 3166046
+CH3(T=349894)[S2M_VDD_CPUCL2], 30203502
+CH4(T=349894)[S3M_VDD_CPUCL1], 23377533
+CH5(T=349894)[S4M_VDD_CPUCL0], 46356942
+CH6(T=349894)[S5M_VDD_INT], 10771876
+CH7(T=349894)[S1M_VDD_MIF], 21091363
+philburk-macbookpro3:expt philburk$ adb shell cat /sys/bus/iio/devices/iio\:device1/energy_value
+t=359458
+CH0(T=359458)[VSYS_PWR_WLAN_BT], 45993209
+CH1(T=359458)[L2S_VDD_AOC_RET], 2822928
+CH2(T=359458)[S9S_VDD_AOC], 6923706
+CH3(T=359458)[S5S_VDDQ_MEM], 4658202
+CH4(T=359458)[S10S_VDD2L], 5506273
+CH5(T=359458)[S4S_VDD2H_MEM], 14254574
+CH6(T=359458)[S2S_VDD_G3D], 5315420
+CH7(T=359458)[VSYS_PWR_DISPLAY], 81221665
+'''
+
+'''
+LDO2M(L2M_ALIVE):DDR  -> DRAM Array Core Power
+BUCK4S(S4S_VDD2H_MEM):DDR -> Normal operation data and control path logic circuits
+BUCK5S(S5S_VDDQ_MEM):DDR -> LPDDR I/O interface
+BUCK10S(S10S_VDD2L):DDR  -> DVFSC (1600Mbps or lower) operation data and control path logic circuits
+BUCK1M (S1M_VDD_MIF):  SoC side Memory InterFace and Controller
+'''
+
+# Map between rail name and human readable name.
+ENERGY_DICTIONARY = { \
+        'S4M_VDD_CPUCL0': 'CPU0', \
+        'S3M_VDD_CPUCL1': 'CPU1', \
+        'S2M_VDD_CPUCL2': 'CPU2', \
+        'S1M_VDD_MIF': 'MIF', \
+        'L2M_ALIVE': 'DDRAC', \
+        'S4S_VDD2H_MEM': 'DDRNO', \
+        'S10S_VDD2L': 'DDR16', \
+        'S5S_VDDQ_MEM': 'DDRIO', \
+        'VSYS_PWR_DISPLAY': 'SCREEN'}
+
+SORTED_ENERGY_LIST = sorted(ENERGY_DICTIONARY, key=ENERGY_DICTIONARY.get)
+
+# Sometimes "adb unroot" returns 1!
+# So try several times.
+# @return 0 on success
+def adbUnroot():
+    returnCode = 1
+    count = 0
+    limit = 5
+    while count < limit and returnCode != 0:
+        print(('Try to adb unroot {} of {}'.format(count, limit)))
+        subprocess.call(["adb", "wait-for-device"])
+        time.sleep(PRE_DELAY_SECONDS)
+        returnCode = subprocess.call(["adb", "unroot"])
+        print(('returnCode = {}'.format(returnCode)))
+        count += 1
+    return returnCode
+
+# @param commandString String containing shell command
+# @return The captured stdout of the command, or the "adb unroot" return code
+def runCommand(commandString):
+    print(commandString)
+    if commandString == "adb unroot":
+        result = adbUnroot()
+    else:
+        commandArray = commandString.split(' ')
+        result = subprocess.run(commandArray, check=True, capture_output=True).stdout
+    return result
+
+# @param commandString String containing ADB command
+# @return The captured stdout of the command, or the "adb unroot" return code
+def adbCommand(commandString):
+    if commandString == "unroot":
+        result = adbUnroot()
+    else:
+        print(("adb " + commandString))
+        commandArray = ["adb"] + commandString.split(' ')
+        subprocess.call(["adb", "wait-for-device"])
+        result = subprocess.run(commandArray, check=True, capture_output=True).stdout
+    return result
+
+# Parse a line that looks like "CH3(T=10697635)[S2M_VDD_CPUCL2], 116655335"
+# Use S2M_VDD_CPUCL2 as the tag and set value to the number
+# in the report dictionary.
+def parseEnergyValue(string):
+    return tuple(re.split(r'\[|\], +', string)[1:])
+
+# Read accumulated energy into a dictionary.
+def measureEnergyForDevice(deviceIndex, report):
+    # print("measureEnergyForDevice " + str(deviceIndex))
+    tableBytes = adbCommand( \
+            'shell cat /sys/bus/iio/devices/iio\:device{}/energy_value'\
+            .format(deviceIndex))
+    table = tableBytes.decode("utf-8")
+    # print(table)
+    for count, line in enumerate(table.splitlines()):
+        if count > 0:
+            tagEnergy = parseEnergyValue(line)
+            report[tagEnergy[0]] = int(tagEnergy[1].strip())
+    # print(report)
+
+def measureEnergyOnce():
+    adbCommand("root")
+    report = {}
+    measureEnergyForDevice(0, report)
+    measureEnergyForDevice(1, report)
+    adbUnroot()
+    return report
+
+# Subtract numeric values for matching keys.
+def subtractReports(A, B):
+    return {x: A[x] - B[x] for x in A if x in B}
+
+# Add numeric values for matching keys.
+def addReports(A, B):
+    return {x: A[x] + B[x] for x in A if x in B}
+
+# Divide numeric values by divisor.
+# @return Modified copy of report.
+def divideReport(report, divisor):
+    return {key: val / divisor for key, val in list(report.items())}
+
+# Generate a dictionary that is the difference between two measurements over time.
+def measureEnergyOverTime(duration):
+    report1 = measureEnergyOnce()
+    print(("Measure energy for " + str(duration) + " seconds."))
+    time.sleep(duration)
+    report2 = measureEnergyOnce()
+    return subtractReports(report2, report1)
+
+# Generate a CSV string containing the human readable headers.
+def formatEnergyHeader():
+    header = ""
+    for tag in SORTED_ENERGY_LIST:
+        header += ENERGY_DICTIONARY[tag] + ", "
+    return header
+
+# Generate a CSV string containing the numeric values.
+def formatEnergyData(report):
+    data = ""
+    for tag in SORTED_ENERGY_LIST:
+        if tag in list(report.keys()):
+            data += str(report[tag]) + ", "
+        else:
+            data += "-1,"
+    return data
+
+def printEnergyReport(report):
+    s = "\n"
+    s += "Values are in microWattSeconds\n"
+    s += "Report below is CSV format for pasting into a spreadsheet:\n"
+    s += formatEnergyHeader() + "\n"
+    s += formatEnergyData(report) + "\n"
+    print(s)
+
+# Generate a dictionary that is the difference between two measurements
+# before and after executing the command.
+def measureEnergyForCommand(command):
+    report1 = measureEnergyOnce()
+    print(("Measure energy for:  " + command))
+    result = runCommand(command)
+    report2 = measureEnergyOnce()
+    # print(result)
+    return subtractReports(report2, report1)
+
+# Average the results of several measurements for one command.
+def averageEnergyForCommand(command, count):
+    print("=================== #0\n")
+    sumReport = measureEnergyForCommand(command)
+    for i in range(1, count):
+        print(("=================== #" + str(i) + "\n"))
+        report = measureEnergyForCommand(command)
+        sumReport = addReports(sumReport, report)
+    print(sumReport)
+    return divideReport(sumReport, count)
+
+# Parse a list of commands in a file.
+# Lines ending in "\" are continuation lines.
+# Lines beginning with "#" are comments.
+def measureEnergyForCommands(fileName):
+    finalReport = "------------------------------------\n"
+    finalReport += "comment, command, " + formatEnergyHeader() + "\n"
+    comment = ""
+    try:
+        fp = open(fileName)
+        line = fp.readline()
+        while line:
+            command = line.strip()
+            if command.endswith('\\'):
+                command = command[:-1].strip() # strip the trailing backslash
+                runCommand(command)
+            elif command.startswith("#"):
+                # ignore comment
+                print((command + "\n"))
+                comment = command
+            elif command:
+                report = averageEnergyForCommand(command, DEFAULT_NUM_ITERATIONS)
+                finalReport += comment + ", " + command + ", " + formatEnergyData(report) + "\n"
+                print(finalReport)
+            line = fp.readline()
+    finally:
+        fp.close()
+    return finalReport
+
+def main():
+    # parse command line args
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-s', '--seconds',
+            help="Measure power for N seconds. Ignore scriptFile.",
+            type=float)
+    parser.add_argument("fileName",
+            nargs = '?',
+            help="Path to file containing commands to be measured."
+                    + " Default path = " + DEFAULT_FILE_NAME + "."
+                    + " Lines ending in '\' are continuation lines."
+                    + " Lines beginning with '#' are comments.",
+                    default=DEFAULT_FILE_NAME)
+    args = parser.parse_args()
+
+    print(("seconds  = " + str(args.seconds)))
+    print(("fileName = " + str(args.fileName)))
+    # Process command line
+    if args.seconds:
+        report = measureEnergyOverTime(args.seconds)
+        printEnergyReport(report)
+    else:
+        report = measureEnergyForCommands(args.fileName)
+        print(report)
+    print("Finished.\n")
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
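For illustration only (not part of this change): a hypothetical energy_commands.txt in the format the parser above expects. A line ending in a backslash is run as a setup command without being measured, a '#' line becomes the comment column of the report, and every other non-empty line is measured DEFAULT_NUM_ITERATIONS times. The play_sine_tone path is made up for illustration.

    # wake the screen first; the trailing backslash means run-but-do-not-measure
    adb shell input keyevent KEYCODE_WAKEUP \
    # measured case: hypothetical test binary that plays a sine tone
    adb shell /data/local/tmp/play_sine_tone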
diff --git a/media/libaaudio/scripts/setup_odpm_cpu_rails.sh b/media/libaaudio/scripts/setup_odpm_cpu_rails.sh
new file mode 100755
index 0000000..e9241b9
--- /dev/null
+++ b/media/libaaudio/scripts/setup_odpm_cpu_rails.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Configure ODPM rails to measure CPU specific power.
+# See go/odpm-p21-userguide
+
+adb root
+
+# LDO2M(L2M_ALIVE) - DRAM Array Core Power
+adb shell 'echo "CH0=LDO2M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+
+# These are the defaults.
+# BUCK2M(S2M_VDD_CPUCL2):CPU(BIG)
+# adb shell 'echo "CH3=BUCK2M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK3M(S3M_VDD_CPUCL1):CPU(MID)
+# adb shell 'echo "CH4=BUCK3M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK4M(S4M_VDD_CPUCL0):CPU(LITTLE)
+# adb shell 'echo "CH5=BUCK4M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK1M(S1M_VDD_MIF):MIF
+# adb shell 'echo "CH7=BUCK1M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+
+# These are default on device1.
+# BUCK5S(S5S_VDDQ_MEM):DDR
+# adb shell 'echo "CH3=BUCK5S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+# BUCK10S(S10S_VDD2L):DDR
+# adb shell 'echo "CH4=BUCK10S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+# BUCK4S(S4S_VDD2H_MEM):DDR
+# adb shell 'echo "CH5=BUCK4S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+
+adb shell 'cat /sys/bus/iio/devices/iio\:device0/enabled_rails'
+adb shell 'cat /sys/bus/iio/devices/iio\:device1/enabled_rails'
+
+adb unroot
+
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 33a5c7f..f50b53a 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -7,6 +7,65 @@
     default_applicable_licenses: ["frameworks_av_license"],
 }
 
+tidy_errors = [
+    // https://clang.llvm.org/extra/clang-tidy/checks/list.html
+    // For many categories, the checks are too many to specify individually.
+    // Feel free to disable as needed - as warnings are generally ignored,
+    // we treat warnings as errors.
+    "android-*",
+    "bugprone-*",
+    "cert-*",
+    "clang-analyzer-security*",
+    "google-*",
+    "misc-*",
+    //"modernize-*",  // explicitly list the modernize as they can be subjective.
+    "modernize-avoid-bind",
+    //"modernize-avoid-c-arrays", // std::array<> can be verbose
+    "modernize-concat-nested-namespaces",
+    //"modernize-deprecated-headers", // C headers still ok even if there is C++ equivalent.
+    "modernize-deprecated-ios-base-aliases",
+    "modernize-loop-convert",
+    "modernize-make-shared",
+    "modernize-make-unique",
+    "modernize-pass-by-value",
+    "modernize-raw-string-literal",
+    "modernize-redundant-void-arg",
+    "modernize-replace-auto-ptr",
+    "modernize-replace-random-shuffle",
+    "modernize-return-braced-init-list",
+    "modernize-shrink-to-fit",
+    "modernize-unary-static-assert",
+    // "modernize-use-auto", // found in AAudioAudio.cpp
+    "modernize-use-bool-literals",
+    "modernize-use-default-member-init",
+    "modernize-use-emplace",
+    "modernize-use-equals-default",
+    "modernize-use-equals-delete",
+    // "modernize-use-nodiscard", // found in aidl generated files
+    "modernize-use-noexcept",
+    "modernize-use-nullptr",
+    // "modernize-use-override", // found in aidl generated files
+    // "modernize-use-trailing-return-type", // not necessarily more readable
+    "modernize-use-transparent-functors",
+    "modernize-use-uncaught-exceptions",
+    // "modernize-use-using", // found typedef in several files
+    "performance-*",
+
+    // Remove some pedantic stylistic requirements.
+    "-android-cloexec-dup", // found in SharedMemoryParcelable.cpp
+    "-bugprone-macro-parentheses", // found in SharedMemoryParcelable.h
+    "-bugprone-narrowing-conversions", // found in several interface from size_t to int32_t
+
+    "-google-readability-casting", // C++ casts not always necessary and may be verbose
+    "-google-readability-todo", // do not require TODO(info)
+    "-google-build-using-namespace", // Reenable and fix later.
+    "-google-global-names-in-headers", // found in several files
+
+    "-misc-non-private-member-variables-in-classes", // found in aidl generated files
+
+    "-performance-no-int-to-ptr", // found in SharedMemoryParcelable.h
+]
+
 cc_library {
     name: "libaaudio",
 
@@ -52,7 +111,7 @@
         "libcutils",
         "libutils",
         "libbinder",
-        "libpermission",
+        "framework-permission-aidl-cpp",
     ],
 
     sanitize: {
@@ -64,6 +123,13 @@
         symbol_file: "libaaudio.map.txt",
         versions: ["28"],
     },
+
+    tidy: true,
+    tidy_checks: tidy_errors,
+    tidy_checks_as_errors: tidy_errors,
+    tidy_flags: [
+        "-format-style=file",
+    ]
 }
 
 cc_library {
@@ -102,6 +168,8 @@
         "libbinder",
         "framework-permission-aidl-cpp",
         "aaudio-aidl-cpp",
+        "android.media.audio.common.types-V1-cpp",
+        "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
     ],
 
@@ -139,10 +207,16 @@
         "binding/RingBufferParcelable.cpp",
         "binding/SharedMemoryParcelable.cpp",
         "binding/SharedRegionParcelable.cpp",
-        "flowgraph/AudioProcessorBase.cpp",
+        "flowgraph/ChannelCountConverter.cpp",
         "flowgraph/ClipToRange.cpp",
+        "flowgraph/FlowGraphNode.cpp",
+        "flowgraph/ManyToMultiConverter.cpp",
+        "flowgraph/MonoBlend.cpp",
         "flowgraph/MonoToMultiConverter.cpp",
+        "flowgraph/MultiToMonoConverter.cpp",
+        "flowgraph/MultiToManyConverter.cpp",
         "flowgraph/RampLinear.cpp",
+        "flowgraph/SampleRateConverter.cpp",
         "flowgraph/SinkFloat.cpp",
         "flowgraph/SinkI16.cpp",
         "flowgraph/SinkI24.cpp",
@@ -151,11 +225,26 @@
         "flowgraph/SourceI16.cpp",
         "flowgraph/SourceI24.cpp",
         "flowgraph/SourceI32.cpp",
+        "flowgraph/resampler/IntegerRatio.cpp",
+        "flowgraph/resampler/LinearResampler.cpp",
+        "flowgraph/resampler/MultiChannelResampler.cpp",
+        "flowgraph/resampler/PolyphaseResampler.cpp",
+        "flowgraph/resampler/PolyphaseResamplerMono.cpp",
+        "flowgraph/resampler/PolyphaseResamplerStereo.cpp",
+        "flowgraph/resampler/SincResampler.cpp",
+        "flowgraph/resampler/SincResamplerStereo.cpp",
     ],
     sanitize: {
         integer_overflow: true,
         misc_undefined: ["bounds"],
     },
+
+    tidy: true,
+    tidy_checks: tidy_errors,
+    tidy_checks_as_errors: tidy_errors,
+    tidy_flags: [
+        "-format-style=file",
+    ]
 }
 
 aidl_interface {
@@ -172,19 +261,15 @@
         "binding/aidl/aaudio/IAAudioService.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
+        "audioclient-types-aidl",
         "shared-file-region-aidl",
-        "framework-permission-aidl"
+        "framework-permission-aidl",
     ],
     backend:
     {
-        cpp: {
-            enabled: true,
-        },
         java: {
-            // TODO: need to have audio_common-aidl available in Java to enable
-            //       this.
-            enabled: false,
+            sdk_version: "module_current",
         },
     },
 }
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
index 6e3a1c8..42d81ca 100644
--- a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
@@ -124,4 +124,16 @@
     return result;
 }
 
+aaudio_result_t AAudioBinderAdapter::exitStandby(aaudio_handle_t streamHandle,
+                                                 AudioEndpointParcelable &endpointOut) {
+    aaudio_result_t result;
+    Endpoint endpoint;
+    Status status = mDelegate->exitStandby(streamHandle, &endpoint, &result);
+    if (!status.isOk()) {
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
+    }
+    endpointOut = std::move(endpoint);
+    return result;
+}
+
 }  // namespace aaudio
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.h b/media/libaaudio/src/binding/AAudioBinderAdapter.h
index 5e9ab57..d170783 100644
--- a/media/libaaudio/src/binding/AAudioBinderAdapter.h
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.h
@@ -57,6 +57,9 @@
     aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
                                           pid_t clientThreadId) override;
 
+    aaudio_result_t exitStandby(aaudio_handle_t streamHandle,
+                                AudioEndpointParcelable &parcelable) override;
+
 private:
     IAAudioService* const mDelegate;
 };
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index fa5a2da..8e5facc 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -36,13 +36,10 @@
 using android::IServiceManager;
 using android::defaultServiceManager;
 using android::interface_cast;
-using android::IInterface;
 using android::Mutex;
 using android::ProcessState;
 using android::sp;
 using android::status_t;
-using android::wp;
-using android::binder::Status;
 
 using namespace aaudio;
 
@@ -93,7 +90,7 @@
                     ALOGE("%s() - linkToDeath() returned %d", __func__, status);
                 }
                 aaudioService = interface_cast<IAAudioService>(binder);
-                mAdapter.reset(new Adapter(aaudioService, mAAudioClient));
+                mAdapter = std::make_shared<Adapter>(aaudioService, mAAudioClient);
                 needToRegister = true;
                 // Make sure callbacks can be received by mAAudioClient
                 ProcessState::self()->startThreadPool();
@@ -204,3 +201,11 @@
 
     return service->unregisterAudioThread(streamHandle, clientThreadId);
 }
+
+aaudio_result_t AAudioBinderClient::exitStandby(aaudio_handle_t streamHandle,
+                                                AudioEndpointParcelable &endpointOut) {
+    std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
+    if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
+    return service->exitStandby(streamHandle, endpointOut);
+}
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index 6a7b639..0968f4c 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -108,7 +108,10 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
-    void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) {
+    aaudio_result_t exitStandby(aaudio_handle_t streamHandle,
+                                AudioEndpointParcelable &endpointOut) override;
+
+    void onStreamChange(aaudio_handle_t /*handle*/, int32_t /*opcode*/, int32_t /*value*/) {
         // TODO This is just a stub so we can have a client Binder to pass to the service.
         // TODO Implemented in a later CL.
         ALOGW("onStreamChange called!");
@@ -116,7 +119,7 @@
 
     class AAudioClient : public android::IBinder::DeathRecipient, public BnAAudioClient {
     public:
-        AAudioClient(android::wp<AAudioBinderClient> aaudioBinderClient)
+        explicit AAudioClient(const android::wp<AAudioBinderClient>& aaudioBinderClient)
                 : mBinderClient(aaudioBinderClient) {
         }
 
@@ -150,10 +153,10 @@
     class Adapter : public AAudioBinderAdapter {
     public:
         Adapter(const android::sp<IAAudioService>& delegate,
-                const android::sp<AAudioClient>& aaudioClient)
+                android::sp<AAudioClient> aaudioClient)
                 : AAudioBinderAdapter(delegate.get()),
                   mDelegate(delegate),
-                  mAAudioClient(aaudioClient) {}
+                  mAAudioClient(std::move(aaudioClient)) {}
 
         virtual ~Adapter() {
             if (mDelegate != nullptr) {
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
index 5d11512..e901767 100644
--- a/media/libaaudio/src/binding/AAudioServiceInterface.h
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -37,7 +37,7 @@
 class AAudioServiceInterface {
 public:
 
-    AAudioServiceInterface() {};
+    AAudioServiceInterface() = default;
     virtual ~AAudioServiceInterface() = default;
 
     virtual void registerClient(const android::sp<IAAudioClient>& client) = 0;
@@ -95,6 +95,16 @@
 
     virtual aaudio_result_t stopClient(aaudio_handle_t streamHandle,
                                        audio_port_handle_t clientHandle) = 0;
+
+    /**
+     * Exit the standby mode.
+     *
+     * @param streamHandle the stream handle
+     * @param parcelable out parameter that receives the new data queue information
+     * @return the result of the execution
+     */
+    virtual aaudio_result_t exitStandby(aaudio_handle_t streamHandle,
+                                        AudioEndpointParcelable &parcelable) = 0;
 };
 
 } /* namespace aaudio */
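For illustration only (not part of this change): a hedged sketch of how a client-side stream object might use the new exitStandby() call when the service reports standby. This is a fragment, not a complete implementation, and the member names (mServiceInterface, mServiceStreamHandle, mEndPointParcelable, mEndpointDescriptor) are assumed for illustration, following the style of the existing client code rather than any specific file in this change:

    // Sketch only: ask the service to leave standby, then adopt the replacement
    // shared-memory data queue it returns.
    AudioEndpointParcelable newEndpoint;
    aaudio_result_t result = mServiceInterface.exitStandby(mServiceStreamHandle, newEndpoint);
    if (result == AAUDIO_OK) {
        // Swap the data queue file descriptor and re-map the ring buffer.
        mEndPointParcelable.updateDataFileDescriptor(&newEndpoint);
        result = mEndPointParcelable.resolveDataQueue(&mEndpointDescriptor.dataQueueDescriptor);
    }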
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 2d501ef..b60bac2 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -23,26 +23,36 @@
 #include <sys/mman.h>
 #include <aaudio/AAudio.h>
 
+#include <media/AidlConversion.h>
+
 #include "binding/AAudioStreamConfiguration.h"
 
 using namespace aaudio;
 
-using android::media::audio::common::AudioFormat;
+using android::media::audio::common::AudioFormatDescription;
 
 AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
-    setSamplesPerFrame(parcelable.samplesPerFrame);
+    setChannelMask(parcelable.channelMask);
     setSampleRate(parcelable.sampleRate);
     setDeviceId(parcelable.deviceId);
     static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(parcelable.sharingMode));
     setSharingMode(parcelable.sharingMode);
-    static_assert(sizeof(audio_format_t) == sizeof(parcelable.audioFormat));
-    setFormat(static_cast<audio_format_t>(parcelable.audioFormat));
+    auto convFormat = android::aidl2legacy_AudioFormatDescription_audio_format_t(
+            parcelable.audioFormat);
+    setFormat(convFormat.ok() ? convFormat.value() : AUDIO_FORMAT_INVALID);
     static_assert(sizeof(aaudio_direction_t) == sizeof(parcelable.direction));
     setDirection(parcelable.direction);
     static_assert(sizeof(audio_usage_t) == sizeof(parcelable.usage));
     setUsage(parcelable.usage);
     static_assert(sizeof(aaudio_content_type_t) == sizeof(parcelable.contentType));
     setContentType(parcelable.contentType);
+
+    static_assert(sizeof(aaudio_spatialization_behavior_t) ==
+            sizeof(parcelable.spatializationBehavior));
+    setSpatializationBehavior(parcelable.spatializationBehavior);
+    setIsContentSpatialized(parcelable.isContentSpatialized);
+
     static_assert(sizeof(aaudio_input_preset_t) == sizeof(parcelable.inputPreset));
     setInputPreset(parcelable.inputPreset);
     setBufferCapacity(parcelable.bufferCapacity);
@@ -63,13 +73,19 @@
 
 StreamParameters AAudioStreamConfiguration::parcelable() const {
     StreamParameters result;
-    result.samplesPerFrame = getSamplesPerFrame();
+    result.channelMask = getChannelMask();
     result.sampleRate = getSampleRate();
     result.deviceId = getDeviceId();
     static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(result.sharingMode));
     result.sharingMode = getSharingMode();
-    static_assert(sizeof(audio_format_t) == sizeof(result.audioFormat));
-    result.audioFormat = static_cast<AudioFormat>(getFormat());
+    auto convAudioFormat = android::legacy2aidl_audio_format_t_AudioFormatDescription(getFormat());
+    if (convAudioFormat.ok()) {
+        result.audioFormat = convAudioFormat.value();
+    } else {
+        result.audioFormat = AudioFormatDescription{};
+        result.audioFormat.type =
+                android::media::audio::common::AudioFormatType::SYS_RESERVED_INVALID;
+    }
     static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
     result.direction = getDirection();
     static_assert(sizeof(audio_usage_t) == sizeof(result.usage));
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 8d90034..a4cc2bd 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -30,7 +30,7 @@
 using namespace aaudio;
 
 AAudioStreamRequest::AAudioStreamRequest(const StreamRequest& parcelable) :
-        mConfiguration(std::move(parcelable.params)),
+        mConfiguration(parcelable.params),
         mAttributionSource(parcelable.attributionSource),
         mSharingModeMatchRequired(parcelable.sharingModeMatchRequired),
         mInService(parcelable.inService) {
@@ -38,7 +38,7 @@
 
 StreamRequest AAudioStreamRequest::parcelable() const {
     StreamRequest result;
-    result.params = std::move(mConfiguration).parcelable();
+    result.params = mConfiguration.parcelable();
     result.attributionSource = mAttributionSource;
     result.sharingModeMatchRequired = mSharingModeMatchRequired;
     result.inService = mInService;
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index aa4ac27..b1262df 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -29,17 +29,15 @@
 #include "binding/AudioEndpointParcelable.h"
 
 using android::base::unique_fd;
-using android::media::SharedFileRegion;
-using android::NO_ERROR;
 using android::status_t;
 
 using namespace aaudio;
 
 AudioEndpointParcelable::AudioEndpointParcelable(Endpoint&& parcelable)
-        : mUpMessageQueueParcelable(std::move(parcelable.upMessageQueueParcelable)),
-          mDownMessageQueueParcelable(std::move(parcelable.downMessageQueueParcelable)),
-          mUpDataQueueParcelable(std::move(parcelable.upDataQueueParcelable)),
-          mDownDataQueueParcelable(std::move(parcelable.downDataQueueParcelable)),
+        : mUpMessageQueueParcelable(parcelable.upMessageQueueParcelable),
+          mDownMessageQueueParcelable(parcelable.downMessageQueueParcelable),
+          mUpDataQueueParcelable(parcelable.upDataQueueParcelable),
+          mDownDataQueueParcelable(parcelable.downDataQueueParcelable),
           mNumSharedMemories(parcelable.sharedMemories.size()) {
     for (size_t i = 0; i < parcelable.sharedMemories.size() && i < MAX_SHARED_MEMORIES; ++i) {
         // Re-construct.
@@ -56,10 +54,10 @@
 
 Endpoint AudioEndpointParcelable::parcelable()&& {
     Endpoint result;
-    result.upMessageQueueParcelable = std::move(mUpMessageQueueParcelable).parcelable();
-    result.downMessageQueueParcelable = std::move(mDownMessageQueueParcelable).parcelable();
-    result.upDataQueueParcelable = std::move(mUpDataQueueParcelable).parcelable();
-    result.downDataQueueParcelable = std::move(mDownDataQueueParcelable).parcelable();
+    result.upMessageQueueParcelable = mUpMessageQueueParcelable.parcelable();
+    result.downMessageQueueParcelable = mDownMessageQueueParcelable.parcelable();
+    result.upDataQueueParcelable = mUpDataQueueParcelable.parcelable();
+    result.downDataQueueParcelable = mDownDataQueueParcelable.parcelable();
     result.sharedMemories.reserve(std::min(mNumSharedMemories, MAX_SHARED_MEMORIES));
     for (size_t i = 0; i < mNumSharedMemories && i < MAX_SHARED_MEMORIES; ++i) {
         result.sharedMemories.emplace_back(std::move(mSharedMemories[i]).parcelable());
@@ -81,6 +79,22 @@
     return index;
 }
 
+void AudioEndpointParcelable::closeDataFileDescriptor() {
+    const int32_t curDataMemoryIndex = mDownDataQueueParcelable.getSharedMemoryIndex();
+    mSharedMemories[curDataMemoryIndex].closeAndReleaseFd();
+}
+
+void AudioEndpointParcelable::updateDataFileDescriptor(
+        AudioEndpointParcelable* endpointParcelable) {
+    const int32_t curDataMemoryIndex = mDownDataQueueParcelable.getSharedMemoryIndex();
+    const int32_t newDataMemoryIndex =
+            endpointParcelable->mDownDataQueueParcelable.getSharedMemoryIndex();
+    mSharedMemories[curDataMemoryIndex].close();
+    mSharedMemories[curDataMemoryIndex].setup(
+            endpointParcelable->mSharedMemories[newDataMemoryIndex]);
+    mDownDataQueueParcelable.updateMemory(endpointParcelable->mDownDataQueueParcelable);
+}
+
 aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
     aaudio_result_t result = mUpMessageQueueParcelable.resolve(mSharedMemories,
                                                            &descriptor->upMessageQueueDescriptor);
@@ -94,6 +108,10 @@
     return result;
 }
 
+aaudio_result_t AudioEndpointParcelable::resolveDataQueue(RingBufferDescriptor *descriptor) {
+    return mDownDataQueueParcelable.resolve(mSharedMemories, descriptor);
+}
+
 aaudio_result_t AudioEndpointParcelable::close() {
     int err = 0;
     for (int i = 0; i < mNumSharedMemories; i++) {
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index 5237a1a..5d2c38f 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -43,7 +43,7 @@
     // Ctor/assignment from a parcelable representation.
     // Since the parcelable object owns unique FDs (for shared memory blocks), move semantics are
     // provided to avoid the need to dupe.
-    AudioEndpointParcelable(Endpoint&& parcelable);
+    explicit AudioEndpointParcelable(Endpoint&& parcelable);
     AudioEndpointParcelable& operator=(Endpoint&& parcelable);
 
     /**
@@ -52,7 +52,20 @@
      */
     int32_t addFileDescriptor(const android::base::unique_fd& fd, int32_t sizeInBytes);
 
+    /**
+     * Close the current data file descriptor. The duplicated file descriptor will be closed.
+     */
+    void closeDataFileDescriptor();
+
+    /**
+     * Update the current data file descriptor using the given endpoint parcelable.
+     * @param endpointParcelable an endpoint parcelable that contains new data file
+     *                           descriptor information
+     */
+    void updateDataFileDescriptor(AudioEndpointParcelable* endpointParcelable);
+
     aaudio_result_t resolve(EndpointDescriptor *descriptor);
+    aaudio_result_t resolveDataQueue(RingBufferDescriptor *descriptor);
 
     aaudio_result_t close();
 
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index a4b3cec..3bc51d0 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -30,9 +30,10 @@
 using namespace aaudio;
 
 RingBufferParcelable::RingBufferParcelable(const RingBuffer& parcelable)
-        : mReadCounterParcelable(std::move(parcelable.readCounterParcelable)),
-          mWriteCounterParcelable(std::move(parcelable.writeCounterParcelable)),
-          mDataParcelable(std::move(parcelable.dataParcelable)),
+        : mReadCounterParcelable(parcelable.readCounterParcelable),
+          mWriteCounterParcelable(parcelable.writeCounterParcelable),
+          mDataParcelable(parcelable.dataParcelable),
+          mSharedMemoryIndex(parcelable.sharedMemoryIndex),
           mBytesPerFrame(parcelable.bytesPerFrame),
           mFramesPerBurst(parcelable.framesPerBurst),
           mCapacityInFrames(parcelable.capacityInFrames),
@@ -42,9 +43,10 @@
 
 RingBuffer RingBufferParcelable::parcelable() const {
     RingBuffer result;
-    result.readCounterParcelable = std::move(mReadCounterParcelable).parcelable();
-    result.writeCounterParcelable = std::move(mWriteCounterParcelable).parcelable();
-    result.dataParcelable = std::move(mDataParcelable).parcelable();
+    result.readCounterParcelable = mReadCounterParcelable.parcelable();
+    result.writeCounterParcelable = mWriteCounterParcelable.parcelable();
+    result.dataParcelable = mDataParcelable.parcelable();
+    result.sharedMemoryIndex = mSharedMemoryIndex;
     result.bytesPerFrame = mBytesPerFrame;
     result.framesPerBurst = mFramesPerBurst;
     result.capacityInFrames = mCapacityInFrames;
@@ -60,6 +62,7 @@
                  int32_t readCounterOffset,
                  int32_t writeCounterOffset,
                  int32_t counterSizeBytes) {
+    mSharedMemoryIndex = sharedMemoryIndex;
     mReadCounterParcelable.setup(sharedMemoryIndex, readCounterOffset, counterSizeBytes);
     mWriteCounterParcelable.setup(sharedMemoryIndex, writeCounterOffset, counterSizeBytes);
     mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
@@ -68,12 +71,13 @@
 void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
                  int32_t dataMemoryOffset,
                  int32_t dataSizeInBytes) {
+    mSharedMemoryIndex = sharedMemoryIndex;
     mReadCounterParcelable.setup(sharedMemoryIndex, 0, 0);
     mWriteCounterParcelable.setup(sharedMemoryIndex, 0, 0);
     mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
 }
 
-int32_t RingBufferParcelable::getBytesPerFrame() {
+int32_t RingBufferParcelable::getBytesPerFrame() const {
     return mBytesPerFrame;
 }
 
@@ -81,7 +85,7 @@
     mBytesPerFrame = bytesPerFrame;
 }
 
-int32_t RingBufferParcelable::getFramesPerBurst() {
+int32_t RingBufferParcelable::getFramesPerBurst() const {
     return mFramesPerBurst;
 }
 
@@ -89,7 +93,7 @@
     mFramesPerBurst = framesPerBurst;
 }
 
-int32_t RingBufferParcelable::getCapacityInFrames() {
+int32_t RingBufferParcelable::getCapacityInFrames() const {
     return mCapacityInFrames;
 }
 
@@ -124,6 +128,14 @@
     return AAUDIO_OK;
 }
 
+void RingBufferParcelable::updateMemory(const RingBufferParcelable& parcelable) {
+    setupMemory(mSharedMemoryIndex, 0,
+                parcelable.getCapacityInFrames() * parcelable.getBytesPerFrame());
+    setBytesPerFrame(parcelable.getBytesPerFrame());
+    setFramesPerBurst(parcelable.getFramesPerBurst());
+    setCapacityInFrames(parcelable.getCapacityInFrames());
+}
+
 aaudio_result_t RingBufferParcelable::validate() const {
     if (mCapacityInFrames < 0 || mCapacityInFrames >= 32 * 1024) {
         ALOGE("invalid mCapacityInFrames = %d", mCapacityInFrames);
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
index 2508cea..29d0d86 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.h
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -46,15 +46,15 @@
                      int32_t dataMemoryOffset,
                      int32_t dataSizeInBytes);
 
-    int32_t getBytesPerFrame();
+    int32_t getBytesPerFrame() const;
 
     void setBytesPerFrame(int32_t bytesPerFrame);
 
-    int32_t getFramesPerBurst();
+    int32_t getFramesPerBurst() const;
 
     void setFramesPerBurst(int32_t framesPerBurst);
 
-    int32_t getCapacityInFrames();
+    int32_t getCapacityInFrames() const;
 
     void setCapacityInFrames(int32_t capacityInFrames);
 
@@ -62,6 +62,12 @@
 
     aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
 
+    void updateMemory(const RingBufferParcelable& parcelable);
+
+    int32_t getSharedMemoryIndex() const {
+        return mSharedMemoryIndex;
+    }
+
     void dump();
 
     // Extract a parcelable representation of this object.
@@ -71,6 +77,7 @@
     SharedRegionParcelable  mReadCounterParcelable;
     SharedRegionParcelable  mWriteCounterParcelable;
     SharedRegionParcelable  mDataParcelable;
+    int32_t                 mSharedMemoryIndex = -1;
     int32_t                 mBytesPerFrame = 0;     // index is in frames
     int32_t                 mFramesPerBurst = 0;    // for ISOCHRONOUS queues
     int32_t                 mCapacityInFrames = 0;  // zero if unused
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index eef238f..741aefc 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -32,7 +32,6 @@
 #include "binding/SharedMemoryParcelable.h"
 
 using android::base::unique_fd;
-using android::NO_ERROR;
 using android::status_t;
 using android::media::SharedFileRegion;
 
@@ -65,6 +64,10 @@
     mSizeInBytes = sizeInBytes;
 }
 
+void SharedMemoryParcelable::setup(const SharedMemoryParcelable &sharedMemoryParcelable) {
+    setup(sharedMemoryParcelable.mFd, sharedMemoryParcelable.mSizeInBytes);
+}
+
 aaudio_result_t SharedMemoryParcelable::close() {
     if (mResolvedAddress != MMAP_UNRESOLVED_ADDRESS) {
         int err = munmap(mResolvedAddress, mSizeInBytes);
@@ -77,8 +80,16 @@
     return AAUDIO_OK;
 }
 
+aaudio_result_t SharedMemoryParcelable::closeAndReleaseFd() {
+    aaudio_result_t result = close();
+    if (result == AAUDIO_OK) {
+        mFd.reset();
+    }
+    return result;
+}
+
 aaudio_result_t SharedMemoryParcelable::resolveSharedMemory(const unique_fd& fd) {
-    mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ | PROT_WRITE,
+    mResolvedAddress = (uint8_t *) mmap(nullptr, mSizeInBytes, PROT_READ | PROT_WRITE,
                                         MAP_SHARED, fd.get(), 0);
     if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
         ALOGE("mmap() failed for fd = %d, nBytes = %" PRId64 ", errno = %s",
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
index 1f2c335..7762fef 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -52,12 +52,16 @@
      */
     void setup(const android::base::unique_fd& fd, int32_t sizeInBytes);
 
+    void setup(const SharedMemoryParcelable& sharedMemoryParcelable);
+
     // mmap() shared memory
     aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
 
     // munmap() any mapped memory
     aaudio_result_t close();
 
+    aaudio_result_t closeAndReleaseFd();
+
     int32_t getSizeInBytes();
 
     void dump();
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index 56b99c0..6fa109b 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -29,10 +29,7 @@
 #include "binding/SharedMemoryParcelable.h"
 #include "binding/SharedRegionParcelable.h"
 
-using android::NO_ERROR;
 using android::status_t;
-using android::Parcel;
-using android::Parcelable;
 
 using namespace aaudio;
 
diff --git a/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl b/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
index 44d2211..485c2e2 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
@@ -78,4 +78,6 @@
 
     int unregisterAudioThread(int streamHandle,
                               int clientThreadId);
+
+    int exitStandby(int streamHandle, out Endpoint endpoint);
 }
diff --git a/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl b/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
index a58b33a..dd64493 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
@@ -26,4 +26,5 @@
     int                 framesPerBurst;    // for ISOCHRONOUS queues
     int                 capacityInFrames;  // zero if unused
     int /* RingbufferFlags */ flags;  // = RingbufferFlags::NONE;
+    int                 sharedMemoryIndex;
 }
\ No newline at end of file
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
index b7c4f70..983e193 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -16,17 +16,19 @@
 
 package aaudio;
 
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioFormatDescription;
 
 parcelable StreamParameters {
-    int                                       samplesPerFrame;  //      = AAUDIO_UNSPECIFIED;
+    int                                       channelMask;  //          = AAUDIO_UNSPECIFIED;
     int                                       sampleRate;  //           = AAUDIO_UNSPECIFIED;
     int                                       deviceId;  //             = AAUDIO_UNSPECIFIED;
     int /* aaudio_sharing_mode_t */           sharingMode;  //          = AAUDIO_SHARING_MODE_SHARED;
-    AudioFormat                               audioFormat;  //          = AUDIO_FORMAT_DEFAULT;
+    AudioFormatDescription                    audioFormat;  //          = AUDIO_FORMAT_DEFAULT;
     int /* aaudio_direction_t */              direction;  //            = AAUDIO_DIRECTION_OUTPUT;
     int /* aaudio_usage_t */                  usage;  //                = AAUDIO_UNSPECIFIED;
     int /* aaudio_content_type_t */           contentType;  //          = AAUDIO_UNSPECIFIED;
+    int /* aaudio_spatialization_behavior_t */ spatializationBehavior;  // = AAUDIO_UNSPECIFIED;
+    boolean                                   isContentSpatialized;  // = false;
     int /* aaudio_input_preset_t */           inputPreset;  //          = AAUDIO_UNSPECIFIED;
     int                                       bufferCapacity;  //       = AAUDIO_UNSPECIFIED;
     int /* aaudio_allowed_capture_policy_t */ allowedCapturePolicy;  // = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.cpp b/media/libaaudio/src/client/AAudioFlowGraph.cpp
index 61b50f3..d0c3238 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.cpp
+++ b/media/libaaudio/src/client/AAudioFlowGraph.cpp
@@ -21,7 +21,10 @@
 #include "AAudioFlowGraph.h"
 
 #include <flowgraph/ClipToRange.h>
+#include <flowgraph/ManyToMultiConverter.h>
+#include <flowgraph/MonoBlend.h>
 #include <flowgraph/MonoToMultiConverter.h>
+#include <flowgraph/MultiToManyConverter.h>
 #include <flowgraph/RampLinear.h>
 #include <flowgraph/SinkFloat.h>
 #include <flowgraph/SinkI16.h>
@@ -37,12 +40,17 @@
 aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
                           int32_t sourceChannelCount,
                           audio_format_t sinkFormat,
-                          int32_t sinkChannelCount) {
-    AudioFloatOutputPort *lastOutput = nullptr;
+                          int32_t sinkChannelCount,
+                          bool useMonoBlend,
+                          float audioBalance,
+                          bool isExclusive) {
+    FlowGraphPortFloatOutput *lastOutput = nullptr;
 
     // TODO change back to ALOGD
-    ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d",
-          __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount);
+    ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d, "
+          "useMonoBlend = %d, audioBalance = %f, isExclusive %d",
+          __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount,
+          useMonoBlend, audioBalance, isExclusive);
 
     switch (sourceFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
@@ -63,10 +71,11 @@
     }
     lastOutput = &mSource->output;
 
-    // Apply volume as a ramp to avoid pops.
-    mVolumeRamp = std::make_unique<RampLinear>(sourceChannelCount);
-    lastOutput->connect(&mVolumeRamp->input);
-    lastOutput = &mVolumeRamp->output;
+    if (useMonoBlend) {
+        mMonoBlend = std::make_unique<MonoBlend>(sourceChannelCount);
+        lastOutput->connect(&mMonoBlend->input);
+        lastOutput = &mMonoBlend->output;
+    }
 
     // For a pure float graph, there is a chance that the data range may be very large.
     // So we should clip to a reasonable value that allows a little headroom.
@@ -86,6 +95,26 @@
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
+    // Apply volume ramps only for exclusive streams.
+    if (isExclusive) {
+        // Apply volume ramps to set the left/right audio balance and target volumes.
+        // The signal is split into per-channel streams, a volume ramp is applied to
+        // each channel, and then the streams are recombined.
+        mMultiToManyConverter = std::make_unique<MultiToManyConverter>(sinkChannelCount);
+        mManyToMultiConverter = std::make_unique<ManyToMultiConverter>(sinkChannelCount);
+        lastOutput->connect(&mMultiToManyConverter->input);
+        for (int i = 0; i < sinkChannelCount; i++) {
+            mVolumeRamps.emplace_back(std::make_unique<RampLinear>(1));
+            mPanningVolumes.emplace_back(1.0f);
+            lastOutput = mMultiToManyConverter->outputs[i].get();
+            lastOutput->connect(&(mVolumeRamps[i].get()->input));
+            lastOutput = &(mVolumeRamps[i].get()->output);
+            lastOutput->connect(mManyToMultiConverter->inputs[i].get());
+        }
+        lastOutput = &mManyToMultiConverter->output;
+        setAudioBalance(audioBalance);
+    }
+
     switch (sinkFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             mSink = std::make_unique<SinkFloat>(sinkChannelCount);
@@ -117,9 +146,32 @@
  * @param volume between 0.0 and 1.0
  */
 void AAudioFlowGraph::setTargetVolume(float volume) {
-    mVolumeRamp->setTarget(volume);
+    for (int i = 0; i < mVolumeRamps.size(); i++) {
+        mVolumeRamps[i]->setTarget(volume * mPanningVolumes[i]);
+    }
+    mTargetVolume = volume;
 }
 
+/**
+ * @param audioBalance between -1.0 and 1.0
+ */
+void AAudioFlowGraph::setAudioBalance(float audioBalance) {
+    if (mPanningVolumes.size() >= 2) {
+        float leftMultiplier = 0;
+        float rightMultiplier = 0;
+        mBalance.computeStereoBalance(audioBalance, &leftMultiplier, &rightMultiplier);
+        mPanningVolumes[0] = leftMultiplier;
+        mPanningVolumes[1] = rightMultiplier;
+        mVolumeRamps[0]->setTarget(mTargetVolume * leftMultiplier);
+        mVolumeRamps[1]->setTarget(mTargetVolume * rightMultiplier);
+    }
+}
+
+/**
+ * @param numFrames number of frames over which volume changes are ramped
+ */
 void AAudioFlowGraph::setRampLengthInFrames(int32_t numFrames) {
-    mVolumeRamp->setLengthInFrames(numFrames);
+    for (auto& ramp : mVolumeRamps) {
+        ramp->setLengthInFrames(numFrames);
+    }
 }
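
The change above replaces the single RampLinear with a per-channel volume path for exclusive streams: MultiToManyConverter splits the signal, each channel gets its own RampLinear whose target is the stream volume scaled by a panning factor, and ManyToMultiConverter recombines the channels. Below is a minimal standalone sketch of that gain math; the linear balance curve and all names are illustrative assumptions, not the android::audio_utils::Balance implementation the real code delegates to.

    // Illustrative sketch only: how a target volume and a left/right balance
    // combine into the per-channel ramp targets used by setTargetVolume() and
    // setAudioBalance() above. The linear balance curve is an assumption.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    static void computeStereoBalanceLinear(float balance, float* left, float* right) {
        // balance in [-1.0, 1.0]: negative favors the left channel, positive the right.
        *left  = std::min(1.0f, 1.0f - balance);
        *right = std::min(1.0f, 1.0f + balance);
    }

    int main() {
        const float targetVolume = 0.8f;   // stream volume in [0.0, 1.0]
        const float audioBalance = 0.25f;  // slightly toward the right
        std::vector<float> panningVolumes(2, 1.0f);
        computeStereoBalanceLinear(audioBalance, &panningVolumes[0], &panningVolumes[1]);
        for (size_t i = 0; i < panningVolumes.size(); i++) {
            // Each per-channel RampLinear would receive this as its ramp target.
            std::printf("channel %zu ramp target = %f\n", i, targetVolume * panningVolumes[i]);
        }
        return 0;
    }
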
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index a49f64e..00b6575 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -23,8 +23,12 @@
 #include <system/audio.h>
 
 #include <aaudio/AAudio.h>
+#include <audio_utils/Balance.h>
 #include <flowgraph/ClipToRange.h>
+#include <flowgraph/ManyToMultiConverter.h>
+#include <flowgraph/MonoBlend.h>
 #include <flowgraph/MonoToMultiConverter.h>
+#include <flowgraph/MultiToManyConverter.h>
 #include <flowgraph/RampLinear.h>
 
 class AAudioFlowGraph {
@@ -36,12 +40,19 @@
      * @param sourceChannelCount
      * @param sinkFormat
      * @param sinkChannelCount
+     * @param useMonoBlend blend the source channels to mono when true (master mono)
+     * @param audioBalance left/right balance in the range [-1.0, 1.0]
+     * @param isExclusive true if the stream uses AAUDIO_SHARING_MODE_EXCLUSIVE, which
+     *                    enables the per-channel volume/balance ramps
      * @return
      */
     aaudio_result_t configure(audio_format_t sourceFormat,
                               int32_t sourceChannelCount,
                               audio_format_t sinkFormat,
-                              int32_t sinkChannelCount);
+                              int32_t sinkChannelCount,
+                              bool useMonoBlend,
+                              float audioBalance,
+                              bool isExclusive);
 
     void process(const void *source, void *destination, int32_t numFrames);
 
@@ -50,14 +61,28 @@
      */
     void setTargetVolume(float volume);
 
+    /**
+     * @param audioBalance between -1.0 and 1.0
+     */
+    void setAudioBalance(float audioBalance);
+
+    /**
+     * @param numFrames number of frames over which volume changes are ramped
+     */
     void setRampLengthInFrames(int32_t numFrames);
 
 private:
-    std::unique_ptr<flowgraph::AudioSource>          mSource;
-    std::unique_ptr<flowgraph::RampLinear>           mVolumeRamp;
-    std::unique_ptr<flowgraph::ClipToRange>          mClipper;
-    std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
-    std::unique_ptr<flowgraph::AudioSink>            mSink;
+    std::unique_ptr<flowgraph::FlowGraphSourceBuffered>     mSource;
+    std::unique_ptr<flowgraph::MonoBlend>                   mMonoBlend;
+    std::unique_ptr<flowgraph::ClipToRange>                 mClipper;
+    std::unique_ptr<flowgraph::MonoToMultiConverter>        mChannelConverter;
+    std::unique_ptr<flowgraph::ManyToMultiConverter>        mManyToMultiConverter;
+    std::unique_ptr<flowgraph::MultiToManyConverter>        mMultiToManyConverter;
+    std::vector<std::unique_ptr<flowgraph::RampLinear>>     mVolumeRamps;
+    std::vector<float>                                      mPanningVolumes;
+    float                                                   mTargetVolume = 1.0f;
+    android::audio_utils::Balance                           mBalance;
+    std::unique_ptr<flowgraph::FlowGraphSink>               mSink;
 };
 
 
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index ebc9f2b..e780f4f 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -31,13 +31,6 @@
 #define RIDICULOUSLY_LARGE_BUFFER_CAPACITY   (256 * 1024)
 #define RIDICULOUSLY_LARGE_FRAME_SIZE        4096
 
-AudioEndpoint::AudioEndpoint()
-    : mFreeRunning(false)
-    , mDataReadCounter(0)
-    , mDataWriteCounter(0)
-{
-}
-
 // TODO Consider moving to a method in RingBufferDescriptor
 static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
                                                   const RingBufferDescriptor *descriptor) {
@@ -146,38 +139,49 @@
     );
 
     // ============================ data queue =============================
-    descriptor = &pEndpointDescriptor->dataQueueDescriptor;
-    ALOGV("configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+    result = configureDataQueue(pEndpointDescriptor->dataQueueDescriptor, direction);
+
+    return result;
+}
+
+aaudio_result_t AudioEndpoint::configureDataQueue(const RingBufferDescriptor& descriptor,
+                                                  aaudio_direction_t direction) {
+    aaudio_result_t result = AudioEndpoint_validateQueueDescriptor("data", &descriptor);
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
+    ALOGV("configure() data framesPerBurst = %d", descriptor.framesPerBurst);
     ALOGV("configure() data readCounterAddress = %p",
-          descriptor->readCounterAddress);
+          descriptor.readCounterAddress);
 
     // An example of free running is when the other side is read or written by hardware DMA
     // or a DSP. It does not update its counter so we have to update it.
     int64_t *remoteCounter = (direction == AAUDIO_DIRECTION_OUTPUT)
-                             ? descriptor->readCounterAddress // read by other side
-                             : descriptor->writeCounterAddress; // written by other side
+                             ? descriptor.readCounterAddress // read by other side
+                             : descriptor.writeCounterAddress; // written by other side
     mFreeRunning = (remoteCounter == nullptr);
     ALOGV("configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
 
-    int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
+    int64_t *readCounterAddress = (descriptor.readCounterAddress == nullptr)
                                   ? &mDataReadCounter
-                                  : descriptor->readCounterAddress;
-    int64_t *writeCounterAddress = (descriptor->writeCounterAddress == nullptr)
+                                  : descriptor.readCounterAddress;
+    int64_t *writeCounterAddress = (descriptor.writeCounterAddress == nullptr)
                                   ? &mDataWriteCounter
-                                  : descriptor->writeCounterAddress;
+                                  : descriptor.writeCounterAddress;
 
     // Clear buffer to avoid an initial glitch on some devices.
-    size_t bufferSizeBytes = descriptor->capacityInFrames * descriptor->bytesPerFrame;
-    memset(descriptor->dataAddress, 0, bufferSizeBytes);
+    size_t bufferSizeBytes = descriptor.capacityInFrames * descriptor.bytesPerFrame;
+    memset(descriptor.dataAddress, 0, bufferSizeBytes);
 
     mDataQueue = std::make_unique<FifoBufferIndirect>(
-            descriptor->bytesPerFrame,
-            descriptor->capacityInFrames,
+            descriptor.bytesPerFrame,
+            descriptor.capacityInFrames,
             readCounterAddress,
             writeCounterAddress,
-            descriptor->dataAddress
+            descriptor.dataAddress
     );
-    uint32_t threshold = descriptor->capacityInFrames / 2;
+    uint32_t threshold = descriptor.capacityInFrames / 2;
     mDataQueue->setThreshold(threshold);
     return result;
 }
@@ -188,47 +192,66 @@
 }
 
 int32_t AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
-    return mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+    return mDataQueue == nullptr ? 0 : mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
 }
 
 int32_t AudioEndpoint::getEmptyFramesAvailable() {
-    return mDataQueue->getEmptyFramesAvailable();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getEmptyFramesAvailable();
 }
 
 int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer) {
-    return mDataQueue->getFullDataAvailable(wrappingBuffer);
+    return mDataQueue == nullptr ? 0 : mDataQueue->getFullDataAvailable(wrappingBuffer);
 }
 
 int32_t AudioEndpoint::getFullFramesAvailable() {
-    return mDataQueue->getFullFramesAvailable();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getFullFramesAvailable();
+}
+
+android::fifo_frames_t AudioEndpoint::read(void *buffer, android::fifo_frames_t numFrames) {
+    return mDataQueue == nullptr ? 0 : mDataQueue->read(buffer, numFrames);
+}
+
+android::fifo_frames_t AudioEndpoint::write(void *buffer, android::fifo_frames_t numFrames) {
+    return mDataQueue == nullptr ? 0 : mDataQueue->write(buffer, numFrames);
 }
 
 void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
-    mDataQueue->advanceWriteIndex(deltaFrames);
+    if (mDataQueue != nullptr) {
+        mDataQueue->advanceWriteIndex(deltaFrames);
+    }
 }
 
 void AudioEndpoint::advanceReadIndex(int32_t deltaFrames) {
-    mDataQueue->advanceReadIndex(deltaFrames);
+    if (mDataQueue != nullptr) {
+        mDataQueue->advanceReadIndex(deltaFrames);
+    }
 }
 
 void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead) {
-    mDataQueue->setReadCounter(framesRead);
+    if (mDataQueue != nullptr) {
+        mDataQueue->setReadCounter(framesRead);
+    }
 }
 
 fifo_counter_t AudioEndpoint::getDataReadCounter() const {
-    return mDataQueue->getReadCounter();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getReadCounter();
 }
 
 void AudioEndpoint::setDataWriteCounter(fifo_counter_t framesRead) {
-    mDataQueue->setWriteCounter(framesRead);
+    if (mDataQueue != nullptr) {
+        mDataQueue->setWriteCounter(framesRead);
+    }
 }
 
 fifo_counter_t AudioEndpoint::getDataWriteCounter() const {
-    return mDataQueue->getWriteCounter();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getWriteCounter();
 }
 
 int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
                                             int32_t *actualFrames) {
+    if (mDataQueue == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
     if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
         requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
     }
@@ -238,11 +261,11 @@
 }
 
 int32_t AudioEndpoint::getBufferSizeInFrames() const {
-    return mDataQueue->getThreshold();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getThreshold();
 }
 
 int32_t AudioEndpoint::getBufferCapacityInFrames() const {
-    return (int32_t)mDataQueue->getBufferCapacityInFrames();
+    return mDataQueue == nullptr ? 0 : (int32_t)mDataQueue->getBufferCapacityInFrames();
 }
 
 void AudioEndpoint::dump() const {
@@ -251,5 +274,7 @@
 }
 
 void AudioEndpoint::eraseDataMemory() {
-    mDataQueue->eraseMemory();
+    if (mDataQueue != nullptr) {
+        mDataQueue->eraseMemory();
+    }
 }
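
With standby support the data queue can now be torn down and rebuilt at runtime, so every accessor above degrades gracefully (returns 0, or does nothing) instead of dereferencing a reset unique_ptr. A minimal sketch of that guard pattern, with made-up stand-in names rather than the real FifoBufferIndirect/AudioEndpoint classes:

    // Minimal sketch of the null-guarded delegation used above.
    #include <cstdint>
    #include <cstdio>
    #include <memory>

    struct FakeQueue {
        int32_t getFullFramesAvailable() const { return 42; }
    };

    class Endpoint {
    public:
        int32_t getFullFramesAvailable() const {
            // Return a safe default instead of dereferencing a missing queue.
            return mQueue == nullptr ? 0 : mQueue->getFullFramesAvailable();
        }
        void freeQueue() { mQueue.reset(); }  // e.g. when entering standby
    private:
        std::unique_ptr<FakeQueue> mQueue = std::make_unique<FakeQueue>();
    };

    int main() {
        Endpoint endpoint;
        std::printf("before: %d\n", endpoint.getFullFramesAvailable());  // 42
        endpoint.freeQueue();
        std::printf("after:  %d\n", endpoint.getFullFramesAvailable());  // 0, no crash
        return 0;
    }
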
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 4c8d60f..01dd05a 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_AAUDIO_AUDIO_ENDPOINT_H
 #define ANDROID_AAUDIO_AUDIO_ENDPOINT_H
 
+#include <mutex>
+
 #include <aaudio/AAudio.h>
 
 #include "binding/AAudioServiceMessage.h"
@@ -34,7 +36,7 @@
 class AudioEndpoint {
 
 public:
-    AudioEndpoint();
+    AudioEndpoint() = default;
 
     /**
      * Configure based on the EndPointDescriptor_t.
@@ -42,6 +44,9 @@
     aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor,
                               aaudio_direction_t direction);
 
+    aaudio_result_t configureDataQueue(const RingBufferDescriptor &descriptor,
+                            aaudio_direction_t direction);
+
     /**
      * Read from a command passed up from the Server.
      * @return 1 if command received, 0 for no command, or negative error.
@@ -56,6 +61,10 @@
 
     int32_t getFullFramesAvailable();
 
+    android::fifo_frames_t read(void* buffer, android::fifo_frames_t numFrames);
+
+    android::fifo_frames_t write(void* buffer, android::fifo_frames_t numFrames);
+
     void advanceReadIndex(int32_t deltaFrames);
 
     void advanceWriteIndex(int32_t deltaFrames);
@@ -85,19 +94,31 @@
 
     int32_t getBufferCapacityInFrames() const;
 
+    void setThreshold(int32_t frames) {
+        mDataQueue->setThreshold(frames);
+    }
+
+    int32_t getThreshold() {
+        return mDataQueue->getThreshold();
+    }
+
     /**
      * Write zeros to the data queue memory.
      */
     void eraseDataMemory();
 
+    void freeDataQueue();
+
     void dump() const;
 
 private:
     std::unique_ptr<android::FifoBufferIndirect> mUpCommandQueue;
     std::unique_ptr<android::FifoBufferIndirect> mDataQueue;
-    bool                    mFreeRunning;
-    android::fifo_counter_t mDataReadCounter; // only used if free-running
-    android::fifo_counter_t mDataWriteCounter; // only used if free-running
+    bool                    mFreeRunning{false};
+    android::fifo_counter_t mDataReadCounter{0}; // only used if free-running
+    android::fifo_counter_t mDataWriteCounter{0}; // only used if free-running
+
+    std::mutex mDataQueueLock;
 };
 
 } // namespace aaudio
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 6d2d464..9f0564f 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -27,6 +27,8 @@
 #include <aaudio/AAudio.h>
 #include <cutils/properties.h>
 
+#include <media/AudioParameter.h>
+#include <media/AudioSystem.h>
 #include <media/MediaMetricsItem.h>
 #include <utils/Trace.h>
 
@@ -49,8 +51,6 @@
 // This is needed to make sense of the logs more easily.
 #define LOG_TAG (mInService ? "AudioStreamInternal_Service" : "AudioStreamInternal_Client")
 
-using android::Mutex;
-using android::WrappingBuffer;
 using android::content::AttributionSourceState;
 
 using namespace aaudio;
@@ -81,8 +81,6 @@
 aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
 
     aaudio_result_t result = AAUDIO_OK;
-    int32_t framesPerBurst;
-    int32_t framesPerHardwareBurst;
     AAudioStreamRequest request;
     AAudioStreamConfiguration configurationOutput;
 
@@ -97,9 +95,6 @@
         return result;
     }
 
-    const int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
-    int32_t burstMicros = 0;
-
     const audio_format_t requestedFormat = getFormat();
     // We have to do volume scaling. So we prefer FLOAT format.
     if (requestedFormat == AUDIO_FORMAT_DEFAULT) {
@@ -123,12 +118,14 @@
 
     request.getConfiguration().setDeviceId(getDeviceId());
     request.getConfiguration().setSampleRate(getSampleRate());
-    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
     request.getConfiguration().setDirection(getDirection());
     request.getConfiguration().setSharingMode(getSharingMode());
+    request.getConfiguration().setChannelMask(getChannelMask());
 
     request.getConfiguration().setUsage(getUsage());
     request.getConfiguration().setContentType(getContentType());
+    request.getConfiguration().setSpatializationBehavior(getSpatializationBehavior());
+    request.getConfiguration().setIsContentSpatialized(isContentSpatialized());
     request.getConfiguration().setInputPreset(getInputPreset());
     request.getConfiguration().setPrivacySensitive(isPrivacySensitive());
 
@@ -138,7 +135,8 @@
 
     mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
     if (mServiceStreamHandle < 0
-            && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
+            && (request.getConfiguration().getSamplesPerFrame() == 1
+                    || request.getConfiguration().getChannelMask() == AAUDIO_CHANNEL_MONO)
             && getDirection() == AAUDIO_DIRECTION_OUTPUT
             && !isInService()) {
         // if that failed then try switching from mono to stereo if OUTPUT.
@@ -146,7 +144,7 @@
         // that writes to a stereo MMAP stream.
         ALOGD("%s() - openStream() returned %d, try switching from MONO to STEREO",
               __func__, mServiceStreamHandle);
-        request.getConfiguration().setSamplesPerFrame(2); // stereo
+        request.getConfiguration().setChannelMask(AAUDIO_CHANNEL_STEREO);
         mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
     }
     if (mServiceStreamHandle < 0) {
@@ -174,9 +172,10 @@
         goto error;
     }
     // Save results of the open.
-    if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
-        setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
+    if (getChannelMask() == AAUDIO_UNSPECIFIED) {
+        setChannelMask(configurationOutput.getChannelMask());
     }
+
     mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
 
     setSampleRate(configurationOutput.getSampleRate());
@@ -186,6 +185,8 @@
 
     setUsage(configurationOutput.getUsage());
     setContentType(configurationOutput.getContentType());
+    setSpatializationBehavior(configurationOutput.getSpatializationBehavior());
+    setIsContentSpatialized(configurationOutput.isContentSpatialized());
     setInputPreset(configurationOutput.getInputPreset());
 
     // Save device format so we can do format conversion and volume scaling together.
@@ -209,12 +210,28 @@
         goto error;
     }
 
-    framesPerHardwareBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+    if ((result = configureDataInformation(builder.getFramesPerDataCallback())) != AAUDIO_OK) {
+        goto error;
+    }
+
+    setState(AAUDIO_STREAM_STATE_OPEN);
+
+    return result;
+
+error:
+    safeReleaseClose();
+    return result;
+}
+
+aaudio_result_t AudioStreamInternal::configureDataInformation(int32_t callbackFrames) {
+    int32_t framesPerHardwareBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
 
     // Scale up the burst size to meet the minimum equivalent in microseconds.
     // This is to avoid waking the CPU too often when the HW burst is very small
     // or at high sample rates.
-    framesPerBurst = framesPerHardwareBurst;
+    int32_t framesPerBurst = framesPerHardwareBurst;
+    int32_t burstMicros = 0;
+    const int32_t burstMinMicros = android::AudioSystem::getAAudioHardwareBurstMinUsec();
     do {
         if (burstMicros > 0) {  // skip first loop
             framesPerBurst *= 2;
@@ -227,8 +244,7 @@
     // Validate final burst size.
     if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
         ALOGE("%s - framesPerBurst out of range = %d", __func__, framesPerBurst);
-        result = AAUDIO_ERROR_OUT_OF_RANGE;
-        goto error;
+        return AAUDIO_ERROR_OUT_OF_RANGE;
     }
     setFramesPerBurst(framesPerBurst); // only save good value
 
@@ -236,26 +252,21 @@
     if (mBufferCapacityInFrames < getFramesPerBurst()
             || mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
         ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
-        result = AAUDIO_ERROR_OUT_OF_RANGE;
-        goto error;
+        return AAUDIO_ERROR_OUT_OF_RANGE;
     }
 
     mClockModel.setSampleRate(getSampleRate());
     mClockModel.setFramesPerBurst(framesPerHardwareBurst);
 
     if (isDataCallbackSet()) {
-        mCallbackFrames = builder.getFramesPerDataCallback();
+        mCallbackFrames = callbackFrames;
         if (mCallbackFrames > getBufferCapacity() / 2) {
             ALOGW("%s - framesPerCallback too big = %d, capacity = %d",
                   __func__, mCallbackFrames, getBufferCapacity());
-            result = AAUDIO_ERROR_OUT_OF_RANGE;
-            goto error;
-
+            return AAUDIO_ERROR_OUT_OF_RANGE;
         } else if (mCallbackFrames < 0) {
             ALOGW("%s - framesPerCallback negative", __func__);
-            result = AAUDIO_ERROR_OUT_OF_RANGE;
-            goto error;
-
+            return AAUDIO_ERROR_OUT_OF_RANGE;
         }
         if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
             mCallbackFrames = getFramesPerBurst();
@@ -265,6 +276,18 @@
         mCallbackBuffer = std::make_unique<uint8_t[]>(callbackBufferSize);
     }
 
+    // Exclusive output streams should combine channels when mono audio adjustment
+    // is enabled. They should also adjust for audio balance.
+    if ((getDirection() == AAUDIO_DIRECTION_OUTPUT) &&
+        (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE)) {
+        bool isMasterMono = false;
+        android::AudioSystem::getMasterMono(&isMasterMono);
+        setRequireMonoBlend(isMasterMono);
+        float audioBalance = 0;
+        android::AudioSystem::getMasterBalance(&audioBalance);
+        setAudioBalance(audioBalance);
+    }
+
     // For debugging and analyzing the distribution of MMAP timestamps.
     // For OUTPUT, use a NEGATIVE offset to move the CPU writes further BEFORE the HW reads.
     // For INPUT, use a POSITIVE offset to move the CPU reads further AFTER the HW writes.
@@ -284,14 +307,7 @@
     }
 
     setBufferSize(mBufferCapacityInFrames / 2); // Default buffer size to match Q
-
-    setState(AAUDIO_STREAM_STATE_OPEN);
-
-    return result;
-
-error:
-    safeReleaseClose();
-    return result;
+    return AAUDIO_OK;
 }
 
 // This must be called under mStreamLock.
@@ -332,13 +348,67 @@
 {
     AudioStreamInternal *stream = (AudioStreamInternal *)context;
     //LOGD("oboe_callback_thread, stream = %p", stream);
-    if (stream != NULL) {
+    if (stream != nullptr) {
         return stream->callbackLoop();
     } else {
-        return NULL;
+        return nullptr;
     }
 }
 
+aaudio_result_t AudioStreamInternal::exitStandby_l() {
+    AudioEndpointParcelable endpointParcelable;
+    // The stream is in standby mode. Copy all available data and then close the duplicated
+    // shared file descriptor so that it won't cause issues when the HAL tries to reallocate a
+    // new shared file descriptor when exiting standby.
+    // Cache the current read counter; it will be used to reset the new read and write
+    // counters when the new data queue and endpoint are reconfigured.
+    const android::fifo_counter_t readCounter = mAudioEndpoint->getDataReadCounter();
+    // Cache the buffer size, which may have been set by the client.
+    const int32_t previousBufferSize = mBufferSizeInFrames;
+    // Copy all available data from current data queue.
+    uint8_t buffer[getBufferCapacity() * getBytesPerFrame()];
+    android::fifo_frames_t fullFramesAvailable =
+            mAudioEndpoint->read(buffer, getBufferCapacity());
+    mEndPointParcelable.closeDataFileDescriptor();
+    aaudio_result_t result = mServiceInterface.exitStandby(
+            mServiceStreamHandle, endpointParcelable);
+    if (result != AAUDIO_OK) {
+        ALOGE("Failed to exit standby, error=%d", result);
+        goto exit;
+    }
+    // Reconstruct data queue descriptor using new shared file descriptor.
+    mEndPointParcelable.updateDataFileDescriptor(&endpointParcelable);
+    result = mEndPointParcelable.resolveDataQueue(&mEndpointDescriptor.dataQueueDescriptor);
+    if (result != AAUDIO_OK) {
+        ALOGE("Failed to resolve data queue after exiting standby, error=%d", result);
+        goto exit;
+    }
+    // Reconfigure audio endpoint with new data queue descriptor.
+    mAudioEndpoint->configureDataQueue(
+            mEndpointDescriptor.dataQueueDescriptor, getDirection());
+    // Set the read and write counters to the cached read counter; the write below will
+    // advance the write counter to the correct position.
+    mAudioEndpoint->setDataReadCounter(readCounter);
+    mAudioEndpoint->setDataWriteCounter(readCounter);
+    result = configureDataInformation(mCallbackFrames);
+    if (result != AAUDIO_OK) {
+        ALOGE("Failed to configure data information after exiting standby, error=%d", result);
+        goto exit;
+    }
+    // Write data from previous data buffer to new endpoint.
+    if (android::fifo_frames_t framesWritten =
+                mAudioEndpoint->write(buffer, fullFramesAvailable);
+            framesWritten != fullFramesAvailable) {
+        ALOGW("Some data lost after exiting standby, frames written: %d, "
+              "frames to write: %d", framesWritten, fullFramesAvailable);
+    }
+    // Restore the previous buffer size, as it may have been requested by the client.
+    setBufferSize(previousBufferSize);
+
+exit:
+    return result;
+}
+
 /*
  * It normally takes about 20-30 msec to start a stream on the server.
  * But the first time can take as much as 200-300 msec. The HW
@@ -375,8 +445,15 @@
     prepareBuffersForStart(); // tell subclasses to get ready
 
     aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);
-    if (result == AAUDIO_ERROR_INVALID_HANDLE) {
-        ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
+    if (result == AAUDIO_ERROR_STANDBY) {
+        // The stream is in standby mode. Exit standby before starting the stream.
+        result = exitStandby_l();
+        if (result == AAUDIO_OK) {
+            result = mServiceInterface.startStream(mServiceStreamHandle);
+        }
+    }
+    if (result != AAUDIO_OK) {
+        ALOGD("%s() error = %d, stream was probably stolen", __func__, result);
         // Stealing was added in R. Coerce result to improve backward compatibility.
         result = AAUDIO_ERROR_DISCONNECTED;
         setState(AAUDIO_STREAM_STATE_DISCONNECTED);
@@ -396,6 +473,7 @@
         result = createThread_l(periodNanos, aaudio_callback_thread_proc, this);
     }
     if (result != AAUDIO_OK) {
+        // TODO(b/214607638): Do we want to roll back to original state or keep as disconnected?
         setState(originalState);
     }
     return result;
@@ -424,7 +502,7 @@
     if (isDataCallbackSet()
             && (isActive() || getState() == AAUDIO_STREAM_STATE_DISCONNECTED)) {
         mCallbackEnabled.store(false);
-        aaudio_result_t result = joinThread_l(NULL); // may temporarily unlock mStreamLock
+        aaudio_result_t result = joinThread_l(nullptr); // may temporarily unlock mStreamLock
         if (result == AAUDIO_ERROR_INVALID_HANDLE) {
             ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
             result = AAUDIO_OK;
@@ -511,7 +589,7 @@
     return result;
 }
 
-aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t /*clockId*/,
                            int64_t *framePosition,
                            int64_t *timeNanoseconds) {
     // Generated in server and passed to client. Return latest.
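
configureDataInformation() above keeps doubling the burst size until one burst covers at least AudioSystem::getAAudioHardwareBurstMinUsec() microseconds, so a tiny hardware burst or a high sample rate does not wake the CPU too often. A standalone sketch of that loop follows; the burstMicros computation and the exit condition are assumptions consistent with the surrounding comments, since the hunk does not show the full loop body.

    // Sketch of the burst scale-up loop in configureDataInformation() above.
    #include <cstdint>
    #include <cstdio>

    static int32_t scaleBurst(int32_t framesPerHardwareBurst, int32_t sampleRate,
                              int32_t burstMinMicros) {
        int32_t framesPerBurst = framesPerHardwareBurst;
        int32_t burstMicros = 0;
        do {
            if (burstMicros > 0) {  // skip the first pass, start from the HW burst
                framesPerBurst *= 2;
            }
            burstMicros = static_cast<int32_t>(
                    (static_cast<int64_t>(framesPerBurst) * 1000000) / sampleRate);
        } while (burstMicros < burstMinMicros);
        return framesPerBurst;
    }

    int main() {
        // e.g. a 48-frame hardware burst at 96 kHz with a 2000 us minimum -> 192 frames
        std::printf("framesPerBurst = %d\n", scaleBurst(48, 96000, 2000));
        return 0;
    }
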
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index fbe4c13..2367572 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -116,7 +116,7 @@
 
     virtual void prepareBuffersForStart() {}
 
-    virtual void advanceClientToMatchServerPosition(int32_t serverMargin = 0) = 0;
+    virtual void advanceClientToMatchServerPosition(int32_t serverMargin) = 0;
 
     virtual void onFlushFromServer() {}
 
@@ -184,9 +184,14 @@
     aaudio_result_t writeNowWithConversion(const void *buffer,
                                      int32_t numFrames);
 
+    // Exit the stream from standby; this will reconstruct the data path.
+    aaudio_result_t exitStandby_l() REQUIRES(mStreamLock);
+
     // Adjust timing model based on timestamp from service.
     void processTimestamp(uint64_t position, int64_t time);
 
+    aaudio_result_t configureDataInformation(int32_t callbackFrames);
+
     // Thread on other side of FIFO will have wakeup jitter.
     // By delaying slightly we can avoid waking up before other side is ready.
     const int32_t            mWakeupDelayNanos; // delay past typical wakeup jitter
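
exitStandby_l(), declared above and implemented in AudioStreamInternal.cpp, preserves any unplayed audio across the queue teardown: it drains the old data queue, remembers the read counter, attaches the new shared memory, seeds both counters from the cached read counter, and writes the drained frames back. The toy program below replays that bookkeeping with plain STL containers; all names and values are illustrative.

    // Standalone sketch of the counter/data bookkeeping done by exitStandby_l().
    #include <cstdio>
    #include <deque>
    #include <vector>

    int main() {
        std::deque<float> oldQueue = {0.1f, 0.2f, 0.3f};  // frames not yet consumed
        long readCounter = 100;                           // frames consumed so far

        // 1. Drain everything still pending in the old shared memory.
        std::vector<float> pending(oldQueue.begin(), oldQueue.end());
        oldQueue.clear();  // the old file descriptor / memory goes away

        // 2. A fresh, empty queue is attached after exiting standby.
        std::deque<float> newQueue;

        // 3. Both counters start at the cached read counter; writing the
        //    pending frames advances the write counter to the right place.
        long newReadCounter = readCounter;
        long newWriteCounter = readCounter;
        for (float frame : pending) {
            newQueue.push_back(frame);
            newWriteCounter++;
        }
        std::printf("read=%ld write=%ld queued=%zu\n",
                    newReadCounter, newWriteCounter, newQueue.size());
        return 0;
    }
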
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 2da5406..1efccb1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -46,8 +46,6 @@
 
 }
 
-AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
-
 void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
     int64_t readCounter = mAudioEndpoint->getDataReadCounter();
     int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
@@ -109,7 +107,7 @@
     if (mNeedCatchUp.isRequested()) {
         // Catch an MMAP pointer that is already advancing.
         // This will avoid initial underruns caused by a slow cold start.
-        advanceClientToMatchServerPosition();
+        advanceClientToMatchServerPosition(0 /*serverMargin*/);
         mNeedCatchUp.acknowledge();
     }
 
@@ -228,7 +226,7 @@
 void *AudioStreamInternalCapture::callbackLoop() {
     aaudio_result_t result = AAUDIO_OK;
     aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
-    if (!isDataCallbackSet()) return NULL;
+    if (!isDataCallbackSet()) return nullptr;
 
     // result might be a frame count
     while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
@@ -260,5 +258,5 @@
 
     ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
           result, (int) isActive());
-    return NULL;
+    return nullptr;
 }
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 251a7f2..87017de 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -28,8 +28,9 @@
 
 class AudioStreamInternalCapture : public AudioStreamInternal {
 public:
-    AudioStreamInternalCapture(AAudioServiceInterface  &serviceInterface, bool inService = false);
-    virtual ~AudioStreamInternalCapture();
+    explicit AudioStreamInternalCapture(AAudioServiceInterface  &serviceInterface,
+                                        bool inService = false);
+    virtual ~AudioStreamInternalCapture() = default;
 
     aaudio_result_t read(void *buffer,
                          int32_t numFrames,
@@ -45,7 +46,7 @@
     }
 protected:
 
-    void advanceClientToMatchServerPosition(int32_t serverOffset = 0) override;
+    void advanceClientToMatchServerPosition(int32_t serverOffset) override;
 
 /**
  * Low level data processing that will not block. It will just read or write as much as it can.
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 71bde90..450d390 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -44,8 +44,6 @@
 
 }
 
-AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
-
 constexpr int kRampMSec = 10; // time to apply a change in volume
 
 aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
@@ -54,7 +52,10 @@
         result = mFlowGraph.configure(getFormat(),
                              getSamplesPerFrame(),
                              getDeviceFormat(),
-                             getDeviceChannelCount());
+                             getDeviceChannelCount(),
+                             getRequireMonoBlend(),
+                             getAudioBalance(),
+                             (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE));
 
         if (result != AAUDIO_OK) {
             safeReleaseClose();
@@ -115,7 +116,7 @@
 }
 
 void AudioStreamInternalPlay::onFlushFromServer() {
-    advanceClientToMatchServerPosition();
+    advanceClientToMatchServerPosition(0 /*serverMargin*/);
 }
 
 // Write the data, block if needed and timeoutMillis > 0
@@ -281,7 +282,7 @@
     ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
     aaudio_result_t result = AAUDIO_OK;
     aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
-    if (!isDataCallbackSet()) return NULL;
+    if (!isDataCallbackSet()) return nullptr;
     int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
 
     // result might be a frame count
@@ -309,7 +310,7 @@
 
     ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
           __func__, result, (int) isActive());
-    return NULL;
+    return nullptr;
 }
 
 //------------------------------------------------------------------------------
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index 03c957d..e761807 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -30,8 +30,9 @@
 
 class AudioStreamInternalPlay : public AudioStreamInternal {
 public:
-    AudioStreamInternalPlay(AAudioServiceInterface  &serviceInterface, bool inService = false);
-    virtual ~AudioStreamInternalPlay();
+    explicit AudioStreamInternalPlay(AAudioServiceInterface  &serviceInterface,
+                                     bool inService = false);
+    virtual ~AudioStreamInternalPlay() = default;
 
     aaudio_result_t open(const AudioStreamBuilder &builder) override;
 
@@ -66,7 +67,7 @@
 
     void prepareBuffersForStart() override;
 
-    void advanceClientToMatchServerPosition(int32_t serverMargin = 0) override;
+    void advanceClientToMatchServerPosition(int32_t serverMargin) override;
 
     void onFlushFromServer() override;
 
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index f0dcd44..6921271 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -43,14 +43,7 @@
 // and dumped to the log when the stream is stopped.
 
 IsochronousClockModel::IsochronousClockModel()
-        : mMarkerFramePosition(0)
-        , mMarkerNanoTime(0)
-        , mSampleRate(48000)
-        , mFramesPerBurst(48)
-        , mBurstPeriodNanos(0) // this will be updated before use
-        , mMaxMeasuredLatenessNanos(0)
-        , mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
-        , mState(STATE_STOPPED)
+        : mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
 {
     if ((AAudioProperty_getLogMask() & AAUDIO_LOG_CLOCK_MODEL_HISTOGRAM) != 0) {
         mHistogramMicros = std::make_unique<Histogram>(kHistogramBinCount,
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 6280013..3007237 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -149,16 +149,16 @@
     static constexpr int32_t   kHistogramBinWidthMicros = 50;
     static constexpr int32_t   kHistogramBinCount = 128;
 
-    int64_t             mMarkerFramePosition; // Estimated HW position.
-    int64_t             mMarkerNanoTime;      // Estimated HW time.
-    int32_t             mSampleRate;
-    int32_t             mFramesPerBurst;      // number of frames transferred at one time.
-    int32_t             mBurstPeriodNanos;    // Time between HW bursts.
+    int64_t             mMarkerFramePosition{0}; // Estimated HW position.
+    int64_t             mMarkerNanoTime{0};      // Estimated HW time.
+    int32_t             mSampleRate{48000};
+    int32_t             mFramesPerBurst{48};     // number of frames transferred at one time.
+    int32_t             mBurstPeriodNanos{0};    // Time between HW bursts.
     // Includes mBurstPeriodNanos because we sample randomly over time.
-    int32_t             mMaxMeasuredLatenessNanos;
+    int32_t             mMaxMeasuredLatenessNanos{0};
     // Threshold for lateness that triggers a drift later in time.
     int32_t             mLatenessForDriftNanos;
-    clock_model_state_t mState;               // State machine handles startup sequence.
+    clock_model_state_t mState{STATE_STOPPED};   // State machine handles startup sequence.
 
     int32_t             mTimestampCount = 0;  // For logging.
 
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index d103aca..90ff4a5 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -128,7 +128,8 @@
                                                        int32_t samplesPerFrame)
 {
     AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
-    streamBuilder->setSamplesPerFrame(samplesPerFrame);
+    const aaudio_channel_mask_t channelMask = AAudioConvert_channelCountToMask(samplesPerFrame);
+    streamBuilder->setChannelMask(channelMask);
 }
 
 AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
@@ -166,6 +167,18 @@
     streamBuilder->setContentType(contentType);
 }
 
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+        aaudio_spatialization_behavior_t spatializationBehavior) {
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setSpatializationBehavior(spatializationBehavior);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+                                                            bool isSpatialized) {
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setIsContentSpatialized(isSpatialized);
+}
+
 AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
                                                    aaudio_input_preset_t inputPreset) {
     AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
@@ -223,6 +236,13 @@
     streamBuilder->setFramesPerDataCallback(frames);
 }
 
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+                                                   aaudio_channel_mask_t channelMask)
+{
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setChannelMask(channelMask);
+}
+
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
                                                      AAudioStream** streamPtr)
 {
@@ -332,7 +352,8 @@
 {
 
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
+    android::sp<AudioStream> spAudioStream(audioStream);
+    return spAudioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
 }
 
 // ============================================================
@@ -495,6 +516,19 @@
     return audioStream->getContentType();
 }
 
+AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
+        AAudioStream* stream)
+{
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getSpatializationBehavior();
+}
+
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream)
+{
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->isContentSpatialized();
+}
+
 AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
@@ -562,3 +596,11 @@
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     return audioStream->isPrivacySensitive();
 }
+
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+{
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    const aaudio_channel_mask_t channelMask = audioStream->getChannelMask();
+    // Do not return channel index masks as they are not public.
+    return AAudio_isChannelIndexMask(channelMask) ? AAUDIO_UNSPECIFIED : channelMask;
+}
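
The new public entry points above route channel configuration through channel masks instead of a raw samplesPerFrame count. A hedged usage sketch from an app's perspective (error handling trimmed; AAUDIO_CHANNEL_5POINT1 is one of the output masks accepted by this change, and the surrounding builder/stream calls are the existing NDK API):

    // Usage sketch of the new channel-mask entry points added above.
    #include <aaudio/AAudio.h>
    #include <cstdio>

    int openFivePointOneStream() {
        AAudioStreamBuilder *builder = nullptr;
        if (AAudio_createStreamBuilder(&builder) != AAUDIO_OK) return -1;
        AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
        AAudioStreamBuilder_setChannelMask(builder, AAUDIO_CHANNEL_5POINT1);

        AAudioStream *stream = nullptr;
        const aaudio_result_t result = AAudioStreamBuilder_openStream(builder, &stream);
        AAudioStreamBuilder_delete(builder);
        if (result != AAUDIO_OK) return -1;

        // The stream reports the negotiated positional mask; channel index
        // masks come back as AAUDIO_UNSPECIFIED from this getter.
        std::printf("channel mask = %#x\n", AAudioStream_getChannelMask(stream));
        AAudioStream_close(stream);
        return 0;
    }
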
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index acfac24..8b7b75e 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -30,9 +30,6 @@
 // HDMI supports up to 32 channels at 1536000 Hz.
 #define SAMPLE_RATE_HZ_MAX           1600000
 
-AAudioStreamParameters::AAudioStreamParameters() {}
-AAudioStreamParameters::~AAudioStreamParameters() {}
-
 void AAudioStreamParameters::copyFrom(const AAudioStreamParameters &other) {
     mSamplesPerFrame      = other.mSamplesPerFrame;
     mSampleRate           = other.mSampleRate;
@@ -44,11 +41,14 @@
     mBufferCapacity       = other.mBufferCapacity;
     mUsage                = other.mUsage;
     mContentType          = other.mContentType;
+    mSpatializationBehavior = other.mSpatializationBehavior;
+    mIsContentSpatialized = other.mIsContentSpatialized;
     mInputPreset          = other.mInputPreset;
     mAllowedCapturePolicy = other.mAllowedCapturePolicy;
     mIsPrivacySensitive   = other.mIsPrivacySensitive;
     mOpPackageName        = other.mOpPackageName;
     mAttributionTag       = other.mAttributionTag;
+    mChannelMask          = other.mChannelMask;
 }
 
 static aaudio_result_t isFormatValid(audio_format_t format) {
@@ -160,6 +160,19 @@
             // break;
     }
 
+    switch (mSpatializationBehavior) {
+        case AAUDIO_UNSPECIFIED:
+        case AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO:
+        case AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER:
+            break; // valid
+        default:
+            ALOGD("spatialization behavior not valid = %d", mSpatializationBehavior);
+            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            // break;
+    }
+
+    // no validation required for mIsContentSpatialized
+
     switch (mInputPreset) {
         case AAUDIO_UNSPECIFIED:
         case AAUDIO_INPUT_PRESET_GENERIC:
@@ -187,7 +200,94 @@
             // break;
     }
 
-    return AAUDIO_OK;
+    return validateChannelMask();
+}
+
+aaudio_result_t AAudioStreamParameters::validateChannelMask() const {
+    if (mChannelMask == AAUDIO_UNSPECIFIED) {
+        return AAUDIO_OK;
+    }
+
+    if (mChannelMask & AAUDIO_CHANNEL_BIT_INDEX) {
+        switch (mChannelMask) {
+            case AAUDIO_CHANNEL_INDEX_MASK_1:
+            case AAUDIO_CHANNEL_INDEX_MASK_2:
+            case AAUDIO_CHANNEL_INDEX_MASK_3:
+            case AAUDIO_CHANNEL_INDEX_MASK_4:
+            case AAUDIO_CHANNEL_INDEX_MASK_5:
+            case AAUDIO_CHANNEL_INDEX_MASK_6:
+            case AAUDIO_CHANNEL_INDEX_MASK_7:
+            case AAUDIO_CHANNEL_INDEX_MASK_8:
+            case AAUDIO_CHANNEL_INDEX_MASK_9:
+            case AAUDIO_CHANNEL_INDEX_MASK_10:
+            case AAUDIO_CHANNEL_INDEX_MASK_11:
+            case AAUDIO_CHANNEL_INDEX_MASK_12:
+            case AAUDIO_CHANNEL_INDEX_MASK_13:
+            case AAUDIO_CHANNEL_INDEX_MASK_14:
+            case AAUDIO_CHANNEL_INDEX_MASK_15:
+            case AAUDIO_CHANNEL_INDEX_MASK_16:
+            case AAUDIO_CHANNEL_INDEX_MASK_17:
+            case AAUDIO_CHANNEL_INDEX_MASK_18:
+            case AAUDIO_CHANNEL_INDEX_MASK_19:
+            case AAUDIO_CHANNEL_INDEX_MASK_20:
+            case AAUDIO_CHANNEL_INDEX_MASK_21:
+            case AAUDIO_CHANNEL_INDEX_MASK_22:
+            case AAUDIO_CHANNEL_INDEX_MASK_23:
+            case AAUDIO_CHANNEL_INDEX_MASK_24:
+                return AAUDIO_OK;
+            default:
+                ALOGD("Invalid channel index mask %#x", mChannelMask);
+                return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+        }
+    }
+
+    if (getDirection() == AAUDIO_DIRECTION_INPUT) {
+        switch (mChannelMask) {
+            case AAUDIO_CHANNEL_MONO:
+            case AAUDIO_CHANNEL_STEREO:
+            case AAUDIO_CHANNEL_FRONT_BACK:
+            case AAUDIO_CHANNEL_2POINT0POINT2:
+            case AAUDIO_CHANNEL_2POINT1POINT2:
+            case AAUDIO_CHANNEL_3POINT0POINT2:
+            case AAUDIO_CHANNEL_3POINT1POINT2:
+            case AAUDIO_CHANNEL_5POINT1:
+                return AAUDIO_OK;
+            default:
+                ALOGD("Invalid channel mask %#x, IN", mChannelMask);
+                return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+        }
+    } else {
+        switch (mChannelMask) {
+            case AAUDIO_CHANNEL_MONO:
+            case AAUDIO_CHANNEL_STEREO:
+            case AAUDIO_CHANNEL_2POINT1:
+            case AAUDIO_CHANNEL_TRI:
+            case AAUDIO_CHANNEL_TRI_BACK:
+            case AAUDIO_CHANNEL_3POINT1:
+            case AAUDIO_CHANNEL_2POINT0POINT2:
+            case AAUDIO_CHANNEL_2POINT1POINT2:
+            case AAUDIO_CHANNEL_3POINT0POINT2:
+            case AAUDIO_CHANNEL_3POINT1POINT2:
+            case AAUDIO_CHANNEL_QUAD:
+            case AAUDIO_CHANNEL_QUAD_SIDE:
+            case AAUDIO_CHANNEL_SURROUND:
+            case AAUDIO_CHANNEL_PENTA:
+            case AAUDIO_CHANNEL_5POINT1:
+            case AAUDIO_CHANNEL_5POINT1_SIDE:
+            case AAUDIO_CHANNEL_5POINT1POINT2:
+            case AAUDIO_CHANNEL_5POINT1POINT4:
+            case AAUDIO_CHANNEL_6POINT1:
+            case AAUDIO_CHANNEL_7POINT1:
+            case AAUDIO_CHANNEL_7POINT1POINT2:
+            case AAUDIO_CHANNEL_7POINT1POINT4:
+            case AAUDIO_CHANNEL_9POINT1POINT4:
+            case AAUDIO_CHANNEL_9POINT1POINT6:
+                return AAUDIO_OK;
+            default:
+                ALOGD("Invalid channel mask %#x. OUT", mChannelMask);
+                return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+        }
+    }
 }
 
 void AAudioStreamParameters::dump() const {
@@ -195,12 +295,15 @@
     ALOGD("mSessionId            = %6d", mSessionId);
     ALOGD("mSampleRate           = %6d", mSampleRate);
     ALOGD("mSamplesPerFrame      = %6d", mSamplesPerFrame);
+    ALOGD("mChannelMask          = %#x", mChannelMask);
     ALOGD("mSharingMode          = %6d", (int)mSharingMode);
     ALOGD("mAudioFormat          = %6d", (int)mAudioFormat);
     ALOGD("mDirection            = %6d", mDirection);
     ALOGD("mBufferCapacity       = %6d", mBufferCapacity);
     ALOGD("mUsage                = %6d", mUsage);
     ALOGD("mContentType          = %6d", mContentType);
+    ALOGD("mSpatializationBehavior = %6d", mSpatializationBehavior);
+    ALOGD("mIsContentSpatialized = %s", mIsContentSpatialized ? "true" : "false");
     ALOGD("mInputPreset          = %6d", mInputPreset);
     ALOGD("mAllowedCapturePolicy = %6d", mAllowedCapturePolicy);
     ALOGD("mIsPrivacySensitive   = %s", mIsPrivacySensitive ? "true" : "false");
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 5737052..cb998bf 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -26,8 +26,8 @@
 
 class AAudioStreamParameters {
 public:
-    AAudioStreamParameters();
-    virtual ~AAudioStreamParameters();
+    AAudioStreamParameters() = default;
+    virtual ~AAudioStreamParameters() = default;
 
     int32_t getDeviceId() const {
         return mDeviceId;
@@ -49,13 +49,6 @@
         return mSamplesPerFrame;
     }
 
-    /**
-     * This is also known as channelCount.
-     */
-    void setSamplesPerFrame(int32_t samplesPerFrame) {
-        mSamplesPerFrame = samplesPerFrame;
-    }
-
     audio_format_t getFormat() const {
         return mAudioFormat;
     }
@@ -104,6 +97,22 @@
         mContentType = contentType;
     }
 
+    aaudio_spatialization_behavior_t getSpatializationBehavior() const {
+        return mSpatializationBehavior;
+    }
+
+    void setSpatializationBehavior(aaudio_spatialization_behavior_t spatializationBehavior) {
+        mSpatializationBehavior = spatializationBehavior;
+    }
+
+    bool isContentSpatialized() const {
+        return mIsContentSpatialized;
+    }
+
+    void setIsContentSpatialized(bool isSpatialized) {
+        mIsContentSpatialized = isSpatialized;
+    }
+
     aaudio_input_preset_t getInputPreset() const {
         return mInputPreset;
     }
@@ -141,7 +150,7 @@
     }
 
     // TODO b/182392769: reexamine if Identity can be used
-    void setOpPackageName(const std::optional<std::string> opPackageName) {
+    void setOpPackageName(const std::optional<std::string>& opPackageName) {
         mOpPackageName = opPackageName;
     }
 
@@ -149,10 +158,19 @@
         return mAttributionTag;
     }
 
-    void setAttributionTag(const std::optional<std::string> attributionTag) {
+    void setAttributionTag(const std::optional<std::string>& attributionTag) {
         mAttributionTag = attributionTag;
     }
 
+    aaudio_channel_mask_t getChannelMask() const {
+        return mChannelMask;
+    }
+
+    void setChannelMask(aaudio_channel_mask_t channelMask) {
+        mChannelMask = channelMask;
+        mSamplesPerFrame = AAudioConvert_channelMaskToCount(channelMask);
+    }
+
     /**
      * @return bytes per frame of getFormat()
      */
@@ -171,6 +189,8 @@
     void dump() const;
 
 private:
+    aaudio_result_t validateChannelMask() const;
+
     int32_t                         mSamplesPerFrame      = AAUDIO_UNSPECIFIED;
     int32_t                         mSampleRate           = AAUDIO_UNSPECIFIED;
     int32_t                         mDeviceId             = AAUDIO_UNSPECIFIED;
@@ -179,6 +199,9 @@
     aaudio_direction_t              mDirection            = AAUDIO_DIRECTION_OUTPUT;
     aaudio_usage_t                  mUsage                = AAUDIO_UNSPECIFIED;
     aaudio_content_type_t           mContentType          = AAUDIO_UNSPECIFIED;
+    aaudio_spatialization_behavior_t mSpatializationBehavior
+                                                          = AAUDIO_UNSPECIFIED;
+    bool                            mIsContentSpatialized = false;
     aaudio_input_preset_t           mInputPreset          = AAUDIO_UNSPECIFIED;
     int32_t                         mBufferCapacity       = AAUDIO_UNSPECIFIED;
     aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_UNSPECIFIED;
@@ -186,6 +209,7 @@
     bool                            mIsPrivacySensitive   = false;
     std::optional<std::string>      mOpPackageName        = {};
     std::optional<std::string>      mAttributionTag       = {};
+    aaudio_channel_mask_t           mChannelMask          = AAUDIO_UNSPECIFIED;
 };
 
 } /* namespace aaudio */
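
setChannelMask() above derives mSamplesPerFrame from the mask via AAudioConvert_channelMaskToCount(), so the two fields can no longer drift apart. For positional masks that conversion is essentially a population count of the speaker-position bits; the sketch below shows only that idea and does not reproduce the real helper's handling of channel index masks or AAUDIO_UNSPECIFIED.

    // Illustrative only: counting channels in a positional mask.
    #include <bitset>
    #include <cstdint>
    #include <cstdio>

    static int32_t channelCountFromPositionalMask(uint32_t mask) {
        // Each set bit of a positional mask is one speaker position.
        return static_cast<int32_t>(std::bitset<32>(mask).count());
    }

    int main() {
        const uint32_t assumedStereoMask = 0x3;  // FRONT_LEFT | FRONT_RIGHT (assumed layout)
        std::printf("stereo -> %d channels\n", channelCountFromPositionalMask(assumedStereoMask));
        return 0;
    }
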
diff --git a/media/libaaudio/src/core/AudioGlobal.h b/media/libaaudio/src/core/AudioGlobal.h
index 1e88d15..6c22744 100644
--- a/media/libaaudio/src/core/AudioGlobal.h
+++ b/media/libaaudio/src/core/AudioGlobal.h
@@ -31,7 +31,8 @@
 const char* AudioGlobal_convertResultToText(aaudio_result_t returnCode);
 const char* AudioGlobal_convertSharingModeToText(aaudio_sharing_mode_t mode);
 const char* AudioGlobal_convertStreamStateToText(aaudio_stream_state_t state);
-}
+
+} // namespace aaudio
 
 #endif  // AAUDIO_AUDIOGLOBAL_H
 
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 09d9535..06f05b0 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -76,6 +76,7 @@
     // Copy parameters from the Builder because the Builder may be deleted after this call.
     // TODO AudioStream should be a subclass of AudioStreamParameters
     mSamplesPerFrame = builder.getSamplesPerFrame();
+    mChannelMask = builder.getChannelMask();
     mSampleRate = builder.getSampleRate();
     mDeviceId = builder.getDeviceId();
     mFormat = builder.getFormat();
@@ -91,6 +92,12 @@
     if (mContentType == AAUDIO_UNSPECIFIED) {
         mContentType = AAUDIO_CONTENT_TYPE_MUSIC;
     }
+    mSpatializationBehavior = builder.getSpatializationBehavior();
+    // For consistency with other properties, UNSPECIFIED is treated the same as AUTO.
+    if (mSpatializationBehavior == AAUDIO_UNSPECIFIED) {
+        mSpatializationBehavior = AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO;
+    }
+    mIsContentSpatialized = builder.isContentSpatialized();
     mInputPreset = builder.getInputPreset();
     if (mInputPreset == AAUDIO_UNSPECIFIED) {
         mInputPreset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
@@ -595,6 +602,7 @@
 
 void AudioStream::setDuckAndMuteVolume(float duckAndMuteVolume) {
     ALOGD("%s() to %f", __func__, duckAndMuteVolume);
+    std::lock_guard<std::mutex> lock(mStreamLock);
     mDuckAndMuteVolume = duckAndMuteVolume;
     doSetVolume(); // apply this change
 }
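
The hunk above adds mStreamLock around the duck/mute volume update so the write and the doSetVolume() call cannot interleave with other stream operations. A minimal sketch of that pattern, with illustrative names:

    // Minimal sketch of the locking added to setDuckAndMuteVolume() above.
    #include <cstdio>
    #include <mutex>

    class Stream {
    public:
        void setDuckAndMuteVolume(float volume) {
            std::lock_guard<std::mutex> lock(mStreamLock);  // serialize with other stream ops
            mDuckAndMuteVolume = volume;
            doSetVolume();  // apply the change while still holding the lock
        }
    private:
        void doSetVolume() { std::printf("volume = %f\n", mDuckAndMuteVolume); }
        std::mutex mStreamLock;
        float mDuckAndMuteVolume = 1.0f;
    };

    int main() {
        Stream stream;
        stream.setDuckAndMuteVolume(0.25f);
        return 0;
    }
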
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 9835c8c..5fb4528 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -253,6 +253,14 @@
         return mContentType;
     }
 
+    aaudio_spatialization_behavior_t getSpatializationBehavior() const {
+        return mSpatializationBehavior;
+    }
+
+    bool isContentSpatialized() const {
+        return mIsContentSpatialized;
+    }
+
     aaudio_input_preset_t getInputPreset() const {
         return mInputPreset;
     }
@@ -269,8 +277,17 @@
         return mIsPrivacySensitive;
     }
 
+    bool getRequireMonoBlend() const {
+        return mRequireMonoBlend;
+    }
+
+    float getAudioBalance() const {
+        return mAudioBalance;
+    }
+
     /**
-     * This is only valid after setSamplesPerFrame() and setFormat() have been called.
+     * This is only valid after setChannelMask() and setFormat()
+     * have been called.
      */
     int32_t getBytesPerFrame() const {
         return mSamplesPerFrame * getBytesPerSample();
@@ -284,7 +301,7 @@
     }
 
     /**
-     * This is only valid after setSamplesPerFrame() and setDeviceFormat() have been called.
+     * This is only valid after setChannelMask() and setDeviceFormat() have been called.
      */
     int32_t getBytesPerDeviceFrame() const {
         return getSamplesPerFrame() * audio_bytes_per_sample(getDeviceFormat());
@@ -318,6 +335,15 @@
         return mFramesPerDataCallback;
     }
 
+    aaudio_channel_mask_t getChannelMask() const {
+        return mChannelMask;
+    }
+
+    void setChannelMask(aaudio_channel_mask_t channelMask) {
+        mChannelMask = channelMask;
+        mSamplesPerFrame = AAudioConvert_channelMaskToCount(channelMask);
+    }
+
     /**
      * @return true if data callback has been specified
      */
@@ -429,7 +455,7 @@
     // PlayerBase allows the system to control the stream volume.
     class MyPlayerBase : public android::PlayerBase {
     public:
-        MyPlayerBase() {};
+        MyPlayerBase() = default;
 
         virtual ~MyPlayerBase() = default;
 
@@ -495,11 +521,6 @@
     }
 
     // This should not be called after the open() call.
-    void setSamplesPerFrame(int32_t samplesPerFrame) {
-        mSamplesPerFrame = samplesPerFrame;
-    }
-
-    // This should not be called after the open() call.
     void setFramesPerBurst(int32_t framesPerBurst) {
         mFramesPerBurst = framesPerBurst;
     }
@@ -563,7 +584,7 @@
      * @param numFrames
      * @return original pointer or the conversion buffer
      */
-    virtual const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+    virtual const void * maybeConvertDeviceData(const void *audioData, int32_t /*numFrames*/) {
         return audioData;
     }
 
@@ -589,6 +610,14 @@
         mContentType = contentType;
     }
 
+    void setSpatializationBehavior(aaudio_spatialization_behavior_t spatializationBehavior) {
+        mSpatializationBehavior = spatializationBehavior;
+    }
+
+    void setIsContentSpatialized(bool isContentSpatialized) {
+        mIsContentSpatialized = isContentSpatialized;
+    }
+
     /**
      * This should not be called after the open() call.
      */
@@ -610,6 +639,20 @@
         mIsPrivacySensitive = privacySensitive;
     }
 
+    /**
+     * This should not be called after the open() call.
+     */
+    void setRequireMonoBlend(bool requireMonoBlend) {
+        mRequireMonoBlend = requireMonoBlend;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setAudioBalance(float audioBalance) {
+        mAudioBalance = audioBalance;
+    }
+
     std::string mMetricsId; // set once during open()
 
     std::mutex                 mStreamLock;
@@ -633,6 +676,7 @@
 
     // These do not change after open().
     int32_t                     mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    aaudio_channel_mask_t       mChannelMask = AAUDIO_UNSPECIFIED;
     int32_t                     mSampleRate = AAUDIO_UNSPECIFIED;
     int32_t                     mDeviceId = AAUDIO_UNSPECIFIED;
     aaudio_sharing_mode_t       mSharingMode = AAUDIO_SHARING_MODE_SHARED;
@@ -645,9 +689,13 @@
 
     aaudio_usage_t              mUsage           = AAUDIO_UNSPECIFIED;
     aaudio_content_type_t       mContentType     = AAUDIO_UNSPECIFIED;
+    aaudio_spatialization_behavior_t mSpatializationBehavior = AAUDIO_UNSPECIFIED;
+    bool                        mIsContentSpatialized = false;
     aaudio_input_preset_t       mInputPreset     = AAUDIO_UNSPECIFIED;
     aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
     bool                        mIsPrivacySensitive = false;
+    bool                        mRequireMonoBlend = false;
+    float                       mAudioBalance = 0;
 
     int32_t                     mSessionId = AAUDIO_UNSPECIFIED;
 
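
The getBytesPerFrame() and getBytesPerDeviceFrame() comments above now refer to setChannelMask(), since the channel mask drives mSamplesPerFrame via AAudioConvert_channelMaskToCount(). A rough standalone sketch of that relationship, assuming a simple one-bit-per-position mask (the real aaudio_channel_mask_t encoding also supports index masks, so this is illustrative only):

    #include <cstdint>

    // Assumed encoding: one set bit per channel position.
    static int32_t channelCountFromMask(uint32_t positionMask) {
        return __builtin_popcount(positionMask);     // one channel per set bit
    }

    static int32_t bytesPerFrame(uint32_t positionMask, int32_t bytesPerSample) {
        // Mirrors getBytesPerFrame(): samples per frame times bytes per sample.
        return channelCountFromMask(positionMask) * bytesPerSample;
    }

    // Example: a stereo mask (two bits set) with 16-bit samples:
    // bytesPerFrame(0x3, 2) == 4
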
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index e015592..2be3d65 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -20,9 +20,14 @@
 
 #include <new>
 #include <stdint.h>
+#include <vector>
 
 #include <aaudio/AAudio.h>
 #include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <media/AudioSystem.h>
 
 #include "binding/AAudioBinderClient.h"
 #include "client/AudioStreamInternalCapture.h"
@@ -35,6 +40,10 @@
 
 using namespace aaudio;
 
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
 #define AAUDIO_MMAP_POLICY_DEFAULT             AAUDIO_POLICY_NEVER
 #define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT   AAUDIO_POLICY_NEVER
 
@@ -53,16 +62,10 @@
 /*
  * AudioStreamBuilder
  */
-AudioStreamBuilder::AudioStreamBuilder() {
-}
-
-AudioStreamBuilder::~AudioStreamBuilder() {
-}
-
 static aaudio_result_t builder_createStream(aaudio_direction_t direction,
-                                         aaudio_sharing_mode_t sharingMode,
-                                         bool tryMMap,
-                                         android::sp<AudioStream> &stream) {
+                                            aaudio_sharing_mode_t /*sharingMode*/,
+                                            bool tryMMap,
+                                            android::sp<AudioStream> &stream) {
     aaudio_result_t result = AAUDIO_OK;
 
     switch (direction) {
@@ -92,6 +95,37 @@
     return result;
 }
 
+namespace {
+
+aaudio_policy_t aidl2legacy_aaudio_policy(AudioMMapPolicy aidl) {
+    switch (aidl) {
+        case AudioMMapPolicy::NEVER:
+            return AAUDIO_POLICY_NEVER;
+        case AudioMMapPolicy::AUTO:
+            return AAUDIO_POLICY_AUTO;
+        case AudioMMapPolicy::ALWAYS:
+            return AAUDIO_POLICY_ALWAYS;
+        case AudioMMapPolicy::UNSPECIFIED:
+        default:
+            return AAUDIO_UNSPECIFIED;
+    }
+}
+
+// The aaudio policy will be ALWAYS, NEVER or UNSPECIFIED only when all policy infos agree on
+// ALWAYS, NEVER or UNSPECIFIED. Otherwise, the aaudio policy will be AUTO.
+aaudio_policy_t getAAudioPolicy(
+        const std::vector<AudioMMapPolicyInfo>& policyInfos) {
+    if (policyInfos.empty()) return AAUDIO_POLICY_AUTO;
+    for (size_t i = 1; i < policyInfos.size(); ++i) {
+        if (policyInfos.at(i).mmapPolicy != policyInfos.at(0).mmapPolicy) {
+            return AAUDIO_POLICY_AUTO;
+        }
+    }
+    return aidl2legacy_aaudio_policy(policyInfos.at(0).mmapPolicy);
+}
+
+} // namespace
+
 // Try to open using MMAP path if that is allowed.
 // Fall back to Legacy path if MMAP not available.
 // Exact behavior is controlled by MMapPolicy.
@@ -110,25 +144,32 @@
         return result;
     }
 
+    std::vector<AudioMMapPolicyInfo> policyInfos;
     // The API setting is the highest priority.
     aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
     // If not specified then get from a system property.
-    if (mmapPolicy == AAUDIO_UNSPECIFIED) {
-        mmapPolicy = AAudioProperty_getMMapPolicy();
+    if (mmapPolicy == AAUDIO_UNSPECIFIED && android::AudioSystem::getMmapPolicyInfo(
+                AudioMMapPolicyType::DEFAULT, &policyInfos) == NO_ERROR) {
+        mmapPolicy = getAAudioPolicy(policyInfos);
     }
     // If still not specified then use the default.
     if (mmapPolicy == AAUDIO_UNSPECIFIED) {
         mmapPolicy = AAUDIO_MMAP_POLICY_DEFAULT;
     }
 
-    int32_t mapExclusivePolicy = AAudioProperty_getMMapExclusivePolicy();
-    if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
-        mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
+    policyInfos.clear();
+    aaudio_policy_t mmapExclusivePolicy = AAUDIO_UNSPECIFIED;
+    if (android::AudioSystem::getMmapPolicyInfo(
+            AudioMMapPolicyType::EXCLUSIVE, &policyInfos) == NO_ERROR) {
+        mmapExclusivePolicy = getAAudioPolicy(policyInfos);
+    }
+    if (mmapExclusivePolicy == AAUDIO_UNSPECIFIED) {
+        mmapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
     }
 
     aaudio_sharing_mode_t sharingMode = getSharingMode();
     if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
-        && (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
+        && (mmapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
         ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
         sharingMode = AAUDIO_SHARING_MODE_SHARED;
         setSharingMode(sharingMode);
@@ -268,8 +309,8 @@
 
 void AudioStreamBuilder::logParameters() const {
     // This is very helpful for debugging in the future. Please leave it in.
-    ALOGI("rate   = %6d, channels  = %d, format   = %d, sharing = %s, dir = %s",
-          getSampleRate(), getSamplesPerFrame(), getFormat(),
+    ALOGI("rate   = %6d, channels  = %d, channelMask = %#x, format   = %d, sharing = %s, dir = %s",
+          getSampleRate(), getSamplesPerFrame(), getChannelMask(), getFormat(),
           AAudio_convertSharingModeToShortText(getSharingMode()),
           AAudio_convertDirectionToText(getDirection()));
     ALOGI("device = %6d, sessionId = %d, perfMode = %d, callback: %s with frames = %d",
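
The MMAP policy is now resolved from AudioSystem::getMmapPolicyInfo() instead of system properties, using the rule documented above getAAudioPolicy(). A self-contained sketch of that rule with a stand-in enum (Policy is illustrative, not the AudioMMapPolicy AIDL type):

    #include <vector>

    enum class Policy { Unspecified, Never, Auto, Always };

    // Empty or mixed per-port policies collapse to AUTO; only a unanimous
    // list yields that single policy, matching getAAudioPolicy() above.
    static Policy resolvePolicy(const std::vector<Policy>& infos) {
        if (infos.empty()) return Policy::Auto;
        for (size_t i = 1; i < infos.size(); ++i) {
            if (infos[i] != infos[0]) return Policy::Auto;   // mixed -> AUTO
        }
        return infos[0];                                     // unanimous
    }

    // resolvePolicy({})                              -> Auto
    // resolvePolicy({Policy::Never, Policy::Never})  -> Never
    // resolvePolicy({Policy::Never, Policy::Always}) -> Auto
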
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 9f93341..f91c25a 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -31,9 +31,9 @@
  */
 class AudioStreamBuilder : public AAudioStreamParameters {
 public:
-    AudioStreamBuilder();
+    AudioStreamBuilder() = default;
 
-    ~AudioStreamBuilder();
+    ~AudioStreamBuilder() = default;
 
     bool isSharingModeMatchRequired() const {
         return mSharingModeMatchRequired;
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 37548f0..7b0aca1 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -38,7 +38,7 @@
 
 class FifoBuffer {
 public:
-    FifoBuffer(int32_t bytesPerFrame);
+    explicit FifoBuffer(int32_t bytesPerFrame);
 
     virtual ~FifoBuffer() = default;
 
@@ -162,6 +162,6 @@
     uint8_t *mExternalStorage = nullptr;
 };
 
-}  // android
+}  // namespace android
 
 #endif //FIFO_FIFO_BUFFER_H
diff --git a/media/libaaudio/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
index 057a94e..e15d444 100644
--- a/media/libaaudio/src/fifo/FifoController.h
+++ b/media/libaaudio/src/fifo/FifoController.h
@@ -36,7 +36,7 @@
     , mWriteCounter(0)
     {}
 
-    virtual ~FifoController() {}
+    virtual ~FifoController() = default;
 
     // TODO review use of memory barriers, probably incorrect
     virtual fifo_counter_t getReadCounter() override {
@@ -57,6 +57,6 @@
     std::atomic<fifo_counter_t> mWriteCounter;
 };
 
-}  // android
+}  // namespace android
 
 #endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
index 1dece0e..ad6d041 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.cpp
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -29,9 +29,6 @@
 {
 }
 
-FifoControllerBase::~FifoControllerBase() {
-}
-
 fifo_frames_t FifoControllerBase::getFullFramesAvailable() {
     fifo_frames_t temp = 0;
     __builtin_sub_overflow(getWriteCounter(), getReadCounter(), &temp);
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.h b/media/libaaudio/src/fifo/FifoControllerBase.h
index 1edb8a3..2a6173b 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.h
+++ b/media/libaaudio/src/fifo/FifoControllerBase.h
@@ -43,7 +43,7 @@
      */
     FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold);
 
-    virtual ~FifoControllerBase();
+    virtual ~FifoControllerBase() = default;
 
     // Abstract methods to be implemented in subclasses.
     /**
@@ -123,6 +123,6 @@
     fifo_frames_t mThreshold;
 };
 
-}  // android
+}  // namespace android
 
 #endif // FIFO_FIFO_CONTROLLER_BASE_H
diff --git a/media/libaaudio/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
index ec48e57..a59225a 100644
--- a/media/libaaudio/src/fifo/FifoControllerIndirect.h
+++ b/media/libaaudio/src/fifo/FifoControllerIndirect.h
@@ -44,7 +44,7 @@
         setReadCounter(0);
         setWriteCounter(0);
     }
-    virtual ~FifoControllerIndirect() {};
+    virtual ~FifoControllerIndirect() = default;
 
     // TODO review use of memory barriers, probably incorrect
     virtual fifo_counter_t getReadCounter() override {
@@ -68,6 +68,6 @@
     std::atomic<fifo_counter_t> * mWriteCounterAddress;
 };
 
-}  // android
+}  // namespace android
 
 #endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp b/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
deleted file mode 100644
index 5667fdb..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <sys/types.h>
-#include "AudioProcessorBase.h"
-
-using namespace flowgraph;
-
-/***************************************************************************/
-int32_t AudioProcessorBase::pullData(int64_t framePosition, int32_t numFrames) {
-    if (framePosition > mLastFramePosition) {
-        mLastFramePosition = framePosition;
-        mFramesValid = onProcess(framePosition, numFrames);
-    }
-    return mFramesValid;
-}
-
-/***************************************************************************/
-AudioFloatBlockPort::AudioFloatBlockPort(AudioProcessorBase &parent,
-                               int32_t samplesPerFrame,
-                               int32_t framesPerBlock)
-        : AudioPort(parent, samplesPerFrame)
-        , mFramesPerBlock(framesPerBlock)
-        , mSampleBlock(NULL) {
-    int32_t numFloats = framesPerBlock * getSamplesPerFrame();
-    mSampleBlock = new float[numFloats]{0.0f};
-}
-
-AudioFloatBlockPort::~AudioFloatBlockPort() {
-    delete[] mSampleBlock;
-}
-
-/***************************************************************************/
-int32_t AudioFloatOutputPort::pullData(int64_t framePosition, int32_t numFrames) {
-    numFrames = std::min(getFramesPerBlock(), numFrames);
-    return mParent.pullData(framePosition, numFrames);
-}
-
-// These need to be in the .cpp file because of forward cross references.
-void AudioFloatOutputPort::connect(AudioFloatInputPort *port) {
-    port->connect(this);
-}
-
-void AudioFloatOutputPort::disconnect(AudioFloatInputPort *port) {
-    port->disconnect(this);
-}
-
-/***************************************************************************/
-int32_t AudioFloatInputPort::pullData(int64_t framePosition, int32_t numFrames) {
-    return (mConnected == NULL)
-            ? std::min(getFramesPerBlock(), numFrames)
-            : mConnected->pullData(framePosition, numFrames);
-}
-
-float *AudioFloatInputPort::getBlock() {
-    if (mConnected == NULL) {
-        return AudioFloatBlockPort::getBlock(); // loaded using setValue()
-    } else {
-        return mConnected->getBlock();
-    }
-}
-
-/***************************************************************************/
-int32_t AudioSink::pull(int32_t numFrames) {
-    int32_t actualFrames = input.pullData(mFramePosition, numFrames);
-    mFramePosition += actualFrames;
-    return actualFrames;
-}
\ No newline at end of file
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.h b/media/libaaudio/src/flowgraph/AudioProcessorBase.h
deleted file mode 100644
index 972932f..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * AudioProcessorBase.h
- *
- * Audio processing node and ports that can be used in a simple data flow graph.
- */
-
-#ifndef FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-#define FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-
-#include <cassert>
-#include <cstring>
-#include <math.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-// TODO consider publishing all header files under "include/libaaudio/FlowGraph.h"
-
-namespace flowgraph {
-
-// Default block size that can be overridden when the AudioFloatBlockPort is created.
-// If it is too small then we will have too much overhead from switching between nodes.
-// If it is too high then we will thrash the caches.
-constexpr int kDefaultBlockSize = 8; // arbitrary
-
-class AudioFloatInputPort;
-
-/***************************************************************************/
-class AudioProcessorBase {
-public:
-    virtual ~AudioProcessorBase() = default;
-
-    /**
-     * Perform custom function.
-     *
-     * @param framePosition index of first frame to be processed
-     * @param numFrames maximum number of frames requested for processing
-     * @return number of frames actually processed
-     */
-    virtual int32_t onProcess(int64_t framePosition, int32_t numFrames) = 0;
-
-    /**
-     * If the framePosition is at or after the last frame position then call onProcess().
-     * This prevents infinite recursion in case of cyclic graphs.
-     * It also prevents nodes upstream from a branch from being executed twice.
-     *
-     * @param framePosition
-     * @param numFrames
-     * @return
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-protected:
-    int64_t  mLastFramePosition = -1; // Start at -1 so that the first pull works.
-
-private:
-    int32_t  mFramesValid = 0; // num valid frames in the block
-};
-
-/***************************************************************************/
-/**
-  * This is a connector that allows data to flow between modules.
-  */
-class AudioPort {
-public:
-    AudioPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : mParent(parent)
-            , mSamplesPerFrame(samplesPerFrame) {
-    }
-
-    // Ports are often declared public. So let's make them non-copyable.
-    AudioPort(const AudioPort&) = delete;
-    AudioPort& operator=(const AudioPort&) = delete;
-
-    int32_t getSamplesPerFrame() const {
-        return mSamplesPerFrame;
-    }
-
-protected:
-    AudioProcessorBase &mParent;
-
-private:
-    const int32_t    mSamplesPerFrame = 1;
-};
-
-/***************************************************************************/
-/**
- * This port contains a float type buffer.
- * The size is framesPerBlock * samplesPerFrame).
- */
-class AudioFloatBlockPort  : public AudioPort {
-public:
-    AudioFloatBlockPort(AudioProcessorBase &mParent,
-                   int32_t samplesPerFrame,
-                   int32_t framesPerBlock = kDefaultBlockSize
-                );
-
-    virtual ~AudioFloatBlockPort();
-
-    int32_t getFramesPerBlock() const {
-        return mFramesPerBlock;
-    }
-
-protected:
-
-    /**
-     * @return buffer internal to the port or from a connected port
-     */
-    virtual float *getBlock() {
-        return mSampleBlock;
-    }
-
-
-private:
-    const int32_t    mFramesPerBlock = 1;
-    float           *mSampleBlock = nullptr; // allocated in constructor
-};
-
-/***************************************************************************/
-/**
-  * The results of a module are stored in the buffer of the output ports.
-  */
-class AudioFloatOutputPort : public AudioFloatBlockPort {
-public:
-    AudioFloatOutputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : AudioFloatBlockPort(parent, samplesPerFrame) {
-    }
-
-    virtual ~AudioFloatOutputPort() = default;
-
-    using AudioFloatBlockPort::getBlock;
-
-    /**
-     * Call the parent module's onProcess() method.
-     * That may pull data from its inputs and recursively
-     * process the entire graph.
-     * @return number of frames actually pulled
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-    /**
-     * Connect to the input of another module.
-     * An input port can only have one connection.
-     * An output port can have multiple connections.
-     * If you connect a second output port to an input port
-     * then it overwrites the previous connection.
-     *
-     * This not thread safe. Do not modify the graph topology form another thread while running.
-     */
-    void connect(AudioFloatInputPort *port);
-
-    /**
-     * Disconnect from the input of another module.
-     * This not thread safe.
-     */
-    void disconnect(AudioFloatInputPort *port);
-};
-
-/***************************************************************************/
-class AudioFloatInputPort : public AudioFloatBlockPort {
-public:
-    AudioFloatInputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : AudioFloatBlockPort(parent, samplesPerFrame) {
-    }
-
-    virtual ~AudioFloatInputPort() = default;
-
-    /**
-     * If connected to an output port then this will return
-     * that output ports buffers.
-     * If not connected then it returns the input ports own buffer
-     * which can be loaded using setValue().
-     */
-    float *getBlock() override;
-
-    /**
-     * Pull data from any output port that is connected.
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-    /**
-     * Write every value of the float buffer.
-     * This value will be ignored if an output port is connected
-     * to this port.
-     */
-    void setValue(float value) {
-        int numFloats = kDefaultBlockSize * getSamplesPerFrame();
-        float *buffer = getBlock();
-        for (int i = 0; i < numFloats; i++) {
-            *buffer++ = value;
-        }
-    }
-
-    /**
-     * Connect to the output of another module.
-     * An input port can only have one connection.
-     * An output port can have multiple connections.
-     * This not thread safe.
-     */
-    void connect(AudioFloatOutputPort *port) {
-        assert(getSamplesPerFrame() == port->getSamplesPerFrame());
-        mConnected = port;
-    }
-
-    void disconnect(AudioFloatOutputPort *port) {
-        assert(mConnected == port);
-        (void) port;
-        mConnected = nullptr;
-    }
-
-    void disconnect() {
-        mConnected = nullptr;
-    }
-
-private:
-    AudioFloatOutputPort *mConnected = nullptr;
-};
-
-/***************************************************************************/
-class AudioSource : public AudioProcessorBase {
-public:
-    explicit AudioSource(int32_t channelCount)
-            : output(*this, channelCount) {
-    }
-
-    virtual ~AudioSource() = default;
-
-    AudioFloatOutputPort output;
-
-    void setData(const void *data, int32_t numFrames) {
-        mData = data;
-        mSizeInFrames = numFrames;
-        mFrameIndex = 0;
-    }
-
-protected:
-    const void *mData = nullptr;
-    int32_t     mSizeInFrames = 0; // number of frames in mData
-    int32_t     mFrameIndex = 0; // index of next frame to be processed
-};
-
-/***************************************************************************/
-class AudioSink : public AudioProcessorBase {
-public:
-    explicit AudioSink(int32_t channelCount)
-            : input(*this, channelCount) {
-    }
-
-    virtual ~AudioSink() = default;
-
-    AudioFloatInputPort input;
-
-    /**
-     * Do nothing. The work happens in the read() method.
-     *
-     * @param framePosition index of first frame to be processed
-     * @param numFrames
-     * @return number of frames actually processed
-     */
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override {
-        (void) framePosition;
-        (void) numFrames;
-        return 0;
-    };
-
-    virtual int32_t read(void *data, int32_t numFrames) = 0;
-
-protected:
-    int32_t pull(int32_t numFrames);
-
-private:
-    int64_t mFramePosition = 0;
-};
-
-} /* namespace flowgraph */
-
-#endif /* FLOWGRAPH_AUDIO_PROCESSOR_BASE_H */
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
new file mode 100644
index 0000000..351def2
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "ChannelCountConverter.h"
+
+using namespace flowgraph;
+
+ChannelCountConverter::ChannelCountConverter(
+        int32_t inputChannelCount,
+        int32_t outputChannelCount)
+        : input(*this, inputChannelCount)
+        , output(*this, outputChannelCount) {
+}
+
+ChannelCountConverter::~ChannelCountConverter() = default;
+
+int32_t ChannelCountConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+    int32_t inputChannelCount = input.getSamplesPerFrame();
+    int32_t outputChannelCount = output.getSamplesPerFrame();
+    for (int i = 0; i < numFrames; i++) {
+        int inputChannel = 0;
+        for (int outputChannel = 0; outputChannel < outputChannelCount; outputChannel++) {
+            // Copy input channels to output channels.
+            // Wrap if we run out of inputs.
+            // Discard if we run out of outputs.
+            outputBuffer[outputChannel] = inputBuffer[inputChannel];
+            inputChannel = (inputChannel == inputChannelCount - 1)
+                    ? 0 : inputChannel + 1;
+        }
+        inputBuffer += inputChannelCount;
+        outputBuffer += outputChannelCount;
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.h b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
new file mode 100644
index 0000000..e4b6f4e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+#define FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Change the number of channels without mixing.
+ * When increasing the channel count, duplicate input channels.
+ * When decreasing the channel count, drop input channels.
+ */
+class ChannelCountConverter : public FlowGraphNode {
+public:
+    explicit ChannelCountConverter(
+            int32_t inputChannelCount,
+            int32_t outputChannelCount);
+
+    virtual ~ChannelCountConverter();
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "ChannelCountConverter";
+    }
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
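
A standalone sketch of the per-frame rule ChannelCountConverter applies, using plain arrays instead of flowgraph ports: copy inputs to outputs, wrap around the inputs when the output has more channels, and drop the extra inputs when it has fewer.

    #include <cstdint>

    static void convertOneFrame(const float* in, int32_t inChannels,
                                float* out, int32_t outChannels) {
        int32_t inputChannel = 0;
        for (int32_t outputChannel = 0; outputChannel < outChannels; outputChannel++) {
            out[outputChannel] = in[inputChannel];
            // Wrap back to the first input channel after the last one.
            inputChannel = (inputChannel == inChannels - 1) ? 0 : inputChannel + 1;
        }
    }

    // Mono -> stereo duplicates the single input: {0.5f} becomes {0.5f, 0.5f}.
    // Stereo -> mono keeps only the first channel: {0.25f, 0.75f} becomes {0.25f}.
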
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.cpp b/media/libaaudio/src/flowgraph/ClipToRange.cpp
index bd9c22a..d2f8a02 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.cpp
+++ b/media/libaaudio/src/flowgraph/ClipToRange.cpp
@@ -16,25 +16,23 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "ClipToRange.h"
 
 using namespace flowgraph;
 
 ClipToRange::ClipToRange(int32_t channelCount)
-        : input(*this, channelCount)
-        , output(*this, channelCount) {
+        : FlowGraphFilter(channelCount) {
 }
 
-int32_t ClipToRange::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t ClipToRange::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
 
-    int32_t numSamples = framesToProcess * output.getSamplesPerFrame();
+    int32_t numSamples = numFrames * output.getSamplesPerFrame();
     for (int32_t i = 0; i < numSamples; i++) {
         *outputBuffer++ = std::min(mMaximum, std::max(mMinimum, *inputBuffer++));
     }
 
-    return framesToProcess;
+    return numFrames;
 }
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.h b/media/libaaudio/src/flowgraph/ClipToRange.h
index 9eef254..22b7804 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.h
+++ b/media/libaaudio/src/flowgraph/ClipToRange.h
@@ -21,7 +21,7 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
@@ -30,13 +30,13 @@
 constexpr float kDefaultMaxHeadroom = 1.41253754f;
 constexpr float kDefaultMinHeadroom = -kDefaultMaxHeadroom;
 
-class ClipToRange : public AudioProcessorBase {
+class ClipToRange : public FlowGraphFilter {
 public:
     explicit ClipToRange(int32_t channelCount);
 
     virtual ~ClipToRange() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
     void setMinimum(float min) {
         mMinimum = min;
@@ -54,8 +54,9 @@
         return mMaximum;
     }
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "ClipToRange";
+    }
 
 private:
     float mMinimum = kDefaultMinHeadroom;
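
ClipToRange is now a FlowGraphFilter, so it inherits its input/output ports and only implements the per-sample clamp. A standalone sketch of that operation; the default limits of ±1.41253754f correspond to roughly +3 dB of headroom above full scale:

    #include <algorithm>

    static void clipBuffer(const float* in, float* out, int numSamples,
                           float minimum, float maximum) {
        for (int i = 0; i < numSamples; i++) {
            // Same expression as onProcess(): clamp each sample to [minimum, maximum].
            out[i] = std::min(maximum, std::max(minimum, in[i]));
        }
    }
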
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.cpp b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
new file mode 100644
index 0000000..4c76e77
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <algorithm>
+#include <sys/types.h>
+#include "FlowGraphNode.h"
+
+using namespace flowgraph;
+
+/***************************************************************************/
+int32_t FlowGraphNode::pullData(int32_t numFrames, int64_t callCount) {
+    int32_t frameCount = numFrames;
+    // Prevent recursion and multiple execution of nodes.
+    if (callCount > mLastCallCount) {
+        mLastCallCount = callCount;
+        if (mDataPulledAutomatically) {
+            // Pull from all the upstream nodes.
+            for (auto &port : mInputPorts) {
+                // TODO fix bug of leaving unused data in some ports if using multiple AudioSource
+                frameCount = port.get().pullData(callCount, frameCount);
+            }
+        }
+        if (frameCount > 0) {
+            frameCount = onProcess(frameCount);
+        }
+        mLastFrameCount = frameCount;
+    } else {
+        frameCount = mLastFrameCount;
+    }
+    return frameCount;
+}
+
+void FlowGraphNode::pullReset() {
+    if (!mBlockRecursion) {
+        mBlockRecursion = true; // for cyclic graphs
+        // Pull reset from all the upstream nodes.
+        for (auto &port : mInputPorts) {
+            port.get().pullReset();
+        }
+        mBlockRecursion = false;
+        reset();
+    }
+}
+
+void FlowGraphNode::reset() {
+    mLastFrameCount = 0;
+    mLastCallCount = kInitialCallCount;
+}
+
+/***************************************************************************/
+FlowGraphPortFloat::FlowGraphPortFloat(FlowGraphNode &parent,
+                               int32_t samplesPerFrame,
+                               int32_t framesPerBuffer)
+        : FlowGraphPort(parent, samplesPerFrame)
+        , mFramesPerBuffer(framesPerBuffer)
+        , mBuffer(nullptr) {
+    size_t numFloats = framesPerBuffer * getSamplesPerFrame();
+    mBuffer = std::make_unique<float[]>(numFloats);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatOutput::pullData(int64_t callCount, int32_t numFrames) {
+    numFrames = std::min(getFramesPerBuffer(), numFrames);
+    return mContainingNode.pullData(numFrames, callCount);
+}
+
+void FlowGraphPortFloatOutput::pullReset() {
+    mContainingNode.pullReset();
+}
+
+// These need to be in the .cpp file because of forward cross references.
+void FlowGraphPortFloatOutput::connect(FlowGraphPortFloatInput *port) {
+    port->connect(this);
+}
+
+void FlowGraphPortFloatOutput::disconnect(FlowGraphPortFloatInput *port) {
+    port->disconnect(this);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatInput::pullData(int64_t callCount, int32_t numFrames) {
+    return (mConnected == nullptr)
+            ? std::min(getFramesPerBuffer(), numFrames)
+            : mConnected->pullData(callCount, numFrames);
+}
+void FlowGraphPortFloatInput::pullReset() {
+    if (mConnected != nullptr) mConnected->pullReset();
+}
+
+float *FlowGraphPortFloatInput::getBuffer() {
+    if (mConnected == nullptr) {
+        return FlowGraphPortFloat::getBuffer(); // loaded using setValue()
+    } else {
+        return mConnected->getBuffer();
+    }
+}
+
+int32_t FlowGraphSink::pullData(int32_t numFrames) {
+    return FlowGraphNode::pullData(numFrames, getLastCallCount() + 1);
+}
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.h b/media/libaaudio/src/flowgraph/FlowGraphNode.h
new file mode 100644
index 0000000..69c83dd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * FlowGraphNode.h
+ *
+ * Processing node and ports that can be used in a simple data flow graph.
+ * This was designed to work with audio but could be used for other
+ * types of data.
+ */
+
+#ifndef FLOWGRAPH_FLOW_GRAPH_NODE_H
+#define FLOWGRAPH_FLOW_GRAPH_NODE_H
+
+#include <cassert>
+#include <cstring>
+#include <math.h>
+#include <memory>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <vector>
+
+// TODO Move these classes into separate files.
+// TODO Review use of raw pointers for connect(). Maybe use smart pointers but need to avoid
+//      run-time deallocation in audio thread.
+
+// Set this to 1 if using it inside the Android framework.
+// This code is kept here so that it can be moved easily between Oboe and AAudio.
+#ifndef FLOWGRAPH_ANDROID_INTERNAL
+#define FLOWGRAPH_ANDROID_INTERNAL 0
+#endif
+
+namespace flowgraph {
+
+// Default block size that can be overridden when the FlowGraphPortFloat is created.
+// If it is too small then we will have too much overhead from switching between nodes.
+// If it is too high then we will thrash the caches.
+constexpr int kDefaultBufferSize = 8; // arbitrary
+
+class FlowGraphPort;
+class FlowGraphPortFloatInput;
+
+/***************************************************************************/
+/**
+ * Base class for all nodes in the flowgraph.
+ */
+class FlowGraphNode {
+public:
+    FlowGraphNode() = default;
+    virtual ~FlowGraphNode() = default;
+
+    /**
+     * Read from the input ports,
+     * generate multiple frames of data then write the results to the output ports.
+     *
+     * @param numFrames maximum number of frames requested for processing
+     * @return number of frames actually processed
+     */
+    virtual int32_t onProcess(int32_t numFrames) = 0;
+
+    /**
+     * If the callCount is greater than the last callCount then call
+     * pullData on all of the upstream nodes.
+     * Then call onProcess().
+     * This prevents infinite recursion in case of cyclic graphs.
+     * It also prevents nodes upstream from a branch from being executed twice.
+     *
+     * @param callCount
+     * @param numFrames
+     * @return number of frames valid
+     */
+    int32_t pullData(int32_t numFrames, int64_t callCount);
+
+    /**
+     * Recursively reset all the nodes in the graph, starting from a Sink.
+     *
+     * This must not be called at the same time as pullData!
+     */
+    void pullReset();
+
+    /**
+     * Reset framePosition counters.
+     */
+    virtual void reset();
+
+    void addInputPort(FlowGraphPort &port) {
+        mInputPorts.emplace_back(port);
+    }
+
+    bool isDataPulledAutomatically() const {
+        return mDataPulledAutomatically;
+    }
+
+    /**
+     * Set true if you want the data pulled through the graph automatically.
+     * This is the default.
+     *
+     * Set false if you want to pull the data from the input ports in the onProcess() method.
+     * You might do this, for example, in a sample rate converting node.
+     *
+     * @param automatic
+     */
+    void setDataPulledAutomatically(bool automatic) {
+        mDataPulledAutomatically = automatic;
+    }
+
+    virtual const char *getName() {
+        return "FlowGraph";
+    }
+
+    int64_t getLastCallCount() {
+        return mLastCallCount;
+    }
+
+protected:
+
+    static constexpr int64_t  kInitialCallCount = -1;
+    int64_t  mLastCallCount = kInitialCallCount;
+
+    std::vector<std::reference_wrapper<FlowGraphPort>> mInputPorts;
+
+private:
+    bool     mDataPulledAutomatically = true;
+    bool     mBlockRecursion = false;
+    int32_t  mLastFrameCount = 0;
+
+};
+
+/***************************************************************************/
+/**
+  * This is a connector that allows data to flow between modules.
+  *
+  * The ports are the primary means of interacting with a module.
+  * So they are generally declared as public.
+  *
+  */
+class FlowGraphPort {
+public:
+    FlowGraphPort(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : mContainingNode(parent)
+            , mSamplesPerFrame(samplesPerFrame) {
+    }
+
+    virtual ~FlowGraphPort() = default;
+
+    // Ports are often declared public. So let's make them non-copyable.
+    FlowGraphPort(const FlowGraphPort&) = delete;
+    FlowGraphPort& operator=(const FlowGraphPort&) = delete;
+
+    int32_t getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    virtual int32_t pullData(int64_t framePosition, int32_t numFrames) = 0;
+
+    virtual void pullReset() {}
+
+protected:
+    FlowGraphNode &mContainingNode;
+
+private:
+    const int32_t    mSamplesPerFrame = 1;
+};
+
+/***************************************************************************/
+/**
+ * This port contains a 32-bit float buffer that can contain several frames of data.
+ * Processing the data in a block improves performance.
+ *
+ * The size is framesPerBuffer * samplesPerFrame.
+ */
+class FlowGraphPortFloat  : public FlowGraphPort {
+public:
+    FlowGraphPortFloat(FlowGraphNode &parent,
+                   int32_t samplesPerFrame,
+                   int32_t framesPerBuffer = kDefaultBufferSize
+                );
+
+    virtual ~FlowGraphPortFloat() = default;
+
+    int32_t getFramesPerBuffer() const {
+        return mFramesPerBuffer;
+    }
+
+protected:
+
+    /**
+     * @return buffer internal to the port or from a connected port
+     */
+    virtual float *getBuffer() {
+        return mBuffer.get();
+    }
+
+private:
+    const int32_t    mFramesPerBuffer = 1;
+    std::unique_ptr<float[]> mBuffer; // allocated in constructor
+};
+
+/***************************************************************************/
+/**
+  * The results of a node's processing are stored in the buffers of the output ports.
+  */
+class FlowGraphPortFloatOutput : public FlowGraphPortFloat {
+public:
+    FlowGraphPortFloatOutput(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : FlowGraphPortFloat(parent, samplesPerFrame) {
+    }
+
+    virtual ~FlowGraphPortFloatOutput() = default;
+
+    using FlowGraphPortFloat::getBuffer;
+
+    /**
+     * Connect to the input of another module.
+     * An input port can only have one connection.
+     * An output port can have multiple connections.
+     * If you connect a second output port to an input port
+     * then it overwrites the previous connection.
+     *
+     * This is not thread safe. Do not modify the graph topology from another thread while running.
+     * Also do not delete a module while it is connected to another port if the graph is running.
+     */
+    void connect(FlowGraphPortFloatInput *port);
+
+    /**
+     * Disconnect from the input of another module.
+     * This is not thread safe.
+     */
+    void disconnect(FlowGraphPortFloatInput *port);
+
+    /**
+     * Call the parent module's onProcess() method.
+     * That may pull data from its inputs and recursively
+     * process the entire graph.
+     * @return number of frames actually pulled
+     */
+    int32_t pullData(int64_t framePosition, int32_t numFrames) override;
+
+
+    void pullReset() override;
+
+};
+
+/***************************************************************************/
+
+/**
+ * An input port for streaming audio data.
+ * You can set a value that will be used for processing.
+ * If you connect an output port to this port then its value will be used instead.
+ */
+class FlowGraphPortFloatInput : public FlowGraphPortFloat {
+public:
+    FlowGraphPortFloatInput(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : FlowGraphPortFloat(parent, samplesPerFrame) {
+        // Add to parent so it can pull data from each input.
+        parent.addInputPort(*this);
+    }
+
+    virtual ~FlowGraphPortFloatInput() = default;
+
+    /**
+     * If connected to an output port then this will return
+     * that output port's buffer.
+     * If not connected then it returns the input port's own buffer,
+     * which can be loaded using setValue().
+     */
+    float *getBuffer() override;
+
+    /**
+     * Write every value of the float buffer.
+     * This value will be ignored if an output port is connected
+     * to this port.
+     */
+    void setValue(float value) {
+        int numFloats = kDefaultBufferSize * getSamplesPerFrame();
+        float *buffer = getBuffer();
+        for (int i = 0; i < numFloats; i++) {
+            *buffer++ = value;
+        }
+    }
+
+    /**
+     * Connect to the output of another module.
+     * An input port can only have one connection.
+     * An output port can have multiple connections.
+     * This is not thread safe.
+     */
+    void connect(FlowGraphPortFloatOutput *port) {
+        assert(getSamplesPerFrame() == port->getSamplesPerFrame());
+        mConnected = port;
+    }
+
+    void disconnect(FlowGraphPortFloatOutput *port) {
+        assert(mConnected == port);
+        (void) port;
+        mConnected = nullptr;
+    }
+
+    void disconnect() {
+        mConnected = nullptr;
+    }
+
+    /**
+     * Pull data from any output port that is connected.
+     */
+    int32_t pullData(int64_t framePosition, int32_t numFrames) override;
+
+    void pullReset() override;
+
+private:
+    FlowGraphPortFloatOutput *mConnected = nullptr;
+};
+
+/***************************************************************************/
+
+/**
+ * Base class for an edge node in a graph that has no upstream nodes.
+ * It outputs data but does not consume data.
+ * By default, it will read its data from an external buffer.
+ */
+class FlowGraphSource : public FlowGraphNode {
+public:
+    explicit FlowGraphSource(int32_t channelCount)
+            : output(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphSource() = default;
+
+    FlowGraphPortFloatOutput output;
+};
+
+/***************************************************************************/
+
+/**
+ * A source node that has no upstream nodes.
+ * It outputs data but does not consume data,
+ * reading it from an external buffer provided through setData().
+ */
+class FlowGraphSourceBuffered : public FlowGraphSource {
+public:
+    explicit FlowGraphSourceBuffered(int32_t channelCount)
+            : FlowGraphSource(channelCount) {}
+
+    virtual ~FlowGraphSourceBuffered() = default;
+
+    /**
+     * Specify buffer that the node will read from.
+     *
+     * @param data TODO Consider using std::shared_ptr.
+     * @param numFrames
+     */
+    void setData(const void *data, int32_t numFrames) {
+        mData = data;
+        mSizeInFrames = numFrames;
+        mFrameIndex = 0;
+    }
+
+protected:
+    const void *mData = nullptr;
+    int32_t     mSizeInFrames = 0; // number of frames in mData
+    int32_t     mFrameIndex = 0; // index of next frame to be processed
+};
+
+/***************************************************************************/
+/**
+ * Base class for an edge node in a graph that has no downstream nodes.
+ * It consumes data but does not output data.
+ * This graph will be executed when data is read() from this node
+ * by pulling data from upstream nodes.
+ */
+class FlowGraphSink : public FlowGraphNode {
+public:
+    explicit FlowGraphSink(int32_t channelCount)
+            : input(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphSink() = default;
+
+    FlowGraphPortFloatInput input;
+
+    /**
+     * Do nothing. The work happens in the read() method.
+     *
+     * @param numFrames
+     * @return number of frames actually processed
+     */
+    int32_t onProcess(int32_t numFrames) override {
+        return numFrames;
+    }
+
+    virtual int32_t read(void *data, int32_t numFrames) = 0;
+
+protected:
+    /**
+     * Pull data through the graph using this node's last callCount.
+     * @param numFrames
+     * @return
+     */
+    int32_t pullData(int32_t numFrames);
+};
+
+/***************************************************************************/
+/**
+ * Base class for a node that has an input and an output with the same number of channels.
+ * This may include traditional filters, eg. FIR, but also include
+ * any processing node that converts input to output.
+ */
+class FlowGraphFilter : public FlowGraphNode {
+public:
+    explicit FlowGraphFilter(int32_t channelCount)
+            : input(*this, channelCount)
+            , output(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphFilter() = default;
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
+};
+
+} /* namespace flowgraph */
+
+#endif /* FLOWGRAPH_FLOW_GRAPH_NODE_H */
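
The callCount guard in FlowGraphNode::pullData() is what keeps a node that fans out to several downstream inputs from being processed more than once per pull. A simplified, self-contained sketch of just that guard (MiniNode is illustrative, not the real class):

    #include <cstdint>

    struct MiniNode {
        int64_t lastCallCount = -1;   // matches kInitialCallCount
        int32_t lastFrameCount = 0;
        int32_t processedRuns = 0;

        int32_t onProcess(int32_t numFrames) {
            processedRuns++;          // stand-in for real processing
            return numFrames;
        }

        int32_t pullData(int32_t numFrames, int64_t callCount) {
            if (callCount > lastCallCount) {
                lastCallCount = callCount;
                lastFrameCount = onProcess(numFrames);
            }
            return lastFrameCount;    // repeated pulls reuse the cached result
        }
    };

    // MiniNode n;
    // n.pullData(8, 1); n.pullData(8, 1);   // processedRuns == 1
    // n.pullData(8, 2);                     // processedRuns == 2
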
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
new file mode 100644
index 0000000..879685e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "ManyToMultiConverter.h"
+
+using namespace flowgraph;
+
+ManyToMultiConverter::ManyToMultiConverter(int32_t channelCount)
+        : inputs(channelCount)
+        , output(*this, channelCount) {
+    for (int i = 0; i < channelCount; i++) {
+        inputs[i] = std::make_unique<FlowGraphPortFloatInput>(*this, 1);
+    }
+}
+
+int32_t ManyToMultiConverter::onProcess(int32_t numFrames) {
+    int32_t channelCount = output.getSamplesPerFrame();
+
+    for (int ch = 0; ch < channelCount; ch++) {
+        const float *inputBuffer = inputs[ch]->getBuffer();
+        float *outputBuffer = output.getBuffer() + ch;
+
+        for (int i = 0; i < numFrames; i++) {
+            // read one, write into the proper interleaved output channel
+            float sample = *inputBuffer++;
+            *outputBuffer = sample;
+            outputBuffer += channelCount; // advance to next multichannel frame
+        }
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.h b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
new file mode 100644
index 0000000..c7460ff
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+#define FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Combine multiple mono inputs into one interleaved multi-channel output.
+ */
+class ManyToMultiConverter : public flowgraph::FlowGraphNode {
+public:
+    explicit ManyToMultiConverter(int32_t channelCount);
+
+    virtual ~ManyToMultiConverter() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    void setEnabled(bool /*enabled*/) {}
+
+    std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatInput>> inputs;
+    flowgraph::FlowGraphPortFloatOutput output;
+
+    const char *getName() override {
+        return "ManyToMultiConverter";
+    }
+
+private:
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
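
A standalone sketch of the interleaving that ManyToMultiConverter::onProcess() performs, using plain buffers in place of flowgraph ports:

    #include <cstdint>
    #include <vector>

    // One mono buffer per channel in, a single interleaved buffer out.
    static void interleave(const std::vector<const float*>& monoInputs,
                           float* interleavedOut, int32_t numFrames) {
        const int32_t channelCount = static_cast<int32_t>(monoInputs.size());
        for (int32_t ch = 0; ch < channelCount; ch++) {
            const float* in = monoInputs[ch];
            float* out = interleavedOut + ch;
            for (int32_t i = 0; i < numFrames; i++) {
                *out = *in++;
                out += channelCount;   // step to the same channel of the next frame
            }
        }
    }
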
diff --git a/media/libaaudio/src/flowgraph/MonoBlend.cpp b/media/libaaudio/src/flowgraph/MonoBlend.cpp
new file mode 100644
index 0000000..62e2809
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MonoBlend.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "MonoBlend.h"
+
+using namespace flowgraph;
+
+MonoBlend::MonoBlend(int32_t channelCount)
+        : FlowGraphFilter(channelCount)
+        , mInvChannelCount(1. / channelCount)
+{
+}
+
+int32_t MonoBlend::onProcess(int32_t numFrames) {
+    int32_t channelCount = output.getSamplesPerFrame();
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+
+    for (int32_t i = 0; i < numFrames; ++i) {
+        float accum = 0;
+        for (int32_t j = 0; j < channelCount; ++j) {
+            accum += *inputBuffer++;
+        }
+        accum *= mInvChannelCount;
+        for (int32_t j = 0; j < channelCount; ++j) {
+            *outputBuffer++ = accum;
+        }
+    }
+
+    return numFrames;
+}
\ No newline at end of file
diff --git a/media/libaaudio/src/flowgraph/MonoBlend.h b/media/libaaudio/src/flowgraph/MonoBlend.h
new file mode 100644
index 0000000..7e3c35b
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MonoBlend.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MONO_BLEND_H
+#define FLOWGRAPH_MONO_BLEND_H
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Blend a multi-channel stream so that every output channel carries
+ * the average of all input channels.
+ */
+class MonoBlend : public FlowGraphFilter {
+public:
+    explicit MonoBlend(int32_t channelCount);
+
+    virtual ~MonoBlend() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "MonoBlend";
+    }
+private:
+    const float mInvChannelCount;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MONO_BLEND_H
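
A standalone sketch of MonoBlend's per-frame math on a plain interleaved buffer: every output channel receives the average of the input channels, so a stereo frame {0.2f, 0.6f} becomes {0.4f, 0.4f}.

    #include <cstdint>

    static void monoBlend(const float* in, float* out,
                          int32_t numFrames, int32_t channelCount) {
        const float invChannelCount = 1.0f / channelCount;
        for (int32_t i = 0; i < numFrames; i++) {
            float accum = 0.0f;
            for (int32_t ch = 0; ch < channelCount; ch++) {
                accum += *in++;       // sum the channels of this frame
            }
            accum *= invChannelCount;
            for (int32_t ch = 0; ch < channelCount; ch++) {
                *out++ = accum;       // write the average to every channel
            }
        }
    }
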
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
index 78aad52..c8d60b9 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
@@ -14,34 +14,28 @@
  * limitations under the License.
  */
 
-
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "MonoToMultiConverter.h"
 
 using namespace flowgraph;
 
-MonoToMultiConverter::MonoToMultiConverter(int32_t channelCount)
+MonoToMultiConverter::MonoToMultiConverter(int32_t outputChannelCount)
         : input(*this, 1)
-        , output(*this, channelCount) {
+        , output(*this, outputChannelCount) {
 }
 
-MonoToMultiConverter::~MonoToMultiConverter() { }
-
-int32_t MonoToMultiConverter::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t MonoToMultiConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
-    // TODO maybe move to audio_util as audio_mono_to_multi()
-    for (int i = 0; i < framesToProcess; i++) {
+    for (int i = 0; i < numFrames; i++) {
         // read one, write many
         float sample = *inputBuffer++;
         for (int channel = 0; channel < channelCount; channel++) {
             *outputBuffer++ = sample;
         }
     }
-    return framesToProcess;
+    return numFrames;
 }
 
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
index 34d53c7..6e87ccb 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
@@ -14,27 +14,34 @@
  * limitations under the License.
  */
 
-
 #ifndef FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
 #define FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
 
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class MonoToMultiConverter : public AudioProcessorBase {
+/**
+ * Convert a monophonic stream to a multi-channel interleaved stream
+ * with the same signal on each channel.
+ */
+class MonoToMultiConverter : public FlowGraphNode {
 public:
-    explicit MonoToMultiConverter(int32_t channelCount);
+    explicit MonoToMultiConverter(int32_t outputChannelCount);
 
-    virtual ~MonoToMultiConverter();
+    virtual ~MonoToMultiConverter() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "MonoToMultiConverter";
+    }
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp b/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp
new file mode 100644
index 0000000..f074364
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "MultiToManyConverter.h"
+
+using namespace flowgraph;
+
+MultiToManyConverter::MultiToManyConverter(int32_t channelCount)
+        : outputs(channelCount)
+        , input(*this, channelCount) {
+    for (int i = 0; i < channelCount; i++) {
+        outputs[i] = std::make_unique<FlowGraphPortFloatOutput>(*this, 1);
+    }
+}
+
+MultiToManyConverter::~MultiToManyConverter() = default;
+
+int32_t MultiToManyConverter::onProcess(int32_t numFrames) {
+    int32_t channelCount = input.getSamplesPerFrame();
+
+    for (int ch = 0; ch < channelCount; ch++) {
+        const float *inputBuffer = input.getBuffer() + ch;
+        float *outputBuffer = outputs[ch]->getBuffer();
+
+        for (int i = 0; i < numFrames; i++) {
+            *outputBuffer++ = *inputBuffer;
+            inputBuffer += channelCount;
+        }
+    }
+
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/MultiToManyConverter.h b/media/libaaudio/src/flowgraph/MultiToManyConverter.h
new file mode 100644
index 0000000..de31475
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToManyConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
+#define FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Convert a multi-channel interleaved stream to multiple mono-channel
+ * outputs.
+ */
+class MultiToManyConverter : public FlowGraphNode {
+public:
+    explicit MultiToManyConverter(int32_t channelCount);
+
+    virtual ~MultiToManyConverter();
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "MultiToManyConverter";
+    }
+
+    std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatOutput>> outputs;
+    flowgraph::FlowGraphPortFloatInput input;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
new file mode 100644
index 0000000..c745108
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "MultiToMonoConverter.h"
+
+using namespace flowgraph;
+
+MultiToMonoConverter::MultiToMonoConverter(int32_t inputChannelCount)
+        : input(*this, inputChannelCount)
+        , output(*this, 1) {
+}
+
+MultiToMonoConverter::~MultiToMonoConverter() = default;
+
+int32_t MultiToMonoConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+    int32_t channelCount = input.getSamplesPerFrame();
+    for (int i = 0; i < numFrames; i++) {
+        // read the first channel of each multi-channel frame, write it to the mono output
+        *outputBuffer++ = *inputBuffer;
+        inputBuffer += channelCount;
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.h b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
new file mode 100644
index 0000000..37c53bd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+#define FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Convert a multi-channel interleaved stream to a monophonic stream
+ * by extracting channel[0].
+ */
+class MultiToMonoConverter : public FlowGraphNode {
+public:
+    explicit MultiToMonoConverter(int32_t inputChannelCount);
+
+    virtual ~MultiToMonoConverter();
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "MultiToMonoConverter";
+    }
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/RampLinear.cpp b/media/libaaudio/src/flowgraph/RampLinear.cpp
index a260828..905ae07 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.cpp
+++ b/media/libaaudio/src/flowgraph/RampLinear.cpp
@@ -14,20 +14,15 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "RampLinear"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "RampLinear.h"
 
 using namespace flowgraph;
 
 RampLinear::RampLinear(int32_t channelCount)
-        : input(*this, channelCount)
-        , output(*this, channelCount) {
+        : FlowGraphFilter(channelCount) {
     mTarget.store(1.0f);
 }
 
@@ -37,16 +32,19 @@
 
 void RampLinear::setTarget(float target) {
     mTarget.store(target);
+    // If the ramp has not been used then start immediately at this level.
+    if (mLastCallCount == kInitialCallCount) {
+        forceCurrent(target);
+    }
 }
 
 float RampLinear::interpolateCurrent() {
     return mLevelTo - (mRemaining * mScaler);
 }
 
-int32_t RampLinear::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t RampLinear::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     float target = getTarget();
@@ -55,12 +53,10 @@
         mLevelFrom = interpolateCurrent();
         mLevelTo = target;
         mRemaining = mLengthInFrames;
-        ALOGV("%s() mLevelFrom = %f, mLevelTo = %f, mRemaining = %d, mScaler = %f",
-              __func__, mLevelFrom, mLevelTo, mRemaining, mScaler);
         mScaler = (mLevelTo - mLevelFrom) / mLengthInFrames; // for interpolation
     }
 
-    int32_t framesLeft = framesToProcess;
+    int32_t framesLeft = numFrames;
 
     if (mRemaining > 0) { // Ramping? This doesn't happen very often.
         int32_t framesToRamp = std::min(framesLeft, mRemaining);
@@ -81,5 +77,5 @@
         *outputBuffer++ = *inputBuffer++ * mLevelTo;
     }
 
-    return framesToProcess;
+    return numFrames;
 }
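
RampLinear's onProcess() above interpolates the gain as mLevelTo - mRemaining * mScaler, recomputing mScaler whenever the target changes and counting mRemaining down to zero. A minimal single-value sketch of that interpolation, assuming a fixed ramp length (class and member names here are illustrative):

    #include <cstdint>

    class ScalarRamp {
    public:
        explicit ScalarRamp(int32_t lengthInFrames) : mLengthInFrames(lengthInFrames) {}

        void setTarget(float target) {
            mLevelFrom = current();                   // start from wherever the ramp is now
            mLevelTo = target;
            mRemaining = mLengthInFrames;
            mScaler = (mLevelTo - mLevelFrom) / mLengthInFrames;  // per-frame increment
        }

        float next() {                                // gain to apply to the next frame
            if (mRemaining > 0) --mRemaining;
            return current();
        }

    private:
        float current() const { return mLevelTo - (mRemaining * mScaler); }

        int32_t mLengthInFrames;
        int32_t mRemaining = 0;
        float mLevelFrom = 0.0f;
        float mLevelTo = 0.0f;
        float mScaler = 0.0f;
    };
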
diff --git a/media/libaaudio/src/flowgraph/RampLinear.h b/media/libaaudio/src/flowgraph/RampLinear.h
index bdc8f41..f285704 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.h
+++ b/media/libaaudio/src/flowgraph/RampLinear.h
@@ -21,17 +21,25 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class RampLinear : public AudioProcessorBase {
+/**
+ * When the target is modified then the output will ramp smoothly
+ * between the original and the new target value.
+ * This can be used to smooth out control values and reduce pops.
+ *
+ * The target may be updated while a ramp is in progress, which will trigger
+ * a new ramp from the current value.
+ */
+class RampLinear : public FlowGraphFilter {
 public:
     explicit RampLinear(int32_t channelCount);
 
     virtual ~RampLinear() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
     /**
      * This is used for the next ramp.
@@ -66,8 +74,9 @@
         mLevelTo = level;
     }
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "RampLinear";
+    }
 
 private:
 
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.cpp b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
new file mode 100644
index 0000000..5c3ed1f
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SampleRateConverter.h"
+
+using namespace flowgraph;
+using namespace resampler;
+
+SampleRateConverter::SampleRateConverter(int32_t channelCount, MultiChannelResampler &resampler)
+        : FlowGraphFilter(channelCount)
+        , mResampler(resampler) {
+    setDataPulledAutomatically(false);
+}
+
+void SampleRateConverter::reset() {
+    FlowGraphNode::reset();
+    mInputCursor = kInitialCallCount;
+}
+
+// Return true if there is a sample available.
+bool SampleRateConverter::isInputAvailable() {
+    // If we have consumed all of the input data then go out and get some more.
+    if (mInputCursor >= mNumValidInputFrames) {
+        mInputCallCount++;
+        mNumValidInputFrames = input.pullData(mInputCallCount, input.getFramesPerBuffer());
+        mInputCursor = 0;
+    }
+    return (mInputCursor < mNumValidInputFrames);
+}
+
+const float *SampleRateConverter::getNextInputFrame() {
+    const float *inputBuffer = input.getBuffer();
+    return &inputBuffer[mInputCursor++ * input.getSamplesPerFrame()];
+}
+
+int32_t SampleRateConverter::onProcess(int32_t numFrames) {
+    float *outputBuffer = output.getBuffer();
+    int32_t channelCount = output.getSamplesPerFrame();
+    int framesLeft = numFrames;
+    while (framesLeft > 0) {
+        // Gather input samples as needed.
+        if (mResampler.isWriteNeeded()) {
+            if (isInputAvailable()) {
+                const float *frame = getNextInputFrame();
+                mResampler.writeNextFrame(frame);
+            } else {
+                break;
+            }
+        } else {
+            // Output frame is interpolated from input samples.
+            mResampler.readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            framesLeft--;
+        }
+    }
+    return numFrames - framesLeft;
+}
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.h b/media/libaaudio/src/flowgraph/SampleRateConverter.h
new file mode 100644
index 0000000..57d76a4
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SAMPLE_RATE_CONVERTER_H
+#define OBOE_SAMPLE_RATE_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+#include "resampler/MultiChannelResampler.h"
+
+namespace flowgraph {
+
+class SampleRateConverter : public FlowGraphFilter {
+public:
+    explicit SampleRateConverter(int32_t channelCount,
+                                 resampler::MultiChannelResampler &resampler);
+
+    virtual ~SampleRateConverter() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SampleRateConverter";
+    }
+
+    void reset() override;
+
+private:
+
+    // Return true if there is a sample available.
+    bool isInputAvailable();
+
+    // This assumes data is available. Only call after calling isInputAvailable().
+    const float *getNextInputFrame();
+
+    resampler::MultiChannelResampler &mResampler;
+
+    int32_t mInputCursor = 0;         // offset into the input port buffer
+    int32_t mNumValidInputFrames = 0; // number of valid frames currently in the input port buffer
+    // We need our own callCount for upstream calls because calls occur at a different rate.
+    // This means we cannot have cyclic graphs or merges that contain an SRC.
+    int64_t mInputCallCount = 0;
+
+};
+
+} /* namespace flowgraph */
+
+#endif //OBOE_SAMPLE_RATE_CONVERTER_H
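
SampleRateConverter::onProcess() above alternates between feeding the resampler one input frame when isWriteNeeded() is true and reading one interpolated output frame otherwise, pulling more input through its own call count whenever the buffered frames run out. A simplified sketch of the same write/read protocol driven directly from caller-owned buffers (the wrapper function and its parameters are illustrative):

    #include <cstdint>
    #include "resampler/MultiChannelResampler.h"

    using resampler::MultiChannelResampler;

    // Resample one block of interleaved float frames; returns frames produced.
    int32_t resampleBlock(MultiChannelResampler &resampler,
                          const float *input, int32_t inputFrames,
                          float *output, int32_t outputCapacity) {
        const int32_t channelCount = resampler.getChannelCount();
        int32_t framesIn = 0;
        int32_t framesOut = 0;
        while (framesOut < outputCapacity) {
            if (resampler.isWriteNeeded()) {
                if (framesIn >= inputFrames) break;   // out of input, stop early
                resampler.writeNextFrame(&input[framesIn * channelCount]);
                ++framesIn;
            } else {
                resampler.readNextFrame(&output[framesOut * channelCount]);
                ++framesOut;
            }
        }
        return framesOut;
    }
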
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.cpp b/media/libaaudio/src/flowgraph/SinkFloat.cpp
index fb3dcbc..0588848 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SinkFloat.cpp
@@ -16,31 +16,31 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SinkFloat.h"
 
 using namespace flowgraph;
 
 SinkFloat::SinkFloat(int32_t channelCount)
-        : AudioSink(channelCount) {
+        : FlowGraphSink(channelCount) {
 }
 
 int32_t SinkFloat::read(void *data, int32_t numFrames) {
     float *floatData = (float *) data;
-    int32_t channelCount = input.getSamplesPerFrame();
+    const int32_t channelCount = input.getSamplesPerFrame();
 
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
-        if (framesRead <= 0) {
+        int32_t framesPulled = pullData(framesLeft);
+        if (framesPulled <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
-        int32_t numSamples = framesRead * channelCount;
+        const float *signal = input.getBuffer();
+        int32_t numSamples = framesPulled * channelCount;
         memcpy(floatData, signal, numSamples * sizeof(float));
         floatData += numSamples;
-        framesLeft -= framesRead;
+        framesLeft -= framesPulled;
     }
     return numFrames - framesLeft;
 }
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.h b/media/libaaudio/src/flowgraph/SinkFloat.h
index 7775c08..c812373 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.h
+++ b/media/libaaudio/src/flowgraph/SinkFloat.h
@@ -21,16 +21,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkFloat : public AudioSink {
+/**
+ * AudioSink that lets you read data as 32-bit floats.
+ */
+class SinkFloat : public FlowGraphSink {
 public:
     explicit SinkFloat(int32_t channelCount);
+    ~SinkFloat() override = default;
 
     int32_t read(void *data, int32_t numFrames) override;
 
+    const char *getName() override {
+        return "SinkFloat";
+    }
 };
 
 } /* namespace flowgraph */
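
SinkFloat::read() above keeps calling pullData() until the request is satisfied or the upstream delivers nothing, so a caller can simply loop on read() to drain a graph. A small usage sketch, assuming the sink is already connected to a source and the channel count is known to the caller (names are illustrative):

    #include <cstdint>
    #include <vector>
    #include "SinkFloat.h"

    std::vector<float> drainSink(flowgraph::SinkFloat &sink,
                                 int32_t channelCount, int32_t framesPerChunk) {
        std::vector<float> all;
        std::vector<float> chunk(framesPerChunk * channelCount);
        while (true) {
            // read() runs the graph and converts whatever arrives to float.
            int32_t framesRead = sink.read(chunk.data(), framesPerChunk);
            if (framesRead <= 0) break;               // upstream has no more data
            all.insert(all.end(), chunk.begin(),
                       chunk.begin() + framesRead * channelCount);
        }
        return all;
    }
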
diff --git a/media/libaaudio/src/flowgraph/SinkI16.cpp b/media/libaaudio/src/flowgraph/SinkI16.cpp
index ffec8f5..da7fd6b 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI16.cpp
@@ -17,17 +17,16 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#include "SinkI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SinkI16.h"
-
 using namespace flowgraph;
 
 SinkI16::SinkI16(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI16::read(void *data, int32_t numFrames) {
     int16_t *shortData = (int16_t *) data;
@@ -36,13 +35,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
+        const float *signal = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_i16_from_float(shortData, signal, numSamples);
         shortData += numSamples;
         signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI16.h b/media/libaaudio/src/flowgraph/SinkI16.h
index 6d86266..1e1ce3a 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.h
+++ b/media/libaaudio/src/flowgraph/SinkI16.h
@@ -20,15 +20,22 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI16 : public AudioSink {
+/**
+ * AudioSink that lets you read data as 16-bit signed integers.
+ */
+class SinkI16 : public FlowGraphSink {
 public:
     explicit SinkI16(int32_t channelCount);
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI16";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI24.cpp b/media/libaaudio/src/flowgraph/SinkI24.cpp
index 0cb077d..a9fb5d2 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI24.cpp
@@ -15,19 +15,20 @@
  */
 
 #include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
 
-#ifdef __ANDROID__
+
+#include "FlowGraphNode.h"
+#include "SinkI24.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SinkI24.h"
-
 using namespace flowgraph;
 
 SinkI24::SinkI24(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI24::read(void *data, int32_t numFrames) {
     uint8_t *byteData = (uint8_t *) data;
@@ -36,13 +37,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *floatData = input.getBlock();
+        const float *floatData = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_p24_from_float(byteData, floatData, numSamples);
         static const int kBytesPerI24Packed = 3;
         byteData += numSamples * kBytesPerI24Packed;
diff --git a/media/libaaudio/src/flowgraph/SinkI24.h b/media/libaaudio/src/flowgraph/SinkI24.h
index 5b9b505..44078a9 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.h
+++ b/media/libaaudio/src/flowgraph/SinkI24.h
@@ -20,15 +20,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI24 : public AudioSink {
+/**
+ * AudioSink that lets you read data as packed 24-bit signed integers.
+ * The sample size is 3 bytes.
+ */
+class SinkI24 : public FlowGraphSink {
 public:
     explicit SinkI24(int32_t channelCount);
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI24";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI32.cpp b/media/libaaudio/src/flowgraph/SinkI32.cpp
index eab863d..9fd4e96 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI32.cpp
@@ -14,18 +14,18 @@
  * limitations under the License.
  */
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "FlowgraphUtilities.h"
 #include "SinkI32.h"
 
 using namespace flowgraph;
 
 SinkI32::SinkI32(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI32::read(void *data, int32_t numFrames) {
     int32_t *intData = (int32_t *) data;
@@ -34,13 +34,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
+        const float *signal = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_i32_from_float(intData, signal, numSamples);
         intData += numSamples;
         signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI32.h b/media/libaaudio/src/flowgraph/SinkI32.h
index 09d23b7..7456d5f 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.h
+++ b/media/libaaudio/src/flowgraph/SinkI32.h
@@ -19,16 +19,20 @@
 
 #include <stdint.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI32 : public AudioSink {
+class SinkI32 : public FlowGraphSink {
 public:
     explicit SinkI32(int32_t channelCount);
     ~SinkI32() override = default;
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI32";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.cpp b/media/libaaudio/src/flowgraph/SourceFloat.cpp
index 4bb674f..1b3daf1 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SourceFloat.cpp
@@ -16,23 +16,22 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceFloat.h"
 
 using namespace flowgraph;
 
 SourceFloat::SourceFloat(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceFloat::onProcess(int64_t framePosition, int32_t numFrames) {
+int32_t SourceFloat::onProcess(int32_t numFrames) {
+    float *outputBuffer = output.getBuffer();
+    const int32_t channelCount = output.getSamplesPerFrame();
 
-    float *outputBuffer = output.getBlock();
-    int32_t channelCount = output.getSamplesPerFrame();
-
-    int32_t framesLeft = mSizeInFrames - mFrameIndex;
-    int32_t framesToProcess = std::min(numFrames, framesLeft);
-    int32_t numSamples = framesToProcess * channelCount;
+    const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+    const int32_t framesToProcess = std::min(numFrames, framesLeft);
+    const int32_t numSamples = framesToProcess * channelCount;
 
     const float *floatBase = (float *) mData;
     const float *floatData = &floatBase[mFrameIndex * channelCount];
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.h b/media/libaaudio/src/flowgraph/SourceFloat.h
index e6eed9f..4719669 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.h
+++ b/media/libaaudio/src/flowgraph/SourceFloat.h
@@ -20,15 +20,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceFloat : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined float data.
+ */
+class SourceFloat : public FlowGraphSourceBuffered {
 public:
     explicit SourceFloat(int32_t channelCount);
+    ~SourceFloat() override = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceFloat";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI16.cpp b/media/libaaudio/src/flowgraph/SourceI16.cpp
index c3fcec2..8813023 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI16.cpp
@@ -17,21 +17,21 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#include "FlowGraphNode.h"
+#include "SourceI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SourceI16.h"
-
 using namespace flowgraph;
 
 SourceI16::SourceI16(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI16::onProcess(int64_t framePosition, int32_t numFrames) {
-    float *floatData = output.getBlock();
+int32_t SourceI16::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -41,7 +41,7 @@
     const int16_t *shortBase = static_cast<const int16_t *>(mData);
     const int16_t *shortData = &shortBase[mFrameIndex * channelCount];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_i16(floatData, shortData, numSamples);
 #else
     for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI16.h b/media/libaaudio/src/flowgraph/SourceI16.h
index 2b116cf..fe440b2 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.h
+++ b/media/libaaudio/src/flowgraph/SourceI16.h
@@ -20,15 +20,21 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
-
-class SourceI16 : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined 16-bit integer data.
+ */
+class SourceI16 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI16(int32_t channelCount);
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceI16";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI24.cpp b/media/libaaudio/src/flowgraph/SourceI24.cpp
index 097954e..1975878 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI24.cpp
@@ -15,13 +15,13 @@
  */
 
 #include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceI24.h"
 
 using namespace flowgraph;
@@ -29,11 +29,11 @@
 constexpr int kBytesPerI24Packed = 3;
 
 SourceI24::SourceI24(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI24::onProcess(int64_t framePosition, int32_t numFrames) {
-    float *floatData = output.getBlock();
+int32_t SourceI24::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -43,7 +43,7 @@
     const uint8_t *byteBase = (uint8_t *) mData;
     const uint8_t *byteData = &byteBase[mFrameIndex * channelCount * kBytesPerI24Packed];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_p24(floatData, byteData, numSamples);
 #else
     static const float scale = 1. / (float)(1UL << 31);
diff --git a/media/libaaudio/src/flowgraph/SourceI24.h b/media/libaaudio/src/flowgraph/SourceI24.h
index 2ed6f18..3779534 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.h
+++ b/media/libaaudio/src/flowgraph/SourceI24.h
@@ -17,17 +17,25 @@
 #ifndef FLOWGRAPH_SOURCE_I24_H
 #define FLOWGRAPH_SOURCE_I24_H
 
-#include <stdint.h>
+#include <unistd.h>
+#include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceI24 : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined 24-bit packed integer data.
+ */
+class SourceI24 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI24(int32_t channelCount);
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceI24";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI32.cpp b/media/libaaudio/src/flowgraph/SourceI32.cpp
index e8177ad..4b2e8c4 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI32.cpp
@@ -17,31 +17,31 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceI32.h"
 
 using namespace flowgraph;
 
 SourceI32::SourceI32(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI32::onProcess(int64_t framePosition, int32_t numFrames) {
-    float *floatData = output.getBlock();
-    int32_t channelCount = output.getSamplesPerFrame();
+int32_t SourceI32::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
+    const int32_t channelCount = output.getSamplesPerFrame();
 
-    int32_t framesLeft = mSizeInFrames - mFrameIndex;
-    int32_t framesToProcess = std::min(numFrames, framesLeft);
-    int32_t numSamples = framesToProcess * channelCount;
+    const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+    const int32_t framesToProcess = std::min(numFrames, framesLeft);
+    const int32_t numSamples = framesToProcess * channelCount;
 
     const int32_t *intBase = static_cast<const int32_t *>(mData);
     const int32_t *intData = &intBase[mFrameIndex * channelCount];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_i32(floatData, intData, numSamples);
 #else
     for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI32.h b/media/libaaudio/src/flowgraph/SourceI32.h
index e50f9be..b4e0d7b 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.h
+++ b/media/libaaudio/src/flowgraph/SourceI32.h
@@ -19,17 +19,20 @@
 
 #include <stdint.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceI32 : public AudioSource {
+class SourceI32 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI32(int32_t channelCount);
     ~SourceI32() override = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
+    const char *getName() override {
+        return "SourceI32";
+    }
 private:
     static constexpr float kScale = 1.0 / (1UL << 31);
 };
diff --git a/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
new file mode 100644
index 0000000..f6479ae
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+#define RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a HyperbolicCosineWindow window centered at 0.
+ * This can be used in place of a Kaiser window.
+ *
+ * The code is based on an anonymous contribution by "a concerned citizen":
+ * https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+ */
+class HyperbolicCosineWindow {
+public:
+    HyperbolicCosineWindow() {
+        setStopBandAttenuation(60);
+    }
+
+    /**
+     * @param attenuation typical values range from 30 to 90 dB
+     * @return beta
+     */
+    double setStopBandAttenuation(double attenuation) {
+        double alpha = ((-325.1e-6 * attenuation + 0.1677) * attenuation) - 3.149;
+        setAlpha(alpha);
+        return alpha;
+    }
+
+    void setAlpha(double alpha) {
+        mAlpha = alpha;
+        mInverseCoshAlpha = 1.0 / cosh(alpha);
+    }
+
+    /**
+     * @param x ranges from -1.0 to +1.0
+     */
+    double operator()(double x) {
+        double x2 = x * x;
+        if (x2 >= 1.0) return 0.0;
+        double w = mAlpha * sqrt(1.0 - x2);
+        return cosh(w) * mInverseCoshAlpha;
+    }
+
+private:
+    double mAlpha = 0.0;
+    double mInverseCoshAlpha = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
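
Written out, the window evaluated by operator() above is, with alpha chosen from the stop-band attenuation A (in dB) by setStopBandAttenuation():

    w(x) = \frac{\cosh\!\left(\alpha\sqrt{1 - x^{2}}\right)}{\cosh(\alpha)} \quad \text{for } |x| < 1, \qquad w(x) = 0 \text{ otherwise}

    \alpha(A) = \left(-325.1\times10^{-6}\,A + 0.1677\right)A - 3.149
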
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
new file mode 100644
index 0000000..4bd75b3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IntegerRatio.h"
+
+using namespace resampler;
+
+// Enough primes to cover the common sample rates.
+static const int kPrimes[] = {
+        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
+        43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
+        101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+        151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199};
+
+void IntegerRatio::reduce() {
+    for (int prime : kPrimes) {
+        if (mNumerator < prime || mDenominator < prime) {
+            break;
+        }
+
+        // Find biggest prime factor for numerator.
+        while (true) {
+            int top = mNumerator / prime;
+            int bottom = mDenominator / prime;
+            if ((top >= 1)
+                && (bottom >= 1)
+                && (top * prime == mNumerator) // divided evenly?
+                && (bottom * prime == mDenominator)) {
+                mNumerator = top;
+                mDenominator = bottom;
+            } else {
+                break;
+            }
+        }
+
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
new file mode 100644
index 0000000..8c044d8
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_INTEGER_RATIO_H
+#define OBOE_INTEGER_RATIO_H
+
+#include <sys/types.h>
+
+namespace resampler {
+
+/**
+ * Represent the ratio of two integers.
+ */
+class IntegerRatio {
+public:
+    IntegerRatio(int32_t numerator, int32_t denominator)
+            : mNumerator(numerator), mDenominator(denominator) {}
+
+    /**
+     * Reduce by removing common prime factors.
+     */
+    void reduce();
+
+    int32_t getNumerator() {
+        return mNumerator;
+    }
+
+    int32_t getDenominator() {
+        return mDenominator;
+    }
+
+private:
+    int32_t mNumerator;
+    int32_t mDenominator;
+};
+
+} // namespace resampler
+
+#endif //OBOE_INTEGER_RATIO_H
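
reduce() strips the prime factors shared by numerator and denominator, which keeps the polyphase coefficient tables small. A worked example for the common 44.1 kHz to 48 kHz case (the helper function name is illustrative):

    #include <cassert>
    #include "IntegerRatio.h"

    void reduceExample() {
        resampler::IntegerRatio ratio(44100, 48000);
        ratio.reduce();                          // removes the shared factors 2*2*3*5*5 = 300
        assert(ratio.getNumerator() == 147);     // 44100 / 300
        assert(ratio.getDenominator() == 160);   // 48000 / 300
    }
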
diff --git a/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
new file mode 100644
index 0000000..73dbc41
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_KAISER_WINDOW_H
+#define RESAMPLER_KAISER_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a Kaiser window centered at 0.
+ */
+class KaiserWindow {
+public:
+    KaiserWindow() {
+        setStopBandAttenuation(60);
+    }
+
+    /**
+     * @param attenuation typical values range from 30 to 90 dB
+     * @return beta
+     */
+    double setStopBandAttenuation(double attenuation) {
+        double beta = 0.0;
+        if (attenuation > 50) {
+            beta = 0.1102 * (attenuation - 8.7);
+        } else if (attenuation >= 21) {
+            double a21 = attenuation - 21;
+            beta = 0.5842 * pow(a21, 0.4) + (0.07886 * a21);
+        }
+        setBeta(beta);
+        return beta;
+    }
+
+    void setBeta(double beta) {
+        mBeta = beta;
+        mInverseBesselBeta = 1.0 / bessel(beta);
+    }
+
+    /**
+     * @param x ranges from -1.0 to +1.0
+     */
+    double operator()(double x) {
+        double x2 = x * x;
+        if (x2 >= 1.0) return 0.0;
+        double w = mBeta * sqrt(1.0 - x2);
+        return bessel(w) * mInverseBesselBeta;
+    }
+
+    // Approximation of a
+    // modified zero order Bessel function of the first kind.
+    // Based on a discussion at:
+    // https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+    static double bessel(double x) {
+        double y = cosh(0.970941817426052 * x);
+        y += cosh(0.8854560256532099 * x);
+        y += cosh(0.7485107481711011 * x);
+        y += cosh(0.5680647467311558 * x);
+        y += cosh(0.3546048870425356 * x);
+        y += cosh(0.120536680255323 * x);
+        y *= 2;
+        y += cosh(x);
+        y /= 13;
+        return y;
+    }
+
+private:
+    double mBeta = 0.0;
+    double mInverseBesselBeta = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_KAISER_WINDOW_H
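
In formula form, operator() above evaluates the Kaiser window with I0 approximated by the cosh sum in bessel(), and setStopBandAttenuation() maps the attenuation A (in dB) to beta with the piecewise fit used in the code:

    w(x) = \frac{I_{0}\!\left(\beta\sqrt{1 - x^{2}}\right)}{I_{0}(\beta)} \quad \text{for } |x| < 1, \qquad w(x) = 0 \text{ otherwise}

    \beta(A) = \begin{cases}
        0.1102\,(A - 8.7) & A > 50 \\
        0.5842\,(A - 21)^{0.4} + 0.07886\,(A - 21) & 21 \le A \le 50 \\
        0 & A < 21
    \end{cases}
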
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
new file mode 100644
index 0000000..a7748c1
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LinearResampler.h"
+
+using namespace resampler;
+
+LinearResampler::LinearResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder) {
+    mPreviousFrame = std::make_unique<float[]>(getChannelCount());
+    mCurrentFrame = std::make_unique<float[]>(getChannelCount());
+}
+
+void LinearResampler::writeFrame(const float *frame) {
+    memcpy(mPreviousFrame.get(), mCurrentFrame.get(), sizeof(float) * getChannelCount());
+    memcpy(mCurrentFrame.get(), frame, sizeof(float) * getChannelCount());
+}
+
+void LinearResampler::readFrame(float *frame) {
+    float *previous = mPreviousFrame.get();
+    float *current = mCurrentFrame.get();
+    float phase = (float) getIntegerPhase() / mDenominator;
+    // iterate across samples in the frame
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float f0 = *previous++;
+        float f1 = *current++;
+        *frame++ = f0 + (phase * (f1 - f0));
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.h b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
new file mode 100644
index 0000000..6bde81d
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_LINEAR_RESAMPLER_H
+#define OBOE_LINEAR_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Simple resampler that uses linear interpolation between successive input frames.
+ */
+class LinearResampler : public MultiChannelResampler {
+public:
+    explicit LinearResampler(const MultiChannelResampler::Builder &builder);
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+
+private:
+    std::unique_ptr<float[]> mPreviousFrame;
+    std::unique_ptr<float[]> mCurrentFrame;
+};
+
+} // namespace resampler
+#endif //OBOE_LINEAR_RESAMPLER_H
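
readFrame() above interpolates each channel between the two most recently written frames, where f0 is mPreviousFrame, f1 is mCurrentFrame, and phi is the current fractional phase:

    \text{out}[c] = f_{0}[c] + \varphi\,\left(f_{1}[c] - f_{0}[c]\right), \qquad \varphi = \frac{\text{integerPhase}}{\text{denominator}}
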
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
new file mode 100644
index 0000000..d630520
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+
+#include "IntegerRatio.h"
+#include "LinearResampler.h"
+#include "MultiChannelResampler.h"
+#include "PolyphaseResampler.h"
+#include "PolyphaseResamplerMono.h"
+#include "PolyphaseResamplerStereo.h"
+#include "SincResampler.h"
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+MultiChannelResampler::MultiChannelResampler(const MultiChannelResampler::Builder &builder)
+        : mNumTaps(builder.getNumTaps())
+        , mX(builder.getChannelCount() * builder.getNumTaps() * 2)
+        , mSingleFrame(builder.getChannelCount())
+        , mChannelCount(builder.getChannelCount())
+        {
+    // Reduce sample rates to the smallest ratio.
+    // For example 44100/48000 would become 147/160.
+    IntegerRatio ratio(builder.getInputRate(), builder.getOutputRate());
+    ratio.reduce();
+    mNumerator = ratio.getNumerator();
+    mDenominator = ratio.getDenominator();
+    mIntegerPhase = mDenominator;
+}
+
+// static factory method
+MultiChannelResampler *MultiChannelResampler::make(int32_t channelCount,
+                                                   int32_t inputRate,
+                                                   int32_t outputRate,
+                                                   Quality quality) {
+    Builder builder;
+    builder.setInputRate(inputRate);
+    builder.setOutputRate(outputRate);
+    builder.setChannelCount(channelCount);
+
+    switch (quality) {
+        case Quality::Fastest:
+            builder.setNumTaps(2);
+            break;
+        case Quality::Low:
+            builder.setNumTaps(4);
+            break;
+        case Quality::Medium:
+        default:
+            builder.setNumTaps(8);
+            break;
+        case Quality::High:
+            builder.setNumTaps(16);
+            break;
+        case Quality::Best:
+            builder.setNumTaps(32);
+            break;
+    }
+
+    // Set the cutoff frequency so that we do not get aliasing when down-sampling.
+    if (inputRate > outputRate) {
+        builder.setNormalizedCutoff(kDefaultNormalizedCutoff);
+    }
+    return builder.build();
+}
+
+MultiChannelResampler *MultiChannelResampler::Builder::build() {
+    if (getNumTaps() == 2) {
+        // Note that this does not do low pass filtering.
+        return new LinearResampler(*this);
+    }
+    IntegerRatio ratio(getInputRate(), getOutputRate());
+    ratio.reduce();
+    bool usePolyphase = (getNumTaps() * ratio.getDenominator()) <= kMaxCoefficients;
+    if (usePolyphase) {
+        if (getChannelCount() == 1) {
+            return new PolyphaseResamplerMono(*this);
+        } else if (getChannelCount() == 2) {
+            return new PolyphaseResamplerStereo(*this);
+        } else {
+            return new PolyphaseResampler(*this);
+        }
+    } else {
+        // Use less optimized resampler that uses a float phaseIncrement.
+        // TODO mono resampler
+        if (getChannelCount() == 2) {
+            return new SincResamplerStereo(*this);
+        } else {
+            return new SincResampler(*this);
+        }
+    }
+}
+
+void MultiChannelResampler::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * getChannelCount()];
+    int offset = getNumTaps() * getChannelCount();
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        // Write twice so we avoid having to wrap when reading.
+        dest[channel] = dest[channel + offset] = frame[channel];
+    }
+}
+
+float MultiChannelResampler::sinc(float radians) {
+    if (fabsf(radians) < 1.0e-9f) return 1.0f;   // avoid divide by zero
+    return sinf(radians) / radians;   // Sinc function
+}
+
+// Generate coefficients in the order they will be used by readFrame().
+// This is more complicated but readFrame() is called repeatedly and should be optimized.
+void MultiChannelResampler::generateCoefficients(int32_t inputRate,
+                                                 int32_t outputRate,
+                                                 int32_t numRows,
+                                                 double phaseIncrement,
+                                                 float normalizedCutoff) {
+    mCoefficients.resize(getNumTaps() * numRows);
+    int coefficientIndex = 0;
+    double phase = 0.0; // ranges from 0.0 to 1.0, fraction between samples
+    // Stretch the sinc function for low pass filtering.
+    const float cutoffScaler = normalizedCutoff *
+            ((outputRate < inputRate)
+             ? ((float)outputRate / inputRate)
+             : ((float)inputRate / outputRate));
+    const int numTapsHalf = getNumTaps() / 2; // numTaps must be even.
+    const float numTapsHalfInverse = 1.0f / numTapsHalf;
+    for (int i = 0; i < numRows; i++) {
+        float tapPhase = phase - numTapsHalf;
+        float gain = 0.0; // sum of raw coefficients
+        int gainCursor = coefficientIndex;
+        for (int tap = 0; tap < getNumTaps(); tap++) {
+            float radians = tapPhase * M_PI;
+
+#if MCR_USE_KAISER
+            float window = mKaiserWindow(tapPhase * numTapsHalfInverse);
+#else
+            float window = mCoshWindow(tapPhase * numTapsHalfInverse);
+#endif
+            float coefficient = sinc(radians * cutoffScaler) * window;
+            mCoefficients.at(coefficientIndex++) = coefficient;
+            gain += coefficient;
+            tapPhase += 1.0;
+        }
+        phase += phaseIncrement;
+        while (phase >= 1.0) {
+            phase -= 1.0;
+        }
+
+        // Correct for gain variations.
+        float gainCorrection = 1.0 / gain; // normalize the gain
+        for (int tap = 0; tap < getNumTaps(); tap++) {
+            mCoefficients.at(gainCursor + tap) *= gainCorrection;
+        }
+    }
+}
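
The factory above maps Quality to 2/4/8/16/32 taps, reduces the rate ratio, and then picks a linear, polyphase, or float-phase sinc implementation. A small usage sketch (the wrapper function is illustrative):

    #include <memory>
    #include "MultiChannelResampler.h"

    using resampler::MultiChannelResampler;

    std::unique_ptr<MultiChannelResampler> makeStereoUpsampler() {
        // Quality::Medium selects 8 taps. With the reduced ratio 147/160, build()
        // chooses PolyphaseResamplerStereo if 8 * 160 coefficients fit under
        // kMaxCoefficients, and falls back to SincResamplerStereo otherwise.
        return std::unique_ptr<MultiChannelResampler>(
                MultiChannelResampler::make(2 /*channelCount*/,
                                            44100 /*inputRate*/,
                                            48000 /*outputRate*/,
                                            MultiChannelResampler::Quality::Medium));
    }
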
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
new file mode 100644
index 0000000..da79cad
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_MULTICHANNEL_RESAMPLER_H
+#define OBOE_MULTICHANNEL_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef MCR_USE_KAISER
+// It appears from the spectrogram that the HyperbolicCosine window leads to fewer artifacts.
+// And it is faster to calculate.
+#define MCR_USE_KAISER 0
+#endif
+
+#if MCR_USE_KAISER
+#include "KaiserWindow.h"
+#else
+#include "HyperbolicCosineWindow.h"
+#endif
+
+namespace resampler {
+
+class MultiChannelResampler {
+
+public:
+
+    enum class Quality : int32_t {
+        Fastest,
+        Low,
+        Medium,
+        High,
+        Best,
+    };
+
+    class Builder {
+    public:
+        /**
+         * Construct an optimal resampler based on the specified parameters.
+         * @return address of a resampler
+         */
+        MultiChannelResampler *build();
+
+        /**
+         * The number of taps in the resampling filter.
+         * More taps gives better quality but uses more CPU time.
+         * This typically ranges from 4 to 64. Default is 16.
+         *
+         * For polyphase filters, numTaps must be a multiple of four for loop unrolling.
+         * @param numTaps number of taps for the filter
+         * @return address of this builder for chaining calls
+         */
+        Builder *setNumTaps(int32_t numTaps) {
+            mNumTaps = numTaps;
+            return this;
+        }
+
+        /**
+         * Use 1 for mono, 2 for stereo, etc. Default is 1.
+         *
+         * @param channelCount number of channels
+         * @return address of this builder for chaining calls
+         */
+        Builder *setChannelCount(int32_t channelCount) {
+            mChannelCount = channelCount;
+            return this;
+        }
+
+        /**
+         * Default is 48000.
+         *
+         * @param inputRate sample rate of the input stream
+         * @return address of this builder for chaining calls
+         */
+        Builder *setInputRate(int32_t inputRate) {
+            mInputRate = inputRate;
+            return this;
+        }
+
+        /**
+         * Default is 48000.
+         *
+         * @param outputRate sample rate of the output stream
+         * @return address of this builder for chaining calls
+         */
+        Builder *setOutputRate(int32_t outputRate) {
+            mOutputRate = outputRate;
+            return this;
+        }
+
+        /**
+         * Set cutoff frequency relative to the Nyquist rate of the output sample rate.
+         * Set to 1.0 to match the Nyquist frequency.
+         * Set lower to reduce aliasing.
+         * Default is 0.70.
+         *
+         * @param normalizedCutoff anti-aliasing filter cutoff
+         * @return address of this builder for chaining calls
+         */
+        Builder *setNormalizedCutoff(float normalizedCutoff) {
+            mNormalizedCutoff = normalizedCutoff;
+            return this;
+        }
+
+        int32_t getNumTaps() const {
+            return mNumTaps;
+        }
+
+        int32_t getChannelCount() const {
+            return mChannelCount;
+        }
+
+        int32_t getInputRate() const {
+            return mInputRate;
+        }
+
+        int32_t getOutputRate() const {
+            return mOutputRate;
+        }
+
+        float getNormalizedCutoff() const {
+            return mNormalizedCutoff;
+        }
+
+    protected:
+        int32_t mChannelCount = 1;
+        int32_t mNumTaps = 16;
+        int32_t mInputRate = 48000;
+        int32_t mOutputRate = 48000;
+        float   mNormalizedCutoff = kDefaultNormalizedCutoff;
+    };
+
+    virtual ~MultiChannelResampler() = default;
+
+    /**
+     * Factory method for making a resampler that is optimal for the given inputs.
+     *
+     * @param channelCount number of channels, 2 for stereo
+     * @param inputRate sample rate of the input stream
+     * @param outputRate  sample rate of the output stream
+     * @param quality higher quality sounds better but uses more CPU
+     * @return an optimal resampler
+     */
+    static MultiChannelResampler *make(int32_t channelCount,
+                                       int32_t inputRate,
+                                       int32_t outputRate,
+                                       Quality quality);
+
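+    /**
+     * Returns true when a new input frame must be written before the next
+     * output frame can be read. See the accompanying README for a typical
+     * read/write loop.
+     */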
+    bool isWriteNeeded() const {
+        return mIntegerPhase >= mDenominator;
+    }
+
+    /**
+     * Write a frame containing N samples.
+     *
+     * @param frame pointer to the first sample in a frame
+     */
+    void writeNextFrame(const float *frame) {
+        writeFrame(frame);
+        advanceWrite();
+    }
+
+    /**
+     * Read a frame containing N samples.
+     *
+     * @param frame pointer to the first sample in a frame
+     */
+    void readNextFrame(float *frame) {
+        readFrame(frame);
+        advanceRead();
+    }
+
+    int getNumTaps() const {
+        return mNumTaps;
+    }
+
+    int getChannelCount() const {
+        return mChannelCount;
+    }
+
+    static float hammingWindow(float radians, float spread);
+
+    static float sinc(float radians);
+
+protected:
+
+    explicit MultiChannelResampler(const MultiChannelResampler::Builder &builder);
+
+    /**
+     * Write a frame containing N samples.
+     * Call advanceWrite() after calling this.
+     * @param frame pointer to the first sample in a frame
+     */
+    virtual void writeFrame(const float *frame);
+
+    /**
+     * Read a frame containing N samples using interpolation.
+     * Call advanceRead() after calling this.
+     * @param frame pointer to the first sample in a frame
+     */
+    virtual void readFrame(float *frame) = 0;
+
+    void advanceWrite() {
+        mIntegerPhase -= mDenominator;
+    }
+
+    void advanceRead() {
+        mIntegerPhase += mNumerator;
+    }
+
+    /**
+     * Generate the filter coefficients in optimal order.
+     * @param inputRate sample rate of the input stream
+     * @param outputRate  sample rate of the output stream
+     * @param numRows number of rows in the array that contain a set of tap coefficients
+     * @param phaseIncrement how much to increment the phase between rows
+     * @param normalizedCutoff filter cutoff frequency normalized to Nyquist rate of output
+     */
+    void generateCoefficients(int32_t inputRate,
+                              int32_t outputRate,
+                              int32_t numRows,
+                              double phaseIncrement,
+                              float normalizedCutoff);
+
+
+    int32_t getIntegerPhase() {
+        return mIntegerPhase;
+    }
+
+    static constexpr int kMaxCoefficients = 8 * 1024;
+    std::vector<float>   mCoefficients;
+
+    const int            mNumTaps;
+    int                  mCursor = 0;
+    std::vector<float>   mX;           // delayed input values for the FIR
+    std::vector<float>   mSingleFrame; // one frame for temporary use
+    int32_t              mIntegerPhase = 0;
+    int32_t              mNumerator = 0;
+    int32_t              mDenominator = 0;
+
+
+private:
+
+#if MCR_USE_KAISER
+    KaiserWindow           mKaiserWindow;
+#else
+    HyperbolicCosineWindow mCoshWindow;
+#endif
+
+    static constexpr float kDefaultNormalizedCutoff = 0.70f;
+
+    const int              mChannelCount;
+};
+
+} // namespace resampler
+#endif //OBOE_MULTICHANNEL_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
new file mode 100644
index 0000000..aa4ffd9
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "IntegerRatio.h"
+#include "PolyphaseResampler.h"
+
+using namespace resampler;
+
+PolyphaseResampler::PolyphaseResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder)
+        {
+    assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+
+    int32_t inputRate = builder.getInputRate();
+    int32_t outputRate = builder.getOutputRate();
+
+    int32_t numRows = mDenominator;
+    double phaseIncrement = (double) inputRate / (double) outputRate;
+    generateCoefficients(inputRate, outputRate,
+                         numRows, phaseIncrement,
+                         builder.getNormalizedCutoff());
+}
+
+void PolyphaseResampler::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+
+    // Multiply input times windowed sinc function.
+    float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient = *coefficients++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            mSingleFrame[channel] += *xFrame++ * coefficient;
+        }
+    }
+
+    // Advance and wrap through coefficients.
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulator to output.
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        frame[channel] = mSingleFrame[channel];
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
new file mode 100644
index 0000000..1aeb680
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_H
+#define OBOE_POLYPHASE_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+/**
+ * Resampler that is optimized for a reduced ratio of sample rates.
+ * All of the coefficients for each possible phase value are pre-calculated.
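+ * For example, 44100 Hz to 48000 Hz reduces to the integer ratio 147 : 160,
+ * so 160 rows of taps are pre-calculated, one row for each possible phase.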
+ */
+class PolyphaseResampler : public MultiChannelResampler {
+public:
+    /**
+     *
+     * @param builder containing lots of parameters
+     */
+    explicit PolyphaseResampler(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResampler() = default;
+
+    void readFrame(float *frame) override;
+
+protected:
+
+    int32_t                mCoefficientCursor = 0;
+
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
new file mode 100644
index 0000000..c0e29b7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerMono.h"
+
+using namespace resampler;
+
+#define MONO  1
+
+PolyphaseResamplerMono::PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder)
+        : PolyphaseResampler(builder) {
+    assert(builder.getChannelCount() == MONO);
+}
+
+void PolyphaseResamplerMono::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * MONO];
+    const int offset = mNumTaps * MONO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float sample =  frame[0];
+    // Put ordered writes together.
+    dest[0] = sample;
+    dest[offset] = sample;
+}
+
+void PolyphaseResamplerMono::readFrame(float *frame) {
+    // Clear accumulator.
+    float sum = 0.0;
+
+    // Multiply input times precomputed windowed sinc function.
+    const float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * MONO];
+    const int numLoops = mNumTaps >> 2; // n/4
+    for (int i = 0; i < numLoops; i++) {
+        // Manual loop unrolling, might get converted to SIMD.
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+    }
+
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulator to output.
+    frame[0] = sum;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
new file mode 100644
index 0000000..0a691a3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_MONO_H
+#define OBOE_POLYPHASE_RESAMPLER_MONO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerMono : public PolyphaseResampler {
+public:
+    explicit PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResamplerMono() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_MONO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
new file mode 100644
index 0000000..e4bef74
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO  2
+
+PolyphaseResamplerStereo::PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder)
+        : PolyphaseResampler(builder) {
+    assert(builder.getChannelCount() == STEREO);
+}
+
+void PolyphaseResamplerStereo::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * STEREO];
+    const int offset = mNumTaps * STEREO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float left =  frame[0];
+    const float right = frame[1];
+    // Put ordered writes together.
+    dest[0] = left;
+    dest[1] = right;
+    dest[offset] = left;
+    dest[1 + offset] = right;
+}
+
+void PolyphaseResamplerStereo::readFrame(float *frame) {
+    // Clear accumulators.
+    float left = 0.0;
+    float right = 0.0;
+
+    // Multiply input times precomputed windowed sinc function.
+    const float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * STEREO];
+    const int numLoops = mNumTaps >> 2; // n/4
+    for (int i = 0; i < numLoops; i++) {
+        // Manual loop unrolling, might get converted to SIMD.
+        float coefficient = *coefficients++;
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++; // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++;  // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++;  // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+    }
+
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulators to output.
+    frame[0] = left;
+    frame[1] = right;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
new file mode 100644
index 0000000..e608483
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_STEREO_H
+#define OBOE_POLYPHASE_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerStereo : public PolyphaseResampler {
+public:
+    explicit PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResamplerStereo() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/README.md b/media/libaaudio/src/flowgraph/resampler/README.md
new file mode 100644
index 0000000..05d8a89
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/README.md
@@ -0,0 +1,91 @@
+# Sample Rate Converter
+
+This folder contains a sample rate converter, or "resampler".
+
+The converter is based on a sinc function that has been windowed by a hyperbolic cosine.
+We found this had fewer artifacts than the more traditional Kaiser window.
+
+## Creating a Resampler
+
+Include the [main header](MultiChannelResampler.h) for the resampler.
+
+    #include "resampler/MultiChannelResampler.h"
+
+Here is an example of creating a stereo resampler that will convert from 44100 to 48000 Hz.
+Only do this once, when you open your stream. Then use the same resampler to process multiple buffers.
+
+    MultiChannelResampler *resampler = MultiChannelResampler::make(
+            2, // channel count
+            44100, // input sampleRate
+            48000, // output sampleRate
+            MultiChannelResampler::Quality::Medium); // conversion quality
+
+Possible values for quality include { Fastest, Low, Medium, High, Best }.
+Higher quality levels will sound better but consume more CPU because they have more taps in the filter.
+
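+If you need more control than the make() factory gives you, the same parameters can be set
+through the Builder class declared in the main header. The values below are only an
+illustration:
+
+    MultiChannelResampler::Builder builder;
+    builder.setChannelCount(2)
+            ->setInputRate(44100)
+            ->setOutputRate(48000)
+            ->setNumTaps(16)              // must be a multiple of 4
+            ->setNormalizedCutoff(0.70f); // anti-aliasing cutoff, 0.70 is the default
+    MultiChannelResampler *resampler = builder.build();
+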
+## Fractional Frame Counts
+
+Note that the number of output frames generated for a given number of input frames can vary.
+
+For example, suppose you are converting from 44100 Hz to 48000 Hz and using an input buffer with 960 frames. If you calculate the number of output frames you get:
+
+    960 * 48000 / 44100 = 1044.897959...
+
+You cannot generate a fractional number of frames. So the resampler will sometimes generate 1044 frames and sometimes 1045 frames. On average it will generate 1044.897959 frames. The resampler stores the fraction internally and keeps track of when to consume or generate a frame.
+
+You can either use a fixed number of input frames or a fixed number of output frames. The other frame count will vary.
+
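+If you need to allocate a buffer for the side with the varying frame count, a conservative
+upper bound is enough. For example, with a fixed number of input frames (the variable names
+here are only illustrative):
+
+    int maxOutputFrames = (int) ceil(numInputFrames * (double) outputRate / inputRate) + 1;
+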
+## Calling the Resampler with a fixed number of OUTPUT frames
+
+In this example, suppose we have a fixed number of output frames and a variable number of input frames.
+
+Assume you start with these variables and a method that returns the next input frame:
+
+    float *outputBuffer;     // multi-channel buffer to be filled
+    int    numOutputFrames;  // number of frames of output
+    int    channelCount;     // 1 for mono, 2 for stereo
+
+The resampler has a method isWriteNeeded() that tells you whether to write to or read from the resampler.
+
+    int outputFramesLeft = numOutputFrames;
+    while (outputFramesLeft > 0) {
+        if (resampler->isWriteNeeded()) {
+            const float *frame = getNextInputFrame(); // you provide this
+            resampler->writeNextFrame(frame);
+        } else {
+            resampler->readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            outputFramesLeft--;
+        }
+    }
+
+## Calling the Resampler with a fixed number of INPUT frames
+
+In this example, suppose we have a fixed number of input frames and a variable number of output frames.
+
+Assume you start with these variables:
+
+    float *inputBuffer;     // multi-channel buffer to be consumed
+    float *outputBuffer;    // multi-channel buffer to be filled
+    int    numInputFrames;  // number of frames of input
+    int    numOutputFrames = 0;
+    int    channelCount;    // 1 for mono, 2 for stereo
+
+    int inputFramesLeft = numInputFrames;
+    while (inputFramesLeft > 0) {
+        if (resampler->isWriteNeeded()) {
+            resampler->writeNextFrame(inputBuffer);
+            inputBuffer += channelCount;
+            inputFramesLeft--;
+        } else {
+            resampler->readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            numOutputFrames++;
+        }
+    }
+
+## Deleting the Resampler
+
+When you are done, you should delete the Resampler to avoid a memory leak.
+
+    delete resampler;
+
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
new file mode 100644
index 0000000..5e8a9e0
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "SincResampler.h"
+
+using namespace resampler;
+
+SincResampler::SincResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder)
+        , mSingleFrame2(builder.getChannelCount()) {
+    assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+    mNumRows = kMaxCoefficients / getNumTaps(); // no guard row needed
+    mPhaseScaler = (double) mNumRows / mDenominator;
+    double phaseIncrement = 1.0 / mNumRows;
+    generateCoefficients(builder.getInputRate(),
+                         builder.getOutputRate(),
+                         mNumRows,
+                         phaseIncrement,
+                         builder.getNormalizedCutoff());
+}
+
+void SincResampler::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+    std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+    // Determine indices into coefficients table.
+    double tablePhase = getIntegerPhase() * mPhaseScaler;
+    int index1 = static_cast<int>(floor(tablePhase));
+    if (index1 >= mNumRows) { // no guard row needed because we wrap the indices
+        tablePhase -= mNumRows;
+        index1 -= mNumRows;
+    }
+
+    int index2 = index1 + 1;
+    if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+        index2 -= mNumRows;
+    }
+
+    float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+    float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient1 = *coefficients1++;
+        float coefficient2 = *coefficients2++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            float sample = *xFrame++;
+            mSingleFrame[channel] +=  sample * coefficient1;
+            mSingleFrame2[channel] += sample * coefficient2;
+        }
+    }
+
+    // Interpolate and copy to output.
+    float fraction = tablePhase - index1;
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float low = mSingleFrame[channel];
+        float high = mSingleFrame2[channel];
+        frame[channel] = low + (fraction * (high - low));
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.h b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
new file mode 100644
index 0000000..b235188
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_H
+#define OBOE_SINC_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Resampler that can interpolate between coefficients.
+ * This can be used to support arbitrary ratios.
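+ * For each output frame the fractional table phase selects two adjacent rows of taps;
+ * both FIR sums are computed and then linearly interpolated (see readFrame()).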
+ */
+class SincResampler : public MultiChannelResampler {
+public:
+    explicit SincResampler(const MultiChannelResampler::Builder &builder);
+
+    virtual ~SincResampler() = default;
+
+    void readFrame(float *frame) override;
+
+protected:
+
+    std::vector<float> mSingleFrame2; // for interpolation
+    int32_t            mNumRows = 0;
+    double             mPhaseScaler = 1.0;
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
new file mode 100644
index 0000000..ce00302
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO  2
+
+SincResamplerStereo::SincResamplerStereo(const MultiChannelResampler::Builder &builder)
+        : SincResampler(builder) {
+    assert(builder.getChannelCount() == STEREO);
+}
+
+void SincResamplerStereo::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * STEREO];
+    const int offset = mNumTaps * STEREO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float left =  frame[0];
+    const float right = frame[1];
+    // Put ordered writes together.
+    dest[0] = left;
+    dest[1] = right;
+    dest[offset] = left;
+    dest[1 + offset] = right;
+}
+
+// Multiply input times windowed sinc function.
+void SincResamplerStereo::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+    std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+    // Determine indices into coefficients table.
+    double tablePhase = getIntegerPhase() * mPhaseScaler;
+    int index1 = static_cast<int>(floor(tablePhase));
+    float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+    int index2 = (index1 + 1);
+    if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+        index2 = 0;
+    }
+    float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient1 = *coefficients1++;
+        float coefficient2 = *coefficients2++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            float sample = *xFrame++;
+            mSingleFrame[channel] +=  sample * coefficient1;
+            mSingleFrame2[channel] += sample * coefficient2;
+        }
+    }
+
+    // Interpolate and copy to output.
+    float fraction = tablePhase - index1;
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float low = mSingleFrame[channel];
+        float high = mSingleFrame2[channel];
+        frame[channel] = low + (fraction * (high - low));
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
new file mode 100644
index 0000000..7d49ec7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_STEREO_H
+#define OBOE_SINC_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "SincResampler.h"
+
+namespace resampler {
+
+class SincResamplerStereo : public SincResampler {
+public:
+    explicit SincResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+    virtual ~SincResamplerStereo() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index e96e134..38f3c24 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -37,18 +37,6 @@
         : AudioStream() {
 }
 
-AudioStreamLegacy::~AudioStreamLegacy() {
-}
-
-// Called from AudioTrack.cpp or AudioRecord.cpp
-static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
-    AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
-    streamLegacy->processCallback(event, info);
-}
-
-aaudio_legacy_callback_t AudioStreamLegacy::getLegacyCallback() {
-    return AudioStreamLegacy_callback;
-}
 
 aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer,
                                                                         int32_t numFrames) {
@@ -76,84 +64,77 @@
     return (int32_t) callDataCallbackFrames(buffer, numFrames);
 }
 
-void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
-    aaudio_data_callback_result_t callbackResult;
+
+void AudioStreamLegacy::onNewIAudioTrack() {
+    ALOGD("%s stream disconnected", __func__);
+    forceDisconnect();
+    mCallbackEnabled.store(false);
+}
+
+size_t AudioStreamLegacy::onMoreData(const android::AudioTrack::Buffer& buffer) {
     // This illegal size can be used to tell AudioRecord or AudioTrack to stop calling us.
     // This takes advantage of them killing the stream when they see a size out of range.
     // That is an undocumented behavior.
     // TODO add to API in AudioRecord and AudioTrack
     const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
+    aaudio_data_callback_result_t callbackResult;
+    (void) checkForDisconnectRequest(true);
 
-    switch (opcode) {
-        case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
-            (void) checkForDisconnectRequest(true);
-
-            // Note that this code assumes an AudioTrack::Buffer is the same as
-            // AudioRecord::Buffer
-            // TODO define our own AudioBuffer and pass it from the subclasses.
-            AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
-            if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
-                ALOGW("processCallbackCommon() data, stream disconnected");
-                // This will kill the stream and prevent it from being restarted.
-                // That is OK because the stream is disconnected.
-                audioBuffer->size = SIZE_STOP_CALLBACKS;
-            } else if (!mCallbackEnabled.load()) {
-                ALOGW("processCallbackCommon() no data because callback disabled, set size=0");
-                // Do NOT use SIZE_STOP_CALLBACKS here because that will kill the stream and
-                // prevent it from being restarted. This can occur because of a race condition
-                // caused by Legacy callbacks running after the track is "stopped".
-                audioBuffer->size = 0;
-            } else {
-                if (audioBuffer->frameCount == 0) {
-                    ALOGW("processCallbackCommon() data, frameCount is zero");
-                    return;
-                }
-
-                // If the caller specified an exact size then use a block size adapter.
-                if (mBlockAdapter != nullptr) {
-                    int32_t byteCount = audioBuffer->frameCount * getBytesPerDeviceFrame();
-                    callbackResult = mBlockAdapter->processVariableBlock(
-                            (uint8_t *) audioBuffer->raw, byteCount);
-                } else {
-                    // Call using the AAudio callback interface.
-                    callbackResult = callDataCallbackFrames((uint8_t *)audioBuffer->raw,
-                                                            audioBuffer->frameCount);
-                }
-                if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
-                    audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
-                } else {
-                    if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
-                        ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
-                    } else {
-                        ALOGW("%s() callback returned invalid result = %d",
-                              __func__, callbackResult);
-                    }
-                    audioBuffer->size = 0;
-                    systemStopInternal();
-                    // Disable the callback just in case the system keeps trying to call us.
-                    mCallbackEnabled.store(false);
-                }
-
-                if (updateStateMachine() != AAUDIO_OK) {
-                    forceDisconnect();
-                    mCallbackEnabled.store(false);
-                }
-            }
+    // Note that this code assumes an AudioTrack::Buffer is the same as
+    // AudioRecord::Buffer
+    // TODO define our own AudioBuffer and pass it from the subclasses.
+    size_t written = buffer.size;
+    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+        ALOGW("%s() data, stream disconnected", __func__);
+        // This will kill the stream and prevent it from being restarted.
+        // That is OK because the stream is disconnected.
+        written = SIZE_STOP_CALLBACKS;
+    } else if (!mCallbackEnabled.load()) {
+        ALOGW("%s() no data because callback disabled, set size=0", __func__);
+        // Do NOT use SIZE_STOP_CALLBACKS here because that will kill the stream and
+        // prevent it from being restarted. This can occur because of a race condition
+        // caused by Legacy callbacks running after the track is "stopped".
+        written = 0;
+    } else {
+        if (buffer.frameCount == 0) {
+            ALOGW("%s() data, frameCount is zero", __func__);
+            return written;
         }
-            break;
 
-        // Stream got rerouted so we disconnect.
-        case AAUDIO_CALLBACK_OPERATION_DISCONNECTED:
-            ALOGD("processCallbackCommon() stream disconnected");
+        // If the caller specified an exact size then use a block size adapter.
+        if (mBlockAdapter != nullptr) {
+            int32_t byteCount = buffer.frameCount * getBytesPerDeviceFrame();
+            callbackResult = mBlockAdapter->processVariableBlock(
+                    static_cast<uint8_t*>(buffer.raw), byteCount);
+        } else {
+            // Call using the AAudio callback interface.
+            callbackResult = callDataCallbackFrames(static_cast<uint8_t *>(buffer.raw),
+                                                    buffer.frameCount);
+        }
+        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+            written = buffer.frameCount * getBytesPerDeviceFrame();
+        } else {
+            if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+                ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+            } else {
+                ALOGW("%s() callback returned invalid result = %d",
+                      __func__, callbackResult);
+            }
+            written = 0;
+            systemStopInternal();
+            // Disable the callback just in case the system keeps trying to call us.
+            mCallbackEnabled.store(false);
+        }
+
+        if (updateStateMachine() != AAUDIO_OK) {
             forceDisconnect();
             mCallbackEnabled.store(false);
-            break;
-
-        default:
-            break;
+        }
     }
+    return written;
 }
 
+
 aaudio_result_t AudioStreamLegacy::checkForDisconnectRequest(bool errorCallbackEnabled) {
     if (mRequestDisconnect.isRequested()) {
         ALOGD("checkForDisconnectRequest() mRequestDisconnect acknowledged");
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 88ef270..c54d7e2 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -18,6 +18,7 @@
 #define LEGACY_AUDIO_STREAM_LEGACY_H
 
 #include <media/AudioTimestamp.h>
+#include <media/AudioTrack.h>
 #include <media/AudioSystem.h>
 
 #include <aaudio/AAudio.h>
@@ -30,8 +31,6 @@
 namespace aaudio {
 
 
-typedef void (*aaudio_legacy_callback_t)(int event, void* user, void *info);
-
 enum {
     /**
      * Request that the callback function should fill the data buffer of an output stream,
@@ -56,21 +55,17 @@
 typedef int32_t aaudio_callback_operation_t;
 
 
-class AudioStreamLegacy : public AudioStream, public FixedBlockProcessor {
+class AudioStreamLegacy : public AudioStream,
+                          public FixedBlockProcessor,
+                          protected android::AudioTrack::IAudioTrackCallback {
 public:
     AudioStreamLegacy();
 
-    virtual ~AudioStreamLegacy();
+    virtual ~AudioStreamLegacy() = default;
 
-    aaudio_legacy_callback_t getLegacyCallback();
 
     int32_t callDataCallbackFrames(uint8_t *buffer, int32_t numFrames);
 
-    // This is public so it can be called from the C callback function.
-    // This is called from the AudioTrack/AudioRecord client.
-    virtual void processCallback(int event, void *info) = 0;
-
-    void processCallbackCommon(aaudio_callback_operation_t opcode, void *info);
 
     // Implement FixedBlockProcessor
     int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override;
@@ -86,7 +81,8 @@
     }
 
 protected:
-
+    size_t onMoreData(const android::AudioTrack::Buffer& buffer) override;
+    void onNewIAudioTrack() override;
     aaudio_result_t getBestTimestamp(clockid_t clockId,
                                      int64_t *framePosition,
                                      int64_t *timeNanoseconds,
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index dc66742..d9f12a5 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -37,6 +37,10 @@
 using namespace android;
 using namespace aaudio;
 
+static void sCallbackWrapper(int event, void* userData, void* info) {
+    static_cast<AudioStreamRecord*>(userData)->processCallback(event, info);
+}
+
 AudioStreamRecord::AudioStreamRecord()
     : AudioStreamLegacy()
     , mFixedBlockWriter(*this)
@@ -65,11 +69,8 @@
     const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
 
     // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
-    int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
-                              ? 2 : getSamplesPerFrame();
-    audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
-                               audio_channel_in_mask_from_count(samplesPerFrame) :
-                               audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);
+    audio_channel_mask_t channelMask =
+            AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), true /*isInput*/);
 
     size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
                         : builder.getBufferCapacity();
@@ -115,7 +116,7 @@
     constexpr int32_t kMostLikelySampleRateForFast = 48000;
     if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
             && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
-            && (samplesPerFrame <= 2) // FAST only for mono and stereo
+            && (audio_channel_count_from_in_mask(channelMask) <= 2) // FAST only for mono and stereo
             && (getSampleRate() == kMostLikelySampleRateForFast
                 || getSampleRate() == AAUDIO_UNSPECIFIED)) {
         setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
@@ -127,12 +128,12 @@
     uint32_t notificationFrames = 0;
 
     // Setup the callback if there is one.
-    AudioRecord::callback_t callback = nullptr;
+    AudioRecord::legacy_callback_t callback = nullptr;
     void *callbackData = nullptr;
     AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
     if (builder.getDataCallbackProc() != nullptr) {
         streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
-        callback = getLegacyCallback();
+        callback = sCallbackWrapper;
         callbackData = this;
     }
     mCallbackBufferSize = builder.getFramesPerDataCallback();
@@ -228,7 +229,9 @@
             .set(AMEDIAMETRICS_PROP_ENCODINGCLIENT, toString(requestedFormat).c_str()).record();
 
     // Get the actual values from the AudioRecord.
-    setSamplesPerFrame(mAudioRecord->channelCount());
+    setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+            mAudioRecord->channelMask(), true /*isInput*/,
+            AAudio_isChannelIndexMask(getChannelMask())));
     setSampleRate(mAudioRecord->getSampleRate());
     setBufferCapacity(getBufferCapacityFromDevice());
     setFramesPerBurst(getFramesPerBurstFromDevice());
@@ -354,14 +357,15 @@
 void AudioStreamRecord::processCallback(int event, void *info) {
     switch (event) {
         case AudioRecord::EVENT_MORE_DATA:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+        {
+            AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+            audioBuffer->size = onMoreData(*audioBuffer);
             break;
-
+        }
             // Stream got rerouted so we disconnect.
         case AudioRecord::EVENT_NEW_IAUDIORECORD:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+            onNewIAudioTrack();
             break;
-
         default:
             break;
     }
@@ -505,7 +509,7 @@
     return (aaudio_result_t) framesRead;
 }
 
-aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
+aaudio_result_t AudioStreamRecord::setBufferSize(int32_t /*requestedFrames*/)
 {
     return getBufferSize();
 }
@@ -553,7 +557,7 @@
         case AAUDIO_STREAM_STATE_STARTED:
             result = mAudioRecord->getPosition(&position);
             if (result == OK) {
-                mFramesWritten.update32(position);
+                mFramesWritten.update32((int32_t)position);
             }
             break;
         case AAUDIO_STREAM_STATE_STOPPING:
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 692651d..5ce73f9 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -65,7 +65,9 @@
     }
 
     // This is public so it can be called from the C callback function.
-    void processCallback(int event, void *info) override;
+    void processCallback(int event, void *info);
+
+    void processCallbackRecord(aaudio_callback_operation_t opcode, void *info);
 
     int64_t incrementClientFrameCounter(int32_t frames) override {
         return incrementFramesRead(frames);
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1d412c0..6f1dc92 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -66,13 +66,8 @@
     const aaudio_session_id_t requestedSessionId = builder.getSessionId();
     const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
 
-    // Try to create an AudioTrack
-    // Use stereo if unspecified.
-    int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
-                              ? 2 : getSamplesPerFrame();
-    audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
-                            audio_channel_out_mask_from_count(samplesPerFrame) :
-                            audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);
+    audio_channel_mask_t channelMask =
+            AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), false /*isInput*/);
 
     audio_output_flags_t flags;
     aaudio_performance_mode_t perfMode = getPerformanceMode();
@@ -108,14 +103,12 @@
             : getFormat();
 
     // Setup the callback if there is one.
-    AudioTrack::callback_t callback = nullptr;
-    void *callbackData = nullptr;
+    wp<AudioTrack::IAudioTrackCallback> callback;
     // Note that TRANSFER_SYNC does not allow FAST track
     AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
     if (builder.getDataCallbackProc() != nullptr) {
         streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
-        callback = getLegacyCallback();
-        callbackData = this;
+        callback = wp<AudioTrack::IAudioTrackCallback>::fromExisting(this);
 
         // If the total buffer size is unspecified then base the size on the burst size.
         if (frameCount == 0
@@ -140,7 +133,9 @@
     const audio_usage_t usage =
             AAudioConvert_usageToInternal(builder.getUsage());
     const audio_flags_mask_t attributesFlags =
-        AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy());
+        AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy(),
+                                                         builder.getSpatializationBehavior(),
+                                                         builder.isContentSpatialized());
 
     const audio_attributes_t attributes = {
             .content_type = contentType,
@@ -160,13 +155,12 @@
             frameCount,
             flags,
             callback,
-            callbackData,
             notificationFrames,
-            0,       // DEFAULT sharedBuffer*/,
+            nullptr,       // DEFAULT sharedBuffer*/,
             false,   // DEFAULT threadCanCallJava
             sessionId,
             streamTransferType,
-            NULL,    // DEFAULT audio_offload_info_t
+            nullptr,    // DEFAULT audio_offload_info_t
             AttributionSourceState(), // DEFAULT uid and pid
             &attributes,
             // WARNING - If doNotReconnect set true then audio stops after plugging and unplugging
@@ -199,7 +193,9 @@
     doSetVolume();
 
     // Get the actual values from the AudioTrack.
-    setSamplesPerFrame(mAudioTrack->channelCount());
+    setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+        mAudioTrack->channelMask(), false /*isInput*/,
+        AAudio_isChannelIndexMask(getChannelMask())));
     setFormat(mAudioTrack->format());
     setDeviceFormat(mAudioTrack->format());
     setSampleRate(mAudioTrack->getSampleRate());
@@ -218,7 +214,6 @@
         mBlockAdapter = nullptr;
     }
 
-    setState(AAUDIO_STREAM_STATE_OPEN);
     setDeviceId(mAudioTrack->getRoutedDeviceId());
 
     aaudio_session_id_t actualSessionId =
@@ -251,6 +246,19 @@
              "open() perfMode changed from %d to %d",
              perfMode, actualPerformanceMode);
 
+    if (getState() != AAUDIO_STREAM_STATE_UNINITIALIZED) {
+        ALOGE("%s - Open canceled since state = %d", __func__, getState());
+        if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED)
+        {
+            ALOGE("%s - Opening while state is disconnected", __func__);
+            safeReleaseClose();
+            return AAUDIO_ERROR_DISCONNECTED;
+        }
+        safeReleaseClose();
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    setState(AAUDIO_STREAM_STATE_OPEN);
     return AAUDIO_OK;
 }
 
@@ -282,31 +290,19 @@
     AudioStream::close_l();
 }
 
-void AudioStreamTrack::processCallback(int event, void *info) {
 
-    switch (event) {
-        case AudioTrack::EVENT_MORE_DATA:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
-            break;
-
-            // Stream got rerouted so we disconnect.
-        case AudioTrack::EVENT_NEW_IAUDIOTRACK:
-            // request stream disconnect if the restored AudioTrack has properties not matching
-            // what was requested initially
-            if (mAudioTrack->channelCount() != getSamplesPerFrame()
-                    || mAudioTrack->format() != getFormat()
-                    || mAudioTrack->getSampleRate() != getSampleRate()
-                    || mAudioTrack->getRoutedDeviceId() != getDeviceId()
-                    || getBufferCapacityFromDevice() != getBufferCapacity()
-                    || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
-                processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
-            }
-            break;
-
-        default:
-            break;
+void AudioStreamTrack::onNewIAudioTrack() {
+    // Stream got rerouted so we disconnect.
+    // request stream disconnect if the restored AudioTrack has properties not matching
+    // what was requested initially
+    if (mAudioTrack->channelCount() != getSamplesPerFrame()
+          || mAudioTrack->format() != getFormat()
+          || mAudioTrack->getSampleRate() != getSampleRate()
+          || mAudioTrack->getRoutedDeviceId() != getDeviceId()
+          || getBufferCapacityFromDevice() != getBufferCapacity()
+          || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
+        AudioStreamLegacy::onNewIAudioTrack();
     }
-    return;
 }
 
 aaudio_result_t AudioStreamTrack::requestStart_l() {
@@ -512,7 +508,7 @@
     case AAUDIO_STREAM_STATE_PAUSED:
         result = mAudioTrack->getPosition(&position);
         if (result == OK) {
-            mFramesRead.update32(position);
+            mFramesRead.update32((int32_t)position);
         }
         break;
     default:
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index f604871..0f4d72b 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -81,9 +81,6 @@
 
     aaudio_result_t updateStateMachine() override;
 
-    // This is public so it can be called from the C callback function.
-    void processCallback(int event, void *info) override;
-
     int64_t incrementClientFrameCounter(int32_t frames) override {
         return incrementFramesWritten(frames);
     }
@@ -100,6 +97,7 @@
 
     int32_t getFramesPerBurstFromDevice() const override;
     int32_t getBufferCapacityFromDevice() const override;
+    void onNewIAudioTrack() override;
 
 private:
 
diff --git a/media/libaaudio/src/libaaudio.map.txt b/media/libaaudio/src/libaaudio.map.txt
index 1dd44d1..f45b816 100644
--- a/media/libaaudio/src/libaaudio.map.txt
+++ b/media/libaaudio/src/libaaudio.map.txt
@@ -25,6 +25,9 @@
     AAudioStreamBuilder_setPrivacySensitive;   # introduced=30
     AAudioStreamBuilder_setPackageName;   # introduced=31
     AAudioStreamBuilder_setAttributionTag;   # introduced=31
+    AAudioStreamBuilder_setChannelMask;    # introduced=32
+    AAudioStreamBuilder_setSpatializationBehavior; # introduced=32
+    AAudioStreamBuilder_setIsContentSpatialized;   # introduced=32
     AAudioStreamBuilder_openStream;
     AAudioStreamBuilder_delete;
     AAudioStream_close;
@@ -61,6 +64,9 @@
     AAudioStream_isMMapUsed;
     AAudioStream_isPrivacySensitive;   # introduced=30
     AAudioStream_release;        # introduced=30
+    AAudioStream_getChannelMask;  # introduced=32
+    AAudioStream_getSpatializationBehavior;  # introduced=32
+    AAudioStream_isContentSpatialized;       # introduced=32
   local:
     *;
 };
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index d795725..a0952fe 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -235,20 +235,46 @@
 }
 
 audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
-        aaudio_allowed_capture_policy_t policy) {
+        aaudio_allowed_capture_policy_t policy,
+        aaudio_spatialization_behavior_t spatializationBehavior,
+        bool isContentSpatialized) {
+    audio_flags_mask_t flagsMask = AUDIO_FLAG_NONE;
     switch (policy) {
         case AAUDIO_UNSPECIFIED:
         case AAUDIO_ALLOW_CAPTURE_BY_ALL:
-            return AUDIO_FLAG_NONE;
+            // flagsMask is not modified
+            break;
         case AAUDIO_ALLOW_CAPTURE_BY_SYSTEM:
-            return AUDIO_FLAG_NO_MEDIA_PROJECTION;
+            flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_NO_MEDIA_PROJECTION);
+            break;
         case AAUDIO_ALLOW_CAPTURE_BY_NONE:
-            return static_cast<audio_flags_mask_t>(
+            flagsMask = static_cast<audio_flags_mask_t>(flagsMask |
                     AUDIO_FLAG_NO_MEDIA_PROJECTION | AUDIO_FLAG_NO_SYSTEM_CAPTURE);
+            break;
         default:
-            ALOGE("%s() 0x%08X unrecognized", __func__, policy);
-            return AUDIO_FLAG_NONE; //
+            ALOGE("%s() 0x%08X unrecognized capture policy", __func__, policy);
+            // flagsMask is not modified
     }
+
+    switch (spatializationBehavior) {
+        case AAUDIO_UNSPECIFIED:
+        case AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO:
+            // flagsMask is not modified
+            break;
+        case AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER:
+            flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_NEVER_SPATIALIZE);
+            break;
+        default:
+            ALOGE("%s() 0x%08X unrecognized spatialization behavior",
+                  __func__, spatializationBehavior);
+            // flagsMask is not modified
+    }
+
+    if (isContentSpatialized) {
+        flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_CONTENT_SPATIALIZED);
+    }
+
+    return flagsMask;
 }
 
 audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
@@ -256,6 +282,248 @@
     return privacySensitive ? AUDIO_FLAG_CAPTURE_PRIVATE : AUDIO_FLAG_NONE;
 }
 
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelLayoutMask(
+        aaudio_channel_mask_t channelMask, bool isInput) {
+    if (isInput) {
+        switch (channelMask) {
+            case AAUDIO_CHANNEL_MONO:
+                return AUDIO_CHANNEL_IN_MONO;
+            case AAUDIO_CHANNEL_STEREO:
+                return AUDIO_CHANNEL_IN_STEREO;
+            case AAUDIO_CHANNEL_FRONT_BACK:
+                return AUDIO_CHANNEL_IN_FRONT_BACK;
+            case AAUDIO_CHANNEL_2POINT0POINT2:
+                return AUDIO_CHANNEL_IN_2POINT0POINT2;
+            case AAUDIO_CHANNEL_2POINT1POINT2:
+                return AUDIO_CHANNEL_IN_2POINT1POINT2;
+            case AAUDIO_CHANNEL_3POINT0POINT2:
+                return AUDIO_CHANNEL_IN_3POINT0POINT2;
+            case AAUDIO_CHANNEL_3POINT1POINT2:
+                return AUDIO_CHANNEL_IN_3POINT1POINT2;
+            case AAUDIO_CHANNEL_5POINT1:
+                return AUDIO_CHANNEL_IN_5POINT1;
+            default:
+                ALOGE("%s() %#x unrecognized", __func__, channelMask);
+                return AUDIO_CHANNEL_INVALID;
+        }
+    } else {
+        switch (channelMask) {
+            case AAUDIO_CHANNEL_MONO:
+                return AUDIO_CHANNEL_OUT_MONO;
+            case AAUDIO_CHANNEL_STEREO:
+                return AUDIO_CHANNEL_OUT_STEREO;
+            case AAUDIO_CHANNEL_2POINT1:
+                return AUDIO_CHANNEL_OUT_2POINT1;
+            case AAUDIO_CHANNEL_TRI:
+                return AUDIO_CHANNEL_OUT_TRI;
+            case AAUDIO_CHANNEL_TRI_BACK:
+                return AUDIO_CHANNEL_OUT_TRI_BACK;
+            case AAUDIO_CHANNEL_3POINT1:
+                return AUDIO_CHANNEL_OUT_3POINT1;
+            case AAUDIO_CHANNEL_2POINT0POINT2:
+                return AUDIO_CHANNEL_OUT_2POINT0POINT2;
+            case AAUDIO_CHANNEL_2POINT1POINT2:
+                return AUDIO_CHANNEL_OUT_2POINT1POINT2;
+            case AAUDIO_CHANNEL_3POINT0POINT2:
+                return AUDIO_CHANNEL_OUT_3POINT0POINT2;
+            case AAUDIO_CHANNEL_3POINT1POINT2:
+                return AUDIO_CHANNEL_OUT_3POINT1POINT2;
+            case AAUDIO_CHANNEL_QUAD:
+                return AUDIO_CHANNEL_OUT_QUAD;
+            case AAUDIO_CHANNEL_QUAD_SIDE:
+                return AUDIO_CHANNEL_OUT_QUAD_SIDE;
+            case AAUDIO_CHANNEL_SURROUND:
+                return AUDIO_CHANNEL_OUT_SURROUND;
+            case AAUDIO_CHANNEL_PENTA:
+                return AUDIO_CHANNEL_OUT_PENTA;
+            case AAUDIO_CHANNEL_5POINT1:
+                return AUDIO_CHANNEL_OUT_5POINT1;
+            case AAUDIO_CHANNEL_5POINT1_SIDE:
+                return AUDIO_CHANNEL_OUT_5POINT1_SIDE;
+            case AAUDIO_CHANNEL_5POINT1POINT2:
+                return AUDIO_CHANNEL_OUT_5POINT1POINT2;
+            case AAUDIO_CHANNEL_5POINT1POINT4:
+                return AUDIO_CHANNEL_OUT_5POINT1POINT4;
+            case AAUDIO_CHANNEL_6POINT1:
+                return AUDIO_CHANNEL_OUT_6POINT1;
+            case AAUDIO_CHANNEL_7POINT1:
+                return AUDIO_CHANNEL_OUT_7POINT1;
+            case AAUDIO_CHANNEL_7POINT1POINT2:
+                return AUDIO_CHANNEL_OUT_7POINT1POINT2;
+            case AAUDIO_CHANNEL_7POINT1POINT4:
+                return AUDIO_CHANNEL_OUT_7POINT1POINT4;
+            // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
+            // case AAUDIO_CHANNEL_9POINT1POINT4:
+            //     return AUDIO_CHANNEL_OUT_9POINT1POINT4;
+            // case AAUDIO_CHANNEL_9POINT1POINT6:
+            //     return AUDIO_CHANNEL_OUT_9POINT1POINT6;
+            default:
+                ALOGE("%s() %#x unrecognized", __func__, channelMask);
+                return AUDIO_CHANNEL_INVALID;
+        }
+    }
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelLayoutMask(
+        audio_channel_mask_t channelMask, bool isInput) {
+    if (isInput) {
+        switch (channelMask) {
+            case AUDIO_CHANNEL_IN_MONO:
+                return AAUDIO_CHANNEL_MONO;
+            case AUDIO_CHANNEL_IN_STEREO:
+                return AAUDIO_CHANNEL_STEREO;
+            case AUDIO_CHANNEL_IN_FRONT_BACK:
+                return AAUDIO_CHANNEL_FRONT_BACK;
+            case AUDIO_CHANNEL_IN_2POINT0POINT2:
+                return AAUDIO_CHANNEL_2POINT0POINT2;
+            case AUDIO_CHANNEL_IN_2POINT1POINT2:
+                return AAUDIO_CHANNEL_2POINT1POINT2;
+            case AUDIO_CHANNEL_IN_3POINT0POINT2:
+                return AAUDIO_CHANNEL_3POINT0POINT2;
+            case AUDIO_CHANNEL_IN_3POINT1POINT2:
+                return AAUDIO_CHANNEL_3POINT1POINT2;
+            case AUDIO_CHANNEL_IN_5POINT1:
+                return AAUDIO_CHANNEL_5POINT1;
+            default:
+                ALOGE("%s() %#x unrecognized", __func__, channelMask);
+                return AAUDIO_CHANNEL_INVALID;
+        }
+    } else {
+        switch (channelMask) {
+            case AUDIO_CHANNEL_OUT_MONO:
+                return AAUDIO_CHANNEL_MONO;
+            case AUDIO_CHANNEL_OUT_STEREO:
+                return AAUDIO_CHANNEL_STEREO;
+            case AUDIO_CHANNEL_OUT_2POINT1:
+                return AAUDIO_CHANNEL_2POINT1;
+            case AUDIO_CHANNEL_OUT_TRI:
+                return AAUDIO_CHANNEL_TRI;
+            case AUDIO_CHANNEL_OUT_TRI_BACK:
+                return AAUDIO_CHANNEL_TRI_BACK;
+            case AUDIO_CHANNEL_OUT_3POINT1:
+                return AAUDIO_CHANNEL_3POINT1;
+            case AUDIO_CHANNEL_OUT_2POINT0POINT2:
+                return AAUDIO_CHANNEL_2POINT0POINT2;
+            case AUDIO_CHANNEL_OUT_2POINT1POINT2:
+                return AAUDIO_CHANNEL_2POINT1POINT2;
+            case AUDIO_CHANNEL_OUT_3POINT0POINT2:
+                return AAUDIO_CHANNEL_3POINT0POINT2;
+            case AUDIO_CHANNEL_OUT_3POINT1POINT2:
+                return AAUDIO_CHANNEL_3POINT1POINT2;
+            case AUDIO_CHANNEL_OUT_QUAD:
+                return AAUDIO_CHANNEL_QUAD;
+            case AUDIO_CHANNEL_OUT_QUAD_SIDE:
+                return AAUDIO_CHANNEL_QUAD_SIDE;
+            case AUDIO_CHANNEL_OUT_SURROUND:
+                return AAUDIO_CHANNEL_SURROUND;
+            case AUDIO_CHANNEL_OUT_PENTA:
+                return AAUDIO_CHANNEL_PENTA;
+            case AUDIO_CHANNEL_OUT_5POINT1:
+                return AAUDIO_CHANNEL_5POINT1;
+            case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
+                return AAUDIO_CHANNEL_5POINT1_SIDE;
+            case AUDIO_CHANNEL_OUT_5POINT1POINT2:
+                return AAUDIO_CHANNEL_5POINT1POINT2;
+            case AUDIO_CHANNEL_OUT_5POINT1POINT4:
+                return AAUDIO_CHANNEL_5POINT1POINT4;
+            case AUDIO_CHANNEL_OUT_6POINT1:
+                return AAUDIO_CHANNEL_6POINT1;
+            case AUDIO_CHANNEL_OUT_7POINT1:
+                return AAUDIO_CHANNEL_7POINT1;
+            case AUDIO_CHANNEL_OUT_7POINT1POINT2:
+                return AAUDIO_CHANNEL_7POINT1POINT2;
+            case AUDIO_CHANNEL_OUT_7POINT1POINT4:
+                return AAUDIO_CHANNEL_7POINT1POINT4;
+            // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
+            // case AUDIO_CHANNEL_OUT_9POINT1POINT4:
+            //     return AAUDIO_CHANNEL_9POINT1POINT4;
+            // case AUDIO_CHANNEL_OUT_9POINT1POINT6:
+            //     return AAUDIO_CHANNEL_9POINT1POINT6;
+            default:
+                ALOGE("%s() %#x unrecognized", __func__, channelMask);
+                return AAUDIO_CHANNEL_INVALID;
+        }
+    }
+}
+
+int32_t AAudioConvert_channelMaskToCount(aaudio_channel_mask_t channelMask) {
+    return __builtin_popcount(channelMask & ~AAUDIO_CHANNEL_BIT_INDEX);
+}
+
+aaudio_channel_mask_t AAudioConvert_channelCountToMask(int32_t channelCount) {
+    if (channelCount < 0 || channelCount > AUDIO_CHANNEL_COUNT_MAX) {
+        return AAUDIO_CHANNEL_INVALID;
+    }
+
+    if (channelCount == 0) {
+        return AAUDIO_UNSPECIFIED;
+    }
+
+    // Return an index mask with one bit set per channel.
+    return AAUDIO_CHANNEL_BIT_INDEX | ((1 << channelCount) - 1);
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelIndexMask(
+        audio_channel_mask_t channelMask) {
+    if (audio_channel_mask_get_representation(channelMask) != AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+        ALOGE("%s() %#x not an index mask", __func__, channelMask);
+        return AAUDIO_CHANNEL_INVALID;
+    }
+    return (channelMask & ~AUDIO_CHANNEL_INDEX_HDR) | AAUDIO_CHANNEL_BIT_INDEX;
+}
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelIndexMask(
+        aaudio_channel_mask_t channelMask) {
+    if (!AAudio_isChannelIndexMask(channelMask)) {
+        ALOGE("%s() %#x not an index mask", __func__, channelMask);
+        return AUDIO_CHANNEL_INVALID;
+    }
+    return audio_channel_mask_for_index_assignment_from_count(
+            AAudioConvert_channelMaskToCount(channelMask));
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelMask(
+        audio_channel_mask_t channelMask, bool isInput, bool indexMaskRequired) {
+    if (audio_channel_mask_get_representation(channelMask) == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+        return AAudioConvert_androidToAAudioChannelIndexMask(channelMask);
+    }
+    if (indexMaskRequired) {
+        // Require index mask, `channelMask` here is a position mask.
+        const int channelCount = isInput ? audio_channel_count_from_in_mask(channelMask)
+                                         : audio_channel_count_from_out_mask(channelMask);
+        return AAudioConvert_channelCountToMask(channelCount);
+    }
+    return AAudioConvert_androidToAAudioChannelLayoutMask(channelMask, isInput);
+}
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelMask(
+        aaudio_channel_mask_t channelMask, bool isInput) {
+    return AAudio_isChannelIndexMask(channelMask)
+            ? AAudioConvert_aaudioToAndroidChannelIndexMask(channelMask)
+            : AAudioConvert_aaudioToAndroidChannelLayoutMask(channelMask, isInput);
+}
+
+bool AAudio_isChannelIndexMask(aaudio_channel_mask_t channelMask) {
+    return (channelMask & AAUDIO_CHANNEL_BIT_INDEX) == AAUDIO_CHANNEL_BIT_INDEX;
+}
+
+audio_channel_mask_t AAudio_getChannelMaskForOpen(
+        aaudio_channel_mask_t channelMask, int32_t samplesPerFrame, bool isInput) {
+    if (channelMask != AAUDIO_UNSPECIFIED) {
+        if (AAudio_isChannelIndexMask(channelMask) && samplesPerFrame <= 2) {
+            // When it is an index mask and the count is less than 3, use a position mask
+            // instead of an index mask for opening a stream. This may need to be revisited
+            // when making channel index mask public.
+            return isInput ? audio_channel_in_mask_from_count(samplesPerFrame)
+                           : audio_channel_out_mask_from_count(samplesPerFrame);
+        }
+        return AAudioConvert_aaudioToAndroidChannelMask(channelMask, isInput);
+    }
+
+    // Return stereo when unspecified.
+    return isInput ? AUDIO_CHANNEL_IN_STEREO : AUDIO_CHANNEL_OUT_STEREO;
+}
+
 int32_t AAudioConvert_framesToBytes(int32_t numFrames,
                                     int32_t bytesPerFrame,
                                     int32_t *sizeInBytes) {
@@ -276,45 +544,6 @@
     return AAUDIO_OK;
 }
 
-static int32_t AAudioProperty_getMMapProperty(const char *propName,
-                                              int32_t defaultValue,
-                                              const char * caller) {
-    int32_t prop = property_get_int32(propName, defaultValue);
-    switch (prop) {
-        case AAUDIO_UNSPECIFIED:
-        case AAUDIO_POLICY_NEVER:
-        case AAUDIO_POLICY_ALWAYS:
-        case AAUDIO_POLICY_AUTO:
-            break;
-        default:
-            ALOGE("%s: invalid = %d", caller, prop);
-            prop = defaultValue;
-            break;
-    }
-    return prop;
-}
-
-int32_t AAudioProperty_getMMapPolicy() {
-    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_POLICY,
-                                          AAUDIO_UNSPECIFIED, __func__);
-}
-
-int32_t AAudioProperty_getMMapExclusivePolicy() {
-    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY,
-                                          AAUDIO_UNSPECIFIED, __func__);
-}
-
-int32_t AAudioProperty_getMixerBursts() {
-    const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
-    const int32_t maxBursts = 1024; // arbitrary
-    int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
-    if (prop < 1 || prop > maxBursts) {
-        ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
-        prop = defaultBursts;
-    }
-    return prop;
-}
-
 int32_t AAudioProperty_getWakeupDelayMicros() {
     const int32_t minMicros = 0; // arbitrary
     const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
@@ -345,18 +574,6 @@
     return prop;
 }
 
-int32_t AAudioProperty_getHardwareBurstMinMicros() {
-    const int32_t defaultMicros = 1000; // arbitrary
-    const int32_t maxMicros = 1000 * 1000; // arbitrary
-    int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
-    if (prop < 1 || prop > maxMicros) {
-        ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
-              prop, defaultMicros);
-        prop = defaultMicros;
-    }
-    return prop;
-}
-
 static int32_t AAudioProperty_getMMapOffsetMicros(const char *functionName,
         const char *propertyName) {
     const int32_t minMicros = -20000; // arbitrary
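
For readers skimming the rework of AAudioConvert_allowCapturePolicyToAudioFlagsMask() above: instead of returning early on the capture policy alone, the function now ORs together contributions from three independent settings (capture policy, spatialization behavior, content-spatialized flag). A standalone sketch of that folding pattern, using placeholder enums rather than the real audio_flags_mask_t constants:

    #include <cstdint>
    #include <cstdio>

    enum Flags : uint32_t {
        FLAG_NONE                = 0,
        FLAG_NO_MEDIA_PROJECTION = 1u << 0,  // stands in for AUDIO_FLAG_NO_MEDIA_PROJECTION
        FLAG_NO_SYSTEM_CAPTURE   = 1u << 1,  // stands in for AUDIO_FLAG_NO_SYSTEM_CAPTURE
        FLAG_NEVER_SPATIALIZE    = 1u << 2,  // stands in for AUDIO_FLAG_NEVER_SPATIALIZE
        FLAG_CONTENT_SPATIALIZED = 1u << 3,  // stands in for AUDIO_FLAG_CONTENT_SPATIALIZED
    };

    enum CapturePolicy { CAPTURE_BY_ALL, CAPTURE_BY_SYSTEM, CAPTURE_BY_NONE };
    enum Spatialization { SPATIALIZATION_AUTO, SPATIALIZATION_NEVER };

    static uint32_t buildFlags(CapturePolicy policy, Spatialization behavior, bool spatialized) {
        uint32_t flags = FLAG_NONE;
        if (policy == CAPTURE_BY_SYSTEM) flags |= FLAG_NO_MEDIA_PROJECTION;
        if (policy == CAPTURE_BY_NONE)   flags |= FLAG_NO_MEDIA_PROJECTION | FLAG_NO_SYSTEM_CAPTURE;
        if (behavior == SPATIALIZATION_NEVER) flags |= FLAG_NEVER_SPATIALIZE;
        if (spatialized)                      flags |= FLAG_CONTENT_SPATIALIZED;
        return flags;
    }

    int main() {
        // CAPTURE_BY_NONE + NEVER spatialize + spatialized content -> all four placeholder bits.
        printf("flags = 0x%x\n", buildFlags(CAPTURE_BY_NONE, SPATIALIZATION_NEVER, true));
        return 0;
    }
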
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 82eb77d..b59ce1c 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -91,35 +91,43 @@
  * @return internal audio flags mask
  */
 audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
-        aaudio_allowed_capture_policy_t policy);
+        aaudio_allowed_capture_policy_t policy,
+        aaudio_spatialization_behavior_t spatializationBehavior,
+        bool isContentSpatialized);
 
 audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
         bool privacySensitive);
 
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelLayoutMask(
+        aaudio_channel_mask_t channelMask, bool isInput);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelLayoutMask(
+        audio_channel_mask_t channelMask, bool isInput);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelIndexMask(
+        audio_channel_mask_t channelMask);
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelIndexMask(
+        aaudio_channel_mask_t channelMask);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelMask(
+        audio_channel_mask_t channelMask, bool isInput, bool indexMaskRequired);
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelMask(
+        aaudio_channel_mask_t channelMask, bool isInput);
+
+bool AAudio_isChannelIndexMask(aaudio_channel_mask_t channelMask);
+
+int32_t AAudioConvert_channelMaskToCount(aaudio_channel_mask_t channelMask);
+
+aaudio_channel_mask_t AAudioConvert_channelCountToMask(int32_t channelCount);
+
+audio_channel_mask_t AAudio_getChannelMaskForOpen(
+        aaudio_channel_mask_t channelMask, int32_t samplesPerFrame, bool isInput);
+
 // Note that this code may be replaced by Settings or by some other system configuration tool.
 
 /**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
-int32_t AAudioProperty_getMMapPolicy();
-#define AAUDIO_PROP_MMAP_POLICY           "aaudio.mmap_policy"
-
-/**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
-int32_t AAudioProperty_getMMapExclusivePolicy();
-#define AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY "aaudio.mmap_exclusive_policy"
-
-/**
- * Read system property.
- * @return number of bursts per AAudio service mixer cycle
- */
-int32_t AAudioProperty_getMixerBursts();
-#define AAUDIO_PROP_MIXER_BURSTS           "aaudio.mixer_bursts"
-
-/**
  * Read a system property that specifies the number of extra microseconds that a thread
  * should sleep when waiting for another thread to service a FIFO. This is used
  * to avoid the waking thread from being overly optimistic about the other threads
@@ -140,19 +148,6 @@
 #define AAUDIO_PROP_MINIMUM_SLEEP_USEC      "aaudio.minimum_sleep_usec"
 
 /**
- * Read system property.
- * This is handy in case the DMA is bursting too quickly for the CPU to keep up.
- * For example, there may be a DMA burst every 100 usec but you only
- * want to feed the MMAP buffer every 2000 usec.
- *
- * This will affect the framesPerBurst for an MMAP stream.
- *
- * @return minimum number of microseconds for a MMAP HW burst
- */
-int32_t AAudioProperty_getHardwareBurstMinMicros();
-#define AAUDIO_PROP_HW_BURST_MIN_USEC      "aaudio.hw_burst_min_usec"
-
-/**
  * Read a system property that specifies an offset that will be added to MMAP timestamps.
  * This can be used to correct bias in the timestamp.
  * It can also be used to analyze the time distribution of the timestamp
@@ -198,7 +193,7 @@
  * @return true if f() eventually returns true.
  */
 static inline bool AAudio_tryUntilTrue(
-        std::function<bool()> f, int times, int sleepMs) {
+        const std::function<bool()>& f, int times, int sleepMs) {
     static const useconds_t US_PER_MS = 1000;
 
     sleepMs = std::max(sleepMs, 0);
@@ -270,9 +265,7 @@
 
 class Timestamp {
 public:
-    Timestamp()
-            : mPosition(0)
-            , mNanoseconds(0) {}
+    Timestamp() = default;
     Timestamp(int64_t position, int64_t nanoseconds)
             : mPosition(position)
             , mNanoseconds(nanoseconds) {}
@@ -283,8 +276,8 @@
 
 private:
     // These cannot be const because we need to implement the copy assignment operator.
-    int64_t mPosition;
-    int64_t mNanoseconds;
+    int64_t mPosition{0};
+    int64_t mNanoseconds{0};
 };
 
 
@@ -318,4 +311,36 @@
     std::atomic<int> mRequested{0};
     std::atomic<int> mAcknowledged{0};
 };
+
+enum {
+    /**
+     * Audio channel index mask, only used internally.
+     */
+    AAUDIO_CHANNEL_BIT_INDEX = 0x80000000,
+    AAUDIO_CHANNEL_INDEX_MASK_1 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 1) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_2 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 2) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_3 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 3) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_4 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 4) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_5 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 5) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_6 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 6) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_7 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 7) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_8 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 8) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_9 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 9) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_10 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 10) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_11 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 11) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_12 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 12) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_13 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 13) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_14 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 14) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_15 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 15) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_16 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 16) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_17 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 17) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_18 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 18) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_19 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 19) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_20 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 20) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_21 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 21) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_22 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 22) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_23 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 23) - 1,
+    AAUDIO_CHANNEL_INDEX_MASK_24 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 24) - 1,
+};
+
 #endif //UTILITY_AAUDIO_UTILITIES_H
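
The AAUDIO_CHANNEL_INDEX_MASK_* constants above all follow one pattern: the top bit (AAUDIO_CHANNEL_BIT_INDEX) marks the value as an index mask and the low bits carry one bit per channel. A standalone sketch of that encoding and the popcount round trip performed by AAudioConvert_channelCountToMask() and AAudioConvert_channelMaskToCount():

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kChannelBitIndex = 0x80000000u;  // mirrors AAUDIO_CHANNEL_BIT_INDEX

    static uint32_t countToIndexMask(int channelCount) {
        // One bit per channel in the low bits, index marker in the top bit.
        return kChannelBitIndex | ((1u << channelCount) - 1u);
    }

    static int indexMaskToCount(uint32_t mask) {
        // Strip the marker bit and count the remaining channel bits.
        return __builtin_popcount(mask & ~kChannelBitIndex);
    }

    int main() {
        for (int n = 1; n <= 8; ++n) {
            const uint32_t mask = countToIndexMask(n);
            printf("%d channels -> 0x%08x -> %d channels\n", n, mask, indexMaskToCount(mask));
        }
        return 0;
    }
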
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.h b/media/libaaudio/src/utility/FixedBlockAdapter.h
index 4dc7e68..290e473 100644
--- a/media/libaaudio/src/utility/FixedBlockAdapter.h
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.h
@@ -35,7 +35,7 @@
 class FixedBlockAdapter
 {
 public:
-    FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
+    explicit FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
     : mFixedBlockProcessor(fixedBlockProcessor) {}
 
     virtual ~FixedBlockAdapter() = default;
diff --git a/media/libaaudio/src/utility/FixedBlockReader.h b/media/libaaudio/src/utility/FixedBlockReader.h
index 128dd52..dc82416 100644
--- a/media/libaaudio/src/utility/FixedBlockReader.h
+++ b/media/libaaudio/src/utility/FixedBlockReader.h
@@ -30,7 +30,7 @@
 class FixedBlockReader : public FixedBlockAdapter
 {
 public:
-    FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
+    explicit FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
 
     virtual ~FixedBlockReader() = default;
 
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.h b/media/libaaudio/src/utility/FixedBlockWriter.h
index f1d917c..3e89b5d 100644
--- a/media/libaaudio/src/utility/FixedBlockWriter.h
+++ b/media/libaaudio/src/utility/FixedBlockWriter.h
@@ -28,7 +28,7 @@
 class FixedBlockWriter : public FixedBlockAdapter
 {
 public:
-    FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
+    explicit FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
 
     virtual ~FixedBlockWriter() = default;
 
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 63add4e..51eb69b 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -30,8 +30,8 @@
 class MonotonicCounter {
 
 public:
-    MonotonicCounter() {};
-    virtual ~MonotonicCounter() {};
+    MonotonicCounter() = default;
+    virtual ~MonotonicCounter() = default;
 
     /**
      * @return current value of the counter
@@ -41,7 +41,12 @@
     }
 
     /**
-     * advance the current value to match the counter
+     * Advance the current value to match the counter.
+     *
+     * Note that it will take several million years for the 64-bit
+     * counters to wrap around.
+     * So we do not use __builtin_sub_overflow.
+     * We want to know if overflow happens because of a bug.
      */
     void catchUpTo(int64_t counter) {
         if ((counter - mCounter64) > 0) {
@@ -74,7 +79,8 @@
      * @return current value of the 64-bit counter
      */
     int64_t update32(int32_t counter32) {
-        int32_t delta = counter32 - mCounter32;
+        int32_t delta;
+        __builtin_sub_overflow(counter32, mCounter32, &delta);
         // protect against the mCounter64 going backwards
         if (delta > 0) {
             mCounter64 += delta;
@@ -108,5 +114,4 @@
     int32_t mCounter32 = 0;
 };
 
-
 #endif //UTILITY_MONOTONIC_COUNTER_H
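
The switch to __builtin_sub_overflow() in update32() above matters because the 32-bit counter is expected to wrap: subtracting two int32_t values across the INT32_MAX boundary with plain "-" is signed overflow, while the builtin wraps explicitly and still yields the small positive delta. A standalone illustration using the same values exercised by test_monotonic_counter.cpp later in this patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int32_t previous = 0x7FFFFFF0;            // just below INT32_MAX
        const int32_t current  = (int32_t)0x80000010;   // the 32-bit counter has wrapped
        int32_t delta;
        __builtin_sub_overflow(current, previous, &delta);  // delta == 0x20
        printf("delta = %d\n", delta);

        // Folding the delta into a 64-bit accumulator keeps the counter monotonic.
        int64_t counter64 = previous;
        counter64 += delta;
        printf("counter64 = 0x%llx\n", (unsigned long long)counter64);  // 0x80000010
        return 0;
    }
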
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 98e9727..4b45909 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -13,6 +13,11 @@
         "-Wall",
         "-Werror",
     ],
+
+    sanitize: {
+        integer_overflow: true,
+        misc_undefined: ["bounds"],
+    },
 }
 
 cc_test {
@@ -48,7 +53,7 @@
     shared_libs: ["libaaudio_internal"],
 }
 
-cc_test {
+cc_binary {
     name: "test_timestamps",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_timestamps.cpp"],
@@ -60,121 +65,71 @@
     name: "test_open_params",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_open_params.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_no_close",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_no_close.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_aaudio_recovery",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_recovery.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_n_streams",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_n_streams.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_bad_disconnect",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_bad_disconnect.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_various",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_various.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_session_id",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_session_id.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_aaudio_monkey",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_aaudio_monkey.cpp"],
     header_libs: ["libaaudio_example_utils"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_attributes",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_attributes.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_interference",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_interference.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
@@ -197,27 +152,29 @@
 }
 
 cc_test {
-    name: "test_return_stop",
+    name: "test_monotonic_counter",
     defaults: ["libaaudio_tests_defaults"],
-    srcs: ["test_return_stop.cpp"],
+    srcs: ["test_monotonic_counter.cpp"],
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libbinder",
         "libcutils",
         "libutils",
     ],
 }
 
+cc_binary {
+    name: "test_return_stop",
+    defaults: ["libaaudio_tests_defaults"],
+    srcs: ["test_return_stop.cpp"],
+    shared_libs: ["libaaudio"],
+}
+
 cc_test {
     name: "test_callback_race",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_callback_race.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
@@ -238,7 +195,7 @@
     ],
 }
 
-cc_test {
+cc_binary {
     name: "test_steal_exclusive",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_steal_exclusive.cpp"],
@@ -251,15 +208,9 @@
     ],
 }
 
-
-cc_test {
+cc_binary {
     name: "test_disconnect_race",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_disconnect_race.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
index d540866..b88d562 100644
--- a/media/libaaudio/tests/test_attributes.cpp
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -16,6 +16,10 @@
 
 // Test AAudio attributes such as Usage, ContentType and InputPreset.
 
+// TODO Many of these tests are duplicates of CTS tests in
+// "test_aaudio_attributes.cpp". That other file is more current.
+// So these tests could be deleted.
+
 #include <stdio.h>
 #include <unistd.h>
 
@@ -91,7 +95,7 @@
     aaudio_allowed_capture_policy_t expectedCapturePolicy =
             (capturePolicy == DONT_SET || capturePolicy == AAUDIO_UNSPECIFIED)
             ? AAUDIO_ALLOW_CAPTURE_BY_ALL // default
-            : preset;
+            : capturePolicy;
     EXPECT_EQ(expectedCapturePolicy, AAudioStream_getAllowedCapturePolicy(aaudioStream));
 
     bool expectedPrivacyMode =
@@ -132,10 +136,7 @@
     AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
     AAUDIO_USAGE_GAME,
     AAUDIO_USAGE_ASSISTANT,
-    AAUDIO_SYSTEM_USAGE_EMERGENCY,
-    AAUDIO_SYSTEM_USAGE_SAFETY,
-    AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS,
-    AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT
+    // Note that the AAUDIO_SYSTEM_USAGE_* values require special permission.
 };
 
 static const aaudio_content_type_t sContentypes[] = {
diff --git a/media/libaaudio/tests/test_flowgraph.cpp b/media/libaaudio/tests/test_flowgraph.cpp
index d563a7e..0792fc5 100644
--- a/media/libaaudio/tests/test_flowgraph.cpp
+++ b/media/libaaudio/tests/test_flowgraph.cpp
@@ -23,6 +23,7 @@
 #include <gtest/gtest.h>
 
 #include "flowgraph/ClipToRange.h"
+#include "flowgraph/MonoBlend.h"
 #include "flowgraph/MonoToMultiConverter.h"
 #include "flowgraph/SourceFloat.h"
 #include "flowgraph/RampLinear.h"
@@ -76,31 +77,40 @@
 }
 
 TEST(test_flowgraph, module_ramp_linear) {
+    constexpr int singleNumOutput = 1;
     constexpr int rampSize = 5;
     constexpr int numOutput = 100;
     constexpr float value = 1.0f;
-    constexpr float target = 100.0f;
+    constexpr float initialTarget = 10.0f;
+    constexpr float finalTarget = 100.0f;
+    constexpr float tolerance = 0.0001f; // arbitrary
     float output[numOutput] = {};
     RampLinear rampLinear{1};
     SinkFloat sinkFloat{1};
 
     rampLinear.input.setValue(value);
     rampLinear.setLengthInFrames(rampSize);
-    rampLinear.setTarget(target);
-    rampLinear.forceCurrent(0.0f);
-
     rampLinear.output.connect(&sinkFloat.input);
 
+    // Check that the values go to the initial target instantly.
+    rampLinear.setTarget(initialTarget);
+    int32_t singleNumRead = sinkFloat.read(output, singleNumOutput);
+    ASSERT_EQ(singleNumRead, singleNumOutput);
+    EXPECT_NEAR(value * initialTarget, output[0], tolerance);
+
+    // Now set target and check that the linear ramp works as expected.
+    rampLinear.setTarget(finalTarget);
     int32_t numRead = sinkFloat.read(output, numOutput);
+    const float incrementSize = (finalTarget - initialTarget) / rampSize;
     ASSERT_EQ(numOutput, numRead);
-    constexpr float tolerance = 0.0001f; // arbitrary
+
     int i = 0;
     for (; i < rampSize; i++) {
-        float expected = i * value * target / rampSize;
+        float expected = value * (initialTarget + i * incrementSize);
         EXPECT_NEAR(expected, output[i], tolerance);
     }
     for (; i < numOutput; i++) {
-        float expected = value * target;
+        float expected = value * finalTarget;
         EXPECT_NEAR(expected, output[i], tolerance);
     }
 }
@@ -155,3 +165,29 @@
         EXPECT_NEAR(expected[i], output[i], tolerance);
     }
 }
+
+TEST(test_flowgraph, module_mono_blend) {
+    // Two channel to two channel with 3 inputs and outputs.
+    constexpr int numChannels = 2;
+    constexpr int numFrames = 3;
+
+    static const float input[] = {-0.7, 0.5, -0.25, 1.25, 1000, 2000};
+    static const float expected[] = {-0.1, -0.1, 0.5, 0.5, 1500, 1500};
+    float output[100];
+    SourceFloat sourceFloat{numChannels};
+    MonoBlend monoBlend{numChannels};
+    SinkFloat sinkFloat{numChannels};
+
+    sourceFloat.setData(input, numFrames);
+
+    sourceFloat.output.connect(&monoBlend.input);
+    monoBlend.output.connect(&sinkFloat.input);
+
+    int32_t numRead = sinkFloat.read(output, numFrames);
+    ASSERT_EQ(numRead, numFrames);
+    constexpr float tolerance = 0.000001f; // arbitrary
+    for (int i = 0; i < numRead; i++) {
+        EXPECT_NEAR(expected[i], output[i], tolerance);
+    }
+}
+
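
The expected[] values in the new MonoBlend test are consistent with each output frame being the average of the frame's two input channels, written to both channels (an inference from the test data, not from the MonoBlend implementation itself). A quick standalone check of that arithmetic:

    #include <cstdio>

    int main() {
        const float input[] = {-0.7f, 0.5f, -0.25f, 1.25f, 1000.0f, 2000.0f};  // interleaved stereo
        for (int frame = 0; frame < 3; ++frame) {
            const float blend = 0.5f * (input[2 * frame] + input[2 * frame + 1]);
            printf("frame %d -> %g on both channels\n", frame, blend);  // -0.1, 0.5, 1500
        }
        return 0;
    }
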
diff --git a/media/libaaudio/tests/test_monotonic_counter.cpp b/media/libaaudio/tests/test_monotonic_counter.cpp
new file mode 100644
index 0000000..5cbbaf7
--- /dev/null
+++ b/media/libaaudio/tests/test_monotonic_counter.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Test MonotonicCounter
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include "utility/MonotonicCounter.h"
+
+TEST(test_monotonic_counter, builtin_wrap) {
+    int32_t x = 0x7FFFFFF0;
+    int32_t y = 0x80000010;
+    int32_t delta;
+    // delta = y - x; // This would cause a numeric overflow!
+    __builtin_sub_overflow(y, x, &delta);
+    ASSERT_EQ(0x20, delta);
+}
+
+// test updating past some overflow points
+TEST(test_monotonic_counter, mono_counter_update32_wrap) {
+    MonotonicCounter counter;
+    ASSERT_EQ(0, counter.get());
+
+    static constexpr uint32_t x = (uint32_t) 0x7FFFFFF0;
+    counter.update32(x);
+    ASSERT_EQ((int64_t)0x7FFFFFF0, counter.get());
+
+    static constexpr uint32_t y = (uint32_t) 0x80000010;
+    counter.update32(y);
+    ASSERT_EQ((int64_t)0x80000010, counter.get());
+
+    counter.update32(0);
+    ASSERT_EQ((int64_t)0x100000000, counter.get());
+}
+
+TEST(test_monotonic_counter, mono_counter_roundup) {
+    MonotonicCounter counter;
+    static constexpr uint32_t x = 2345;
+    counter.update32(x);
+    ASSERT_EQ((int64_t)x, counter.get());
+
+    counter.roundUp64(100);
+    ASSERT_EQ((int64_t)2400, counter.get());
+}
+
+TEST(test_monotonic_counter, mono_counter_catchup) {
+    MonotonicCounter counter;
+    counter.update32(7654);
+    counter.catchUpTo(5000); // already past 5000 so no change
+    ASSERT_EQ((int64_t)7654, counter.get());
+    counter.catchUpTo(9876); // jumps
+    ASSERT_EQ((int64_t)9876, counter.get());
+}
+
+TEST(test_monotonic_counter, mono_counter_increment) {
+    MonotonicCounter counter;
+    counter.update32(1000);
+    counter.increment(-234); // will not go backwards
+    ASSERT_EQ((int64_t)1000, counter.get());
+    counter.increment(96); // advances
+    ASSERT_EQ((int64_t)1096, counter.get());
+}
+
+TEST(test_monotonic_counter, mono_counter_reset) {
+    MonotonicCounter counter;
+    counter.update32(1000);
+    // Counter is monotonic and should not go backwards.
+    counter.update32(500); // No change because 32-bit counter is already past 1000.
+    ASSERT_EQ((int64_t)1000, counter.get());
+
+    counter.reset32();
+    counter.update32(500);
+    ASSERT_EQ((int64_t)1500, counter.get());
+}
diff --git a/media/libaaudio/tests/test_steal_exclusive.cpp b/media/libaaudio/tests/test_steal_exclusive.cpp
index 05c560d..ca4f3d6 100644
--- a/media/libaaudio/tests/test_steal_exclusive.cpp
+++ b/media/libaaudio/tests/test_steal_exclusive.cpp
@@ -110,7 +110,11 @@
         mOpenDelayMillis = openDelayMillis;
     }
 
-    void restartStream() {
+    void setCloseEnabled(bool enabled) {
+        mCloseEnabled = enabled;
+    }
+
+    aaudio_result_t restartStream() {
         int retriesLeft = mMaxRetries;
         aaudio_result_t result;
         do {
@@ -126,6 +130,7 @@
                     mName.c_str(),
                     AAudio_convertResultToText(result));
         } while (retriesLeft-- > 0 && result != AAUDIO_OK);
+        return result;
     }
 
     aaudio_data_callback_result_t onAudioReady(
@@ -189,10 +194,12 @@
         std::lock_guard<std::mutex> lock(mLock);
         aaudio_result_t result = AAUDIO_OK;
         if (mStream != nullptr) {
-            result = AAudioStream_close(mStream);
-            if (result != AAUDIO_OK) {
-                printf("AAudioStream_close returned %s\n",
-                       AAudio_convertResultToText(result));
+            if (mCloseEnabled) {
+                result = AAudioStream_close(mStream);
+                printf("AAudioStream_close() returned %s\n",
+                        AAudio_convertResultToText(result));
+            } else {
+                printf("AAudioStream_close() DISABLED!\n");
             }
             mStream = nullptr;
         }
@@ -232,6 +239,12 @@
         return AAudioStream_requestStart(mStream);
     }
 
+    aaudio_result_t pause() {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStream == nullptr) return 0;
+        return AAudioStream_requestPause(mStream);
+    }
+
     aaudio_result_t stop() {
         std::lock_guard<std::mutex> lock(mLock);
         if (mStream == nullptr) return 0;
@@ -287,6 +300,7 @@
     std::string         mName;
     int                 mMaxRetries = 1;
     int                 mOpenDelayMillis = 0;
+    bool                mCloseEnabled = true;
 };
 
 // Callback function that fills the audio output buffer.
@@ -319,11 +333,13 @@
 }
 
 static void s_usage() {
-    printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] -s\n");
+    printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] [-p{pausedTime}] [-s] [-c{flag}]\n");
     printf("     -i direction INPUT, otherwise OUTPUT\n");
-    printf("     -d delay open by milliseconds, default = 0\n");
-    printf("     -r max retries in the error callback, default = 1\n");
+    printf("     -d Delay open by milliseconds, default = 0\n");
+    printf("     -p Pause first stream, then sleep for msec before opening the second stream, default = 0\n");
+    printf("     -r max Retries in the error callback, default = 1\n");
     printf("     -s try to open in SHARED mode\n");
+    printf("     -c enable or disable Closing of the stream with 0/1, default = 1\n");
 }
 
 int main(int argc, char ** argv) {
@@ -334,7 +350,9 @@
     int errorCount = 0;
     int maxRetries = 1;
     int openDelayMillis = 0;
+    bool closeEnabled = true;
     aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+    int pausedMillis = 0;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
@@ -348,12 +366,18 @@
         if (arg[0] == '-') {
             char option = arg[1];
             switch (option) {
+                case 'c':
+                    closeEnabled = atoi(&arg[2]) != 0;
+                    break;
                 case 'd':
                     openDelayMillis = atoi(&arg[2]);
                     break;
                 case 'i':
                     direction = AAUDIO_DIRECTION_INPUT;
                     break;
+                case 'p':
+                    pausedMillis = atoi(&arg[2]);
+                    break;
                 case 'r':
                     maxRetries = atoi(&arg[2]);
                     break;
@@ -376,6 +400,8 @@
     thief.setOpenDelayMillis(openDelayMillis);
     victim.setMaxRetries(maxRetries);
     thief.setMaxRetries(maxRetries);
+    victim.setCloseEnabled(closeEnabled);
+    thief.setCloseEnabled(closeEnabled);
 
     result = victim.openAudioStream(direction, requestedSharingMode);
     if (result != AAUDIO_OK) {
@@ -414,6 +440,12 @@
         }
     }
 
+    if (pausedMillis > 0) {
+        printf("Pausing the VICTIM for %d millis before starting THIEF -----\n", pausedMillis);
+        victim.pause();
+        usleep(pausedMillis * 1000);
+    }
+
     printf("Trying to start the THIEF stream, which may steal the VICTIM MMAP resource -----\n");
     result = thief.openAudioStream(direction, requestedSharingMode);
     if (result != AAUDIO_OK) {
@@ -429,6 +461,25 @@
         errorCount++;
     }
 
+    if (pausedMillis > 0) {
+        result = victim.start();
+        printf("Restarting VICTIM, AAudioStream_requestStart(VICTIM) returned %d "
+               ">>>>>>>>>>>>>>>>>>>>>>\n", result);
+        if (result == AAUDIO_ERROR_DISCONNECTED) {
+            // The stream was disconnected because the THIEF stole the MMAP resource.
+            printf("VICTIM was disconnected while paused because the THIEF "
+                   "stole the resource >>>>>>>>>>>>>>>>>>>>>>\n");
+            result = victim.restartStream();
+            printf("Restarting VICTIM, AAudioStream_requestStart(VICTIM) returned %d "
+                   ">>>>>>>>>>>>>>>>>>>>>>\n", result);
+            if (result != AAUDIO_OK) {
+                errorCount++;
+            }
+        } else {
+            errorCount++;
+        }
+    }
+
     // Give stream time to advance.
     usleep(SLEEP_DURATION_MSEC * 1000);
 
@@ -442,7 +493,7 @@
     }
 
     LOGI("Both streams running. Ask user to plug in headset. ====");
-    printf("\n====\nPlease PLUG IN A HEADSET now!\n====\n\n");
+    printf("\n====\nPlease PLUG IN A HEADSET now! - OPTIONAL\n====\n\n");
 
     if (result == AAUDIO_OK) {
         const int watchLoops = DUET_DURATION_MSEC / SLEEP_DURATION_MSEC;
diff --git a/media/libaaudio/tests/test_various.cpp b/media/libaaudio/tests/test_various.cpp
index cbf863f..b68fc7b 100644
--- a/media/libaaudio/tests/test_various.cpp
+++ b/media/libaaudio/tests/test_various.cpp
@@ -25,6 +25,7 @@
 
 #include <gtest/gtest.h>
 #include <unistd.h>
+#include <thread>
 
 // Callback function that does nothing.
 aaudio_data_callback_result_t NoopDataCallbackProc(
@@ -51,6 +52,7 @@
 }
 
 constexpr int64_t NANOS_PER_MILLISECOND = 1000 * 1000;
+constexpr int64_t MICROS_PER_MILLISECOND = 1000;
 
 void checkReleaseThenClose(aaudio_performance_mode_t perfMode,
         aaudio_sharing_mode_t sharingMode,
@@ -762,6 +764,58 @@
     checkCallbackOnce(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
 }
 
+void waitForStateChangeToClosingorClosed(AAudioStream **stream, std::atomic<bool>* isReady)
+{
+    *isReady = true;
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(*stream,
+                                                         AAUDIO_STREAM_STATE_OPEN, &state,
+                                                         10000 * NANOS_PER_MILLISECOND));
+    if ((state != AAUDIO_STREAM_STATE_CLOSING) && (state != AAUDIO_STREAM_STATE_CLOSED)) {
+        FAIL() << "ERROR - State not closing or closed. Current state: " <<
+                AAudio_convertStreamStateToText(state);
+    }
+}
+
+void testWaitForStateChangeClose(aaudio_performance_mode_t perfMode) {
+    AAudioStreamBuilder *aaudioBuilder = nullptr;
+    AAudioStream *aaudioStream = nullptr;
+
+    ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+    AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+    ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+    // Verify Open State
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+                                                         AAUDIO_STREAM_STATE_UNKNOWN, &state,
+                                                         1000 * NANOS_PER_MILLISECOND));
+    EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+    std::atomic<bool> isWaitThreadReady{false};
+
+    // Spawn a new thread to wait for the state change
+    std::thread waitThread (waitForStateChangeToClosingorClosed, &aaudioStream,
+                            &isWaitThreadReady);
+
+    // Wait for worker thread to be ready
+    while (!isWaitThreadReady) {
+        usleep(MICROS_PER_MILLISECOND);
+    }
+    // Sleep an additional millisecond to ensure AAudioStream_waitForStateChange() was called.
+    usleep(MICROS_PER_MILLISECOND);
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+    waitThread.join();
+}
+
+TEST(test_various, wait_for_state_change_close_none) {
+    testWaitForStateChangeClose(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_various, wait_for_state_change_close_lowlat) {
+    testWaitForStateChangeClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
 // ************************************************************
 struct WakeUpCallbackData {
     void wakeOther() {
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 321e7f9..11724e0 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -14,6 +14,11 @@
  * limitations under the License.
  */
 
+#include <algorithm>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
 #define LOG_TAG "AidlConversion"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
@@ -21,6 +26,7 @@
 #include "media/AidlConversion.h"
 
 #include <media/ShmemCompat.h>
+#include <media/stagefright/foundation/MediaDefs.h>
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // Utilities
@@ -28,6 +34,40 @@
 namespace android {
 
 using base::unexpected;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioContentType;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioEncapsulationMetadataType;
+using media::audio::common::AudioEncapsulationMode;
+using media::audio::common::AudioEncapsulationType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::AudioGain;
+using media::audio::common::AudioGainConfig;
+using media::audio::common::AudioGainMode;
+using media::audio::common::AudioInputFlags;
+using media::audio::common::AudioIoFlags;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioOutputFlags;
+using media::audio::common::AudioPortDeviceExt;
+using media::audio::common::AudioPortExt;
+using media::audio::common::AudioPortMixExt;
+using media::audio::common::AudioPortMixExtUseCase;
+using media::audio::common::AudioProfile;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStandard;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::ExtraAudioDescriptor;
+using media::audio::common::Int;
+using media::audio::common::PcmType;
 
 namespace {
 
@@ -219,75 +259,7 @@
     return std::string(legacy.c_str());
 }
 
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
-        media::AudioPortConfigType aidl) {
-    switch (aidl) {
-        case media::AudioPortConfigType::SAMPLE_RATE:
-            return AUDIO_PORT_CONFIG_SAMPLE_RATE;
-        case media::AudioPortConfigType::CHANNEL_MASK:
-            return AUDIO_PORT_CONFIG_CHANNEL_MASK;
-        case media::AudioPortConfigType::FORMAT:
-            return AUDIO_PORT_CONFIG_FORMAT;
-        case media::AudioPortConfigType::GAIN:
-            return AUDIO_PORT_CONFIG_GAIN;
-        case media::AudioPortConfigType::FLAGS:
-            return AUDIO_PORT_CONFIG_FLAGS;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
-        int32_t legacy) {
-    switch (legacy) {
-        case AUDIO_PORT_CONFIG_SAMPLE_RATE:
-            return media::AudioPortConfigType::SAMPLE_RATE;
-        case AUDIO_PORT_CONFIG_CHANNEL_MASK:
-            return media::AudioPortConfigType::CHANNEL_MASK;
-        case AUDIO_PORT_CONFIG_FORMAT:
-            return media::AudioPortConfigType::FORMAT;
-        case AUDIO_PORT_CONFIG_GAIN:
-            return media::AudioPortConfigType::GAIN;
-        case AUDIO_PORT_CONFIG_FLAGS:
-            return media::AudioPortConfigType::FLAGS;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl) {
-    return convertBitmask<unsigned int, int32_t, int, media::AudioPortConfigType>(
-            aidl, aidl2legacy_AudioPortConfigType_int32_t,
-            // AudioPortConfigType enum is index-based.
-            indexToEnum_index<media::AudioPortConfigType>,
-            // AUDIO_PORT_CONFIG_* flags are mask-based.
-            enumToMask_bitmask<unsigned int, int>);
-}
-
-ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy) {
-    return convertBitmask<int32_t, unsigned int, media::AudioPortConfigType, int>(
-            legacy, legacy2aidl_int32_t_AudioPortConfigType,
-            // AUDIO_PORT_CONFIG_* flags are mask-based.
-            indexToEnum_bitmask<unsigned>,
-            // AudioPortConfigType enum is index-based.
-            enumToMask_index<int32_t, media::AudioPortConfigType>);
-}
-
-ConversionResult<audio_channel_mask_t> aidl2legacy_int32_t_audio_channel_mask_t(int32_t aidl) {
-    // TODO(ytai): should we convert bit-by-bit?
-    // One problem here is that the representation is both opaque and is different based on the
-    // context (input vs. output). Can determine based on type and role, as per useInChannelMask().
-    return convertReinterpret<audio_channel_mask_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_channel_mask_t_int32_t(audio_channel_mask_t legacy) {
-    // TODO(ytai): should we convert bit-by-bit?
-    // One problem here is that the representation is both opaque and is different based on the
-    // context (input vs. output). Can determine based on type and role, as per useInChannelMask().
-    return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_io_config_event> aidl2legacy_AudioIoConfigEvent_audio_io_config_event(
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
         media::AudioIoConfigEvent aidl) {
     switch (aidl) {
         case media::AudioIoConfigEvent::OUTPUT_REGISTERED:
@@ -312,8 +284,8 @@
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
-        audio_io_config_event legacy) {
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+        audio_io_config_event_t legacy) {
     switch (legacy) {
         case AUDIO_OUTPUT_REGISTERED:
             return media::AudioIoConfigEvent::OUTPUT_REGISTERED;
@@ -393,81 +365,1029 @@
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
-        media::audio::common::AudioFormat aidl) {
-    // This relies on AudioFormat being kept in sync with audio_format_t.
-    static_assert(sizeof(media::audio::common::AudioFormat) == sizeof(audio_format_t));
-    return static_cast<audio_format_t>(aidl);
+namespace {
+
+namespace detail {
+using AudioChannelBitPair = std::pair<audio_channel_mask_t, int>;
+using AudioChannelBitPairs = std::vector<AudioChannelBitPair>;
+using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
+using AudioChannelPairs = std::vector<AudioChannelPair>;
+using AudioDevicePair = std::pair<audio_devices_t, AudioDeviceDescription>;
+using AudioDevicePairs = std::vector<AudioDevicePair>;
+using AudioFormatPair = std::pair<audio_format_t, AudioFormatDescription>;
+using AudioFormatPairs = std::vector<AudioFormatPair>;
 }
 
-ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
+const detail::AudioChannelBitPairs& getInAudioChannelBits() {
+    static const detail::AudioChannelBitPairs pairs = {
+        { AUDIO_CHANNEL_IN_LEFT, AudioChannelLayout::CHANNEL_FRONT_LEFT },
+        { AUDIO_CHANNEL_IN_RIGHT, AudioChannelLayout::CHANNEL_FRONT_RIGHT },
+        // AUDIO_CHANNEL_IN_FRONT is at the end
+        { AUDIO_CHANNEL_IN_BACK, AudioChannelLayout::CHANNEL_BACK_CENTER },
+        // AUDIO_CHANNEL_IN_*_PROCESSED not supported
+        // AUDIO_CHANNEL_IN_PRESSURE not supported
+        // AUDIO_CHANNEL_IN_*_AXIS not supported
+        // AUDIO_CHANNEL_IN_VOICE_* not supported
+        { AUDIO_CHANNEL_IN_BACK_LEFT, AudioChannelLayout::CHANNEL_BACK_LEFT },
+        { AUDIO_CHANNEL_IN_BACK_RIGHT, AudioChannelLayout::CHANNEL_BACK_RIGHT },
+        { AUDIO_CHANNEL_IN_CENTER, AudioChannelLayout::CHANNEL_FRONT_CENTER },
+        { AUDIO_CHANNEL_IN_LOW_FREQUENCY, AudioChannelLayout::CHANNEL_LOW_FREQUENCY },
+        { AUDIO_CHANNEL_IN_TOP_LEFT, AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT },
+        { AUDIO_CHANNEL_IN_TOP_RIGHT, AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT },
+        // When going from aidl to legacy, IN_CENTER is used
+        { AUDIO_CHANNEL_IN_FRONT, AudioChannelLayout::CHANNEL_FRONT_CENTER }
+    };
+    return pairs;
+}
+
+const detail::AudioChannelPairs& getInAudioChannelPairs() {
+    static const detail::AudioChannelPairs pairs = {
+#define DEFINE_INPUT_LAYOUT(n)                                                 \
+            {                                                                  \
+                AUDIO_CHANNEL_IN_##n,                                          \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+                        AudioChannelLayout::LAYOUT_##n)                        \
+            }
+
+        DEFINE_INPUT_LAYOUT(MONO),
+        DEFINE_INPUT_LAYOUT(STEREO),
+        DEFINE_INPUT_LAYOUT(FRONT_BACK),
+        // AUDIO_CHANNEL_IN_6 not supported
+        DEFINE_INPUT_LAYOUT(2POINT0POINT2),
+        DEFINE_INPUT_LAYOUT(2POINT1POINT2),
+        DEFINE_INPUT_LAYOUT(3POINT0POINT2),
+        DEFINE_INPUT_LAYOUT(3POINT1POINT2),
+        DEFINE_INPUT_LAYOUT(5POINT1)
+#undef DEFINE_INPUT_LAYOUT
+    };
+    return pairs;
+}
+
+const detail::AudioChannelBitPairs& getOutAudioChannelBits() {
+    static const detail::AudioChannelBitPairs pairs = {
+#define DEFINE_OUTPUT_BITS(n)                                                  \
+            { AUDIO_CHANNEL_OUT_##n, AudioChannelLayout::CHANNEL_##n }
+
+        DEFINE_OUTPUT_BITS(FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(LOW_FREQUENCY),
+        DEFINE_OUTPUT_BITS(BACK_LEFT),
+        DEFINE_OUTPUT_BITS(BACK_RIGHT),
+        DEFINE_OUTPUT_BITS(FRONT_LEFT_OF_CENTER),
+        DEFINE_OUTPUT_BITS(FRONT_RIGHT_OF_CENTER),
+        DEFINE_OUTPUT_BITS(BACK_CENTER),
+        DEFINE_OUTPUT_BITS(SIDE_LEFT),
+        DEFINE_OUTPUT_BITS(SIDE_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_BACK_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_BACK_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_BACK_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_SIDE_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_SIDE_RIGHT),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(LOW_FREQUENCY_2),
+        DEFINE_OUTPUT_BITS(FRONT_WIDE_LEFT),
+        DEFINE_OUTPUT_BITS(FRONT_WIDE_RIGHT),
+#undef DEFINE_OUTPUT_BITS
+        { AUDIO_CHANNEL_OUT_HAPTIC_A, AudioChannelLayout::CHANNEL_HAPTIC_A },
+        { AUDIO_CHANNEL_OUT_HAPTIC_B, AudioChannelLayout::CHANNEL_HAPTIC_B }
+    };
+    return pairs;
+}
+
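+// Pairs of legacy AUDIO_CHANNEL_OUT_* layout masks and the corresponding predefined AIDL
+// 'layoutMask' values.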
+const detail::AudioChannelPairs& getOutAudioChannelPairs() {
+    static const detail::AudioChannelPairs pairs = {
+#define DEFINE_OUTPUT_LAYOUT(n)                                                \
+            {                                                                  \
+                AUDIO_CHANNEL_OUT_##n,                                         \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+                        AudioChannelLayout::LAYOUT_##n)                        \
+            }
+
+        DEFINE_OUTPUT_LAYOUT(MONO),
+        DEFINE_OUTPUT_LAYOUT(STEREO),
+        DEFINE_OUTPUT_LAYOUT(2POINT1),
+        DEFINE_OUTPUT_LAYOUT(TRI),
+        DEFINE_OUTPUT_LAYOUT(TRI_BACK),
+        DEFINE_OUTPUT_LAYOUT(3POINT1),
+        DEFINE_OUTPUT_LAYOUT(2POINT0POINT2),
+        DEFINE_OUTPUT_LAYOUT(2POINT1POINT2),
+        DEFINE_OUTPUT_LAYOUT(3POINT0POINT2),
+        DEFINE_OUTPUT_LAYOUT(3POINT1POINT2),
+        DEFINE_OUTPUT_LAYOUT(QUAD),
+        DEFINE_OUTPUT_LAYOUT(QUAD_SIDE),
+        DEFINE_OUTPUT_LAYOUT(SURROUND),
+        DEFINE_OUTPUT_LAYOUT(PENTA),
+        DEFINE_OUTPUT_LAYOUT(5POINT1),
+        DEFINE_OUTPUT_LAYOUT(5POINT1_SIDE),
+        DEFINE_OUTPUT_LAYOUT(5POINT1POINT2),
+        DEFINE_OUTPUT_LAYOUT(5POINT1POINT4),
+        DEFINE_OUTPUT_LAYOUT(6POINT1),
+        DEFINE_OUTPUT_LAYOUT(7POINT1),
+        DEFINE_OUTPUT_LAYOUT(7POINT1POINT2),
+        DEFINE_OUTPUT_LAYOUT(7POINT1POINT4),
+        DEFINE_OUTPUT_LAYOUT(13POINT_360RA),
+        DEFINE_OUTPUT_LAYOUT(22POINT2),
+        DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_A),
+        DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_A),
+        DEFINE_OUTPUT_LAYOUT(HAPTIC_AB),
+        DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_AB),
+        DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_AB)
+#undef DEFINE_OUTPUT_LAYOUT
+    };
+    return pairs;
+}
+
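+// Pairs of legacy AUDIO_CHANNEL_IN_VOICE_* masks and the corresponding AIDL 'voiceMask' values.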
+const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
+    static const detail::AudioChannelPairs pairs = {
+#define DEFINE_VOICE_LAYOUT(n)                                                 \
+            {                                                                  \
+                AUDIO_CHANNEL_IN_VOICE_##n,                                    \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(  \
+                        AudioChannelLayout::VOICE_##n)                         \
+            }
+        DEFINE_VOICE_LAYOUT(UPLINK_MONO),
+        DEFINE_VOICE_LAYOUT(DNLINK_MONO),
+        DEFINE_VOICE_LAYOUT(CALL_MONO)
+#undef DEFINE_VOICE_LAYOUT
+    };
+    return pairs;
+}
+
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
+        const std::string& connection = "") {
+    AudioDeviceDescription result;
+    result.type = type;
+    result.connection = connection;
+    return result;
+}
+
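+// Appends both the input and the output variant of a device that comes in symmetric pairs;
+// both variants share the same (optional) connection type.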
+void append_AudioDeviceDescription(detail::AudioDevicePairs& pairs,
+        audio_devices_t inputType, audio_devices_t outputType,
+        AudioDeviceType inType, AudioDeviceType outType,
+        const std::string& connection = "") {
+    pairs.push_back(std::make_pair(inputType, make_AudioDeviceDescription(inType, connection)));
+    pairs.push_back(std::make_pair(outputType, make_AudioDeviceDescription(outType, connection)));
+}
+
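+// Pairs of legacy audio_devices_t values and the corresponding AudioDeviceDescription instances.
+// Devices that exist in symmetric input/output variants are added via
+// append_AudioDeviceDescription to keep the table compact.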
+const detail::AudioDevicePairs& getAudioDevicePairs() {
+    static const detail::AudioDevicePairs pairs = []() {
+        detail::AudioDevicePairs pairs = {{
+            {
+                AUDIO_DEVICE_NONE, AudioDeviceDescription{}
+            },
+            {
+                AUDIO_DEVICE_OUT_EARPIECE, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER_EARPIECE)
+            },
+            {
+                AUDIO_DEVICE_OUT_SPEAKER, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER)
+            },
+            {
+                AUDIO_DEVICE_OUT_WIRED_HEADPHONE, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_HEADPHONE,
+                        AudioDeviceDescription::CONNECTION_ANALOG())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLUETOOTH_SCO, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_DEVICE,
+                        AudioDeviceDescription::CONNECTION_BT_SCO())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_CARKIT,
+                        AudioDeviceDescription::CONNECTION_BT_SCO())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_HEADPHONE,
+                        AudioDeviceDescription::CONNECTION_BT_A2DP())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER,
+                        AudioDeviceDescription::CONNECTION_BT_A2DP())
+            },
+            {
+                AUDIO_DEVICE_OUT_TELEPHONY_TX, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_TELEPHONY_TX)
+            },
+            {
+                AUDIO_DEVICE_OUT_AUX_LINE, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_LINE_AUX)
+            },
+            {
+                AUDIO_DEVICE_OUT_SPEAKER_SAFE, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER_SAFE)
+            },
+            {
+                AUDIO_DEVICE_OUT_HEARING_AID, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_HEARING_AID,
+                        AudioDeviceDescription::CONNECTION_WIRELESS())
+            },
+            {
+                AUDIO_DEVICE_OUT_ECHO_CANCELLER, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_ECHO_CANCELLER)
+            },
+            {
+                AUDIO_DEVICE_OUT_BLE_SPEAKER, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER,
+                        AudioDeviceDescription::CONNECTION_BT_LE())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLE_BROADCAST, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_BROADCAST,
+                        AudioDeviceDescription::CONNECTION_BT_LE())
+            },
+            // AUDIO_DEVICE_IN_AMBIENT and IN_COMMUNICATION are omitted since they are deprecated.
+            {
+                AUDIO_DEVICE_IN_BUILTIN_MIC, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_MICROPHONE)
+            },
+            {
+                AUDIO_DEVICE_IN_BACK_MIC, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_MICROPHONE_BACK)
+            },
+            {
+                AUDIO_DEVICE_IN_TELEPHONY_RX, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_TELEPHONY_RX)
+            },
+            {
+                AUDIO_DEVICE_IN_TV_TUNER, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_TV_TUNER)
+            },
+            {
+                AUDIO_DEVICE_IN_LOOPBACK, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_LOOPBACK)
+            },
+            {
+                AUDIO_DEVICE_IN_BLUETOOTH_BLE, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_DEVICE,
+                        AudioDeviceDescription::CONNECTION_BT_LE())
+            },
+            {
+                AUDIO_DEVICE_IN_ECHO_REFERENCE, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_ECHO_REFERENCE)
+            }
+        }};
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_DEFAULT, AUDIO_DEVICE_OUT_DEFAULT,
+                AudioDeviceType::IN_DEFAULT, AudioDeviceType::OUT_DEFAULT);
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_OUT_WIRED_HEADSET,
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_ANALOG());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_BT_SCO());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_HDMI, AUDIO_DEVICE_OUT_HDMI,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                AudioDeviceType::IN_SUBMIX, AudioDeviceType::OUT_SUBMIX);
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
+                AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+                AudioDeviceDescription::CONNECTION_ANALOG());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
+                AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+                AudioDeviceDescription::CONNECTION_USB());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_ACCESSORY,
+                AudioDeviceType::IN_ACCESSORY, AudioDeviceType::OUT_ACCESSORY,
+                AudioDeviceDescription::CONNECTION_USB());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_OUT_USB_DEVICE,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_USB());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_FM_TUNER, AUDIO_DEVICE_OUT_FM,
+                AudioDeviceType::IN_FM_TUNER, AudioDeviceType::OUT_FM);
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_LINE, AUDIO_DEVICE_OUT_LINE,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_ANALOG());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_SPDIF, AUDIO_DEVICE_OUT_SPDIF,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_SPDIF());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_BT_A2DP());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_IP, AUDIO_DEVICE_OUT_IP,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_IP_V4());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_OUT_BUS,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_BUS());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_PROXY, AUDIO_DEVICE_OUT_PROXY,
+                AudioDeviceType::IN_AFE_PROXY, AudioDeviceType::OUT_AFE_PROXY);
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_USB());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_ARC,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI_ARC());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_HDMI_EARC, AUDIO_DEVICE_OUT_HDMI_EARC,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI_EARC());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_OUT_BLE_HEADSET,
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_BT_LE());
+        return pairs;
+    }();
+    return pairs;
+}
+
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+    AudioFormatDescription result;
+    result.type = type;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+    auto result = make_AudioFormatDescription(AudioFormatType::PCM);
+    result.pcm = pcm;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+    AudioFormatDescription result;
+    result.encoding = encoding;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
+        const std::string& encoding) {
+    auto result = make_AudioFormatDescription(encoding);
+    result.pcm = transport;
+    return result;
+}
+
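+// Pairs of legacy audio_format_t values and the corresponding AudioFormatDescription instances.
+// Formats without an IANA-registered MIME type use vendor-style encoding strings, as noted below.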
+const detail::AudioFormatPairs& getAudioFormatPairs() {
+    static const detail::AudioFormatPairs pairs = {{
+        {
+            AUDIO_FORMAT_INVALID,
+            make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID)
+        },
+        {
+            AUDIO_FORMAT_DEFAULT, AudioFormatDescription{}
+        },
+        {
+            AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(PcmType::INT_16_BIT)
+        },
+        {
+            AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(PcmType::UINT_8_BIT)
+        },
+        {
+            AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(PcmType::INT_32_BIT)
+        },
+        {
+            AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(PcmType::FIXED_Q_8_24)
+        },
+        {
+            AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(PcmType::FLOAT_32_BIT)
+        },
+        {
+            AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(PcmType::INT_24_BIT)
+        },
+        {
+            AUDIO_FORMAT_MP3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG)
+        },
+        {
+            AUDIO_FORMAT_AMR_NB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_NB)
+        },
+        {
+            AUDIO_FORMAT_AMR_WB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB)
+        },
+        {
+            AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MP4)
+        },
+        {
+            AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MAIN)
+        },
+        {
+            AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LC)
+        },
+        {
+            AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SSR)
+        },
+        {
+            AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LTP)
+        },
+        {
+            AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V1)
+        },
+        {
+            AUDIO_FORMAT_AAC_SCALABLE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE)
+        },
+        {
+            AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ERLC)
+        },
+        {
+            AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LD)
+        },
+        {
+            AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V2)
+        },
+        {
+            AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ELD)
+        },
+        {
+            AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_XHE)
+        },
+        // AUDIO_FORMAT_HE_AAC_V1 and HE_AAC_V2 are omitted since they were deprecated a long
+        // time ago.
+        {
+            AUDIO_FORMAT_VORBIS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_VORBIS)
+        },
+        {
+            AUDIO_FORMAT_OPUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_OPUS)
+        },
+        {
+            AUDIO_FORMAT_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC3)
+        },
+        {
+            AUDIO_FORMAT_E_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3)
+        },
+        {
+            AUDIO_FORMAT_E_AC3_JOC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3_JOC)
+        },
+        {
+            AUDIO_FORMAT_DTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS)
+        },
+        {
+            AUDIO_FORMAT_DTS_HD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_HD)
+        },
+        // In the future, we would like to represent encapsulated bitstreams as
+        // nested AudioFormatDescriptions. The legacy 'AUDIO_FORMAT_IEC61937' type doesn't
+        // specify the format of the encapsulated bitstream.
+        {
+            AUDIO_FORMAT_IEC61937,
+            make_AudioFormatDescription(PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937)
+        },
+        {
+            AUDIO_FORMAT_DOLBY_TRUEHD,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD)
+        },
+        {
+            AUDIO_FORMAT_EVRC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRC)
+        },
+        {
+            AUDIO_FORMAT_EVRCB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCB)
+        },
+        {
+            AUDIO_FORMAT_EVRCWB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCWB)
+        },
+        {
+            AUDIO_FORMAT_EVRCNW, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCNW)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADIF)
+        },
+        {
+            AUDIO_FORMAT_WMA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_WMA)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_WMA_PRO, make_AudioFormatDescription("audio/x-ms-wma.pro")
+        },
+        {
+            AUDIO_FORMAT_AMR_WB_PLUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)
+        },
+        {
+            AUDIO_FORMAT_MP2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)
+        },
+        {
+            AUDIO_FORMAT_QCELP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_QCELP)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_DSD, make_AudioFormatDescription("audio/vnd.sony.dsd")
+        },
+        {
+            AUDIO_FORMAT_FLAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_FLAC)
+        },
+        {
+            AUDIO_FORMAT_ALAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_ALAC)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APE, make_AudioFormatDescription("audio/x-ape")
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_MAIN,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_SSR,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_LTP,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_HE_V1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_SCALABLE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_ERLC,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_HE_V2,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_ELD,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_XHE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE)
+        },
+        {
+            // Note: not in the IANA registry; the registered "vnd.octel.sbc" is not BT SBC.
+            AUDIO_FORMAT_SBC, make_AudioFormatDescription("audio/x-sbc")
+        },
+        {
+            AUDIO_FORMAT_APTX, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_APTX)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APTX_HD, make_AudioFormatDescription("audio/vnd.qcom.aptx.hd")
+        },
+        {
+            AUDIO_FORMAT_AC4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC4)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_LDAC, make_AudioFormatDescription("audio/vnd.sony.ldac")
+        },
+        {
+            AUDIO_FORMAT_MAT, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_MAT_1_0,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".1.0"))
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_MAT_2_0,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.0"))
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_MAT_2_1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.1"))
+        },
+        {
+            AUDIO_FORMAT_AAC_LATM, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC)
+        },
+        {
+            AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC)
+        },
+        {
+            AUDIO_FORMAT_AAC_LATM_HE_V1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1)
+        },
+        {
+            AUDIO_FORMAT_AAC_LATM_HE_V2,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_CELT, make_AudioFormatDescription("audio/x-celt")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APTX_ADAPTIVE, make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_LHDC, make_AudioFormatDescription("audio/vnd.savitech.lhdc")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_LHDC_LL, make_AudioFormatDescription("audio/vnd.savitech.lhdc.ll")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APTX_TWSP, make_AudioFormatDescription("audio/vnd.qcom.aptx.twsp")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_LC3, make_AudioFormatDescription("audio/x-lc3")
+        },
+        {
+            AUDIO_FORMAT_MPEGH, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1)
+        },
+        {
+            AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3)
+        },
+        {
+            AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4)
+        },
+        {
+            AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3)
+        },
+        {
+            AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4)
+        },
+        {
+            AUDIO_FORMAT_IEC60958,
+            make_AudioFormatDescription(PcmType::INT_24_BIT, MEDIA_MIMETYPE_AUDIO_IEC60958)
+        },
+        {
+            AUDIO_FORMAT_DTS_UHD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_UHD)
+        },
+        {
+            AUDIO_FORMAT_DRA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DRA)
+        },
+    }};
+    return pairs;
+}
+
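+// Builds a map keyed by the first element of each pair (the legacy -> AIDL direction).
+// Duplicate keys indicate an error in the pair tables above and abort the process.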
+template<typename S, typename T>
+std::unordered_map<S, T> make_DirectMap(const std::vector<std::pair<S, T>>& v) {
+    std::unordered_map<S, T> result(v.begin(), v.end());
+    LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+    return result;
+}
+
+template<typename S, typename T>
+std::unordered_map<S, T> make_DirectMap(
+        const std::vector<std::pair<S, T>>& v1, const std::vector<std::pair<S, T>>& v2) {
+    std::unordered_map<S, T> result(v1.begin(), v1.end());
+    LOG_ALWAYS_FATAL_IF(result.size() != v1.size(), "Duplicate key elements detected in v1");
+    result.insert(v2.begin(), v2.end());
+    LOG_ALWAYS_FATAL_IF(result.size() != v1.size() + v2.size(),
+            "Duplicate key elements detected in v1+v2");
+    return result;
+}
+
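+// Builds a map keyed by the second element of each pair (the AIDL -> legacy direction),
+// aborting on duplicate keys just like make_DirectMap.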
+template<typename S, typename T>
+std::unordered_map<T, S> make_ReverseMap(const std::vector<std::pair<S, T>>& v) {
+    std::unordered_map<T, S> result;
+    std::transform(v.begin(), v.end(), std::inserter(result, result.begin()),
+            [](const std::pair<S, T>& p) {
+                return std::make_pair(p.second, p.first);
+            });
+    LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+    return result;
+}
+
+}  // namespace
+
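+// Converts an AIDL layout bit set into a legacy position mask bit by bit; used here when the
+// layout does not directly match a predefined legacy mask. For example, with isInput == true,
+// CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT converts to
+// AUDIO_CHANNEL_IN_LEFT | AUDIO_CHANNEL_IN_RIGHT.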
+audio_channel_mask_t aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+        int aidlLayout, bool isInput) {
+    auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+    const int aidlLayoutInitial = aidlLayout; // for error message
+    audio_channel_mask_t legacy = AUDIO_CHANNEL_NONE;
+    for (const auto& bitPair : bitMapping) {
+        if ((aidlLayout & bitPair.second) == bitPair.second) {
+            legacy = static_cast<audio_channel_mask_t>(legacy | bitPair.first);
+            aidlLayout &= ~bitPair.second;
+            if (aidlLayout == 0) {
+                return legacy;
+            }
+        }
+    }
+    ALOGE("%s: aidl layout 0x%x contains bits 0x%x that have no match to legacy %s bits",
+            __func__, aidlLayoutInitial, aidlLayout, isInput ? "input" : "output");
+    return AUDIO_CHANNEL_NONE;
+}
+
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+        const AudioChannelLayout& aidl, bool isInput) {
+    using ReverseMap = std::unordered_map<AudioChannelLayout, audio_channel_mask_t>;
+    using Tag = AudioChannelLayout::Tag;
+    static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
+    static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
+    static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
+
+    auto convert = [](const AudioChannelLayout& aidl, const ReverseMap& m,
+            const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
+        if (auto it = m.find(aidl); it != m.end()) {
+            return it->second;
+        } else {
+            ALOGW("%s: no legacy %s audio_channel_mask_t found for %s", func, type,
+                    aidl.toString().c_str());
+            return unexpected(BAD_VALUE);
+        }
+    };
+
+    switch (aidl.getTag()) {
+        case Tag::none:
+            return AUDIO_CHANNEL_NONE;
+        case Tag::invalid:
+            return AUDIO_CHANNEL_INVALID;
+        case Tag::indexMask:
+            // Index masks do not have pre-defined values.
+            if (const int bits = aidl.get<Tag::indexMask>();
+                    __builtin_popcount(bits) != 0 &&
+                    __builtin_popcount(bits) <= AUDIO_CHANNEL_COUNT_MAX) {
+                return audio_channel_mask_from_representation_and_bits(
+                        AUDIO_CHANNEL_REPRESENTATION_INDEX, bits);
+            } else {
+                ALOGE("%s: invalid indexMask value 0x%x in %s",
+                        __func__, bits, aidl.toString().c_str());
+                return unexpected(BAD_VALUE);
+            }
+        case Tag::layoutMask:
+            // The fast path is to find a direct match for some known layout mask.
+            if (const auto layoutMatch = convert(aidl, isInput ? mIn : mOut, __func__,
+                    isInput ? "input" : "output");
+                    layoutMatch.ok()) {
+                return layoutMatch;
+            }
+            // If a match for a predefined layout wasn't found, make a custom one from bits.
+            if (audio_channel_mask_t bitMask =
+                    aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+                            aidl.get<Tag::layoutMask>(), isInput);
+                    bitMask != AUDIO_CHANNEL_NONE) {
+                return bitMask;
+            }
+            return unexpected(BAD_VALUE);
+        case Tag::voiceMask:
+            return convert(aidl, mVoice, __func__, "voice");
+    }
+    ALOGE("%s: unexpected tag value %d", __func__, aidl.getTag());
+    return unexpected(BAD_VALUE);
+}
+
+int legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+        audio_channel_mask_t legacy, bool isInput) {
+    auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+    const int legacyInitial = legacy; // for error message
+    int aidlLayout = 0;
+    for (const auto& bitPair : bitMapping) {
+        if ((legacy & bitPair.first) == bitPair.first) {
+            aidlLayout |= bitPair.second;
+            legacy = static_cast<audio_channel_mask_t>(legacy & ~bitPair.first);
+            if (legacy == 0) {
+                return aidlLayout;
+            }
+        }
+    }
+    ALOGE("%s: legacy %s audio_channel_mask_t 0x%x contains unrecognized bits 0x%x",
+            __func__, isInput ? "input" : "output", legacyInitial, legacy);
+    return 0;
+}
+
+ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+        audio_channel_mask_t legacy, bool isInput) {
+    using DirectMap = std::unordered_map<audio_channel_mask_t, AudioChannelLayout>;
+    using Tag = AudioChannelLayout::Tag;
+    static const DirectMap mInAndVoice = make_DirectMap(
+            getInAudioChannelPairs(), getVoiceAudioChannelPairs());
+    static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
+
+    auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
+            const char* func, const char* type) -> ConversionResult<AudioChannelLayout> {
+        if (auto it = m.find(legacy); it != m.end()) {
+            return it->second;
+        } else {
+            ALOGW("%s: no AudioChannelLayout found for legacy %s audio_channel_mask_t value 0x%x",
+                    func, type, legacy);
+            return unexpected(BAD_VALUE);
+        }
+    };
+
+    if (legacy == AUDIO_CHANNEL_NONE) {
+        return AudioChannelLayout{};
+    } else if (legacy == AUDIO_CHANNEL_INVALID) {
+        return AudioChannelLayout::make<Tag::invalid>(0);
+    }
+
+    const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
+    if (repr == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+        if (audio_channel_mask_is_valid(legacy)) {
+            const int indexMask = VALUE_OR_RETURN(
+                    convertIntegral<int>(audio_channel_mask_get_bits(legacy)));
+            return AudioChannelLayout::make<Tag::indexMask>(indexMask);
+        } else {
+            ALOGE("%s: legacy audio_channel_mask_t value 0x%x is invalid", __func__, legacy);
+            return unexpected(BAD_VALUE);
+        }
+    } else if (repr == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+        // The fast path is to find a direct match for some known layout mask.
+        if (const auto layoutMatch = convert(legacy, isInput ? mInAndVoice : mOut, __func__,
+                isInput ? "input / voice" : "output");
+                layoutMatch.ok()) {
+            return layoutMatch;
+        }
+        // If a match for a predefined layout wasn't found, make a custom one from bits,
+        // rejecting those with voice channel bits.
+        if (!isInput ||
+                (legacy & (AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK)) == 0) {
+            if (int bitMaskLayout =
+                    legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+                            legacy, isInput);
+                    bitMaskLayout != 0) {
+                return AudioChannelLayout::make<Tag::layoutMask>(bitMaskLayout);
+            }
+        } else {
+            ALOGE("%s: legacy audio_channel_mask_t value 0x%x contains voice bits",
+                    __func__, legacy);
+        }
+        return unexpected(BAD_VALUE);
+    }
+
+    ALOGE("%s: unknown representation %d in audio_channel_mask_t value 0x%x",
+            __func__, repr, legacy);
+    return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+        const AudioDeviceDescription& aidl) {
+    static const std::unordered_map<AudioDeviceDescription, audio_devices_t> m =
+            make_ReverseMap(getAudioDevicePairs());
+    if (auto it = m.find(aidl); it != m.end()) {
+        return it->second;
+    } else {
+        ALOGE("%s: no legacy audio_devices_t found for %s", __func__, aidl.toString().c_str());
+        return unexpected(BAD_VALUE);
+    }
+}
+
+ConversionResult<AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
+        audio_devices_t legacy) {
+    static const std::unordered_map<audio_devices_t, AudioDeviceDescription> m =
+            make_DirectMap(getAudioDevicePairs());
+    if (auto it = m.find(legacy); it != m.end()) {
+        return it->second;
+    } else {
+        ALOGE("%s: no AudioDeviceDescription found for legacy audio_devices_t value 0x%x",
+                __func__, legacy);
+        return unexpected(BAD_VALUE);
+    }
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, char* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    return aidl2legacy_string(
+                    aidl.address.get<AudioDeviceAddress::id>(),
+                    legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN);
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, String8* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    *legacyAddress = VALUE_OR_RETURN_STATUS(aidl2legacy_string_view_String8(
+                    aidl.address.get<AudioDeviceAddress::id>()));
+    return OK;
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, std::string* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    *legacyAddress = aidl.address.get<AudioDeviceAddress::id>();
+    return OK;
+}
+
+ConversionResult<AudioDevice> legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const char* legacyAddress) {
+    AudioDevice aidl;
+    aidl.type = VALUE_OR_RETURN(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+    const std::string aidl_id = VALUE_OR_RETURN(
+            legacy2aidl_string(legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+    aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+    return aidl;
+}
+
+ConversionResult<AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const String8& legacyAddress) {
+    AudioDevice aidl;
+    aidl.type = VALUE_OR_RETURN(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+    const std::string aidl_id = VALUE_OR_RETURN(
+            legacy2aidl_String8_string(legacyAddress));
+    aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+    return aidl;
+}
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+        const AudioFormatDescription& aidl) {
+    static const std::unordered_map<AudioFormatDescription, audio_format_t> m =
+            make_ReverseMap(getAudioFormatPairs());
+    if (auto it = m.find(aidl); it != m.end()) {
+        return it->second;
+    } else {
+        ALOGE("%s: no legacy audio_format_t found for %s", __func__, aidl.toString().c_str());
+        return unexpected(BAD_VALUE);
+    }
+}
+
+ConversionResult<AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
         audio_format_t legacy) {
-    // This relies on AudioFormat being kept in sync with audio_format_t.
-    static_assert(sizeof(media::audio::common::AudioFormat) == sizeof(audio_format_t));
-    return static_cast<media::audio::common::AudioFormat>(legacy);
+    static const std::unordered_map<audio_format_t, AudioFormatDescription> m =
+            make_DirectMap(getAudioFormatPairs());
+    if (auto it = m.find(legacy); it != m.end()) {
+        return it->second;
+    } else {
+        ALOGE("%s: no AudioFormatDescription found for legacy audio_format_t value 0x%x",
+                __func__, legacy);
+        return unexpected(BAD_VALUE);
+    }
 }
 
-ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl) {
+ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(
+        AudioGainMode aidl) {
     switch (aidl) {
-        case media::AudioGainMode::JOINT:
+        case AudioGainMode::JOINT:
             return AUDIO_GAIN_MODE_JOINT;
-        case media::AudioGainMode::CHANNELS:
+        case AudioGainMode::CHANNELS:
             return AUDIO_GAIN_MODE_CHANNELS;
-        case media::AudioGainMode::RAMP:
+        case AudioGainMode::RAMP:
             return AUDIO_GAIN_MODE_RAMP;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy) {
+ConversionResult<AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(
+        audio_gain_mode_t legacy) {
     switch (legacy) {
         case AUDIO_GAIN_MODE_JOINT:
-            return media::AudioGainMode::JOINT;
+            return AudioGainMode::JOINT;
         case AUDIO_GAIN_MODE_CHANNELS:
-            return media::AudioGainMode::CHANNELS;
+            return AudioGainMode::CHANNELS;
         case AUDIO_GAIN_MODE_RAMP:
-            return media::AudioGainMode::RAMP;
+            return AudioGainMode::RAMP;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
-    return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, media::AudioGainMode>(
+    return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, AudioGainMode>(
             aidl, aidl2legacy_AudioGainMode_audio_gain_mode_t,
             // AudioGainMode is index-based.
-            indexToEnum_index<media::AudioGainMode>,
+            indexToEnum_index<AudioGainMode>,
             // AUDIO_GAIN_MODE_* constants are mask-based.
             enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
 }
 
 ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy) {
-    return convertBitmask<int32_t, audio_gain_mode_t, media::AudioGainMode, audio_gain_mode_t>(
+    return convertBitmask<int32_t, audio_gain_mode_t, AudioGainMode, audio_gain_mode_t>(
             legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
             // AUDIO_GAIN_MODE_* constants are mask-based.
             indexToEnum_bitmask<audio_gain_mode_t>,
             // AudioGainMode is index-based.
-            enumToMask_index<int32_t, media::AudioGainMode>);
-}
-
-ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl) {
-    // TODO(ytai): bitfield?
-    return convertReinterpret<audio_devices_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy) {
-    // TODO(ytai): bitfield?
-    return convertReinterpret<int32_t>(legacy);
+            enumToMask_index<int32_t, AudioGainMode>);
 }
 
 ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
-        const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type) {
+        const AudioGainConfig& aidl, bool isInput) {
     audio_gain_config legacy;
     legacy.index = VALUE_OR_RETURN(convertIntegral<int>(aidl.index));
     legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
-    legacy.channel_mask =
-            VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
-    const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
-    const bool isJoint = bitmaskIsSet(aidl.mode, media::AudioGainMode::JOINT);
+    legacy.channel_mask = VALUE_OR_RETURN(
+            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+    const bool isJoint = bitmaskIsSet(aidl.mode, AudioGainMode::JOINT);
     size_t numValues = isJoint ? 1
                                : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
                                          : audio_channel_count_from_out_mask(legacy.channel_mask);
@@ -481,14 +1401,13 @@
     return legacy;
 }
 
-ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
-        const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type) {
-    media::AudioGainConfig aidl;
+ConversionResult<AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
+        const audio_gain_config& legacy, bool isInput) {
+    AudioGainConfig aidl;
     aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
     aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
-    aidl.channelMask =
-            VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
-    const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
+    aidl.channelMask = VALUE_OR_RETURN(
+            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     const bool isJoint = (legacy.mode & AUDIO_GAIN_MODE_JOINT) != 0;
     size_t numValues = isJoint ? 1
                                : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
@@ -502,129 +1421,141 @@
 }
 
 ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
-        media::AudioInputFlags aidl) {
+        AudioInputFlags aidl) {
     switch (aidl) {
-        case media::AudioInputFlags::FAST:
+        case AudioInputFlags::FAST:
             return AUDIO_INPUT_FLAG_FAST;
-        case media::AudioInputFlags::HW_HOTWORD:
+        case AudioInputFlags::HW_HOTWORD:
             return AUDIO_INPUT_FLAG_HW_HOTWORD;
-        case media::AudioInputFlags::RAW:
+        case AudioInputFlags::RAW:
             return AUDIO_INPUT_FLAG_RAW;
-        case media::AudioInputFlags::SYNC:
+        case AudioInputFlags::SYNC:
             return AUDIO_INPUT_FLAG_SYNC;
-        case media::AudioInputFlags::MMAP_NOIRQ:
+        case AudioInputFlags::MMAP_NOIRQ:
             return AUDIO_INPUT_FLAG_MMAP_NOIRQ;
-        case media::AudioInputFlags::VOIP_TX:
+        case AudioInputFlags::VOIP_TX:
             return AUDIO_INPUT_FLAG_VOIP_TX;
-        case media::AudioInputFlags::HW_AV_SYNC:
+        case AudioInputFlags::HW_AV_SYNC:
             return AUDIO_INPUT_FLAG_HW_AV_SYNC;
-        case media::AudioInputFlags::DIRECT:
+        case AudioInputFlags::DIRECT:
             return AUDIO_INPUT_FLAG_DIRECT;
+        case AudioInputFlags::ULTRASOUND:
+            return AUDIO_INPUT_FLAG_ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
+ConversionResult<AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
         audio_input_flags_t legacy) {
     switch (legacy) {
         case AUDIO_INPUT_FLAG_NONE:
             break; // shouldn't get here. must be listed  -Werror,-Wswitch
         case AUDIO_INPUT_FLAG_FAST:
-            return media::AudioInputFlags::FAST;
+            return AudioInputFlags::FAST;
         case AUDIO_INPUT_FLAG_HW_HOTWORD:
-            return media::AudioInputFlags::HW_HOTWORD;
+            return AudioInputFlags::HW_HOTWORD;
         case AUDIO_INPUT_FLAG_RAW:
-            return media::AudioInputFlags::RAW;
+            return AudioInputFlags::RAW;
         case AUDIO_INPUT_FLAG_SYNC:
-            return media::AudioInputFlags::SYNC;
+            return AudioInputFlags::SYNC;
         case AUDIO_INPUT_FLAG_MMAP_NOIRQ:
-            return media::AudioInputFlags::MMAP_NOIRQ;
+            return AudioInputFlags::MMAP_NOIRQ;
         case AUDIO_INPUT_FLAG_VOIP_TX:
-            return media::AudioInputFlags::VOIP_TX;
+            return AudioInputFlags::VOIP_TX;
         case AUDIO_INPUT_FLAG_HW_AV_SYNC:
-            return media::AudioInputFlags::HW_AV_SYNC;
+            return AudioInputFlags::HW_AV_SYNC;
         case AUDIO_INPUT_FLAG_DIRECT:
-            return media::AudioInputFlags::DIRECT;
+            return AudioInputFlags::DIRECT;
+        case AUDIO_INPUT_FLAG_ULTRASOUND:
+            return AudioInputFlags::ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
-        media::AudioOutputFlags aidl) {
+        AudioOutputFlags aidl) {
     switch (aidl) {
-        case media::AudioOutputFlags::DIRECT:
+        case AudioOutputFlags::DIRECT:
             return AUDIO_OUTPUT_FLAG_DIRECT;
-        case media::AudioOutputFlags::PRIMARY:
+        case AudioOutputFlags::PRIMARY:
             return AUDIO_OUTPUT_FLAG_PRIMARY;
-        case media::AudioOutputFlags::FAST:
+        case AudioOutputFlags::FAST:
             return AUDIO_OUTPUT_FLAG_FAST;
-        case media::AudioOutputFlags::DEEP_BUFFER:
+        case AudioOutputFlags::DEEP_BUFFER:
             return AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
-        case media::AudioOutputFlags::COMPRESS_OFFLOAD:
+        case AudioOutputFlags::COMPRESS_OFFLOAD:
             return AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
-        case media::AudioOutputFlags::NON_BLOCKING:
+        case AudioOutputFlags::NON_BLOCKING:
             return AUDIO_OUTPUT_FLAG_NON_BLOCKING;
-        case media::AudioOutputFlags::HW_AV_SYNC:
+        case AudioOutputFlags::HW_AV_SYNC:
             return AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
-        case media::AudioOutputFlags::TTS:
+        case AudioOutputFlags::TTS:
             return AUDIO_OUTPUT_FLAG_TTS;
-        case media::AudioOutputFlags::RAW:
+        case AudioOutputFlags::RAW:
             return AUDIO_OUTPUT_FLAG_RAW;
-        case media::AudioOutputFlags::SYNC:
+        case AudioOutputFlags::SYNC:
             return AUDIO_OUTPUT_FLAG_SYNC;
-        case media::AudioOutputFlags::IEC958_NONAUDIO:
+        case AudioOutputFlags::IEC958_NONAUDIO:
             return AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
-        case media::AudioOutputFlags::DIRECT_PCM:
+        case AudioOutputFlags::DIRECT_PCM:
             return AUDIO_OUTPUT_FLAG_DIRECT_PCM;
-        case media::AudioOutputFlags::MMAP_NOIRQ:
+        case AudioOutputFlags::MMAP_NOIRQ:
             return AUDIO_OUTPUT_FLAG_MMAP_NOIRQ;
-        case media::AudioOutputFlags::VOIP_RX:
+        case AudioOutputFlags::VOIP_RX:
             return AUDIO_OUTPUT_FLAG_VOIP_RX;
-        case media::AudioOutputFlags::INCALL_MUSIC:
+        case AudioOutputFlags::INCALL_MUSIC:
             return AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
-        case media::AudioOutputFlags::GAPLESS_OFFLOAD:
+        case AudioOutputFlags::GAPLESS_OFFLOAD:
             return AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD;
+        case AudioOutputFlags::ULTRASOUND:
+            return AUDIO_OUTPUT_FLAG_ULTRASOUND;
+        case AudioOutputFlags::SPATIALIZER:
+            return AUDIO_OUTPUT_FLAG_SPATIALIZER;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
+ConversionResult<AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
         audio_output_flags_t legacy) {
     switch (legacy) {
         case AUDIO_OUTPUT_FLAG_NONE:
             break; // shouldn't get here. must be listed  -Werror,-Wswitch
         case AUDIO_OUTPUT_FLAG_DIRECT:
-            return media::AudioOutputFlags::DIRECT;
+            return AudioOutputFlags::DIRECT;
         case AUDIO_OUTPUT_FLAG_PRIMARY:
-            return media::AudioOutputFlags::PRIMARY;
+            return AudioOutputFlags::PRIMARY;
         case AUDIO_OUTPUT_FLAG_FAST:
-            return media::AudioOutputFlags::FAST;
+            return AudioOutputFlags::FAST;
         case AUDIO_OUTPUT_FLAG_DEEP_BUFFER:
-            return media::AudioOutputFlags::DEEP_BUFFER;
+            return AudioOutputFlags::DEEP_BUFFER;
         case AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD:
-            return media::AudioOutputFlags::COMPRESS_OFFLOAD;
+            return AudioOutputFlags::COMPRESS_OFFLOAD;
         case AUDIO_OUTPUT_FLAG_NON_BLOCKING:
-            return media::AudioOutputFlags::NON_BLOCKING;
+            return AudioOutputFlags::NON_BLOCKING;
         case AUDIO_OUTPUT_FLAG_HW_AV_SYNC:
-            return media::AudioOutputFlags::HW_AV_SYNC;
+            return AudioOutputFlags::HW_AV_SYNC;
         case AUDIO_OUTPUT_FLAG_TTS:
-            return media::AudioOutputFlags::TTS;
+            return AudioOutputFlags::TTS;
         case AUDIO_OUTPUT_FLAG_RAW:
-            return media::AudioOutputFlags::RAW;
+            return AudioOutputFlags::RAW;
         case AUDIO_OUTPUT_FLAG_SYNC:
-            return media::AudioOutputFlags::SYNC;
+            return AudioOutputFlags::SYNC;
         case AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO:
-            return media::AudioOutputFlags::IEC958_NONAUDIO;
+            return AudioOutputFlags::IEC958_NONAUDIO;
         case AUDIO_OUTPUT_FLAG_DIRECT_PCM:
-            return media::AudioOutputFlags::DIRECT_PCM;
+            return AudioOutputFlags::DIRECT_PCM;
         case AUDIO_OUTPUT_FLAG_MMAP_NOIRQ:
-            return media::AudioOutputFlags::MMAP_NOIRQ;
+            return AudioOutputFlags::MMAP_NOIRQ;
         case AUDIO_OUTPUT_FLAG_VOIP_RX:
-            return media::AudioOutputFlags::VOIP_RX;
+            return AudioOutputFlags::VOIP_RX;
         case AUDIO_OUTPUT_FLAG_INCALL_MUSIC:
-            return media::AudioOutputFlags::INCALL_MUSIC;
+            return AudioOutputFlags::INCALL_MUSIC;
         case AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD:
-            return media::AudioOutputFlags::GAPLESS_OFFLOAD;
+            return AudioOutputFlags::GAPLESS_OFFLOAD;
+        case AUDIO_OUTPUT_FLAG_ULTRASOUND:
+            return AudioOutputFlags::ULTRASOUND;
+        case AUDIO_OUTPUT_FLAG_SPATIALIZER:
+            return AudioOutputFlags::SPATIALIZER;
     }
     return unexpected(BAD_VALUE);
 }
@@ -634,9 +1565,9 @@
     using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
 
     LegacyMask converted = VALUE_OR_RETURN(
-            (convertBitmask<LegacyMask, int32_t, audio_input_flags_t, media::AudioInputFlags>(
+            (convertBitmask<LegacyMask, int32_t, audio_input_flags_t, AudioInputFlags>(
                     aidl, aidl2legacy_AudioInputFlags_audio_input_flags_t,
-                    indexToEnum_index<media::AudioInputFlags>,
+                    indexToEnum_index<AudioInputFlags>,
                     enumToMask_bitmask<LegacyMask, audio_input_flags_t>)));
     return static_cast<audio_input_flags_t>(converted);
 }
@@ -646,10 +1577,10 @@
     using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
 
     LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
-    return convertBitmask<int32_t, LegacyMask, media::AudioInputFlags, audio_input_flags_t>(
+    return convertBitmask<int32_t, LegacyMask, AudioInputFlags, audio_input_flags_t>(
             legacyMask, legacy2aidl_audio_input_flags_t_AudioInputFlags,
             indexToEnum_bitmask<audio_input_flags_t>,
-            enumToMask_index<int32_t, media::AudioInputFlags>);
+            enumToMask_index<int32_t, AudioInputFlags>);
 }
 
 ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
@@ -657,9 +1588,9 @@
     return convertBitmask<audio_output_flags_t,
             int32_t,
             audio_output_flags_t,
-            media::AudioOutputFlags>(
+            AudioOutputFlags>(
             aidl, aidl2legacy_AudioOutputFlags_audio_output_flags_t,
-            indexToEnum_index<media::AudioOutputFlags>,
+            indexToEnum_index<AudioOutputFlags>,
             enumToMask_bitmask<audio_output_flags_t, audio_output_flags_t>);
 }
 
@@ -668,225 +1599,215 @@
     using LegacyMask = std::underlying_type_t<audio_output_flags_t>;
 
     LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
-    return convertBitmask<int32_t, LegacyMask, media::AudioOutputFlags, audio_output_flags_t>(
+    return convertBitmask<int32_t, LegacyMask, AudioOutputFlags, audio_output_flags_t>(
             legacyMask, legacy2aidl_audio_output_flags_t_AudioOutputFlags,
             indexToEnum_bitmask<audio_output_flags_t>,
-            enumToMask_index<int32_t, media::AudioOutputFlags>);
+            enumToMask_index<int32_t, AudioOutputFlags>);
 }
 
 ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
-        const media::AudioIoFlags& aidl, media::AudioPortRole role, media::AudioPortType type) {
+        const AudioIoFlags& aidl, bool isInput) {
     audio_io_flags legacy;
-    Direction dir = VALUE_OR_RETURN(direction(role, type));
-    switch (dir) {
-        case Direction::INPUT: {
-            legacy.input = VALUE_OR_RETURN(
-                    aidl2legacy_int32_t_audio_input_flags_t_mask(
-                            VALUE_OR_RETURN(UNION_GET(aidl, input))));
-        }
-            break;
-
-        case Direction::OUTPUT: {
-            legacy.output = VALUE_OR_RETURN(
-                    aidl2legacy_int32_t_audio_output_flags_t_mask(
-                            VALUE_OR_RETURN(UNION_GET(aidl, output))));
-        }
-            break;
+    if (isInput) {
+        legacy.input = VALUE_OR_RETURN(
+                aidl2legacy_int32_t_audio_input_flags_t_mask(
+                        VALUE_OR_RETURN(UNION_GET(aidl, input))));
+    } else {
+        legacy.output = VALUE_OR_RETURN(
+                aidl2legacy_int32_t_audio_output_flags_t_mask(
+                        VALUE_OR_RETURN(UNION_GET(aidl, output))));
     }
-
     return legacy;
 }
 
-ConversionResult<media::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
-        const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type) {
-    media::AudioIoFlags aidl;
-
-    Direction dir = VALUE_OR_RETURN(direction(role, type));
-    switch (dir) {
-        case Direction::INPUT:
-            UNION_SET(aidl, input,
-                      VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(
-                              legacy.input)));
-            break;
-        case Direction::OUTPUT:
-            UNION_SET(aidl, output,
-                      VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(
-                              legacy.output)));
-            break;
+ConversionResult<AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
+        const audio_io_flags& legacy, bool isInput) {
+    AudioIoFlags aidl;
+    if (isInput) {
+        UNION_SET(aidl, input,
+                VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(legacy.input)));
+    } else {
+        UNION_SET(aidl, output,
+                VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(legacy.output)));
     }
     return aidl;
 }
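+
+// Illustrative sketch only (not part of this change): the direction now arrives as a
+// plain isInput flag instead of the old role/type pair, and it selects which member of
+// the AudioIoFlags union is used. The helper name below is made up for the example.
+[[maybe_unused]] static ConversionResult<audio_io_flags> exampleOutputFlagsRoundTrip(
+        const audio_io_flags& legacy) {
+    AudioIoFlags aidl = VALUE_OR_RETURN(
+            legacy2aidl_audio_io_flags_AudioIoFlags(legacy, false /*isInput*/));
+    // Converting back with the same direction restores the legacy output bitmask.
+    return aidl2legacy_AudioIoFlags_audio_io_flags(aidl, false /*isInput*/);
+}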
 
 ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-        const media::AudioPortConfigDeviceExt& aidl) {
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+        const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlDeviceExt) {
     audio_port_config_device_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.type));
-    RETURN_IF_ERROR(aidl2legacy_string(aidl.address, legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlDeviceExt.hwModule));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+                    aidl.device, &legacy.type, legacy.address));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigDeviceExt>
-legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-        const audio_port_config_device_ext& legacy) {
-    media::AudioPortConfigDeviceExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
-    aidl.address = VALUE_OR_RETURN(
-            legacy2aidl_string(legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
-    return aidl;
+status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+        const audio_port_config_device_ext& legacy,
+        AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+    aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->device = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+    return OK;
 }
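+
+// Hedged sketch (the helper name is an assumption): the device extension conversion now
+// fills an AudioPortDeviceExt and a separate media::AudioPortDeviceExtSys, so a full
+// round trip has to carry both pieces through the pair of helpers above.
+[[maybe_unused]] static ConversionResult<audio_port_config_device_ext>
+exampleDeviceExtRoundTrip(const audio_port_config_device_ext& legacy) {
+    AudioPortDeviceExt aidl;
+    media::AudioPortDeviceExtSys aidlDeviceExt;
+    RETURN_IF_ERROR(legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+            legacy, &aidl, &aidlDeviceExt));
+    return aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(aidl, aidlDeviceExt);
+}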
 
 ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
-        media::AudioStreamType aidl) {
+        AudioStreamType aidl) {
     switch (aidl) {
-        case media::AudioStreamType::DEFAULT:
+        case AudioStreamType::INVALID:
+            break;  // return error
+        case AudioStreamType::SYS_RESERVED_DEFAULT:
             return AUDIO_STREAM_DEFAULT;
-        case media::AudioStreamType::VOICE_CALL:
+        case AudioStreamType::VOICE_CALL:
             return AUDIO_STREAM_VOICE_CALL;
-        case media::AudioStreamType::SYSTEM:
+        case AudioStreamType::SYSTEM:
             return AUDIO_STREAM_SYSTEM;
-        case media::AudioStreamType::RING:
+        case AudioStreamType::RING:
             return AUDIO_STREAM_RING;
-        case media::AudioStreamType::MUSIC:
+        case AudioStreamType::MUSIC:
             return AUDIO_STREAM_MUSIC;
-        case media::AudioStreamType::ALARM:
+        case AudioStreamType::ALARM:
             return AUDIO_STREAM_ALARM;
-        case media::AudioStreamType::NOTIFICATION:
+        case AudioStreamType::NOTIFICATION:
             return AUDIO_STREAM_NOTIFICATION;
-        case media::AudioStreamType::BLUETOOTH_SCO:
+        case AudioStreamType::BLUETOOTH_SCO:
             return AUDIO_STREAM_BLUETOOTH_SCO;
-        case media::AudioStreamType::ENFORCED_AUDIBLE:
+        case AudioStreamType::ENFORCED_AUDIBLE:
             return AUDIO_STREAM_ENFORCED_AUDIBLE;
-        case media::AudioStreamType::DTMF:
+        case AudioStreamType::DTMF:
             return AUDIO_STREAM_DTMF;
-        case media::AudioStreamType::TTS:
+        case AudioStreamType::TTS:
             return AUDIO_STREAM_TTS;
-        case media::AudioStreamType::ACCESSIBILITY:
+        case AudioStreamType::ACCESSIBILITY:
             return AUDIO_STREAM_ACCESSIBILITY;
-        case media::AudioStreamType::ASSISTANT:
+        case AudioStreamType::ASSISTANT:
             return AUDIO_STREAM_ASSISTANT;
-        case media::AudioStreamType::REROUTING:
+        case AudioStreamType::SYS_RESERVED_REROUTING:
             return AUDIO_STREAM_REROUTING;
-        case media::AudioStreamType::PATCH:
+        case AudioStreamType::SYS_RESERVED_PATCH:
             return AUDIO_STREAM_PATCH;
-        case media::AudioStreamType::CALL_ASSISTANT:
+        case AudioStreamType::CALL_ASSISTANT:
             return AUDIO_STREAM_CALL_ASSISTANT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
+ConversionResult<AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
         audio_stream_type_t legacy) {
     switch (legacy) {
         case AUDIO_STREAM_DEFAULT:
-            return media::AudioStreamType::DEFAULT;
+            return AudioStreamType::SYS_RESERVED_DEFAULT;
         case AUDIO_STREAM_VOICE_CALL:
-            return media::AudioStreamType::VOICE_CALL;
+            return AudioStreamType::VOICE_CALL;
         case AUDIO_STREAM_SYSTEM:
-            return media::AudioStreamType::SYSTEM;
+            return AudioStreamType::SYSTEM;
         case AUDIO_STREAM_RING:
-            return media::AudioStreamType::RING;
+            return AudioStreamType::RING;
         case AUDIO_STREAM_MUSIC:
-            return media::AudioStreamType::MUSIC;
+            return AudioStreamType::MUSIC;
         case AUDIO_STREAM_ALARM:
-            return media::AudioStreamType::ALARM;
+            return AudioStreamType::ALARM;
         case AUDIO_STREAM_NOTIFICATION:
-            return media::AudioStreamType::NOTIFICATION;
+            return AudioStreamType::NOTIFICATION;
         case AUDIO_STREAM_BLUETOOTH_SCO:
-            return media::AudioStreamType::BLUETOOTH_SCO;
+            return AudioStreamType::BLUETOOTH_SCO;
         case AUDIO_STREAM_ENFORCED_AUDIBLE:
-            return media::AudioStreamType::ENFORCED_AUDIBLE;
+            return AudioStreamType::ENFORCED_AUDIBLE;
         case AUDIO_STREAM_DTMF:
-            return media::AudioStreamType::DTMF;
+            return AudioStreamType::DTMF;
         case AUDIO_STREAM_TTS:
-            return media::AudioStreamType::TTS;
+            return AudioStreamType::TTS;
         case AUDIO_STREAM_ACCESSIBILITY:
-            return media::AudioStreamType::ACCESSIBILITY;
+            return AudioStreamType::ACCESSIBILITY;
         case AUDIO_STREAM_ASSISTANT:
-            return media::AudioStreamType::ASSISTANT;
+            return AudioStreamType::ASSISTANT;
         case AUDIO_STREAM_REROUTING:
-            return media::AudioStreamType::REROUTING;
+            return AudioStreamType::SYS_RESERVED_REROUTING;
         case AUDIO_STREAM_PATCH:
-            return media::AudioStreamType::PATCH;
+            return AudioStreamType::SYS_RESERVED_PATCH;
         case AUDIO_STREAM_CALL_ASSISTANT:
-            return media::AudioStreamType::CALL_ASSISTANT;
+            return AudioStreamType::CALL_ASSISTANT;
     }
     return unexpected(BAD_VALUE);
 }
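+
+// Small sketch, purely illustrative: callers are unaffected by the SYS_RESERVED_*
+// renames on the AIDL side; e.g. AUDIO_STREAM_DEFAULT still round-trips through
+// AudioStreamType::SYS_RESERVED_DEFAULT. The function name below is not real API.
+[[maybe_unused]] static ConversionResult<audio_stream_type_t> exampleStreamTypeRoundTrip(
+        audio_stream_type_t legacyStream) {
+    AudioStreamType aidl = VALUE_OR_RETURN(
+            legacy2aidl_audio_stream_type_t_AudioStreamType(legacyStream));
+    return aidl2legacy_AudioStreamType_audio_stream_type_t(aidl);
+}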
 
-ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
-        media::AudioSourceType aidl) {
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+        AudioSource aidl) {
     switch (aidl) {
-        case media::AudioSourceType::INVALID:
-            // This value does not have an enum
+        case AudioSource::SYS_RESERVED_INVALID:
             return AUDIO_SOURCE_INVALID;
-        case media::AudioSourceType::DEFAULT:
+        case AudioSource::DEFAULT:
             return AUDIO_SOURCE_DEFAULT;
-        case media::AudioSourceType::MIC:
+        case AudioSource::MIC:
             return AUDIO_SOURCE_MIC;
-        case media::AudioSourceType::VOICE_UPLINK:
+        case AudioSource::VOICE_UPLINK:
             return AUDIO_SOURCE_VOICE_UPLINK;
-        case media::AudioSourceType::VOICE_DOWNLINK:
+        case AudioSource::VOICE_DOWNLINK:
             return AUDIO_SOURCE_VOICE_DOWNLINK;
-        case media::AudioSourceType::VOICE_CALL:
+        case AudioSource::VOICE_CALL:
             return AUDIO_SOURCE_VOICE_CALL;
-        case media::AudioSourceType::CAMCORDER:
+        case AudioSource::CAMCORDER:
             return AUDIO_SOURCE_CAMCORDER;
-        case media::AudioSourceType::VOICE_RECOGNITION:
+        case AudioSource::VOICE_RECOGNITION:
             return AUDIO_SOURCE_VOICE_RECOGNITION;
-        case media::AudioSourceType::VOICE_COMMUNICATION:
+        case AudioSource::VOICE_COMMUNICATION:
             return AUDIO_SOURCE_VOICE_COMMUNICATION;
-        case media::AudioSourceType::REMOTE_SUBMIX:
+        case AudioSource::REMOTE_SUBMIX:
             return AUDIO_SOURCE_REMOTE_SUBMIX;
-        case media::AudioSourceType::UNPROCESSED:
+        case AudioSource::UNPROCESSED:
             return AUDIO_SOURCE_UNPROCESSED;
-        case media::AudioSourceType::VOICE_PERFORMANCE:
+        case AudioSource::VOICE_PERFORMANCE:
             return AUDIO_SOURCE_VOICE_PERFORMANCE;
-        case media::AudioSourceType::ECHO_REFERENCE:
+        case AudioSource::ULTRASOUND:
+            return AUDIO_SOURCE_ULTRASOUND;
+        case AudioSource::ECHO_REFERENCE:
             return AUDIO_SOURCE_ECHO_REFERENCE;
-        case media::AudioSourceType::FM_TUNER:
+        case AudioSource::FM_TUNER:
             return AUDIO_SOURCE_FM_TUNER;
-        case media::AudioSourceType::HOTWORD:
+        case AudioSource::HOTWORD:
             return AUDIO_SOURCE_HOTWORD;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
+ConversionResult<AudioSource> legacy2aidl_audio_source_t_AudioSource(
         audio_source_t legacy) {
     switch (legacy) {
         case AUDIO_SOURCE_INVALID:
-            return media::AudioSourceType::INVALID;
+            return AudioSource::SYS_RESERVED_INVALID;
         case AUDIO_SOURCE_DEFAULT:
-            return media::AudioSourceType::DEFAULT;
+            return AudioSource::DEFAULT;
         case AUDIO_SOURCE_MIC:
-            return media::AudioSourceType::MIC;
+            return AudioSource::MIC;
         case AUDIO_SOURCE_VOICE_UPLINK:
-            return media::AudioSourceType::VOICE_UPLINK;
+            return AudioSource::VOICE_UPLINK;
         case AUDIO_SOURCE_VOICE_DOWNLINK:
-            return media::AudioSourceType::VOICE_DOWNLINK;
+            return AudioSource::VOICE_DOWNLINK;
         case AUDIO_SOURCE_VOICE_CALL:
-            return media::AudioSourceType::VOICE_CALL;
+            return AudioSource::VOICE_CALL;
         case AUDIO_SOURCE_CAMCORDER:
-            return media::AudioSourceType::CAMCORDER;
+            return AudioSource::CAMCORDER;
         case AUDIO_SOURCE_VOICE_RECOGNITION:
-            return media::AudioSourceType::VOICE_RECOGNITION;
+            return AudioSource::VOICE_RECOGNITION;
         case AUDIO_SOURCE_VOICE_COMMUNICATION:
-            return media::AudioSourceType::VOICE_COMMUNICATION;
+            return AudioSource::VOICE_COMMUNICATION;
         case AUDIO_SOURCE_REMOTE_SUBMIX:
-            return media::AudioSourceType::REMOTE_SUBMIX;
+            return AudioSource::REMOTE_SUBMIX;
         case AUDIO_SOURCE_UNPROCESSED:
-            return media::AudioSourceType::UNPROCESSED;
+            return AudioSource::UNPROCESSED;
         case AUDIO_SOURCE_VOICE_PERFORMANCE:
-            return media::AudioSourceType::VOICE_PERFORMANCE;
+            return AudioSource::VOICE_PERFORMANCE;
+        case AUDIO_SOURCE_ULTRASOUND:
+            return AudioSource::ULTRASOUND;
         case AUDIO_SOURCE_ECHO_REFERENCE:
-            return media::AudioSourceType::ECHO_REFERENCE;
+            return AudioSource::ECHO_REFERENCE;
         case AUDIO_SOURCE_FM_TUNER:
-            return media::AudioSourceType::FM_TUNER;
+            return AudioSource::FM_TUNER;
         case AUDIO_SOURCE_HOTWORD:
-            return media::AudioSourceType::HOTWORD;
+            return AudioSource::HOTWORD;
     }
     return unexpected(BAD_VALUE);
 }
@@ -902,8 +1823,8 @@
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_config_mix_ext_usecase = decltype(audio_port_config_mix_ext::usecase);
 
-ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortConfigMixExtUseCase(
-        const media::AudioPortConfigMixExtUseCase& aidl, media::AudioPortRole role) {
+ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortMixExtUseCase(
+        const AudioPortMixExtUseCase& aidl, media::AudioPortRole role) {
     audio_port_config_mix_ext_usecase legacy;
 
     switch (role) {
@@ -920,16 +1841,16 @@
 
         case media::AudioPortRole::SINK:
             // This is not a bug. A SINK role corresponds to the source field.
-            legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(
+            legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(
                     VALUE_OR_RETURN(UNION_GET(aidl, source))));
             return legacy;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // -Werror,-Wswitch makes a missed case a build error
 }
 
-ConversionResult<media::AudioPortConfigMixExtUseCase> legacy2aidl_AudioPortConfigMixExtUseCase(
+ConversionResult<AudioPortMixExtUseCase> legacy2aidl_AudioPortMixExtUseCase(
         const audio_port_config_mix_ext_usecase& legacy, audio_port_role_t role) {
-    media::AudioPortConfigMixExtUseCase aidl;
+    AudioPortMixExtUseCase aidl;
 
     switch (role) {
         case AUDIO_PORT_ROLE_NONE:
@@ -943,52 +1864,53 @@
         case AUDIO_PORT_ROLE_SINK:
             // This is not a bug. A SINK role corresponds to the source field.
             UNION_SET(aidl, source,
-                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source)));
             return aidl;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // -Werror,-Wswitch makes a missed case a build error
 }
 
-ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
-        const media::AudioPortConfigMixExt& aidl, media::AudioPortRole role) {
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
+        const AudioPortMixExt& aidl, media::AudioPortRole role,
+        const media::AudioPortMixExtSys& aidlMixExt) {
     audio_port_config_mix_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlMixExt.hwModule));
     legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
-    legacy.usecase = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigMixExtUseCase(aidl.usecase, role));
+    legacy.usecase = VALUE_OR_RETURN(aidl2legacy_AudioPortMixExtUseCase(aidl.usecase, role));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
-        const audio_port_config_mix_ext& legacy, audio_port_role_t role) {
-    media::AudioPortConfigMixExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
-    aidl.usecase = VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExtUseCase(legacy.usecase, role));
-    return aidl;
+status_t legacy2aidl_AudioPortMixExt(
+        const audio_port_config_mix_ext& legacy, audio_port_role_t role,
+        AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+    aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+    aidl->usecase = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_AudioPortMixExtUseCase(legacy.usecase, role));
+    return OK;
 }
 
 ConversionResult<audio_port_config_session_ext>
-aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
-        const media::AudioPortConfigSessionExt& aidl) {
+aidl2legacy_int32_t_audio_port_config_session_ext(int32_t aidl) {
     audio_port_config_session_ext legacy;
-    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigSessionExt>
-legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+ConversionResult<int32_t>
+legacy2aidl_audio_port_config_session_ext_int32_t(
         const audio_port_config_session_ext& legacy) {
-    media::AudioPortConfigSessionExt aidl;
-    aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    return aidl;
+    return legacy2aidl_audio_session_t_int32_t(legacy.session);
 }
 
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_config_ext = decltype(audio_port_config::ext);
 
-ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortConfigExt(
-        const media::AudioPortConfigExt& aidl, media::AudioPortType type,
-        media::AudioPortRole role) {
+ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortExt_audio_port_config_ext(
+        const AudioPortExt& aidl, media::AudioPortType type,
+        media::AudioPortRole role, const media::AudioPortExtSys& aidlSys) {
     audio_port_config_ext legacy;
     switch (type) {
         case media::AudioPortType::NONE:
@@ -997,16 +1919,19 @@
             return legacy;
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, device))));
+                    aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, device)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, device))));
             return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigMixExt(VALUE_OR_RETURN(UNION_GET(aidl, mix)), role));
+                    aidl2legacy_AudioPortMixExt(
+                            VALUE_OR_RETURN(UNION_GET(aidl, mix)), role,
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, mix))));
             return legacy;
         case media::AudioPortType::SESSION:
             legacy.session = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
+                    aidl2legacy_int32_t_audio_port_config_session_ext(
                             VALUE_OR_RETURN(UNION_GET(aidl, session))));
             return legacy;
 
@@ -1014,90 +1939,113 @@
     LOG_ALWAYS_FATAL("Shouldn't get here"); // -Werror,-Wswitch makes a missed case a build error
 }
 
-ConversionResult<media::AudioPortConfigExt> legacy2aidl_AudioPortConfigExt(
-        const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role) {
-    media::AudioPortConfigExt aidl;
-
+status_t legacy2aidl_AudioPortExt(
+        const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role,
+        AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
-            UNION_SET(aidl, unspecified, false);
-            return aidl;
-        case AUDIO_PORT_TYPE_DEVICE:
-            UNION_SET(aidl, device,
-                      VALUE_OR_RETURN(
-                        legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-                          legacy.device)));
-            return aidl;
-        case AUDIO_PORT_TYPE_MIX:
-            UNION_SET(aidl, mix,
-                      VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExt(legacy.mix, role)));
-            return aidl;
+            UNION_SET(*aidl, unspecified, false);
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
+        case AUDIO_PORT_TYPE_DEVICE: {
+            AudioPortDeviceExt device;
+            media::AudioPortDeviceExtSys deviceSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+                            legacy.device, &device, &deviceSys));
+            UNION_SET(*aidl, device, device);
+            UNION_SET(*aidlSys, device, deviceSys);
+            return OK;
+        }
+        case AUDIO_PORT_TYPE_MIX: {
+            AudioPortMixExt mix;
+            media::AudioPortMixExtSys mixSys;
+            RETURN_STATUS_IF_ERROR(legacy2aidl_AudioPortMixExt(legacy.mix, role, &mix, &mixSys));
+            UNION_SET(*aidl, mix, mix);
+            UNION_SET(*aidlSys, mix, mixSys);
+            return OK;
+        }
         case AUDIO_PORT_TYPE_SESSION:
-            UNION_SET(aidl, session,
-                      VALUE_OR_RETURN(
-                        legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
-                          legacy.session)));
-            return aidl;
+            UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+                            legacy2aidl_audio_port_config_session_ext_int32_t(legacy.session)));
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // -Werror,-Wswitch makes a missed case a build error
 }
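+
+// A sketch under the same split pattern (names here are assumptions, not part of this
+// change): the ext union is converted into an (AudioPortExt, media::AudioPortExtSys)
+// pair, and the reverse direction also needs role/type converted to their AIDL enums.
+[[maybe_unused]] static ConversionResult<audio_port_config_ext> examplePortExtRoundTrip(
+        const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role) {
+    AudioPortExt aidl;
+    media::AudioPortExtSys aidlSys;
+    RETURN_IF_ERROR(legacy2aidl_AudioPortExt(legacy, type, role, &aidl, &aidlSys));
+    return aidl2legacy_AudioPortExt_audio_port_config_ext(
+            aidl,
+            VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(type)),
+            VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(role)),
+            aidlSys);
+}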
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
         const media::AudioPortConfig& aidl) {
-    audio_port_config legacy;
-    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
-    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
-    legacy.config_mask = VALUE_OR_RETURN(aidl2legacy_int32_t_config_mask(aidl.configMask));
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::SAMPLE_RATE)) {
-        legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.sampleRate));
+    audio_port_config legacy{};
+    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
+    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
+    const bool isInput =
+            VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
+    if (aidl.hal.sampleRate.has_value()) {
+        legacy.sample_rate = VALUE_OR_RETURN(
+                convertIntegral<unsigned int>(aidl.hal.sampleRate.value().value));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::CHANNEL_MASK)) {
+    if (aidl.hal.channelMask.has_value()) {
         legacy.channel_mask =
-                VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+                VALUE_OR_RETURN(
+                        aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+                                aidl.hal.channelMask.value(), isInput));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FORMAT)) {
-        legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+    if (aidl.hal.format.has_value()) {
+        legacy.format = VALUE_OR_RETURN(
+                aidl2legacy_AudioFormatDescription_audio_format_t(aidl.hal.format.value()));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_FORMAT;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::GAIN)) {
-        legacy.gain = VALUE_OR_RETURN(
-                aidl2legacy_AudioGainConfig_audio_gain_config(aidl.gain, aidl.role, aidl.type));
+    if (aidl.hal.gain.has_value()) {
+        legacy.gain = VALUE_OR_RETURN(aidl2legacy_AudioGainConfig_audio_gain_config(
+                        aidl.hal.gain.value(), isInput));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_GAIN;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FLAGS)) {
+    if (aidl.hal.flags.has_value()) {
         legacy.flags = VALUE_OR_RETURN(
-                aidl2legacy_AudioIoFlags_audio_io_flags(aidl.flags, aidl.role, aidl.type));
+                aidl2legacy_AudioIoFlags_audio_io_flags(aidl.hal.flags.value(), isInput));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
     }
-    legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigExt(aidl.ext, aidl.type, aidl.role));
+    legacy.ext = VALUE_OR_RETURN(
+            aidl2legacy_AudioPortExt_audio_port_config_ext(
+                    aidl.hal.ext, aidl.sys.type, aidl.sys.role, aidl.sys.ext));
     return legacy;
 }
 
 ConversionResult<media::AudioPortConfig> legacy2aidl_audio_port_config_AudioPortConfig(
         const audio_port_config& legacy) {
     media::AudioPortConfig aidl;
-    aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
-    aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
-    aidl.configMask = VALUE_OR_RETURN(legacy2aidl_config_mask_int32_t(legacy.config_mask));
+    aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+    aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+    aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+    const bool isInput = VALUE_OR_RETURN(
+            direction(legacy.role, legacy.type)) == Direction::INPUT;
     if (legacy.config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+        Int aidl_sampleRate;
+        aidl_sampleRate.value = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+        aidl.hal.sampleRate = aidl_sampleRate;
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        aidl.channelMask =
-                VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+        aidl.hal.channelMask = VALUE_OR_RETURN(
+                legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+        aidl.hal.format = VALUE_OR_RETURN(
+                legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_GAIN) {
-        aidl.gain = VALUE_OR_RETURN(legacy2aidl_audio_gain_config_AudioGainConfig(
-                legacy.gain, legacy.role, legacy.type));
+        aidl.hal.gain = VALUE_OR_RETURN(
+                legacy2aidl_audio_gain_config_AudioGainConfig(legacy.gain, isInput));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_FLAGS) {
-        aidl.flags = VALUE_OR_RETURN(
-                legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, legacy.role, legacy.type));
+        aidl.hal.flags = VALUE_OR_RETURN(
+                legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, isInput));
     }
-    aidl.ext =
-            VALUE_OR_RETURN(legacy2aidl_AudioPortConfigExt(legacy.ext, legacy.type, legacy.role));
+    RETURN_IF_ERROR(legacy2aidl_AudioPortExt(legacy.ext, legacy.type, legacy.role,
+                    &aidl.hal.ext, &aidl.sys.ext));
     return aidl;
 }
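+
+// Illustrative only (the helper name is invented): with the new layout, config_mask bits
+// become optional fields in aidl.hal on the way out, and field presence re-creates the
+// same bits on the way back, so a valid config should round-trip unchanged.
+[[maybe_unused]] static ConversionResult<audio_port_config> examplePortConfigRoundTrip(
+        const audio_port_config& legacy) {
+    media::AudioPortConfig aidl = VALUE_OR_RETURN(
+            legacy2aidl_audio_port_config_AudioPortConfig(legacy));
+    return aidl2legacy_AudioPortConfig_audio_port_config(aidl);
+}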
 
@@ -1148,33 +2096,40 @@
 
 ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
         const media::AudioIoDescriptor& aidl) {
-    sp<AudioIoDescriptor> legacy(new AudioIoDescriptor());
-    legacy->mIoHandle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
-    legacy->mPatch = VALUE_OR_RETURN(aidl2legacy_AudioPatch_audio_patch(aidl.patch));
-    legacy->mSamplingRate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
-    legacy->mFormat = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
-    legacy->mChannelMask =
-            VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
-    legacy->mFrameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
-    legacy->mFrameCountHAL = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCountHAL));
-    legacy->mLatency = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.latency));
-    legacy->mPortId = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
-    return legacy;
+    const audio_io_handle_t io_handle = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
+    const struct audio_patch patch = VALUE_OR_RETURN(
+            aidl2legacy_AudioPatch_audio_patch(aidl.patch));
+    const bool isInput = aidl.isInput;
+    const uint32_t sampling_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
+    const audio_format_t format = VALUE_OR_RETURN(
+            aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+    const audio_channel_mask_t channel_mask = VALUE_OR_RETURN(
+            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+    const size_t frame_count = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
+    const size_t frame_count_hal = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCountHAL));
+    const uint32_t latency = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.latency));
+    const audio_port_handle_t port_id = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
+    return sp<AudioIoDescriptor>::make(io_handle, patch, isInput, sampling_rate, format,
+            channel_mask, frame_count, frame_count_hal, latency, port_id);
 }
 
 ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
         const sp<AudioIoDescriptor>& legacy) {
     media::AudioIoDescriptor aidl;
-    aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->mIoHandle));
-    aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->mPatch));
-    aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->mSamplingRate));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy->mFormat));
-    aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy->mChannelMask));
-    aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->mFrameCount));
-    aidl.frameCountHAL = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->mFrameCountHAL));
-    aidl.latency = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->mLatency));
-    aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy->mPortId));
+    aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->getIoHandle()));
+    aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->getPatch()));
+    aidl.isInput = legacy->getIsInput();
+    aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getSamplingRate()));
+    aidl.format = VALUE_OR_RETURN(
+            legacy2aidl_audio_format_t_AudioFormatDescription(legacy->getFormat()));
+    aidl.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+                    legacy->getChannelMask(), legacy->getIsInput()));
+    aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->getFrameCount()));
+    aidl.frameCountHAL = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->getFrameCountHAL()));
+    aidl.latency = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getLatency()));
+    aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy->getPortId()));
     return aidl;
 }
 
@@ -1195,137 +2150,143 @@
 }
 
 ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl) {
+aidl2legacy_AudioContentType_audio_content_type_t(AudioContentType aidl) {
     switch (aidl) {
-        case media::AudioContentType::UNKNOWN:
+        case AudioContentType::UNKNOWN:
             return AUDIO_CONTENT_TYPE_UNKNOWN;
-        case media::AudioContentType::SPEECH:
+        case AudioContentType::SPEECH:
             return AUDIO_CONTENT_TYPE_SPEECH;
-        case media::AudioContentType::MUSIC:
+        case AudioContentType::MUSIC:
             return AUDIO_CONTENT_TYPE_MUSIC;
-        case media::AudioContentType::MOVIE:
+        case AudioContentType::MOVIE:
             return AUDIO_CONTENT_TYPE_MOVIE;
-        case media::AudioContentType::SONIFICATION:
+        case AudioContentType::SONIFICATION:
             return AUDIO_CONTENT_TYPE_SONIFICATION;
+        case AudioContentType::ULTRASOUND:
+            return AUDIO_CONTENT_TYPE_ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioContentType>
+ConversionResult<AudioContentType>
 legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy) {
     switch (legacy) {
         case AUDIO_CONTENT_TYPE_UNKNOWN:
-            return media::AudioContentType::UNKNOWN;
+            return AudioContentType::UNKNOWN;
         case AUDIO_CONTENT_TYPE_SPEECH:
-            return media::AudioContentType::SPEECH;
+            return AudioContentType::SPEECH;
         case AUDIO_CONTENT_TYPE_MUSIC:
-            return media::AudioContentType::MUSIC;
+            return AudioContentType::MUSIC;
         case AUDIO_CONTENT_TYPE_MOVIE:
-            return media::AudioContentType::MOVIE;
+            return AudioContentType::MOVIE;
         case AUDIO_CONTENT_TYPE_SONIFICATION:
-            return media::AudioContentType::SONIFICATION;
+            return AudioContentType::SONIFICATION;
+        case AUDIO_CONTENT_TYPE_ULTRASOUND:
+            return AudioContentType::ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::AudioUsage aidl) {
+aidl2legacy_AudioUsage_audio_usage_t(AudioUsage aidl) {
     switch (aidl) {
-        case media::AudioUsage::UNKNOWN:
+        case AudioUsage::INVALID:
+            break;  // return error
+        case AudioUsage::UNKNOWN:
             return AUDIO_USAGE_UNKNOWN;
-        case media::AudioUsage::MEDIA:
+        case AudioUsage::MEDIA:
             return AUDIO_USAGE_MEDIA;
-        case media::AudioUsage::VOICE_COMMUNICATION:
+        case AudioUsage::VOICE_COMMUNICATION:
             return AUDIO_USAGE_VOICE_COMMUNICATION;
-        case media::AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
+        case AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
             return AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
-        case media::AudioUsage::ALARM:
+        case AudioUsage::ALARM:
             return AUDIO_USAGE_ALARM;
-        case media::AudioUsage::NOTIFICATION:
+        case AudioUsage::NOTIFICATION:
             return AUDIO_USAGE_NOTIFICATION;
-        case media::AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
+        case AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
             return AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_REQUEST:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_INSTANT:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_DELAYED:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED;
-        case media::AudioUsage::NOTIFICATION_EVENT:
+        case AudioUsage::NOTIFICATION_EVENT:
             return AUDIO_USAGE_NOTIFICATION_EVENT;
-        case media::AudioUsage::ASSISTANCE_ACCESSIBILITY:
+        case AudioUsage::ASSISTANCE_ACCESSIBILITY:
             return AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
-        case media::AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
+        case AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
             return AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
-        case media::AudioUsage::ASSISTANCE_SONIFICATION:
+        case AudioUsage::ASSISTANCE_SONIFICATION:
             return AUDIO_USAGE_ASSISTANCE_SONIFICATION;
-        case media::AudioUsage::GAME:
+        case AudioUsage::GAME:
             return AUDIO_USAGE_GAME;
-        case media::AudioUsage::VIRTUAL_SOURCE:
+        case AudioUsage::VIRTUAL_SOURCE:
             return AUDIO_USAGE_VIRTUAL_SOURCE;
-        case media::AudioUsage::ASSISTANT:
+        case AudioUsage::ASSISTANT:
             return AUDIO_USAGE_ASSISTANT;
-        case media::AudioUsage::CALL_ASSISTANT:
+        case AudioUsage::CALL_ASSISTANT:
             return AUDIO_USAGE_CALL_ASSISTANT;
-        case media::AudioUsage::EMERGENCY:
+        case AudioUsage::EMERGENCY:
             return AUDIO_USAGE_EMERGENCY;
-        case media::AudioUsage::SAFETY:
+        case AudioUsage::SAFETY:
             return AUDIO_USAGE_SAFETY;
-        case media::AudioUsage::VEHICLE_STATUS:
+        case AudioUsage::VEHICLE_STATUS:
             return AUDIO_USAGE_VEHICLE_STATUS;
-        case media::AudioUsage::ANNOUNCEMENT:
+        case AudioUsage::ANNOUNCEMENT:
             return AUDIO_USAGE_ANNOUNCEMENT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioUsage>
+ConversionResult<AudioUsage>
 legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy) {
     switch (legacy) {
         case AUDIO_USAGE_UNKNOWN:
-            return media::AudioUsage::UNKNOWN;
+            return AudioUsage::UNKNOWN;
         case AUDIO_USAGE_MEDIA:
-            return media::AudioUsage::MEDIA;
+            return AudioUsage::MEDIA;
         case AUDIO_USAGE_VOICE_COMMUNICATION:
-            return media::AudioUsage::VOICE_COMMUNICATION;
+            return AudioUsage::VOICE_COMMUNICATION;
         case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-            return media::AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
+            return AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
         case AUDIO_USAGE_ALARM:
-            return media::AudioUsage::ALARM;
+            return AudioUsage::ALARM;
         case AUDIO_USAGE_NOTIFICATION:
-            return media::AudioUsage::NOTIFICATION;
+            return AudioUsage::NOTIFICATION;
         case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-            return media::AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
+            return AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_REQUEST;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_INSTANT;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_DELAYED;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED;
         case AUDIO_USAGE_NOTIFICATION_EVENT:
-            return media::AudioUsage::NOTIFICATION_EVENT;
+            return AudioUsage::NOTIFICATION_EVENT;
         case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-            return media::AudioUsage::ASSISTANCE_ACCESSIBILITY;
+            return AudioUsage::ASSISTANCE_ACCESSIBILITY;
         case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-            return media::AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
+            return AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
         case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-            return media::AudioUsage::ASSISTANCE_SONIFICATION;
+            return AudioUsage::ASSISTANCE_SONIFICATION;
         case AUDIO_USAGE_GAME:
-            return media::AudioUsage::GAME;
+            return AudioUsage::GAME;
         case AUDIO_USAGE_VIRTUAL_SOURCE:
-            return media::AudioUsage::VIRTUAL_SOURCE;
+            return AudioUsage::VIRTUAL_SOURCE;
         case AUDIO_USAGE_ASSISTANT:
-            return media::AudioUsage::ASSISTANT;
+            return AudioUsage::ASSISTANT;
         case AUDIO_USAGE_CALL_ASSISTANT:
-            return media::AudioUsage::CALL_ASSISTANT;
+            return AudioUsage::CALL_ASSISTANT;
         case AUDIO_USAGE_EMERGENCY:
-            return media::AudioUsage::EMERGENCY;
+            return AudioUsage::EMERGENCY;
         case AUDIO_USAGE_SAFETY:
-            return media::AudioUsage::SAFETY;
+            return AudioUsage::SAFETY;
         case AUDIO_USAGE_VEHICLE_STATUS:
-            return media::AudioUsage::VEHICLE_STATUS;
+            return AudioUsage::VEHICLE_STATUS;
         case AUDIO_USAGE_ANNOUNCEMENT:
-            return media::AudioUsage::ANNOUNCEMENT;
+            return AudioUsage::ANNOUNCEMENT;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1361,6 +2322,12 @@
             return AUDIO_FLAG_NO_SYSTEM_CAPTURE;
         case media::AudioFlag::CAPTURE_PRIVATE:
             return AUDIO_FLAG_CAPTURE_PRIVATE;
+        case media::AudioFlag::CONTENT_SPATIALIZED:
+            return AUDIO_FLAG_CONTENT_SPATIALIZED;
+        case media::AudioFlag::NEVER_SPATIALIZE:
+            return AUDIO_FLAG_NEVER_SPATIALIZE;
+        case media::AudioFlag::CALL_REDIRECTION:
+            return AUDIO_FLAG_CALL_REDIRECTION;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1398,6 +2365,12 @@
             return media::AudioFlag::NO_SYSTEM_CAPTURE;
         case AUDIO_FLAG_CAPTURE_PRIVATE:
             return media::AudioFlag::CAPTURE_PRIVATE;
+        case AUDIO_FLAG_CONTENT_SPATIALIZED:
+            return media::AudioFlag::CONTENT_SPATIALIZED;
+        case AUDIO_FLAG_NEVER_SPATIALIZE:
+            return media::AudioFlag::NEVER_SPATIALIZE;
+        case AUDIO_FLAG_CALL_REDIRECTION:
+            return media::AudioFlag::CALL_REDIRECTION;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1423,7 +2396,7 @@
     legacy.content_type = VALUE_OR_RETURN(
             aidl2legacy_AudioContentType_audio_content_type_t(aidl.contentType));
     legacy.usage = VALUE_OR_RETURN(aidl2legacy_AudioUsage_audio_usage_t(aidl.usage));
-    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(aidl.source));
+    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
     legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_flags_mask_t_mask(aidl.flags));
     RETURN_IF_ERROR(aidl2legacy_string(aidl.tags, legacy.tags, sizeof(legacy.tags)));
     return legacy;
@@ -1435,51 +2408,51 @@
     aidl.contentType = VALUE_OR_RETURN(
             legacy2aidl_audio_content_type_t_AudioContentType(legacy.content_type));
     aidl.usage = VALUE_OR_RETURN(legacy2aidl_audio_usage_t_AudioUsage(legacy.usage));
-    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source));
+    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source));
     aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_flags_mask_t_int32_t_mask(legacy.flags));
     aidl.tags = VALUE_OR_RETURN(legacy2aidl_string(legacy.tags, sizeof(legacy.tags)));
     return aidl;
 }
 
 ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(media::AudioEncapsulationMode aidl) {
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(AudioEncapsulationMode aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationMode::NONE:
+        case AudioEncapsulationMode::INVALID:
+            break;  // return error
+        case AudioEncapsulationMode::NONE:
             return AUDIO_ENCAPSULATION_MODE_NONE;
-        case media::AudioEncapsulationMode::ELEMENTARY_STREAM:
+        case AudioEncapsulationMode::ELEMENTARY_STREAM:
             return AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM;
-        case media::AudioEncapsulationMode::HANDLE:
+        case AudioEncapsulationMode::HANDLE:
             return AUDIO_ENCAPSULATION_MODE_HANDLE;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationMode>
+ConversionResult<AudioEncapsulationMode>
 legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_MODE_NONE:
-            return media::AudioEncapsulationMode::NONE;
+            return AudioEncapsulationMode::NONE;
         case AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM:
-            return media::AudioEncapsulationMode::ELEMENTARY_STREAM;
+            return AudioEncapsulationMode::ELEMENTARY_STREAM;
         case AUDIO_ENCAPSULATION_MODE_HANDLE:
-            return media::AudioEncapsulationMode::HANDLE;
+            return AudioEncapsulationMode::HANDLE;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const media::AudioOffloadInfo& aidl) {
-    audio_offload_info_t legacy;
-    legacy.version = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.version));
-    legacy.size = sizeof(audio_offload_info_t);
-    audio_config_base_t config = VALUE_OR_RETURN(
-            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
-    legacy.sample_rate = config.sample_rate;
-    legacy.channel_mask = config.channel_mask;
-    legacy.format = config.format;
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const AudioOffloadInfo& aidl) {
+    audio_offload_info_t legacy = AUDIO_INFO_INITIALIZER;
+    audio_config_base_t base = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, false /*isInput*/));
+    legacy.sample_rate = base.sample_rate;
+    legacy.channel_mask = base.channel_mask;
+    legacy.format = base.format;
     legacy.stream_type = VALUE_OR_RETURN(
             aidl2legacy_AudioStreamType_audio_stream_type_t(aidl.streamType));
-    legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRate));
+    legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRatePerSecond));
     legacy.duration_us = VALUE_OR_RETURN(convertIntegral<int64_t>(aidl.durationUs));
     legacy.has_video = aidl.hasVideo;
     legacy.is_streaming = aidl.isStreaming;
@@ -1493,21 +2466,20 @@
     return legacy;
 }
 
-ConversionResult<media::AudioOffloadInfo>
+ConversionResult<AudioOffloadInfo>
 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy) {
-    media::AudioOffloadInfo aidl;
+    AudioOffloadInfo aidl;
     // Version 0.1 fields.
     if (legacy.size < offsetof(audio_offload_info_t, usage) + sizeof(audio_offload_info_t::usage)) {
         return unexpected(BAD_VALUE);
     }
-    aidl.version = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.version));
-    aidl.config.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
-    aidl.config.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
-    aidl.config.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+    const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+        .channel_mask = legacy.channel_mask, .format = legacy.format };
+    aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(
+                    base, false /*isInput*/));
     aidl.streamType = VALUE_OR_RETURN(
             legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream_type));
-    aidl.bitRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
+    aidl.bitRatePerSecond = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
     aidl.durationUs = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.duration_us));
     aidl.hasVideo = legacy.has_video;
     aidl.isStreaming = legacy.is_streaming;
@@ -1531,25 +2503,25 @@
 }
 
 ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl) {
-    audio_config_t legacy;
-    legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
-    legacy.channel_mask = VALUE_OR_RETURN(
-            aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
-    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+aidl2legacy_AudioConfig_audio_config_t(const AudioConfig& aidl, bool isInput) {
+    const audio_config_base_t legacyBase = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, isInput));
+    audio_config_t legacy = AUDIO_CONFIG_INITIALIZER;
+    legacy.sample_rate = legacyBase.sample_rate;
+    legacy.channel_mask = legacyBase.channel_mask;
+    legacy.format = legacyBase.format;
     legacy.offload_info = VALUE_OR_RETURN(
             aidl2legacy_AudioOffloadInfo_audio_offload_info_t(aidl.offloadInfo));
     legacy.frame_count = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.frameCount));
     return legacy;
 }
 
-ConversionResult<media::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy) {
-    media::AudioConfig aidl;
-    aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
-    aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ConversionResult<AudioConfig>
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput) {
+    const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+        .channel_mask = legacy.channel_mask, .format = legacy.format };
+    AudioConfig aidl;
+    aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(base, isInput));
     aidl.offloadInfo = VALUE_OR_RETURN(
             legacy2aidl_audio_offload_info_t_AudioOffloadInfo(legacy.offload_info));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.frame_count));
@@ -1557,22 +2529,22 @@
 }
 
 ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl) {
+aidl2legacy_AudioConfigBase_audio_config_base_t(const AudioConfigBase& aidl, bool isInput) {
     audio_config_base_t legacy;
     legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
     legacy.channel_mask = VALUE_OR_RETURN(
-            aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
-    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
     return legacy;
 }
 
-ConversionResult<media::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy) {
-    media::AudioConfigBase aidl;
+ConversionResult<AudioConfigBase>
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput) {
+    AudioConfigBase aidl;
     aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
     aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
     return aidl;
 }
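+
+// Sketch only; the helper name is an assumption. The channel-mask conversion now needs
+// the direction, which is why both config-base converters take an isInput argument.
+[[maybe_unused]] static ConversionResult<audio_config_base_t> exampleConfigBaseRoundTrip(
+        const audio_config_base_t& legacy, bool isInput) {
+    AudioConfigBase aidl = VALUE_OR_RETURN(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(legacy, isInput));
+    return aidl2legacy_AudioConfigBase_audio_config_base_t(aidl, isInput);
+}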
 
@@ -1631,7 +2603,7 @@
 }
 
 ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl) {
+aidl2legacy_AudioUuid_audio_uuid_t(const AudioUuid& aidl) {
     audio_uuid_t legacy;
     legacy.timeLow = VALUE_OR_RETURN(convertReinterpret<uint32_t>(aidl.timeLow));
     legacy.timeMid = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.timeMid));
@@ -1644,9 +2616,9 @@
     return legacy;
 }
 
-ConversionResult<media::AudioUuid>
+ConversionResult<AudioUuid>
 legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy) {
-    media::AudioUuid aidl;
+    AudioUuid aidl;
     aidl.timeLow = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.timeLow));
     aidl.timeMid = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeMid));
     aidl.timeHiAndVersion = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeHiAndVersion));
@@ -1687,28 +2659,28 @@
 
 ConversionResult<audio_encapsulation_metadata_type_t>
 aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
-        media::AudioEncapsulationMetadataType aidl) {
+        AudioEncapsulationMetadataType aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationMetadataType::NONE:
+        case AudioEncapsulationMetadataType::NONE:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_NONE;
-        case media::AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
+        case AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER;
-        case media::AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
+        case AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationMetadataType>
+ConversionResult<AudioEncapsulationMetadataType>
 legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
         audio_encapsulation_metadata_type_t legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_METADATA_TYPE_NONE:
-            return media::AudioEncapsulationMetadataType::NONE;
+            return AudioEncapsulationMetadataType::NONE;
         case AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER:
-            return media::AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
+            return AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
         case AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR:
-            return media::AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
+            return AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1718,9 +2690,9 @@
     return convertBitmask<uint32_t,
             int32_t,
             audio_encapsulation_mode_t,
-            media::AudioEncapsulationMode>(
+            AudioEncapsulationMode>(
             aidl, aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t,
-            indexToEnum_index<media::AudioEncapsulationMode>,
+            indexToEnum_index<AudioEncapsulationMode>,
             enumToMask_index<uint32_t, audio_encapsulation_mode_t>);
 }
 
@@ -1728,11 +2700,11 @@
 legacy2aidl_AudioEncapsulationMode_mask(uint32_t legacy) {
     return convertBitmask<int32_t,
             uint32_t,
-            media::AudioEncapsulationMode,
+            AudioEncapsulationMode,
             audio_encapsulation_mode_t>(
             legacy, legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode,
             indexToEnum_index<audio_encapsulation_mode_t>,
-            enumToMask_index<int32_t, media::AudioEncapsulationMode>);
+            enumToMask_index<int32_t, AudioEncapsulationMode>);
 }
 
 ConversionResult<uint32_t>
@@ -1740,9 +2712,9 @@
     return convertBitmask<uint32_t,
             int32_t,
             audio_encapsulation_metadata_type_t,
-            media::AudioEncapsulationMetadataType>(
+            AudioEncapsulationMetadataType>(
             aidl, aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t,
-            indexToEnum_index<media::AudioEncapsulationMetadataType>,
+            indexToEnum_index<AudioEncapsulationMetadataType>,
             enumToMask_index<uint32_t, audio_encapsulation_metadata_type_t>);
 }
 
@@ -1750,104 +2722,79 @@
 legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy) {
     return convertBitmask<int32_t,
             uint32_t,
-            media::AudioEncapsulationMetadataType,
+            AudioEncapsulationMetadataType,
             audio_encapsulation_metadata_type_t>(
             legacy, legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType,
             indexToEnum_index<audio_encapsulation_metadata_type_t>,
-            enumToMask_index<int32_t, media::AudioEncapsulationMetadataType>);
-}
-
-ConversionResult<audio_mix_latency_class_t>
-aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
-        media::AudioMixLatencyClass aidl) {
-    switch (aidl) {
-        case media::AudioMixLatencyClass::LOW:
-            return AUDIO_LATENCY_LOW;
-        case media::AudioMixLatencyClass::NORMAL:
-            return AUDIO_LATENCY_NORMAL;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-ConversionResult<media::AudioMixLatencyClass>
-legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
-        audio_mix_latency_class_t legacy) {
-    switch (legacy) {
-        case AUDIO_LATENCY_LOW:
-            return media::AudioMixLatencyClass::LOW;
-        case AUDIO_LATENCY_NORMAL:
-            return media::AudioMixLatencyClass::NORMAL;
-    }
-    return unexpected(BAD_VALUE);
+            enumToMask_index<int32_t, AudioEncapsulationMetadataType>);
 }
 
 ConversionResult<audio_port_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl) {
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+        const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlSys) {
     audio_port_device_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
-    RETURN_IF_ERROR(
-            aidl2legacy_string(aidl.device.address, legacy.address, sizeof(legacy.address)));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+                    aidl.device, &legacy.type, legacy.address));
     legacy.encapsulation_modes = VALUE_OR_RETURN(
-            aidl2legacy_AudioEncapsulationMode_mask(aidl.encapsulationModes));
+            aidl2legacy_AudioEncapsulationMode_mask(aidlSys.encapsulationModes));
     legacy.encapsulation_metadata_types = VALUE_OR_RETURN(
-            aidl2legacy_AudioEncapsulationMetadataType_mask(aidl.encapsulationMetadataTypes));
+            aidl2legacy_AudioEncapsulationMetadataType_mask(
+                    aidlSys.encapsulationMetadataTypes));
     return legacy;
 }
 
-ConversionResult<media::AudioPortDeviceExt>
-legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy) {
-    media::AudioPortDeviceExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
-    aidl.device.address = VALUE_OR_RETURN(
-            legacy2aidl_string(legacy.address, sizeof(legacy.address)));
-    aidl.encapsulationModes = VALUE_OR_RETURN(
+status_t legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+        const audio_port_device_ext& legacy,
+        AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+    aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->device = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+    aidlDeviceExt->encapsulationModes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMode_mask(legacy.encapsulation_modes));
-    aidl.encapsulationMetadataTypes = VALUE_OR_RETURN(
+    aidlDeviceExt->encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMetadataType_mask(legacy.encapsulation_metadata_types));
-    return aidl;
+    return OK;
 }
 
 ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl) {
-    audio_port_mix_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+        const AudioPortMixExt& aidl, const media::AudioPortMixExtSys& aidlSys) {
+    audio_port_mix_ext legacy{};
+    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
     legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
-    legacy.latency_class = VALUE_OR_RETURN(
-            aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(aidl.latencyClass));
     return legacy;
 }
 
-ConversionResult<media::AudioPortMixExt>
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy) {
-    media::AudioPortMixExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
-    aidl.latencyClass = VALUE_OR_RETURN(
-            legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(legacy.latency_class));
-    return aidl;
+status_t
+legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy,
+        AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+    aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+    return OK;
 }
 
 ConversionResult<audio_port_session_ext>
-aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl) {
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl) {
     audio_port_session_ext legacy;
-    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
     return legacy;
 }
 
-ConversionResult<media::AudioPortSessionExt>
-legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy) {
-    media::AudioPortSessionExt aidl;
-    aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    return aidl;
+ConversionResult<int32_t>
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy) {
+    return legacy2aidl_audio_session_t_int32_t(legacy.session);
 }
 
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_v7_ext = decltype(audio_port_v7::ext);
 
-ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt(
-        const media::AudioPortExt& aidl, media::AudioPortType type) {
+ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt_audio_port_v7_ext(
+        const AudioPortExt& aidl, media::AudioPortType type,
+        const media::AudioPortExtSys& aidlSys) {
     audio_port_v7_ext legacy;
     switch (type) {
         case media::AudioPortType::NONE:
@@ -1857,66 +2804,83 @@
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, device))));
+                            VALUE_OR_RETURN(UNION_GET(aidl, device)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, device))));
             return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, mix))));
+                            VALUE_OR_RETURN(UNION_GET(aidl, mix)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, mix))));
             return legacy;
         case media::AudioPortType::SESSION:
-            legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortSessionExt_audio_port_session_ext(
-                    VALUE_OR_RETURN(UNION_GET(aidl, session))));
+            legacy.session = VALUE_OR_RETURN(
+                    aidl2legacy_int32_t_audio_port_session_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, session))));
             return legacy;
 
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<media::AudioPortExt> legacy2aidl_AudioPortExt(
-        const audio_port_v7_ext& legacy, audio_port_type_t type) {
-    media::AudioPortExt aidl;
+status_t legacy2aidl_AudioPortExt(
+        const audio_port_v7_ext& legacy, audio_port_type_t type,
+        AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
-            UNION_SET(aidl, unspecified, false);
-            return aidl;
-        case AUDIO_PORT_TYPE_DEVICE:
-            UNION_SET(aidl, device,
-                      VALUE_OR_RETURN(
-                              legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy.device)));
-            return aidl;
-        case AUDIO_PORT_TYPE_MIX:
-            UNION_SET(aidl, mix,
-                      VALUE_OR_RETURN(legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy.mix)));
-            return aidl;
+            UNION_SET(*aidl, unspecified, false);
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
+        case AUDIO_PORT_TYPE_DEVICE: {
+            AudioPortDeviceExt device;
+            media::AudioPortDeviceExtSys deviceSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+                            legacy.device, &device, &deviceSys));
+            UNION_SET(*aidl, device, device);
+            UNION_SET(*aidlSys, device, deviceSys);
+            return OK;
+        }
+        case AUDIO_PORT_TYPE_MIX: {
+            AudioPortMixExt mix;
+            media::AudioPortMixExtSys mixSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+                            legacy.mix, &mix, &mixSys));
+            UNION_SET(*aidl, mix, mix);
+            UNION_SET(*aidlSys, mix, mixSys);
+            return OK;
+        }
         case AUDIO_PORT_TYPE_SESSION:
-            UNION_SET(aidl, session,
-                      VALUE_OR_RETURN(legacy2aidl_audio_port_session_ext_AudioPortSessionExt(
-                              legacy.session)));
-            return aidl;
+            UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+                            legacy2aidl_audio_port_session_ext_int32_t(legacy.session)));
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl) {
+aidl2legacy_AudioProfile_audio_profile(const AudioProfile& aidl, bool isInput) {
     audio_profile legacy;
-    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
 
-    if (aidl.samplingRates.size() > std::size(legacy.sample_rates)) {
+    if (aidl.sampleRates.size() > std::size(legacy.sample_rates)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
-            convertRange(aidl.samplingRates.begin(), aidl.samplingRates.end(), legacy.sample_rates,
+            convertRange(aidl.sampleRates.begin(), aidl.sampleRates.end(), legacy.sample_rates,
                          convertIntegral<int32_t, unsigned int>));
-    legacy.num_sample_rates = aidl.samplingRates.size();
+    legacy.num_sample_rates = aidl.sampleRates.size();
 
     if (aidl.channelMasks.size() > std::size(legacy.channel_masks)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
             convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
-                         aidl2legacy_int32_t_audio_channel_mask_t));
+                    [isInput](const AudioChannelLayout& l) {
+                        return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+                    }));
     legacy.num_channel_masks = aidl.channelMasks.size();
 
     legacy.encapsulation_type = VALUE_OR_RETURN(
@@ -1924,17 +2888,17 @@
     return legacy;
 }
 
-ConversionResult<media::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy) {
-    media::AudioProfile aidl;
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ConversionResult<AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput) {
+    AudioProfile aidl;
+    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
 
     if (legacy.num_sample_rates > std::size(legacy.sample_rates)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
             convertRange(legacy.sample_rates, legacy.sample_rates + legacy.num_sample_rates,
-                         std::back_inserter(aidl.samplingRates),
+                         std::back_inserter(aidl.sampleRates),
                          convertIntegral<unsigned int, int32_t>));
 
     if (legacy.num_channel_masks > std::size(legacy.channel_masks)) {
@@ -1943,7 +2907,9 @@
     RETURN_IF_ERROR(
             convertRange(legacy.channel_masks, legacy.channel_masks + legacy.num_channel_masks,
                          std::back_inserter(aidl.channelMasks),
-                         legacy2aidl_audio_channel_mask_t_int32_t));
+                    [isInput](audio_channel_mask_t m) {
+                        return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+                    }));
 
     aidl.encapsulationType = VALUE_OR_RETURN(
             legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
@@ -1952,11 +2918,11 @@
 }
 
 ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl) {
+aidl2legacy_AudioGain_audio_gain(const AudioGain& aidl, bool isInput) {
     audio_gain legacy;
     legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
-    legacy.channel_mask = VALUE_OR_RETURN(
-            aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+    legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+                    aidl.channelMask, isInput));
     legacy.min_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.minValue));
     legacy.max_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.maxValue));
     legacy.default_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.defaultValue));
@@ -1966,12 +2932,12 @@
     return legacy;
 }
 
-ConversionResult<media::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy) {
-    media::AudioGain aidl;
+ConversionResult<AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput) {
+    AudioGain aidl;
     aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
     aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
     aidl.maxValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_value));
     aidl.defaultValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.default_value));
@@ -1984,63 +2950,76 @@
 ConversionResult<audio_port_v7>
 aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl) {
     audio_port_v7 legacy;
-    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
-    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
-    RETURN_IF_ERROR(aidl2legacy_string(aidl.name, legacy.name, sizeof(legacy.name)));
+    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
+    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
+    RETURN_IF_ERROR(aidl2legacy_string(aidl.hal.name, legacy.name, sizeof(legacy.name)));
 
-    if (aidl.profiles.size() > std::size(legacy.audio_profiles)) {
+    if (aidl.hal.profiles.size() > std::size(legacy.audio_profiles)) {
         return unexpected(BAD_VALUE);
     }
-    RETURN_IF_ERROR(convertRange(aidl.profiles.begin(), aidl.profiles.end(), legacy.audio_profiles,
-                                 aidl2legacy_AudioProfile_audio_profile));
-    legacy.num_audio_profiles = aidl.profiles.size();
+    const bool isInput =
+            VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
+    RETURN_IF_ERROR(convertRange(
+                    aidl.hal.profiles.begin(), aidl.hal.profiles.end(), legacy.audio_profiles,
+                    [isInput](const AudioProfile& p) {
+                        return aidl2legacy_AudioProfile_audio_profile(p, isInput);
+                    }));
+    legacy.num_audio_profiles = aidl.hal.profiles.size();
 
-    if (aidl.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
+    if (aidl.hal.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
-            convertRange(aidl.extraAudioDescriptors.begin(), aidl.extraAudioDescriptors.end(),
-                         legacy.extra_audio_descriptors,
-                         aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
-    legacy.num_extra_audio_descriptors = aidl.extraAudioDescriptors.size();
+            convertRange(
+                    aidl.hal.extraAudioDescriptors.begin(), aidl.hal.extraAudioDescriptors.end(),
+                    legacy.extra_audio_descriptors,
+                    aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
+    legacy.num_extra_audio_descriptors = aidl.hal.extraAudioDescriptors.size();
 
-    if (aidl.gains.size() > std::size(legacy.gains)) {
+    if (aidl.hal.gains.size() > std::size(legacy.gains)) {
         return unexpected(BAD_VALUE);
     }
-    RETURN_IF_ERROR(convertRange(aidl.gains.begin(), aidl.gains.end(), legacy.gains,
-                                 aidl2legacy_AudioGain_audio_gain));
-    legacy.num_gains = aidl.gains.size();
+    RETURN_IF_ERROR(convertRange(aidl.hal.gains.begin(), aidl.hal.gains.end(), legacy.gains,
+                                 [isInput](const AudioGain& g) {
+                                     return aidl2legacy_AudioGain_audio_gain(g, isInput);
+                                 }));
+    legacy.num_gains = aidl.hal.gains.size();
 
     legacy.active_config = VALUE_OR_RETURN(
-            aidl2legacy_AudioPortConfig_audio_port_config(aidl.activeConfig));
-    legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortExt(aidl.ext, aidl.type));
+            aidl2legacy_AudioPortConfig_audio_port_config(aidl.sys.activeConfig));
+    legacy.ext = VALUE_OR_RETURN(
+            aidl2legacy_AudioPortExt_audio_port_v7_ext(aidl.hal.ext, aidl.sys.type, aidl.sys.ext));
     return legacy;
 }
 
 ConversionResult<media::AudioPort>
 legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy) {
     media::AudioPort aidl;
-    aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
-    aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
-    aidl.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
+    aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+    aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+    aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+    aidl.hal.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
 
     if (legacy.num_audio_profiles > std::size(legacy.audio_profiles)) {
         return unexpected(BAD_VALUE);
     }
+    const bool isInput = VALUE_OR_RETURN(direction(legacy.role, legacy.type)) == Direction::INPUT;
     RETURN_IF_ERROR(
             convertRange(legacy.audio_profiles, legacy.audio_profiles + legacy.num_audio_profiles,
-                         std::back_inserter(aidl.profiles),
-                         legacy2aidl_audio_profile_AudioProfile));
+                         std::back_inserter(aidl.hal.profiles),
+                         [isInput](const audio_profile& p) {
+                             return legacy2aidl_audio_profile_AudioProfile(p, isInput);
+                         }));
 
     if (legacy.num_extra_audio_descriptors > std::size(legacy.extra_audio_descriptors)) {
         return unexpected(BAD_VALUE);
     }
+    aidl.sys.profiles.resize(legacy.num_audio_profiles);
     RETURN_IF_ERROR(
             convertRange(legacy.extra_audio_descriptors,
                     legacy.extra_audio_descriptors + legacy.num_extra_audio_descriptors,
-                    std::back_inserter(aidl.extraAudioDescriptors),
+                    std::back_inserter(aidl.hal.extraAudioDescriptors),
                     legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor));
 
     if (legacy.num_gains > std::size(legacy.gains)) {
@@ -2048,53 +3027,66 @@
     }
     RETURN_IF_ERROR(
             convertRange(legacy.gains, legacy.gains + legacy.num_gains,
-                         std::back_inserter(aidl.gains),
-                         legacy2aidl_audio_gain_AudioGain));
+                         std::back_inserter(aidl.hal.gains),
+                         [isInput](const audio_gain& g) {
+                             return legacy2aidl_audio_gain_AudioGain(g, isInput);
+                         }));
+    aidl.sys.gains.resize(legacy.num_gains);
 
-    aidl.activeConfig = VALUE_OR_RETURN(
+    aidl.sys.activeConfig = VALUE_OR_RETURN(
             legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
-    aidl.ext = VALUE_OR_RETURN(legacy2aidl_AudioPortExt(legacy.ext, legacy.type));
+    aidl.sys.activeConfig.hal.portId = aidl.hal.id;
+    RETURN_IF_ERROR(
+            legacy2aidl_AudioPortExt(legacy.ext, legacy.type, &aidl.hal.ext, &aidl.sys.ext));
     return aidl;
 }
 
 ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl) {
+aidl2legacy_AudioMode_audio_mode_t(AudioMode aidl) {
     switch (aidl) {
-        case media::AudioMode::INVALID:
+        case AudioMode::SYS_RESERVED_INVALID:
             return AUDIO_MODE_INVALID;
-        case media::AudioMode::CURRENT:
+        case AudioMode::SYS_RESERVED_CURRENT:
             return AUDIO_MODE_CURRENT;
-        case media::AudioMode::NORMAL:
+        case AudioMode::NORMAL:
             return AUDIO_MODE_NORMAL;
-        case media::AudioMode::RINGTONE:
+        case AudioMode::RINGTONE:
             return AUDIO_MODE_RINGTONE;
-        case media::AudioMode::IN_CALL:
+        case AudioMode::IN_CALL:
             return AUDIO_MODE_IN_CALL;
-        case media::AudioMode::IN_COMMUNICATION:
+        case AudioMode::IN_COMMUNICATION:
             return AUDIO_MODE_IN_COMMUNICATION;
-        case media::AudioMode::CALL_SCREEN:
+        case AudioMode::CALL_SCREEN:
             return AUDIO_MODE_CALL_SCREEN;
+        case AudioMode::SYS_RESERVED_CALL_REDIRECT:
+            return AUDIO_MODE_CALL_REDIRECT;
+        case AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT:
+            return AUDIO_MODE_COMMUNICATION_REDIRECT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioMode>
+ConversionResult<AudioMode>
 legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy) {
     switch (legacy) {
         case AUDIO_MODE_INVALID:
-            return media::AudioMode::INVALID;
+            return AudioMode::SYS_RESERVED_INVALID;
         case AUDIO_MODE_CURRENT:
-            return media::AudioMode::CURRENT;
+            return AudioMode::SYS_RESERVED_CURRENT;
         case AUDIO_MODE_NORMAL:
-            return media::AudioMode::NORMAL;
+            return AudioMode::NORMAL;
         case AUDIO_MODE_RINGTONE:
-            return media::AudioMode::RINGTONE;
+            return AudioMode::RINGTONE;
         case AUDIO_MODE_IN_CALL:
-            return media::AudioMode::IN_CALL;
+            return AudioMode::IN_CALL;
         case AUDIO_MODE_IN_COMMUNICATION:
-            return media::AudioMode::IN_COMMUNICATION;
+            return AudioMode::IN_COMMUNICATION;
         case AUDIO_MODE_CALL_SCREEN:
-            return media::AudioMode::CALL_SCREEN;
+            return AudioMode::CALL_SCREEN;
+        case AUDIO_MODE_CALL_REDIRECT:
+            return AudioMode::SYS_RESERVED_CALL_REDIRECT;
+        case AUDIO_MODE_COMMUNICATION_REDIRECT:
+            return AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT;
         case AUDIO_MODE_CNT:
             break;
     }
@@ -2244,30 +3236,30 @@
 }
 
 ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::AudioStandard aidl) {
+aidl2legacy_AudioStandard_audio_standard_t(AudioStandard aidl) {
     switch (aidl) {
-        case media::AudioStandard::NONE:
+        case AudioStandard::NONE:
             return AUDIO_STANDARD_NONE;
-        case media::AudioStandard::EDID:
+        case AudioStandard::EDID:
             return AUDIO_STANDARD_EDID;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioStandard>
+ConversionResult<AudioStandard>
 legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy) {
     switch (legacy) {
         case AUDIO_STANDARD_NONE:
-            return media::AudioStandard::NONE;
+            return AudioStandard::NONE;
         case AUDIO_STANDARD_EDID:
-            return media::AudioStandard::EDID;
+            return AudioStandard::EDID;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_extra_audio_descriptor>
 aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
-        const media::ExtraAudioDescriptor& aidl) {
+        const ExtraAudioDescriptor& aidl) {
     audio_extra_audio_descriptor legacy;
     legacy.standard = VALUE_OR_RETURN(aidl2legacy_AudioStandard_audio_standard_t(aidl.standard));
     if (aidl.audioDescriptor.size() > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
@@ -2282,10 +3274,10 @@
     return legacy;
 }
 
-ConversionResult<media::ExtraAudioDescriptor>
+ConversionResult<ExtraAudioDescriptor>
 legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
         const audio_extra_audio_descriptor& legacy) {
-    media::ExtraAudioDescriptor aidl;
+    ExtraAudioDescriptor aidl;
     aidl.standard = VALUE_OR_RETURN(legacy2aidl_audio_standard_t_AudioStandard(legacy.standard));
     if (legacy.descriptor_length > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
         return unexpected(BAD_VALUE);
@@ -2301,24 +3293,24 @@
 
 ConversionResult<audio_encapsulation_type_t>
 aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
-        const media::AudioEncapsulationType& aidl) {
+        const AudioEncapsulationType& aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationType::NONE:
+        case AudioEncapsulationType::NONE:
             return AUDIO_ENCAPSULATION_TYPE_NONE;
-        case media::AudioEncapsulationType::IEC61937:
+        case AudioEncapsulationType::IEC61937:
             return AUDIO_ENCAPSULATION_TYPE_IEC61937;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationType>
+ConversionResult<AudioEncapsulationType>
 legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
         const audio_encapsulation_type_t & legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_TYPE_NONE:
-            return media::AudioEncapsulationType::NONE;
+            return AudioEncapsulationType::NONE;
         case AUDIO_ENCAPSULATION_TYPE_IEC61937:
-            return media::AudioEncapsulationType::IEC61937;
+            return AudioEncapsulationType::IEC61937;
     }
     return unexpected(BAD_VALUE);
 }
@@ -2347,4 +3339,53 @@
     return trackSecondaryOutputInfo;
 }
 
+ConversionResult<audio_direct_mode_t>
+aidl2legacy_AudioDirectMode_audio_direct_mode_t(media::AudioDirectMode aidl) {
+    switch (aidl) {
+        case media::AudioDirectMode::NONE:
+            return AUDIO_DIRECT_NOT_SUPPORTED;
+        case media::AudioDirectMode::OFFLOAD:
+            return AUDIO_DIRECT_OFFLOAD_SUPPORTED;
+        case media::AudioDirectMode::OFFLOAD_GAPLESS:
+            return AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED;
+        case media::AudioDirectMode::BITSTREAM:
+            return AUDIO_DIRECT_BITSTREAM_SUPPORTED;
+    }
+    return unexpected(BAD_VALUE);
+}
+ConversionResult<media::AudioDirectMode>
+legacy2aidl_audio_direct_mode_t_AudioDirectMode(audio_direct_mode_t legacy) {
+    switch (legacy) {
+        case AUDIO_DIRECT_NOT_SUPPORTED:
+            return media::AudioDirectMode::NONE;
+        case AUDIO_DIRECT_OFFLOAD_SUPPORTED:
+            return media::AudioDirectMode::OFFLOAD;
+        case AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED:
+            return media::AudioDirectMode::OFFLOAD_GAPLESS;
+        case AUDIO_DIRECT_BITSTREAM_SUPPORTED:
+            return media::AudioDirectMode::BITSTREAM;
+    }
+    return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_direct_mode_t> aidl2legacy_int32_t_audio_direct_mode_t_mask(int32_t aidl) {
+    using LegacyMask = std::underlying_type_t<audio_direct_mode_t>;
+
+    LegacyMask converted = VALUE_OR_RETURN(
+            (convertBitmask<LegacyMask, int32_t, audio_direct_mode_t, media::AudioDirectMode>(
+                    aidl, aidl2legacy_AudioDirectMode_audio_direct_mode_t,
+                    indexToEnum_index<media::AudioDirectMode>,
+                    enumToMask_bitmask<LegacyMask, audio_direct_mode_t>)));
+    return static_cast<audio_direct_mode_t>(converted);
+}
+ConversionResult<int32_t> legacy2aidl_audio_direct_mode_t_int32_t_mask(audio_direct_mode_t legacy) {
+    using LegacyMask = std::underlying_type_t<audio_direct_mode_t>;
+
+    LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
+    return convertBitmask<int32_t, LegacyMask, media::AudioDirectMode, audio_direct_mode_t>(
+            legacyMask, legacy2aidl_audio_direct_mode_t_AudioDirectMode,
+            indexToEnum_bitmask<audio_direct_mode_t>,
+            enumToMask_index<int32_t, media::AudioDirectMode>);
+}
+
 }  // namespace android
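
The audio_direct_mode_t converters at the end of this file pack index-based AIDL enumerators into an int32_t mask (each enumerator value is a bit position) while the legacy side uses bit-flag values, and any unrecognized bit fails the whole conversion. A rough self-contained sketch of that pattern, using toy enums and a hand-rolled loop in place of the convertBitmask / indexToEnum / enumToMask helpers assumed from the conversion-utility header:

    #include <cstdint>
    #include <optional>

    // Toy stand-ins: the AIDL enum is index-based (enumerator value == bit
    // position in the transported int32_t mask), while the legacy enum is a
    // set of bit-flag values.
    enum class AidlDirectMode : int32_t { NONE = 0, OFFLOAD = 1, BITSTREAM = 2 };
    enum LegacyDirectMode : uint32_t {
        LEGACY_NONE = 0,
        LEGACY_OFFLOAD = 0x1,
        LEGACY_BITSTREAM = 0x2,
    };

    // Decode each set bit of the AIDL mask into an enumerator, convert it, and
    // re-encode it into the legacy mask; an unknown bit fails the whole call,
    // mirroring the unexpected(BAD_VALUE) returns above.
    std::optional<uint32_t> aidlMaskToLegacyMask(int32_t aidlMask) {
        uint32_t legacy = 0;
        for (int bit = 0; bit < 32; ++bit) {
            if ((static_cast<uint32_t>(aidlMask) & (1u << bit)) == 0) continue;
            switch (static_cast<AidlDirectMode>(bit)) {
                case AidlDirectMode::NONE:      legacy |= LEGACY_NONE;      break;
                case AidlDirectMode::OFFLOAD:   legacy |= LEGACY_OFFLOAD;   break;
                case AidlDirectMode::BITSTREAM: legacy |= LEGACY_BITSTREAM; break;
                default:                        return std::nullopt;
            }
        }
        return legacy;
    }

For example, aidlMaskToLegacyMask((1 << 1) | (1 << 2)) yields LEGACY_OFFLOAD | LEGACY_BITSTREAM, while a mask with any unmapped bit converts to std::nullopt rather than a partial result.
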
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 9c307ff..33e2848 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -11,6 +11,10 @@
     name: "libaudioclient_headers",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+    ],
     host_supported: true,
 
     header_libs: [
@@ -25,11 +29,13 @@
     static_libs: [
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
+        "spatializer-aidl-cpp",
         "av-types-aidl-cpp",
     ],
     export_static_lib_headers: [
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
+        "spatializer-aidl-cpp",
         "av-types-aidl-cpp",
     ],
     target: {
@@ -39,7 +45,7 @@
     },
 }
 
-cc_library_shared {
+cc_library {
     name: "libaudiopolicy",
     srcs: [
         "AudioAttributes.cpp",
@@ -49,6 +55,7 @@
         "PolicyAidlConversion.cpp"
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
@@ -69,6 +76,7 @@
     include_dirs: ["system/media/audio_utils/include"],
     export_include_dirs: ["include"],
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
@@ -109,9 +117,11 @@
         "TrackPlayerBase.cpp",
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
+        "spatializer-aidl-cpp",
         "audiopolicy-types-aidl-cpp",
         "av-types-aidl-cpp",
         "capture_state_listener-aidl-cpp",
@@ -131,14 +141,16 @@
         "libprocessgroup",
         "libshmemcompat",
         "libutils",
-        "libvibrator",
         "framework-permission-aidl-cpp",
+        "packagemanager_aidl-cpp",
     ],
     export_shared_lib_headers: [
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
+        "spatializer-aidl-cpp",
         "framework-permission-aidl-cpp",
         "libbinder",
+        "libmediametrics",
     ],
 
     include_dirs: [
@@ -197,7 +209,7 @@
     ],
     apex_available: [
         "//apex_available:platform",
-        "com.android.bluetooth.updatable",
+        "com.android.bluetooth",
         "com.android.media",
         "com.android.media.swcodec",
     ],
@@ -224,16 +236,19 @@
         "libaudioclient_aidl_conversion_util",
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libbase",
         "libbinder",
         "liblog",
         "libshmemcompat",
+        "libstagefright_foundation",
         "libutils",
         "shared-file-region-aidl-cpp",
         "framework-permission-aidl-cpp",
     ],
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libbase",
         "shared-file-region-aidl-cpp",
@@ -303,57 +318,32 @@
     srcs: [
         "aidl/android/media/AudioAttributesInternal.aidl",
         "aidl/android/media/AudioClient.aidl",
-        "aidl/android/media/AudioConfig.aidl",
-        "aidl/android/media/AudioConfigBase.aidl",
-        "aidl/android/media/AudioContentType.aidl",
-        "aidl/android/media/AudioDevice.aidl",
+        "aidl/android/media/AudioDirectMode.aidl",
         "aidl/android/media/AudioDualMonoMode.aidl",
-        "aidl/android/media/AudioEncapsulationMode.aidl",
-        "aidl/android/media/AudioEncapsulationMetadataType.aidl",
-        "aidl/android/media/AudioEncapsulationType.aidl",
         "aidl/android/media/AudioFlag.aidl",
-        "aidl/android/media/AudioGain.aidl",
-        "aidl/android/media/AudioGainConfig.aidl",
-        "aidl/android/media/AudioGainMode.aidl",
-        "aidl/android/media/AudioInputFlags.aidl",
+        "aidl/android/media/AudioGainSys.aidl",
         "aidl/android/media/AudioIoConfigEvent.aidl",
         "aidl/android/media/AudioIoDescriptor.aidl",
-        "aidl/android/media/AudioIoFlags.aidl",
-        "aidl/android/media/AudioMixLatencyClass.aidl",
-        "aidl/android/media/AudioMode.aidl",
-        "aidl/android/media/AudioOffloadInfo.aidl",
-        "aidl/android/media/AudioOutputFlags.aidl",
         "aidl/android/media/AudioPatch.aidl",
         "aidl/android/media/AudioPlaybackRate.aidl",
         "aidl/android/media/AudioPort.aidl",
+        "aidl/android/media/AudioPortSys.aidl",
         "aidl/android/media/AudioPortConfig.aidl",
-        "aidl/android/media/AudioPortConfigType.aidl",
-        "aidl/android/media/AudioPortConfigDeviceExt.aidl",
-        "aidl/android/media/AudioPortConfigExt.aidl",
-        "aidl/android/media/AudioPortConfigMixExt.aidl",
-        "aidl/android/media/AudioPortConfigMixExtUseCase.aidl",
-        "aidl/android/media/AudioPortConfigSessionExt.aidl",
-        "aidl/android/media/AudioPortDeviceExt.aidl",
-        "aidl/android/media/AudioPortExt.aidl",
-        "aidl/android/media/AudioPortMixExt.aidl",
+        "aidl/android/media/AudioPortConfigSys.aidl",
+        "aidl/android/media/AudioPortDeviceExtSys.aidl",
+        "aidl/android/media/AudioPortExtSys.aidl",
+        "aidl/android/media/AudioPortMixExtSys.aidl",
         "aidl/android/media/AudioPortRole.aidl",
-        "aidl/android/media/AudioPortSessionExt.aidl",
         "aidl/android/media/AudioPortType.aidl",
-        "aidl/android/media/AudioProfile.aidl",
-        "aidl/android/media/AudioSourceType.aidl",
-        "aidl/android/media/AudioStandard.aidl",
-        "aidl/android/media/AudioStreamType.aidl",
+        "aidl/android/media/AudioProfileSys.aidl",
         "aidl/android/media/AudioTimestampInternal.aidl",
         "aidl/android/media/AudioUniqueIdUse.aidl",
-        "aidl/android/media/AudioUsage.aidl",
-        "aidl/android/media/AudioUuid.aidl",
         "aidl/android/media/AudioVibratorInfo.aidl",
         "aidl/android/media/EffectDescriptor.aidl",
-        "aidl/android/media/ExtraAudioDescriptor.aidl",
         "aidl/android/media/TrackSecondaryOutputInfo.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
         "framework-permission-aidl",
     ],
     backend: {
@@ -364,6 +354,9 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
 aidl_interface {
@@ -389,9 +382,12 @@
         "aidl/android/media/AudioVolumeGroup.aidl",
         "aidl/android/media/DeviceRole.aidl",
         "aidl/android/media/SoundTriggerSession.aidl",
+        "aidl/android/media/SpatializationLevel.aidl",
+        "aidl/android/media/SpatializationMode.aidl",
+        "aidl/android/media/SpatializerHeadTrackingMode.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
     ],
     backend: {
@@ -402,6 +398,9 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
 
@@ -431,7 +430,7 @@
         "aidl/android/media/IAudioTrackCallback.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
         "av-types-aidl",
         "effect-aidl",
@@ -447,6 +446,9 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
 
@@ -459,17 +461,18 @@
     srcs: [
         "aidl/android/media/GetInputForAttrResponse.aidl",
         "aidl/android/media/GetOutputForAttrResponse.aidl",
-        "aidl/android/media/Int.aidl",
+        "aidl/android/media/GetSpatializerResponse.aidl",
         "aidl/android/media/RecordClientInfo.aidl",
         "aidl/android/media/IAudioPolicyService.aidl",
         "aidl/android/media/IAudioPolicyServiceClient.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
         "audiopolicy-types-aidl",
         "capture_state_listener-aidl",
         "framework-permission-aidl",
+        "spatializer-aidl",
     ],
 
     double_loadable: true,
@@ -481,5 +484,38 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
+    },
+}
+
+aidl_interface {
+    name: "spatializer-aidl",
+    unstable: true,
+    local_include_dir: "aidl",
+    host_supported: true,
+    vendor_available: true,
+    srcs: [
+        "aidl/android/media/INativeSpatializerCallback.aidl",
+        "aidl/android/media/ISpatializer.aidl",
+        "aidl/android/media/ISpatializerHeadTrackingCallback.aidl",
+    ],
+    imports: [
+        "audiopolicy-types-aidl",
+    ],
+
+    double_loadable: true,
+    backend: {
+        cpp: {
+            min_sdk_version: "29",
+            apex_available: [
+                "//apex_available:platform",
+                "com.android.media",
+            ],
+        },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/AudioAttributes.cpp
index 83bf5a7..260c06c 100644
--- a/media/libaudioclient/AudioAttributes.cpp
+++ b/media/libaudioclient/AudioAttributes.cpp
@@ -24,9 +24,6 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
 status_t AudioAttributes::readFromParcel(const Parcel* parcel) {
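
This file, AudioEffect.cpp, and AudioProductStrategy.cpp below all drop their local copies of the RETURN_STATUS_IF_ERROR macro, presumably in favor of a single shared definition in a conversion-utility header. For reference, the early-return pattern the removed macro implements looks like this in isolation (status_t and OK are stand-ins here so the sketch is self-contained):

    #include <cstdint>

    using status_t = int32_t;          // stand-in for android::status_t
    constexpr status_t OK = 0;         // stand-in for android::OK / NO_ERROR

    // Same body as the removed local definitions above.
    #define RETURN_STATUS_IF_ERROR(x) \
        { auto _tmp = (x); if (_tmp != OK) return _tmp; }

    // Hypothetical usage: propagate the first failing status_t and otherwise
    // fall through, which is how the parcel/AIDL conversion code chains its
    // sub-conversions.
    status_t readTwoFields(status_t (*readA)(), status_t (*readB)()) {
        RETURN_STATUS_IF_ERROR(readA());
        RETURN_STATUS_IF_ERROR(readB());
        return OK;
    }

The VALUE_OR_RETURN_STATUS calls used throughout these files appear to apply the same idea to ConversionResult values: yield the value on success, otherwise return the embedded error as a status_t.
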
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 6ad5483..eee7f7e 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -32,16 +32,12 @@
 #include <private/media/AudioEffectShared.h>
 #include <utils/Log.h>
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-        auto _tmp = (x);             \
-        if (_tmp != OK) return _tmp; \
-    }
-
 namespace android {
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
 using media::IAudioPolicyService;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioUuid;
 
 namespace {
 
@@ -70,7 +66,8 @@
                 audio_session_t sessionId,
                 audio_io_handle_t io,
                 const AudioDeviceTypeAddr& device,
-                bool probe)
+                bool probe,
+                bool notifyFramesProcessed)
 {
     sp<media::IEffect> iEffect;
     sp<IMemory> cblk;
@@ -124,12 +121,16 @@
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(device));
     request.attributionSource = mClientAttributionSource;
     request.probe = probe;
+    request.notifyFramesProcessed = notifyFramesProcessed;
 
     media::CreateEffectResponse response;
 
     mStatus = audioFlinger->createEffect(request, &response);
 
     if (mStatus == OK) {
+        if (response.alreadyExists) {
+            mStatus = ALREADY_EXISTS;
+        }
         mId = response.id;
         enabled = response.enabled;
         iEffect = response.effect;
@@ -194,7 +195,8 @@
                 audio_session_t sessionId,
                 audio_io_handle_t io,
                 const AudioDeviceTypeAddr& device,
-                bool probe)
+                bool probe,
+                bool notifyFramesProcessed)
 {
     effect_uuid_t type;
     effect_uuid_t *pType = nullptr;
@@ -211,7 +213,8 @@
         pUuid = &uuid;
     }
 
-    return set(pType, pUuid, priority, cbf, user, sessionId, io, device, probe);
+    return set(pType, pUuid, priority, cbf, user, sessionId, io,
+               device, probe, notifyFramesProcessed);
 }
 
 
@@ -522,6 +525,13 @@
     }
 }
 
+void AudioEffect::framesProcessed(int32_t frames)
+{
+    if (mCbf != NULL) {
+        mCbf(EVENT_FRAMES_PROCESSED, mUserData, &frames);
+    }
+}
+
 // -------------------------------------------------------------------------
 
 status_t AudioEffect::queryNumberEffects(uint32_t *numEffects)
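
The framesProcessed() hook added in the hunk above forwards the new notification to the registered callback as EVENT_FRAMES_PROCESSED, passing a pointer to the int32_t frame count; creation opts into it via the new notifyFramesProcessed argument on set(). A hypothetical client-side handler for that event, assuming the (event, user, info) legacy callback shape implied by the dispatch (the event constant and signature below are stand-ins, not the real header values):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the real AudioEffect event constant.
    constexpr int32_t kEventFramesProcessed = 100;  // hypothetical value

    void effectCallback(int32_t event, void* /*user*/, void* info) {
        if (event == kEventFramesProcessed && info != nullptr) {
            // The dispatch above passes &frames, so info points at an int32_t.
            const int32_t frames = *static_cast<int32_t*>(info);
            std::printf("effect processed %d frames\n", frames);
        }
    }
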
@@ -560,7 +570,7 @@
 
     int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_session_t_int32_t(audioSession));
-    media::Int countAidl;
+    media::audio::common::Int countAidl;
     countAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*count));
     std::vector<media::EffectDescriptor> retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -608,12 +618,12 @@
         uuid = *EFFECT_UUID_NULL;
     }
 
-    media::AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
-    media::AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
+    AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
+    AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
     std::string opPackageNameAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_String16_string(opPackageName));
-    media::AudioSourceType sourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(source));
+    AudioSource sourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(source));
     int32_t retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->addSourceDefaultEffect(typeAidl, opPackageNameAidl, uuidAidl, priority, sourceAidl,
@@ -651,11 +661,11 @@
         uuid = *EFFECT_UUID_NULL;
     }
 
-    media::AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
-    media::AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
+    AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
+    AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
     std::string opPackageNameAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_String16_string(opPackageName));
-    media::AudioUsage usageAidl = VALUE_OR_RETURN_STATUS(
+    media::audio::common::AudioUsage usageAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_usage_t_AudioUsage(usage));
     int32_t retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
index f98027a..ecd423a 100644
--- a/media/libaudioclient/AudioProductStrategy.cpp
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -21,9 +21,6 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
 status_t AudioProductStrategy::readFromParcel(const Parcel* parcel) {
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a1d3bdb..edcb86a 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -20,8 +20,10 @@
 
 #include <inttypes.h>
 #include <android-base/macros.h>
+#include <android-base/stringprintf.h>
 #include <sys/resource.h>
 
+#include <audio_utils/format.h>
 #include <audiomanager/AudioManager.h>
 #include <audiomanager/IAudioManager.h>
 #include <binder/Binder.h>
@@ -39,6 +41,7 @@
 
 namespace android {
 
+using ::android::base::StringPrintf;
 using android::content::AttributionSourceState;
 using aidl_utils::statusTFromBinderStatus;
 
@@ -66,8 +69,9 @@
 
     // We double the size of input buffer for ping pong use of record buffer.
     // Assumes audio_is_linear_pcm(format)
-    if ((*frameCount = (size * 2) / (audio_channel_count_from_in_mask(channelMask) *
-            audio_bytes_per_sample(format))) == 0) {
+    const auto sampleSize = audio_channel_count_from_in_mask(channelMask) *
+                                      audio_bytes_per_sample(format);
+    if (sampleSize == 0 || ((*frameCount = (size * 2) / sampleSize) == 0)) {
         ALOGE("%s(): Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
                 __func__, sampleRate, format, channelMask);
         return BAD_VALUE;
@@ -142,7 +146,7 @@
         audio_channel_mask_t channelMask,
         const AttributionSourceState& client,
         size_t frameCount,
-        callback_t cbf,
+        legacy_callback_t callback,
         void* user,
         uint32_t notificationFrames,
         audio_session_t sessionId,
@@ -162,7 +166,39 @@
 {
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
     pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
-    (void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+    (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback, user,
+            notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
+            uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
+            microphoneFieldDimension);
+}
+
+AudioRecord::AudioRecord(
+        audio_source_t inputSource,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        const AttributionSourceState& client,
+        size_t frameCount,
+        const wp<IAudioRecordCallback>& callback,
+        uint32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        audio_input_flags_t flags,
+        const audio_attributes_t* pAttributes,
+        audio_port_handle_t selectedDeviceId,
+        audio_microphone_direction_t selectedMicDirection,
+        float microphoneFieldDimension)
+    : mActive(false),
+      mStatus(NO_INIT),
+      mClientAttributionSource(client),
+      mSessionId(AUDIO_SESSION_ALLOCATE),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mProxy(nullptr)
+{
+    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
+    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
+    (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback,
             notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
             uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
             microphoneFieldDimension);
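
The constructor overload added above takes the callback as wp<IAudioRecordCallback> rather than a raw function pointer, and set() promotes it before use (callbackHandle = callback.promote() further below), so AudioRecord neither keeps the client's callback object alive nor calls into one that has already been destroyed; that is presumably the motivation for the weak reference. A minimal stand-alone analogue of the hold-weak/promote-on-use pattern with std::weak_ptr (toy interface, not the real android::sp/wp classes):

    #include <memory>

    // Toy analogue (std::weak_ptr instead of android::wp) of holding the
    // record callback weakly and promoting it before each notification.
    struct IRecordCallbackSketch {
        virtual ~IRecordCallbackSketch() = default;
        virtual void onOverrun() = 0;
    };

    class RecorderSketch {
      public:
        void setCallback(const std::weak_ptr<IRecordCallbackSketch>& cb) { mCallback = cb; }

        void notifyOverrun() {
            // Promote the weak reference; skip the notification if the client's
            // callback object has already been destroyed.
            if (auto cb = mCallback.lock()) {
                cb->onOverrun();
            }
        }

      private:
        std::weak_ptr<IRecordCallbackSketch> mCallback;
    };

In this analogue the client keeps the only strong reference (a std::shared_ptr) to its callback object; once it releases that reference, later notifyOverrun() calls become no-ops instead of dangling calls.
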
@@ -205,26 +241,58 @@
     // Otherwise the callback thread will never exit.
     stop();
     if (mAudioRecordThread != 0) {
-        mProxy->interrupt();
         mAudioRecordThread->requestExit();  // see comment in AudioRecord.h
+        mProxy->interrupt();
         mAudioRecordThread->requestExitAndWait();
         mAudioRecordThread.clear();
     }
-    // No lock here: worst case we remove a NULL callback which will be a nop
+
+    AutoMutex lock(mLock);
     if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
         // This may not stop all of these device callbacks!
         // TODO: Add some sort of protection.
         AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
+        mDeviceCallback.clear();
     }
 }
+namespace {
+class LegacyCallbackWrapper : public AudioRecord::IAudioRecordCallback {
+    const AudioRecord::legacy_callback_t mCallback;
+    void* const mData;
+
+  public:
+    LegacyCallbackWrapper(AudioRecord::legacy_callback_t callback, void* user)
+        : mCallback(callback), mData(user) {}
+
+    size_t onMoreData(const AudioRecord::Buffer& buffer) override {
+        AudioRecord::Buffer copy = buffer;
+        mCallback(AudioRecord::EVENT_MORE_DATA, mData, &copy);
+        return copy.size;
+    }
+
+    void onOverrun() override { mCallback(AudioRecord::EVENT_OVERRUN, mData, nullptr); }
+
+    void onMarker(uint32_t markerPosition) override {
+        mCallback(AudioRecord::EVENT_MARKER, mData, &markerPosition);
+    }
+
+    void onNewPos(uint32_t newPos) override {
+        mCallback(AudioRecord::EVENT_NEW_POS, mData, &newPos);
+    }
+
+    void onNewIAudioRecord() override {
+        mCallback(AudioRecord::EVENT_NEW_IAUDIORECORD, mData, nullptr);
+    }
+};
+}  // namespace
+
 status_t AudioRecord::set(
         audio_source_t inputSource,
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
         size_t frameCount,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioRecordCallback>& callback,
         uint32_t notificationFrames,
         bool threadCanCallJava,
         audio_session_t sessionId,
@@ -239,8 +307,7 @@
         int32_t maxSharedAudioHistoryMs)
 {
     status_t status = NO_ERROR;
-    uint32_t channelCount;
-
+    const sp<IAudioRecordCallback> callbackHandle = callback.promote();
     // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "notificationFrames %u, sessionId %d, transferType %d, flags %#x, attributionSource %s"
@@ -271,39 +338,9 @@
     mSelectedMicFieldDimension = microphoneFieldDimension;
     mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;
 
-    switch (transferType) {
-    case TRANSFER_DEFAULT:
-        if (cbf == NULL || threadCanCallJava) {
-            transferType = TRANSFER_SYNC;
-        } else {
-            transferType = TRANSFER_CALLBACK;
-        }
-        break;
-    case TRANSFER_CALLBACK:
-        if (cbf == NULL) {
-            ALOGE("%s(): Transfer type TRANSFER_CALLBACK but cbf == NULL", __func__);
-            status = BAD_VALUE;
-            goto exit;
-        }
-        break;
-    case TRANSFER_OBTAIN:
-    case TRANSFER_SYNC:
-        break;
-    default:
-        ALOGE("%s(): Invalid transfer type %d", __func__, transferType);
-        status = BAD_VALUE;
-        goto exit;
-    }
-    mTransfer = transferType;
-
-    // invariant that mAudioRecord != 0 is true only after set() returns successfully
-    if (mAudioRecord != 0) {
-        ALOGE("%s(): Track already in use", __func__);
-        status = INVALID_OPERATION;
-        goto exit;
-    }
-
-    if (pAttributes == NULL) {
+    std::string errorMessage;
+    // Copy the state variables early so they are available for error reporting.
+    if (pAttributes == nullptr) {
         mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
         mAttributes.source = inputSource;
         if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION
@@ -314,37 +351,74 @@
     } else {
         // stream type shouldn't be looked at, this track has audio attributes
         memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
-        ALOGV("%s(): Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
+        ALOGV("%s: Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
                 __func__, mAttributes.source, mAttributes.flags, mAttributes.tags);
     }
-
     mSampleRate = sampleRate;
-
-    // these below should probably come from the audioFlinger too...
     if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
     }
-
-    // validate parameters
-    // AudioFlinger capture only supports linear PCM
-    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
-        ALOGE("%s(): Format %#x is not linear pcm", __func__, format);
-        status = BAD_VALUE;
-        goto exit;
+    if (!audio_is_linear_pcm(format)) {
+       // Compressed capture requires direct
+       flags = (audio_input_flags_t) (flags | AUDIO_INPUT_FLAG_DIRECT);
+       ALOGI("%s(): Format %#x is not linear pcm. Setting DIRECT, using flags %#x", __func__,
+             format, flags);
     }
     mFormat = format;
-
-    if (!audio_is_input_channel(channelMask)) {
-        ALOGE("%s(): Invalid channel mask %#x", __func__, channelMask);
-        status = BAD_VALUE;
-        goto exit;
-    }
     mChannelMask = channelMask;
-    channelCount = audio_channel_count_from_in_mask(channelMask);
-    mChannelCount = channelCount;
+    mSessionId = sessionId;
+    ALOGV("%s: mSessionId %d", __func__, mSessionId);
+    mOrigFlags = mFlags = flags;
 
-    if (audio_is_linear_pcm(format)) {
-        mFrameSize = channelCount * audio_bytes_per_sample(format);
+    mTransfer = transferType;
+    switch (mTransfer) {
+    case TRANSFER_DEFAULT:
+        if (callbackHandle == nullptr || threadCanCallJava) {
+            mTransfer = TRANSFER_SYNC;
+        } else {
+            mTransfer = TRANSFER_CALLBACK;
+        }
+        break;
+    case TRANSFER_CALLBACK:
+        if (callbackHandle == nullptr) {
+            errorMessage = StringPrintf(
+                    "%s: Transfer type TRANSFER_CALLBACK but callback == nullptr", __func__);
+            status = BAD_VALUE;
+            goto error;
+        }
+        break;
+    case TRANSFER_OBTAIN:
+    case TRANSFER_SYNC:
+        break;
+    default:
+        errorMessage = StringPrintf("%s: Invalid transfer type %d", __func__, mTransfer);
+        status = BAD_VALUE;
+        goto error;
+    }
+
+    // invariant that mAudioRecord != 0 is true only after set() returns successfully
+    if (mAudioRecord != 0) {
+        errorMessage = StringPrintf("%s: Track already in use", __func__);
+        status = INVALID_OPERATION;
+        goto error;
+    }
+
+    if (!audio_is_valid_format(mFormat)) {
+        errorMessage = StringPrintf("%s: Format %#x is not valid", __func__, mFormat);
+        status = BAD_VALUE;
+        goto error;
+    }
+
+    if (!audio_is_input_channel(mChannelMask)) {
+        errorMessage = StringPrintf("%s: Invalid channel mask %#x", __func__, mChannelMask);
+        status = BAD_VALUE;
+        goto error;
+    }
+
+    mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+
+    if (audio_is_linear_pcm(mFormat)) {
+        mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
     } else {
         mFrameSize = sizeof(uint8_t);
     }
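
As a quick aside, the frame-size bookkeeping just above can be summarized with a tiny illustration (the helper name and example values are hypothetical, not part of the patch):

    // Illustration only: frame size as computed in set() above.
    // Linear PCM: bytes per frame = channel count * bytes per sample,
    //   e.g. stereo AUDIO_FORMAT_PCM_16_BIT -> 2 * 2 = 4 bytes per frame.
    // Compressed (non-linear PCM): the stream is tracked byte by byte, so 1.
    size_t frameSizeForSketch(uint32_t channelCount, uint32_t bytesPerSample, bool isLinearPcm) {
        return isLinearPcm ? channelCount * bytesPerSample : sizeof(uint8_t);
    }
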
@@ -355,13 +429,8 @@
     mNotificationFramesReq = notificationFrames;
     // mNotificationFramesAct is initialized in createRecord_l
 
-    mSessionId = sessionId;
-    ALOGV("%s(): mSessionId %d", __func__, mSessionId);
-
-    mOrigFlags = mFlags = flags;
-    mCbf = cbf;
-
-    if (cbf != NULL) {
+    mCallback = callbackHandle;
+    if (mCallback != nullptr) {
         mAudioRecordThread = new AudioRecordThread(*this);
         mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
         // thread begins in paused state, and will not reference us until start()
@@ -381,10 +450,10 @@
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
         }
+        // bypass error message to avoid logging twice (createRecord_l logs the error).
         goto exit;
     }
 
-    mUserData = user;
     // TODO: add audio hardware input latency here
     mLatency = (1000LL * mFrameCount) / mSampleRate;
     mMarkerPosition = 0;
@@ -398,14 +467,48 @@
     mFramesRead = 0;
     mFramesReadServerOffset = 0;
 
-exit:
-    mStatus = status;
+error:
     if (status != NO_ERROR) {
         mMediaMetrics.markError(status, __FUNCTION__);
+        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
     }
+exit:
+    mStatus = status;
     return status;
 }
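
The reworked set() above takes a callback handle (held weakly, as the mCallback.promote() calls later in this patch show) instead of a raw function pointer. A minimal sketch of a client-side receiver, assuming the interface exposes the five handlers invoked from processAudioBuffer() below; the exact base-class nesting and signatures should be checked against AudioRecord.h:

    // Hypothetical receiver for the new AudioRecord callback interface.
    // Method names match the calls made from processAudioBuffer() in this patch.
    struct MyRecordCallback : public AudioRecord::IAudioRecordCallback {
        size_t onMoreData(const AudioRecord::Buffer& buffer) override {
            // Consume buffer.size bytes of captured audio, return how much was taken.
            return buffer.size;
        }
        void onOverrun() override { /* input overflowed, some data was lost */ }
        void onMarker(uint32_t markerPosition) override { /* marker reached */ }
        void onNewPos(uint32_t newPos) override { /* periodic position update */ }
        void onNewIAudioRecord() override { /* server-side track was re-created */ }
    };

Because the record only keeps a weak reference, the caller must hold an sp<> to such an object for as long as callbacks are expected.
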
 
+status_t AudioRecord::set(
+        audio_source_t inputSource,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        legacy_callback_t callback,
+        void* user,
+        uint32_t notificationFrames,
+        bool threadCanCallJava,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        audio_input_flags_t flags,
+        uid_t uid,
+        pid_t pid,
+        const audio_attributes_t* pAttributes,
+        audio_port_handle_t selectedDeviceId,
+        audio_microphone_direction_t selectedMicDirection,
+        float microphoneFieldDimension,
+        int32_t maxSharedAudioHistoryMs)
+{
+    if (callback != nullptr) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(inputSource, sampleRate, format, channelMask, frameCount, mLegacyCallbackWrapper,
+        notificationFrames, threadCanCallJava, sessionId, transferType, flags, uid, pid,
+        pAttributes, selectedDeviceId, selectedMicDirection, microphoneFieldDimension,
+        maxSharedAudioHistoryMs);
+}
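
The legacy overload above keeps the old function-pointer entry point working by folding the callback and its user cookie into mLegacyCallbackWrapper. The real wrapper is defined elsewhere in the AudioRecord sources; the sketch below only illustrates the adapter pattern, assuming legacy_callback_t has the historical (event, user, info) shape implied by the removed mCbf() calls:

    // Illustrative adapter only -- not the framework's LegacyCallbackWrapper.
    // Assumes: void (*legacy_callback_t)(int event, void* user, void* info),
    // plus the EVENT_* codes referenced by the mCbf() calls removed in this patch.
    class LegacyCallbackWrapperSketch : public AudioRecord::IAudioRecordCallback {
    public:
        LegacyCallbackWrapperSketch(AudioRecord::legacy_callback_t cb, void* user)
            : mCb(cb), mUser(user) {}
        size_t onMoreData(const AudioRecord::Buffer& buffer) override {
            AudioRecord::Buffer copy = buffer;   // the legacy callback may shrink .size
            mCb(AudioRecord::EVENT_MORE_DATA, mUser, &copy);
            return copy.size;                    // bytes actually consumed by the client
        }
        void onOverrun() override { mCb(AudioRecord::EVENT_OVERRUN, mUser, nullptr); }
        void onMarker(uint32_t marker) override { mCb(AudioRecord::EVENT_MARKER, mUser, &marker); }
        void onNewPos(uint32_t pos) override { mCb(AudioRecord::EVENT_NEW_POS, mUser, &pos); }
        void onNewIAudioRecord() override { mCb(AudioRecord::EVENT_NEW_IAUDIORECORD, mUser, nullptr); }
    private:
        const AudioRecord::legacy_callback_t mCb;
        void* const mUser;
    };
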
 // -------------------------------------------------------------------------
 
 status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
@@ -535,12 +638,12 @@
 
 status_t AudioRecord::setMarkerPosition(uint32_t marker)
 {
+    AutoMutex lock(mLock);
     // The only purpose of setting marker position is to get a callback
-    if (mCbf == NULL) {
+    if (mCallback.promote() == nullptr) {
         return INVALID_OPERATION;
     }
 
-    AutoMutex lock(mLock);
     mMarkerPosition = marker;
     mMarkerReached = false;
 
@@ -565,12 +668,12 @@
 
 status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
+    AutoMutex lock(mLock);
     // The only purpose of setting position update period is to get a callback
-    if (mCbf == NULL) {
+    if (mCallback.promote() == nullptr) {
         return INVALID_OPERATION;
     }
 
-    AutoMutex lock(mLock);
     mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
@@ -621,6 +724,11 @@
     if (status == OK) {
         timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesRead;
         timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
+        if (!audio_is_linear_pcm(mFormat)) {
+            // Don't do retrograde corrections or server offset if track is
+            // compressed
+            return OK;
+        }
         // server side frame offset in case AudioRecord has been restored.
         for (int i = ExtendedTimestamp::LOCATION_SERVER;
                 i < ExtendedTimestamp::LOCATION_MAX; ++i) {
@@ -668,6 +776,8 @@
 // ---- Explicit Routing ---------------------------------------------------
 status_t AudioRecord::setInputDevice(audio_port_handle_t deviceId) {
     AutoMutex lock(mLock);
+    ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
+            __func__, mPortId, deviceId, mSelectedDeviceId);
     if (mSelectedDeviceId != deviceId) {
         mSelectedDeviceId = deviceId;
         if (mStatus == NO_ERROR) {
@@ -754,15 +864,16 @@
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     IAudioFlinger::CreateRecordInput input;
     IAudioFlinger::CreateRecordOutput output;
-    audio_session_t originalSessionId;
+    [[maybe_unused]] audio_session_t originalSessionId;
     void *iMemPointer;
     audio_track_cblk_t* cblk;
     status_t status;
     static const int32_t kMaxCreateAttempts = 3;
     int32_t remainingAttempts = kMaxCreateAttempts;
+    std::string errorMessage;
 
     if (audioFlinger == 0) {
-        ALOGE("%s(%d): Could not get audioflinger", __func__, mPortId);
+        errorMessage = StringPrintf("%s(%d): Could not get audioflinger", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -828,8 +939,9 @@
             break;
         }
         if (status != FAILED_TRANSACTION || --remainingAttempts <= 0) {
-            ALOGE("%s(%d): AudioFlinger could not create record track, status: %d",
-                  __func__, mPortId, status);
+            errorMessage = StringPrintf(
+                    "%s(%d): AudioFlinger could not create record track, status: %d",
+                    __func__, mPortId, status);
             goto exit;
         }
         // FAILED_TRANSACTION happens under very specific conditions causing a state mismatch
@@ -856,9 +968,13 @@
     mRoutedDeviceId = output.selectedDeviceId;
     mSessionId = output.sessionId;
     mSampleRate = output.sampleRate;
+    mServerConfig = output.serverConfig;
+    mServerFrameSize = audio_bytes_per_frame(
+            audio_channel_count_from_in_mask(mServerConfig.channel_mask), mServerConfig.format);
+    mServerSampleSize = audio_bytes_per_sample(mServerConfig.format);
 
     if (output.cblk == 0) {
-        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
+        errorMessage = StringPrintf("%s(%d): Could not get control block", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -868,7 +984,8 @@
     //       issue (e.g. by copying).
     iMemPointer = output.cblk ->unsecurePointer();
     if (iMemPointer == NULL) {
-        ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
+        errorMessage = StringPrintf(
+                "%s(%d): Could not get control block pointer", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -887,7 +1004,8 @@
         //       issue (e.g. by copying).
         buffers = output.buffers->unsecurePointer();
         if (buffers == NULL) {
-            ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
+            errorMessage = StringPrintf(
+                    "%s(%d): Could not get buffer pointer", __func__, mPortId);
             status = NO_INIT;
             goto exit;
         }
@@ -919,6 +1037,10 @@
                 mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
     }
     mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
+    if (mServerConfig.format != mFormat && mCallback.promote() != nullptr) {
+        mFormatConversionBufRaw = std::make_unique<uint8_t[]>(mNotificationFramesAct * mFrameSize);
+        mFormatConversionBuffer.raw = mFormatConversionBufRaw.get();
+    }
 
     //mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
     if (mDeviceCallback != 0) {
@@ -945,7 +1067,7 @@
     }
 
     // update proxy
-    mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
+    mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mServerFrameSize);
     mProxy->setEpoch(epoch);
     mProxy->setMinimum(mNotificationFramesAct);
 
@@ -972,16 +1094,44 @@
         .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
         // the following are NOT immutable
         .set(AMEDIAMETRICS_PROP_STATE, stateToString(mActive))
+        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
         .set(AMEDIAMETRICS_PROP_SELECTEDMICDIRECTION, (int32_t)mSelectedMicDirection)
         .set(AMEDIAMETRICS_PROP_SELECTEDMICFIELDDIRECTION, (double)mSelectedMicFieldDimension)
         .record();
 
 exit:
+    if (status != NO_ERROR) {
+        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
+    }
+
     mStatus = status;
     // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
     return status;
 }
 
+// Report error associated with the event and some configuration details.
+void AudioRecord::reportError(status_t status, const char *event, const char *message) const
+{
+    if (status == NO_ERROR) return;
+    // We report error on the native side because some callers do not come
+    // from Java.
+    // Ensure these variables are initialized in set().
+    mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_RECORD_ERROR)
+        .set(AMEDIAMETRICS_PROP_EVENT, event)
+        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
+        .set(AMEDIAMETRICS_PROP_STATUSMESSAGE, message)
+        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
+        .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
+        .set(AMEDIAMETRICS_PROP_SOURCE, toString(mAttributes.source).c_str())
+        .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
+        .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
+        .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
+        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
+        .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
+        .record();
+}
+
 status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
 {
     if (audioBuffer == NULL) {
@@ -1045,7 +1195,13 @@
             if (status == DEAD_OBJECT) {
                 // re-create track, unless someone else has already done so
                 if (newSequence == oldSequence) {
-                    status = restoreRecord_l("obtainBuffer");
+                    if (!audio_is_linear_pcm(mFormat)) {
+                        // If compressed capture, don't attempt to restore the track.
+                        // Return a DEAD_OBJECT error and let the caller recreate.
+                        tryCounter = 0;
+                    } else {
+                        status = restoreRecord_l("obtainBuffer");
+                    }
                     if (status != NO_ERROR) {
                         buffer.mFrameCount = 0;
                         buffer.mRaw = NULL;
@@ -1075,7 +1231,7 @@
     } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
 
     audioBuffer->frameCount = buffer.mFrameCount;
-    audioBuffer->size = buffer.mFrameCount * mFrameSize;
+    audioBuffer->size = buffer.mFrameCount * mServerFrameSize;
     audioBuffer->raw = buffer.mRaw;
     audioBuffer->sequence = oldSequence;
     if (nonContig != NULL) {
@@ -1088,7 +1244,7 @@
 {
     // FIXME add error checking on mode, by adding an internal version
 
-    size_t stepCount = audioBuffer->size / mFrameSize;
+    size_t stepCount = audioBuffer->frameCount;
     if (stepCount == 0) {
         return;
     }
@@ -1150,8 +1306,9 @@
             return ssize_t(err);
         }
 
-        size_t bytesRead = audioBuffer.size;
-        memcpy(buffer, audioBuffer.i8, bytesRead);
+        size_t bytesRead = audioBuffer.frameCount * mFrameSize;
+        memcpy_by_audio_format(buffer, mFormat, audioBuffer.raw, mServerConfig.format,
+                               audioBuffer.size / mServerSampleSize);
         buffer = ((char *) buffer) + bytesRead;
         userSize -= bytesRead;
         read += bytesRead;
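
The read() change above swaps the plain memcpy for memcpy_by_audio_format(): obtainBuffer() now returns data in the server's configuration, the sample count handed to the converter is measured in server-side samples, and the byte count reported to the caller is measured in the client's frame size. A worked example with hypothetical numbers (not taken from the patch):

    // Suppose the server captures stereo AUDIO_FORMAT_PCM_FLOAT while the client
    // asked for stereo AUDIO_FORMAT_PCM_16_BIT, and obtainBuffer() returned 256 frames.
    const size_t frames           = 256;
    const size_t serverSampleSize = 4;                         // sizeof(float)
    const size_t serverFrameSize  = 2 * serverSampleSize;      // 8 bytes per stereo frame
    const size_t clientFrameSize  = 2 * 2;                     // 4 bytes per stereo frame

    const size_t serverBytes = frames * serverFrameSize;       // audioBuffer.size           = 2048
    const size_t sampleCount = serverBytes / serverSampleSize; // passed to the converter    =  512
    const size_t bytesRead   = frames * clientFrameSize;       // advance in caller's buffer = 1024
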
@@ -1170,6 +1327,12 @@
 nsecs_t AudioRecord::processAudioBuffer()
 {
     mLock.lock();
+    const sp<IAudioRecordCallback> callback = mCallback.promote();
+    if (!callback) {
+        mCallback = nullptr;
+        mLock.unlock();
+        return NS_NEVER;
+    }
     if (mAwaitBoost) {
         mAwaitBoost = false;
         mLock.unlock();
@@ -1245,26 +1407,26 @@
     uint32_t sequence = mSequence;
 
     // These fields don't need to be cached, because they are assigned only by set():
-    //      mTransfer, mCbf, mUserData, mSampleRate, mFrameSize
+    //      mTransfer, mCallback, mSampleRate, mFrameSize
 
     mLock.unlock();
 
     // perform callbacks while unlocked
     if (newOverrun) {
-        mCbf(EVENT_OVERRUN, mUserData, NULL);
+        callback->onOverrun();
+
     }
     if (markerReached) {
-        mCbf(EVENT_MARKER, mUserData, &markerPosition);
+        callback->onMarker(markerPosition.value());
     }
     while (newPosCount > 0) {
-        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
-        mCbf(EVENT_NEW_POS, mUserData, &temp);
+        callback->onNewPos(newPosition.value());
         newPosition += updatePeriod;
         newPosCount--;
     }
     if (mObservedSequence != sequence) {
         mObservedSequence = sequence;
-        mCbf(EVENT_NEW_IAUDIORECORD, mUserData, NULL);
+        callback->onNewIAudioRecord();
     }
 
     // if inactive, then don't run me again until re-started
@@ -1348,9 +1510,19 @@
             }
         }
 
-        size_t reqSize = audioBuffer.size;
-        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
-        size_t readSize = audioBuffer.size;
+        Buffer* buffer = &audioBuffer;
+        if (mServerConfig.format != mFormat) {
+            buffer = &mFormatConversionBuffer;
+            buffer->frameCount = audioBuffer.frameCount;
+            buffer->size = buffer->frameCount * mFrameSize;
+            buffer->sequence = audioBuffer.sequence;
+            memcpy_by_audio_format(buffer->raw, mFormat, audioBuffer.raw,
+                                   mServerConfig.format, audioBuffer.size / mServerSampleSize);
+        }
+
+        const size_t reqSize = buffer->size;
+        const size_t readSize = callback->onMoreData(*buffer);
+        buffer->size = readSize;
 
         // Validate on returned size
         if (ssize_t(readSize) < 0 || readSize > reqSize) {
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index f1eeaa3..be81481 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -40,19 +40,25 @@
        if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-        auto _tmp = (x);             \
-        if (_tmp != OK) return _tmp; \
-    }
-
 // ----------------------------------------------------------------------------
 
 namespace android {
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
+using content::AttributionSourceState;
 using media::IAudioPolicyService;
-using android::content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::Int;
 
 // client singleton for AudioFlinger binder interface
 Mutex AudioSystem::gLock;
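
Most of the AudioSystem.cpp changes that follow are mechanical: values that used to cross the policy-service binder boundary as raw int32_t or legacy media:: types now use the stable media::audio::common AIDL types (AudioDevice, AudioDeviceDescription, AudioFormatDescription, AudioSource, AudioStreamType, and so on), converted at the boundary by the legacy2aidl_*/aidl2legacy_* helpers. The general shape of such a wrapper, sketched with a hypothetical policy call so the pattern is visible outside any single hunk:

    // Sketch only: "somePolicyCall" is hypothetical; the macros and conversion
    // helpers are the ones already used throughout AudioSystem.cpp.
    static status_t somePolicyCallSketch(audio_stream_type_t stream, audio_devices_t device) {
        const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
        if (aps == 0) return PERMISSION_DENIED;

        // legacy -> AIDL on the way in
        AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
                legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
        AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
                legacy2aidl_audio_devices_t_AudioDeviceDescription(device));

        // binder call; Status -> status_t
        int32_t resultAidl;
        RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
                aps->somePolicyCall(streamAidl, deviceAidl, &resultAidl)));

        // AIDL -> legacy on the way out (omitted here: the result is a plain int)
        return OK;
    }
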
@@ -336,7 +342,7 @@
     if (desc == 0) {
         *samplingRate = af->sampleRate(ioHandle);
     } else {
-        *samplingRate = desc->mSamplingRate;
+        *samplingRate = desc->getSamplingRate();
     }
     if (*samplingRate == 0) {
         ALOGE("AudioSystem::getSamplingRate failed for ioHandle %d", ioHandle);
@@ -371,7 +377,7 @@
     if (desc == 0) {
         *frameCount = af->frameCount(ioHandle);
     } else {
-        *frameCount = desc->mFrameCount;
+        *frameCount = desc->getFrameCount();
     }
     if (*frameCount == 0) {
         ALOGE("AudioSystem::getFrameCount failed for ioHandle %d", ioHandle);
@@ -406,7 +412,7 @@
     if (outputDesc == 0) {
         *latency = af->latency(output);
     } else {
-        *latency = outputDesc->mLatency;
+        *latency = outputDesc->getLatency();
     }
 
     ALOGV("getLatency() output %d, latency %d", output, *latency);
@@ -480,6 +486,12 @@
     return af->systemReady();
 }
 
+status_t AudioSystem::audioPolicyReady() {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return NO_INIT;
+    return af->audioPolicyReady();
+}
+
 status_t AudioSystem::getFrameCountHAL(audio_io_handle_t ioHandle,
                                        size_t* frameCount) {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -488,7 +500,7 @@
     if (desc == 0) {
         *frameCount = af->frameCountHAL(ioHandle);
     } else {
-        *frameCount = desc->mFrameCountHAL;
+        *frameCount = desc->getFrameCountHAL();
     }
     if (*frameCount == 0) {
         ALOGE("AudioSystem::getFrameCountHAL failed for ioHandle %d", ioHandle);
@@ -529,15 +541,15 @@
 Status AudioSystem::AudioFlingerClient::ioConfigChanged(
         media::AudioIoConfigEvent _event,
         const media::AudioIoDescriptor& _ioDesc) {
-    audio_io_config_event event = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioIoConfigEvent_audio_io_config_event(_event));
+    audio_io_config_event_t event = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(_event));
     sp<AudioIoDescriptor> ioDesc(
             VALUE_OR_RETURN_BINDER_STATUS(
                     aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(_ioDesc)));
 
     ALOGV("ioConfigChanged() event %d", event);
 
-    if (ioDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) return Status::ok();
+    if (ioDesc->getIoHandle() == AUDIO_IO_HANDLE_NONE) return Status::ok();
 
     audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
     std::vector<sp<AudioDeviceCallback>> callbacksToCall;
@@ -550,93 +562,88 @@
             case AUDIO_OUTPUT_REGISTERED:
             case AUDIO_INPUT_OPENED:
             case AUDIO_INPUT_REGISTERED: {
-                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
                 if (oldDesc == 0) {
-                    mIoDescriptors.add(ioDesc->mIoHandle, ioDesc);
+                    mIoDescriptors.add(ioDesc->getIoHandle(), ioDesc);
                 } else {
                     deviceId = oldDesc->getDeviceId();
-                    mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+                    mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
                 }
 
                 if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
                     deviceId = ioDesc->getDeviceId();
                     if (event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED) {
-                        auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+                        auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
                         if (it != mAudioDeviceCallbacks.end()) {
                             callbacks = it->second;
                         }
                     }
                 }
-                ALOGV("ioConfigChanged() new %s %s %d samplingRate %u, format %#x channel mask %#x "
-                      "frameCount %zu deviceId %d",
+                ALOGV("ioConfigChanged() new %s %s %s",
                       event == AUDIO_OUTPUT_OPENED || event == AUDIO_OUTPUT_REGISTERED ?
                       "output" : "input",
                       event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED ?
                       "opened" : "registered",
-                      ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
-                      ioDesc->mChannelMask,
-                      ioDesc->mFrameCount, ioDesc->getDeviceId());
+                      ioDesc->toDebugString().c_str());
             }
                 break;
             case AUDIO_OUTPUT_CLOSED:
             case AUDIO_INPUT_CLOSED: {
-                if (getIoDescriptor_l(ioDesc->mIoHandle) == 0) {
+                if (getIoDescriptor_l(ioDesc->getIoHandle()) == 0) {
                     ALOGW("ioConfigChanged() closing unknown %s %d",
-                          event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+                          event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
                     break;
                 }
                 ALOGV("ioConfigChanged() %s %d closed",
-                      event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+                      event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
 
-                mIoDescriptors.removeItem(ioDesc->mIoHandle);
-                mAudioDeviceCallbacks.erase(ioDesc->mIoHandle);
+                mIoDescriptors.removeItem(ioDesc->getIoHandle());
+                mAudioDeviceCallbacks.erase(ioDesc->getIoHandle());
             }
                 break;
 
             case AUDIO_OUTPUT_CONFIG_CHANGED:
             case AUDIO_INPUT_CONFIG_CHANGED: {
-                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
                 if (oldDesc == 0) {
                     ALOGW("ioConfigChanged() modifying unknown %s! %d",
                           event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
-                          ioDesc->mIoHandle);
+                          ioDesc->getIoHandle());
                     break;
                 }
 
                 deviceId = oldDesc->getDeviceId();
-                mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+                mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
 
                 if (deviceId != ioDesc->getDeviceId()) {
                     deviceId = ioDesc->getDeviceId();
-                    auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+                    auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
                     if (it != mAudioDeviceCallbacks.end()) {
                         callbacks = it->second;
                     }
                 }
-                ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
-                      "channel mask %#x frameCount %zu frameCountHAL %zu deviceId %d",
+                ALOGV("ioConfigChanged() new config for %s %s",
                       event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
-                      ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
-                      ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->mFrameCountHAL,
-                      ioDesc->getDeviceId());
+                      ioDesc->toDebugString().c_str());
 
             }
                 break;
             case AUDIO_CLIENT_STARTED: {
-                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
                 if (oldDesc == 0) {
-                    ALOGW("ioConfigChanged() start client on unknown io! %d", ioDesc->mIoHandle);
+                    ALOGW("ioConfigChanged() start client on unknown io! %d",
+                            ioDesc->getIoHandle());
                     break;
                 }
                 ALOGV("ioConfigChanged() AUDIO_CLIENT_STARTED  io %d port %d num callbacks %zu",
-                      ioDesc->mIoHandle, ioDesc->mPortId, mAudioDeviceCallbacks.size());
-                oldDesc->mPatch = ioDesc->mPatch;
-                auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+                      ioDesc->getIoHandle(), ioDesc->getPortId(), mAudioDeviceCallbacks.size());
+                oldDesc->setPatch(ioDesc->getPatch());
+                auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
                 if (it != mAudioDeviceCallbacks.end()) {
                     auto cbks = it->second;
-                    auto it2 = cbks.find(ioDesc->mPortId);
+                    auto it2 = cbks.find(ioDesc->getPortId());
                     if (it2 != cbks.end()) {
-                        callbacks.emplace(ioDesc->mPortId, it2->second);
+                        callbacks.emplace(ioDesc->getPortId(), it2->second);
                         deviceId = oldDesc->getDeviceId();
                     }
                 }
@@ -655,8 +662,8 @@
     // Callbacks must be called without mLock held. May lead to dead lock if calling for
     // example getRoutedDevice that updates the device and tries to acquire mLock.
     for (auto cb  : callbacksToCall) {
-        // If callbacksToCall is not empty, it implies ioDesc->mIoHandle and deviceId are valid
-        cb->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
+        // If callbacksToCall is not empty, it implies ioDesc->getIoHandle() and deviceId are valid
+        cb->onAudioDeviceUpdate(ioDesc->getIoHandle(), deviceId);
     }
 
     return Status::ok();
@@ -845,9 +852,8 @@
         name = device_name;
     }
 
-    media::AudioDevice deviceAidl;
-    deviceAidl.type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
-    deviceAidl.address = address;
+    AudioDevice deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(device, address));
 
     return statusTFromBinderStatus(
             aps->setDeviceConnectionState(
@@ -855,7 +861,8 @@
                     VALUE_OR_RETURN_STATUS(
                             legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(state)),
                     name,
-                    VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
+                    VALUE_OR_RETURN_STATUS(
+                            legacy2aidl_audio_format_t_AudioFormatDescription(encodedFormat))));
 }
 
 audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
@@ -864,9 +871,8 @@
     if (aps == 0) return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
 
     auto result = [&]() -> ConversionResult<audio_policy_dev_state_t> {
-        media::AudioDevice deviceAidl;
-        deviceAidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(device));
-        deviceAidl.address = device_address;
+        AudioDevice deviceAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_device_AudioDevice(device, device_address));
 
         media::AudioPolicyDeviceState result;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -894,13 +900,12 @@
         name = device_name;
     }
 
-    media::AudioDevice deviceAidl;
-    deviceAidl.type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
-    deviceAidl.address = address;
+    AudioDevice deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(device, address));
 
     return statusTFromBinderStatus(
             aps->handleDeviceConfigChange(deviceAidl, name, VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
+                    legacy2aidl_audio_format_t_AudioFormatDescription(encodedFormat))));
 }
 
 status_t AudioSystem::setPhoneState(audio_mode_t state, uid_t uid) {
@@ -949,7 +954,7 @@
     if (aps == 0) return AUDIO_IO_HANDLE_NONE;
 
     auto result = [&]() -> ConversionResult<audio_io_handle_t> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t outputAidl;
         RETURN_IF_ERROR(
@@ -997,8 +1002,8 @@
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
     int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
-    media::AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_config_t_AudioConfig(*config));
+    AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
     int32_t flagsAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
     int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
@@ -1091,8 +1096,8 @@
     int32_t inputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
     int32_t riidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_unique_id_t_int32_t(riid));
     int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
-    media::AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_config_base_t_AudioConfigBase(*config));
+    AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(*config, true /*isInput*/));
     int32_t flagsAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
     int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_port_handle_t_int32_t(*selectedDeviceId));
@@ -1148,7 +1153,7 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t indexMinAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(indexMin));
     int32_t indexMaxAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(indexMax));
@@ -1162,10 +1167,11 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     return statusTFromBinderStatus(
             aps->setStreamVolumeIndex(streamAidl, deviceAidl, indexAidl));
 }
@@ -1176,9 +1182,10 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     int32_t indexAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getStreamVolumeIndex(streamAidl, deviceAidl, &indexAidl)));
@@ -1197,7 +1204,8 @@
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
     int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     return statusTFromBinderStatus(
             aps->setVolumeIndexForAttributes(attrAidl, deviceAidl, indexAidl));
 }
@@ -1210,7 +1218,8 @@
 
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     int32_t indexAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getVolumeIndexForAttributes(attrAidl, deviceAidl, &indexAidl)));
@@ -1249,7 +1258,7 @@
     if (aps == 0) return PRODUCT_STRATEGY_NONE;
 
     auto result = [&]() -> ConversionResult<product_strategy_t> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t resultAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -1259,19 +1268,20 @@
     return result.value_or(PRODUCT_STRATEGY_NONE);
 }
 
-audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
+DeviceTypeSet AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return AUDIO_DEVICE_NONE;
+    if (aps == 0) return DeviceTypeSet{};
 
-    auto result = [&]() -> ConversionResult<audio_devices_t> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+    auto result = [&]() -> ConversionResult<DeviceTypeSet> {
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
-        int32_t resultAidl;
+        std::vector<AudioDeviceDescription> resultAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
                 aps->getDevicesForStream(streamAidl, &resultAidl)));
-        return aidl2legacy_int32_t_audio_devices_t(resultAidl);
+        return convertContainer<DeviceTypeSet>(resultAidl,
+                aidl2legacy_AudioDeviceDescription_audio_devices_t);
     }();
-    return result.value_or(AUDIO_DEVICE_NONE);
+    return result.value_or(DeviceTypeSet{});
 }
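
getDevicesForStream() now hands back a DeviceTypeSet instead of a packed audio_devices_t bitmask, so callers that used to mask bits test set membership instead. A minimal caller-side sketch, assuming DeviceTypeSet is the std::set-like container of audio_devices_t that the convertContainer<DeviceTypeSet>(...) call above implies:

    // Hypothetical caller adaptation; the helper name is illustrative.
    static bool streamRoutedToSpeakerSketch(audio_stream_type_t stream) {
        const DeviceTypeSet devices = AudioSystem::getDevicesForStream(stream);
        // Old style (bitmask): (devices & AUDIO_DEVICE_OUT_SPEAKER) != 0
        // New style (type set):
        return devices.count(AUDIO_DEVICE_OUT_SPEAKER) != 0;
    }
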
 
 status_t AudioSystem::getDevicesForAttributes(const AudioAttributes& aa,
@@ -1284,7 +1294,7 @@
 
     media::AudioAttributesEx aaAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioAttributes_AudioAttributesEx(aa));
-    std::vector<media::AudioDevice> retAidl;
+    std::vector<AudioDevice> retAidl;
     RETURN_STATUS_IF_ERROR(
             statusTFromBinderStatus(aps->getDevicesForAttributes(aaAidl, &retAidl)));
     *devices = VALUE_OR_RETURN_STATUS(
@@ -1362,7 +1372,7 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t inPastMsAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(inPastMs));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1376,7 +1386,7 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t inPastMsAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(inPastMs));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1389,8 +1399,8 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioSourceType streamAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(stream));
+    AudioSource streamAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(stream));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->isSourceActive(streamAidl, state)));
     return OK;
@@ -1434,9 +1444,9 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == nullptr) return PERMISSION_DENIED;
 
-    std::vector<media::AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioUsage>>(systemUsages,
-                                                             legacy2aidl_audio_usage_t_AudioUsage));
+    std::vector<AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioUsage>>(systemUsages,
+                                                      legacy2aidl_audio_usage_t_AudioUsage));
     return statusTFromBinderStatus(aps->setSupportedSystemUsages(systemUsagesAidl));
 }
 
@@ -1456,7 +1466,7 @@
     if (aps == 0) return AUDIO_OFFLOAD_NOT_SUPPORTED;
 
     auto result = [&]() -> ConversionResult<audio_offload_mode_t> {
-        media::AudioOffloadInfo infoAidl = VALUE_OR_RETURN(
+        AudioOffloadInfo infoAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(info));
         media::AudioOffloadMode retAidl;
         RETURN_IF_ERROR(
@@ -1484,7 +1494,7 @@
             legacy2aidl_audio_port_role_t_AudioPortRole(role));
     media::AudioPortType typeAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_port_type_t_AudioPortType(type));
-    media::Int numPortsAidl;
+    Int numPortsAidl;
     numPortsAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_ports));
     std::vector<media::AudioPort> portsAidl;
     int32_t generationAidl;
@@ -1551,7 +1561,7 @@
     if (aps == 0) return PERMISSION_DENIED;
 
 
-    media::Int numPatchesAidl;
+    Int numPatchesAidl;
     numPatchesAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
     std::vector<media::AudioPatch> patchesAidl;
     int32_t generationAidl;
@@ -1690,7 +1700,8 @@
             statusTFromBinderStatus(aps->acquireSoundTriggerSession(&retAidl)));
     *session = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_session_t(retAidl.session));
     *ioHandle = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(retAidl.ioHandle));
-    *device = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_devices_t(retAidl.device));
+    *device = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(retAidl.device));
     return OK;
 }
 
@@ -1707,7 +1718,7 @@
     if (aps == 0) return AUDIO_MODE_INVALID;
 
     auto result = [&]() -> ConversionResult<audio_mode_t> {
-        media::AudioMode retAidl;
+        media::audio::common::AudioMode retAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(aps->getPhoneState(&retAidl)));
         return aidl2legacy_AudioMode_audio_mode_t(retAidl);
     }();
@@ -1732,8 +1743,8 @@
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
                                                               legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(aps->setUidDeviceAffinities(uidAidl, devicesAidl));
 }
@@ -1752,9 +1763,9 @@
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t userIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(userId));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setUserIdDeviceAffinities(userIdAidl, devicesAidl));
 }
@@ -1827,10 +1838,11 @@
     if (aps == 0) return NAN;
 
     auto result = [&]() -> ConversionResult<float> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t indexAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(index));
-        int32_t deviceAidl = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(device));
+        AudioDeviceDescription deviceAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
         float retAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
                 aps->getStreamVolumeDB(streamAidl, indexAidl, deviceAidl, &retAidl)));
@@ -1862,10 +1874,10 @@
 
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    media::Int numSurroundFormatsAidl;
+    Int numSurroundFormatsAidl;
     numSurroundFormatsAidl.value =
             VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
-    std::vector<media::audio::common::AudioFormat> surroundFormatsAidl;
+    std::vector<AudioFormatDescription> surroundFormatsAidl;
     std::vector<bool> surroundFormatsEnabledAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl,
@@ -1875,7 +1887,7 @@
             convertIntegral<unsigned int>(numSurroundFormatsAidl.value));
     RETURN_STATUS_IF_ERROR(
             convertRange(surroundFormatsAidl.begin(), surroundFormatsAidl.end(), surroundFormats,
-                         aidl2legacy_AudioFormat_audio_format_t));
+                         aidl2legacy_AudioFormatDescription_audio_format_t));
     std::copy(surroundFormatsEnabledAidl.begin(), surroundFormatsEnabledAidl.end(),
             surroundFormatsEnabled);
     return OK;
@@ -1889,10 +1901,10 @@
 
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    media::Int numSurroundFormatsAidl;
+    Int numSurroundFormatsAidl;
     numSurroundFormatsAidl.value =
             VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
-    std::vector<media::audio::common::AudioFormat> surroundFormatsAidl;
+    std::vector<AudioFormatDescription> surroundFormatsAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getReportedSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl)));
 
@@ -1900,7 +1912,7 @@
             convertIntegral<unsigned int>(numSurroundFormatsAidl.value));
     RETURN_STATUS_IF_ERROR(
             convertRange(surroundFormatsAidl.begin(), surroundFormatsAidl.end(), surroundFormats,
-                         aidl2legacy_AudioFormat_audio_format_t));
+                         aidl2legacy_AudioFormatDescription_audio_format_t));
     return OK;
 }
 
@@ -1908,8 +1920,8 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::audio::common::AudioFormat audioFormatAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_format_t_AudioFormat(audioFormat));
+    AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_format_t_AudioFormatDescription(audioFormat));
     return statusTFromBinderStatus(
             aps->setSurroundFormatEnabled(audioFormatAidl, enabled));
 }
@@ -1960,8 +1972,21 @@
     return result.value_or(false);
 }
 
-status_t AudioSystem::getHwOffloadEncodingFormatsSupportedForA2DP(
-        std::vector<audio_format_t>* formats) {
+bool AudioSystem::isUltrasoundSupported() {
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return false;
+
+    auto result = [&]() -> ConversionResult<bool> {
+        bool retVal;
+        RETURN_IF_ERROR(
+                statusTFromBinderStatus(aps->isUltrasoundSupported(&retVal)));
+        return retVal;
+    }();
+    return result.value_or(false);
+}
+
+status_t AudioSystem::getHwOffloadFormatsSupportedForBluetoothMedia(
+        audio_devices_t device, std::vector<audio_format_t>* formats) {
     if (formats == nullptr) {
         return BAD_VALUE;
     }
@@ -1970,12 +1995,15 @@
             & aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    std::vector<media::audio::common::AudioFormat> formatsAidl;
+    std::vector<AudioFormatDescription> formatsAidl;
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
-            aps->getHwOffloadEncodingFormatsSupportedForA2DP(&formatsAidl)));
+            aps->getHwOffloadFormatsSupportedForBluetoothMedia(deviceAidl, &formatsAidl)));
     *formats = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<audio_format_t>>(formatsAidl,
-                                                          aidl2legacy_AudioFormat_audio_format_t));
+            convertContainer<std::vector<audio_format_t>>(
+                    formatsAidl,
+                    aidl2legacy_AudioFormatDescription_audio_format_t));
     return OK;
 }
 
@@ -2114,9 +2142,9 @@
 
     int32_t strategyAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_product_strategy_t_int32_t(strategy));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setDevicesRoleForStrategy(strategyAidl, roleAidl, devicesAidl));
 }
@@ -2142,7 +2170,7 @@
     }
     int32_t strategyAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_product_strategy_t_int32_t(strategy));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl;
+    std::vector<AudioDevice> devicesAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getDevicesForRoleAndStrategy(strategyAidl, roleAidl, &devicesAidl)));
     devices = VALUE_OR_RETURN_STATUS(
@@ -2159,12 +2187,12 @@
         return PERMISSION_DENIED;
     }
 
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2176,12 +2204,12 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->addDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2192,12 +2220,12 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->removeDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2208,8 +2236,8 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
     return statusTFromBinderStatus(
             aps->clearDevicesRoleForCapturePreset(audioSourceAidl, roleAidl));
@@ -2222,10 +2250,10 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl;
+    std::vector<AudioDevice> devicesAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getDevicesForRoleAndCapturePreset(audioSourceAidl, roleAidl, &devicesAidl)));
     devices = VALUE_OR_RETURN_STATUS(
@@ -2234,6 +2262,94 @@
     return OK;
 }
 
+status_t AudioSystem::getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+                                          sp<media::ISpatializer>* spatializer) {
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (spatializer == nullptr) {
+        return BAD_VALUE;
+    }
+    if (aps == 0) {
+        return PERMISSION_DENIED;
+    }
+    media::GetSpatializerResponse response;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            aps->getSpatializer(callback, &response)));
+
+    *spatializer = response.spatializer;
+    return OK;
+}
+
+status_t AudioSystem::canBeSpatialized(const audio_attributes_t *attr,
+                                    const audio_config_t *config,
+                                    const AudioDeviceTypeAddrVector &devices,
+                                    bool *canBeSpatialized) {
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) {
+        return PERMISSION_DENIED;
+    }
+    audio_attributes_t attributes = attr != nullptr ? *attr : AUDIO_ATTRIBUTES_INITIALIZER;
+    audio_config_t configuration = config != nullptr ? *config : AUDIO_CONFIG_INITIALIZER;
+
+    std::optional<media::AudioAttributesInternal> attrAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
+    std::optional<AudioConfig> configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(configuration, false /*isInput*/));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            aps->canBeSpatialized(attrAidl, configAidl, devicesAidl, canBeSpatialized)));
+    return OK;
+}
+
+status_t AudioSystem::getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                               const audio_config_t *config,
+                                               audio_direct_mode_t* directMode) {
+    if (attr == nullptr || config == nullptr || directMode == nullptr) {
+        return BAD_VALUE;
+    }
+
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
+    AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
+
+    media::AudioDirectMode retAidl;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            aps->getDirectPlaybackSupport(attrAidl, configAidl, &retAidl)));
+    *directMode = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_direct_mode_t_mask(
+            static_cast<int32_t>(retAidl)));
+    return NO_ERROR;
+}
+
+status_t AudioSystem::getDirectProfilesForAttributes(const audio_attributes_t* attr,
+                                                std::vector<audio_profile>* audioProfiles) {
+    if (attr == nullptr) {
+        return BAD_VALUE;
+    }
+
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
+
+    std::vector<media::audio::common::AudioProfile> audioProfilesAidl;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            aps->getDirectProfilesForAttributes(attrAidl, &audioProfilesAidl)));
+    *audioProfiles = VALUE_OR_RETURN_STATUS(convertContainer<std::vector<audio_profile>>(
+                    audioProfilesAidl, aidl2legacy_AudioProfile_audio_profile, false /*isInput*/));
+
+    return NO_ERROR;
+}
+
 class CaptureStateListenerImpl : public media::BnCaptureStateListener,
                                  public IBinder::DeathRecipient {
 public:
@@ -2298,6 +2414,31 @@
     return af->setVibratorInfos(vibratorInfos);
 }
 
+status_t AudioSystem::getMmapPolicyInfo(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
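+    // Forwarded to AudioFlinger; returns PERMISSION_DENIED if the service is unavailable.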
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getMmapPolicyInfos(policyType, policyInfos);
+}
+
+int32_t AudioSystem::getAAudioMixerBurstCount() {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getAAudioMixerBurstCount();
+}
+
+int32_t AudioSystem::getAAudioHardwareBurstMinUsec() {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getAAudioHardwareBurstMinUsec();
+}
+
 // ---------------------------------------------------------------------------
 
 int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
@@ -2409,12 +2550,12 @@
 Status AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate(
         int32_t event,
         const media::RecordClientInfo& clientInfo,
-        const media::AudioConfigBase& clientConfig,
+        const AudioConfigBase& clientConfig,
         const std::vector<media::EffectDescriptor>& clientEffects,
-        const media::AudioConfigBase& deviceConfig,
+        const AudioConfigBase& deviceConfig,
         const std::vector<media::EffectDescriptor>& effects,
         int32_t patchHandle,
-        media::AudioSourceType source) {
+        AudioSource source) {
     record_config_callback cb = NULL;
     {
         Mutex::Autolock _l(AudioSystem::gLock);
@@ -2426,13 +2567,13 @@
         record_client_info_t clientInfoLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 aidl2legacy_RecordClientInfo_record_client_info_t(clientInfo));
         audio_config_base_t clientConfigLegacy = VALUE_OR_RETURN_BINDER_STATUS(
-                aidl2legacy_AudioConfigBase_audio_config_base_t(clientConfig));
+                aidl2legacy_AudioConfigBase_audio_config_base_t(clientConfig, true /*isInput*/));
         std::vector<effect_descriptor_t> clientEffectsLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 convertContainer<std::vector<effect_descriptor_t>>(
                         clientEffects,
                         aidl2legacy_EffectDescriptor_effect_descriptor_t));
         audio_config_base_t deviceConfigLegacy = VALUE_OR_RETURN_BINDER_STATUS(
-                aidl2legacy_AudioConfigBase_audio_config_base_t(deviceConfig));
+                aidl2legacy_AudioConfigBase_audio_config_base_t(deviceConfig, true /*isInput*/));
         std::vector<effect_descriptor_t> effectsLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 convertContainer<std::vector<effect_descriptor_t>>(
                         effects,
@@ -2440,7 +2581,7 @@
         audio_patch_handle_t patchHandleLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 aidl2legacy_int32_t_audio_patch_handle_t(patchHandle));
         audio_source_t sourceLegacy = VALUE_OR_RETURN_BINDER_STATUS(
-                aidl2legacy_AudioSourceType_audio_source_t(source));
+                aidl2legacy_AudioSource_audio_source_t(source));
         cb(eventLegacy, &clientInfoLegacy, &clientConfigLegacy, clientEffectsLegacy,
            &deviceConfigLegacy, effectsLegacy, patchHandleLegacy, sourceLegacy);
     }
@@ -2484,7 +2625,7 @@
     legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
     legacy.uid = VALUE_OR_RETURN(aidl2legacy_int32_t_uid_t(aidl.uid));
     legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
-    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(aidl.source));
+    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
     legacy.port_id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
     legacy.silenced = aidl.silenced;
     return legacy;
@@ -2496,7 +2637,7 @@
     aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(legacy.riid));
     aidl.uid = VALUE_OR_RETURN(legacy2aidl_uid_t_int32_t(legacy.uid));
     aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source));
+    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source));
     aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.port_id));
     aidl.silenced = legacy.silenced;
     return aidl;
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 5f802de..de14e1c 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -21,9 +21,11 @@
 #include <inttypes.h>
 #include <math.h>
 #include <sys/resource.h>
+#include <thread>
 
 #include <android/media/IAudioPolicyService.h>
 #include <android-base/macros.h>
+#include <android-base/stringprintf.h>
 #include <audio_utils/clock.h>
 #include <audio_utils/primitives.h>
 #include <binder/IPCThreadState.h>
@@ -43,6 +45,7 @@
 static const int kMaxLoopCountNotifications = 32;
 
 using ::android::aidl_utils::statusTFromBinderStatus;
+using ::android::base::StringPrintf;
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -170,8 +173,8 @@
     if (aps == 0) return false;
 
     auto result = [&]() -> ConversionResult<bool> {
-        media::AudioConfigBase configAidl = VALUE_OR_RETURN(
-                legacy2aidl_audio_config_base_t_AudioConfigBase(config));
+        media::audio::common::AudioConfigBase configAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_config_base_t_AudioConfigBase(config, false /*isInput*/));
         media::AudioAttributesInternal attributesAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
         bool retAidl;
@@ -196,6 +199,7 @@
 
 #define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
 
+    // Do not change this without changing the MediaMetricsService side.
     // Java API 28 entries, do not change.
     mMetricsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
     mMetricsItem->setCString(MM_PREFIX "type",
@@ -211,6 +215,7 @@
     mMetricsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
     mMetricsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
     mMetricsItem->setCString(MM_PREFIX "logSessionId", track->mLogSessionId.c_str());
+    mMetricsItem->setInt32(MM_PREFIX "underrunFrames", (int32_t)track->getUnderrunFrames());
 }
 
 // hand the user a snapshot of the metrics.
@@ -253,8 +258,7 @@
         audio_channel_mask_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioTrackCallback> & callback,
         int32_t notificationFrames,
         audio_session_t sessionId,
         transfer_type transferType,
@@ -274,7 +278,85 @@
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
 
     (void)set(streamType, sampleRate, format, channelMask,
-            frameCount, flags, cbf, user, notificationFrames,
+            frameCount, flags, callback, notificationFrames,
+            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
+            attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+
+namespace {
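+    // Adapts a legacy C-style callback (legacy_callback_t plus user data pointer) to the
+    // IAudioTrackCallback interface by forwarding each event to the stored callback.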
+    class LegacyCallbackWrapper : public AudioTrack::IAudioTrackCallback {
+        const AudioTrack::legacy_callback_t mCallback;
+        void* const mData;
+      public:
+        LegacyCallbackWrapper(AudioTrack::legacy_callback_t callback, void* user)
+            : mCallback(callback), mData(user) {}
+        size_t onMoreData(const AudioTrack::Buffer& buffer) override {
+            AudioTrack::Buffer copy = buffer;
+            mCallback(AudioTrack::EVENT_MORE_DATA, mData, static_cast<void*>(&copy));
+            return copy.size;
+        }
+        void onUnderrun() override {
+            mCallback(AudioTrack::EVENT_UNDERRUN, mData, nullptr);
+        }
+        void onLoopEnd(int32_t loopsRemaining) override {
+            mCallback(AudioTrack::EVENT_LOOP_END, mData, &loopsRemaining);
+        }
+        void onMarker(uint32_t markerPosition) override {
+            mCallback(AudioTrack::EVENT_MARKER, mData, &markerPosition);
+        }
+        void onNewPos(uint32_t newPos) override {
+            mCallback(AudioTrack::EVENT_NEW_POS, mData, &newPos);
+        }
+        void onBufferEnd() override {
+            mCallback(AudioTrack::EVENT_BUFFER_END, mData, nullptr);
+        }
+        void onNewIAudioTrack() override {
+            mCallback(AudioTrack::EVENT_NEW_IAUDIOTRACK, mData, nullptr);
+        }
+        void onStreamEnd() override {
+            mCallback(AudioTrack::EVENT_STREAM_END, mData, nullptr);
+        }
+        size_t onCanWriteMoreData(const AudioTrack::Buffer& buffer) override {
+            AudioTrack::Buffer copy = buffer;
+            mCallback(AudioTrack::EVENT_CAN_WRITE_MORE_DATA, mData, static_cast<void*>(&copy));
+            return copy.size;
+        }
+    };
+}
+
+AudioTrack::AudioTrack(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        audio_output_flags_t flags,
+        legacy_callback_t callback,
+        void* user,
+        int32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed,
+        audio_port_handle_t selectedDeviceId)
+    : mStatus(NO_INIT),
+      mState(STATE_STOPPED),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0),
+      mAudioTrackCallback(new AudioTrackCallback())
+{
+    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    if (callback != nullptr) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    (void)set(streamType, sampleRate, format, channelMask,
+            frameCount, flags, mLegacyCallbackWrapper, notificationFrames,
             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
             attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
 }
@@ -286,8 +368,7 @@
         audio_channel_mask_t channelMask,
         const sp<IMemory>& sharedBuffer,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioTrackCallback>& callback,
         int32_t notificationFrames,
         audio_session_t sessionId,
         transfer_type transferType,
@@ -307,11 +388,49 @@
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
 
     (void)set(streamType, sampleRate, format, channelMask,
-            0 /*frameCount*/, flags, cbf, user, notificationFrames,
+            0 /*frameCount*/, flags, callback, notificationFrames,
             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
             attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed);
 }
 
+AudioTrack::AudioTrack(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        const sp<IMemory>& sharedBuffer,
+        audio_output_flags_t flags,
+        legacy_callback_t callback,
+        void* user,
+        int32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed)
+    : mStatus(NO_INIT),
+      mState(STATE_STOPPED),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mAudioTrackCallback(new AudioTrackCallback())
+{
+    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+
+    (void)set(streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
+              mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
+              false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, attributionSource,
+              pAttributes, doNotReconnect, maxRequiredSpeed);
+}
+
 AudioTrack::~AudioTrack()
 {
     // pull together the numbers, before we clean up our structures
@@ -352,12 +471,13 @@
     // Otherwise the callback thread will never exit.
     stop();
     if (mAudioTrackThread != 0) { // not thread safe
-        mProxy->interrupt();
         mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
+        mProxy->interrupt();
         mAudioTrackThread->requestExitAndWait();
         mAudioTrackThread.clear();
     }
-    // No lock here: worst case we remove a NULL callback which will be a nop
+
+    AutoMutex lock(mLock);
     if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
         // This may not stop all of these device callbacks!
         // TODO: Add some sort of protection.
@@ -373,8 +493,38 @@
         audio_channel_mask_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        legacy_callback_t callback,
+        void * user,
+        int32_t notificationFrames,
+        const sp<IMemory>& sharedBuffer,
+        bool threadCanCallJava,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed,
+        audio_port_handle_t selectedDeviceId)
+{
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(streamType, sampleRate, format, channelMask, frameCount, flags,
+               mLegacyCallbackWrapper, notificationFrames, sharedBuffer, threadCanCallJava,
+               sessionId, transferType, offloadInfo, attributionSource, pAttributes,
+               doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+
+status_t AudioTrack::set(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        audio_output_flags_t flags,
+        const wp<IAudioTrackCallback>& callback,
         int32_t notificationFrames,
         const sp<IMemory>& sharedBuffer,
         bool threadCanCallJava,
@@ -393,7 +543,8 @@
     pid_t myPid;
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
     pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
-
+    sp<IAudioTrackCallback> _callback = callback.promote();
+    std::string errorMessage;
     // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
@@ -402,86 +553,24 @@
           sessionId, transferType, attributionSource.uid, attributionSource.pid);
 
     mThreadCanCallJava = threadCanCallJava;
+
+    // These variables are included in the error report, so we initialize them early.
     mSelectedDeviceId = selectedDeviceId;
     mSessionId = sessionId;
+    mChannelMask = channelMask;
+    mReqFrameCount = mFrameCount = frameCount;
+    mSampleRate = sampleRate;
+    mOriginalSampleRate = sampleRate;
+    mAttributes = pAttributes != nullptr ? *pAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
+    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
 
-    switch (transferType) {
-    case TRANSFER_DEFAULT:
-        if (sharedBuffer != 0) {
-            transferType = TRANSFER_SHARED;
-        } else if (cbf == NULL || threadCanCallJava) {
-            transferType = TRANSFER_SYNC;
-        } else {
-            transferType = TRANSFER_CALLBACK;
-        }
-        break;
-    case TRANSFER_CALLBACK:
-    case TRANSFER_SYNC_NOTIF_CALLBACK:
-        if (cbf == NULL || sharedBuffer != 0) {
-            ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
-                    convertTransferToText(transferType), __func__);
-            status = BAD_VALUE;
-            goto exit;
-        }
-        break;
-    case TRANSFER_OBTAIN:
-    case TRANSFER_SYNC:
-        if (sharedBuffer != 0) {
-            ALOGE("%s(): Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
-            status = BAD_VALUE;
-            goto exit;
-        }
-        break;
-    case TRANSFER_SHARED:
-        if (sharedBuffer == 0) {
-            ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
-            status = BAD_VALUE;
-            goto exit;
-        }
-        break;
-    default:
-        ALOGE("%s(): Invalid transfer type %d",
-                __func__, transferType);
-        status = BAD_VALUE;
-        goto exit;
-    }
-    mSharedBuffer = sharedBuffer;
-    mTransfer = transferType;
-    mDoNotReconnect = doNotReconnect;
-
-    ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
-            __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());
-
-    ALOGV("%s(): streamType %d frameCount %zu flags %04x",
-            __func__, streamType, frameCount, flags);
-
-    // invariant that mAudioTrack != 0 is true only after set() returns successfully
-    if (mAudioTrack != 0) {
-        ALOGE("%s(): Track already in use", __func__);
-        status = INVALID_OPERATION;
-        goto exit;
-    }
-
-    // handle default values first.
-    if (streamType == AUDIO_STREAM_DEFAULT) {
-        streamType = AUDIO_STREAM_MUSIC;
-    }
-    if (pAttributes == NULL) {
-        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
-            ALOGE("%s(): Invalid stream type %d", __func__, streamType);
-            status = BAD_VALUE;
-            goto exit;
-        }
-        mOriginalStreamType = streamType;
-
-    } else {
+    // update format and flags before storing them in mFormat, mOrigFlags and mFlags
+    if (pAttributes != NULL) {
         // stream type shouldn't be looked at, this track has audio attributes
-        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
         ALOGV("%s(): Building AudioTrack with attributes:"
                 " usage=%d content=%d flags=0x%x tags=[%s]",
                 __func__,
                  mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
-        mOriginalStreamType = AUDIO_STREAM_DEFAULT;
         audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
     }
 
@@ -492,23 +581,6 @@
         flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO);
     }
 
-    // validate parameters
-    if (!audio_is_valid_format(format)) {
-        ALOGE("%s(): Invalid format %#x", __func__, format);
-        status = BAD_VALUE;
-        goto exit;
-    }
-    mFormat = format;
-
-    if (!audio_is_output_channel(channelMask)) {
-        ALOGE("%s(): Invalid channel mask %#x",  __func__, channelMask);
-        status = BAD_VALUE;
-        goto exit;
-    }
-    mChannelMask = channelMask;
-    channelCount = audio_channel_count_from_out_mask(channelMask);
-    mChannelCount = channelCount;
-
     // force direct flag if format is not linear PCM
     // or offload was requested
     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
@@ -527,7 +599,96 @@
         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
     }
 
-    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
+    mFormat = format;
+    mOrigFlags = mFlags = flags;
+
+    switch (transferType) {
+    case TRANSFER_DEFAULT:
+        if (sharedBuffer != 0) {
+            transferType = TRANSFER_SHARED;
+        } else if (_callback == nullptr || threadCanCallJava) {
+            transferType = TRANSFER_SYNC;
+        } else {
+            transferType = TRANSFER_CALLBACK;
+        }
+        break;
+    case TRANSFER_CALLBACK:
+    case TRANSFER_SYNC_NOTIF_CALLBACK:
+        if (_callback == nullptr || sharedBuffer != 0) {
+            errorMessage = StringPrintf(
+                    "%s: Transfer type %s but callback == nullptr || sharedBuffer != 0",
+                    __func__, convertTransferToText(transferType));
+            status = BAD_VALUE;
+            goto error;
+        }
+        break;
+    case TRANSFER_OBTAIN:
+    case TRANSFER_SYNC:
+        if (sharedBuffer != 0) {
+            errorMessage = StringPrintf(
+                    "%s: Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
+            status = BAD_VALUE;
+            goto error;
+        }
+        break;
+    case TRANSFER_SHARED:
+        if (sharedBuffer == 0) {
+            errorMessage = StringPrintf(
+                    "%s: Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
+            status = BAD_VALUE;
+            goto error;
+        }
+        break;
+    default:
+        errorMessage = StringPrintf("%s: Invalid transfer type %d", __func__, transferType);
+        status = BAD_VALUE;
+        goto error;
+    }
+    mSharedBuffer = sharedBuffer;
+    mTransfer = transferType;
+    mDoNotReconnect = doNotReconnect;
+
+    ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
+            __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());
+
+    // invariant that mAudioTrack != 0 is true only after set() returns successfully
+    if (mAudioTrack != 0) {
+        errorMessage = StringPrintf("%s: Track already in use", __func__);
+        status = INVALID_OPERATION;
+        goto error;
+    }
+
+    // handle default values first.
+    if (streamType == AUDIO_STREAM_DEFAULT) {
+        streamType = AUDIO_STREAM_MUSIC;
+    }
+    if (pAttributes == NULL) {
+        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
+            errorMessage = StringPrintf("%s: Invalid stream type %d", __func__, streamType);
+            status = BAD_VALUE;
+            goto error;
+        }
+        mOriginalStreamType = streamType;
+    } else {
+        mOriginalStreamType = AUDIO_STREAM_DEFAULT;
+    }
+
+    // validate parameters
+    if (!audio_is_valid_format(format)) {
+        errorMessage = StringPrintf("%s: Invalid format %#x", __func__, format);
+        status = BAD_VALUE;
+        goto error;
+    }
+
+    if (!audio_is_output_channel(channelMask)) {
+        errorMessage = StringPrintf("%s: Invalid channel mask %#x",  __func__, channelMask);
+        status = BAD_VALUE;
+        goto error;
+    }
+    channelCount = audio_channel_count_from_out_mask(channelMask);
+    mChannelCount = channelCount;
+
+    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
         if (audio_has_proportional_frames(format)) {
             mFrameSize = channelCount * audio_bytes_per_sample(format);
         } else {
@@ -541,13 +702,12 @@
     }
 
     // sampling rate must be specified for direct outputs
-    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+    if (sampleRate == 0 && (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+        errorMessage = StringPrintf(
+                "%s: sample rate must be specified for direct outputs", __func__);
         status = BAD_VALUE;
-        goto exit;
+        goto error;
     }
-    mSampleRate = sampleRate;
-    mOriginalSampleRate = sampleRate;
-    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
     // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
     mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
 
@@ -567,22 +727,22 @@
     mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
     mSendLevel = 0.0f;
     // mFrameCount is initialized in createTrack_l
-    mReqFrameCount = frameCount;
     if (notificationFrames >= 0) {
         mNotificationFramesReq = notificationFrames;
         mNotificationsPerBufferReq = 0;
     } else {
-        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
-            ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
+        if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
+            errorMessage = StringPrintf(
+                    "%s: notificationFrames=%d not permitted for non-fast track",
                     __func__, notificationFrames);
             status = BAD_VALUE;
-            goto exit;
+            goto error;
         }
         if (frameCount > 0) {
             ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
                     __func__, notificationFrames, frameCount);
             status = BAD_VALUE;
-            goto exit;
+            goto error;
         }
         mNotificationFramesReq = 0;
         const uint32_t minNotificationsPerBuffer = 1;
@@ -607,11 +767,10 @@
         mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
     }
     mAuxEffectId = 0;
-    mOrigFlags = mFlags = flags;
-    mCbf = cbf;
+    mCallback = callback;
 
-    if (cbf != NULL) {
-        mAudioTrackThread = new AudioTrackThread(*this);
+    if (_callback != nullptr) {
+        mAudioTrackThread = sp<AudioTrackThread>::make(*this);
         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
         // thread begins in paused state, and will not reference us until start()
     }
@@ -627,10 +786,10 @@
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
+        // We do not goto error here, to avoid logging the error twice.
         goto exit;
     }
 
-    mUserData = user;
     mLoopCount = 0;
     mLoopStart = 0;
     mLoopEnd = 0;
@@ -661,6 +820,12 @@
     mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
     mVolumeHandler = new media::VolumeHandler();
 
+error:
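+    // Common error path: log and report the failure to MediaMetrics, then fall
+    // through to the normal exit to store mStatus.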
+    if (status != NO_ERROR) {
+        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
+    }
+    // fall through
 exit:
     mStatus = status;
     return status;
@@ -674,7 +839,7 @@
         uint32_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
+        legacy_callback_t callback,
         void* user,
         int32_t notificationFrames,
         const sp<IMemory>& sharedBuffer,
@@ -693,11 +858,15 @@
     attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(uid));
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(pid));
     attributionSource.token = sp<BBinder>::make();
-    return set(streamType, sampleRate, format,
-            static_cast<audio_channel_mask_t>(channelMask),
-            frameCount, flags, cbf, user, notificationFrames, sharedBuffer,
-            threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
-            pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(streamType, sampleRate, format, static_cast<audio_channel_mask_t>(channelMask),
+               frameCount, flags, mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
+               threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
+               pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
 }
 
 // -------------------------------------------------------------------------
@@ -947,6 +1116,61 @@
     mAudioTrack->flush();
 }
 
+bool AudioTrack::pauseAndWait(const std::chrono::milliseconds& timeout)
+{
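+    // Pause the track and, for mixed (non-offloaded/direct) tracks that were active,
+    // poll the server state and position for up to `timeout` so the pause volume ramp
+    // can complete. Returns true when the ramp is believed done (or not needed),
+    // false if the timeout expires first.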
+    using namespace std::chrono_literals;
+
+    // We use atomic access here for state variables - these are used as hints
+    // to ensure we have ramped down audio.
+    const int priorState = mProxy->getState();
+    const uint32_t priorPosition = mProxy->getPosition().unsignedValue();
+
+    pause();
+
+    // Only if we were previously active, do we wait to ramp down the audio.
+    if (priorState != CBLK_STATE_ACTIVE) return true;
+
+    AutoMutex lock(mLock);
+    // offload and direct tracks do not wait because pause volume ramp is handled by hardware.
+    if (isOffloadedOrDirect_l()) return true;
+
+    // Wait for the track state to be anything besides pausing.
+    // This ensures that the volume has ramped down.
+    constexpr auto SLEEP_INTERVAL_MS = 10ms;
+    constexpr auto POSITION_TIMEOUT_MS = 40ms; // don't wait longer than this for position change.
+    auto begin = std::chrono::steady_clock::now();
+    while (true) {
+        // Wait for state and position to change.
+        // After pause() the server state should be PAUSING, but it may immediately
+        // be converted to PAUSED by prepareTracks before data is read into the mixer.
+        // Hence we check that the state is not PAUSING and that the server position
+        // has advanced, as a more reliable indication that the volume ramp has completed.
+        const int state = mProxy->getState();
+        const uint32_t position = mProxy->getPosition().unsignedValue();
+
+        mLock.unlock(); // only local variables accessed until lock.
+        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
+                std::chrono::steady_clock::now() - begin);
+        if (state != CBLK_STATE_PAUSING &&
+                (elapsed >= POSITION_TIMEOUT_MS || position != priorPosition)) {
+            ALOGV("%s: success state:%d, position:%u after %lld ms"
+                    " (prior state:%d  prior position:%u)",
+                    __func__, state, position, elapsed.count(), priorState, priorPosition);
+            return true;
+        }
+        std::chrono::milliseconds remaining = timeout - elapsed;
+        if (remaining.count() <= 0) {
+            ALOGW("%s: timeout expired state:%d still pausing:%d after %lld ms",
+                    __func__, state, CBLK_STATE_PAUSING, elapsed.count());
+            return false;
+        }
+        // It is conceivable that the track is restored while sleeping;
+        // as this logic is advisory, we allow that.
+        std::this_thread::sleep_for(std::min(remaining, SLEEP_INTERVAL_MS));
+        mLock.lock();
+    }
+}
+
 void AudioTrack::pause()
 {
     const int64_t beginNs = systemTime();
@@ -1275,10 +1499,6 @@
     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
         return NO_INIT;
     }
-    // Reject if timed track or compressed audio.
-    if (!audio_is_linear_pcm(mFormat)) {
-        return INVALID_OPERATION;
-    }
 
     ssize_t originalBufferSize = mProxy->getBufferSizeInFrames();
     ssize_t finalBufferSize  = mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
@@ -1372,7 +1592,7 @@
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
     // The only purpose of setting marker position is to get a callback
-    if (mCbf == NULL || isOffloadedOrDirect()) {
+    if (!mCallback.promote() || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -1405,7 +1625,7 @@
 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
     // The only purpose of setting position update period is to get a callback
-    if (mCbf == NULL || isOffloadedOrDirect()) {
+    if (!mCallback.promote() || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -1555,6 +1775,8 @@
 
 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
     AutoMutex lock(mLock);
+    ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
+            __func__, mPortId, deviceId, mSelectedDeviceId);
     if (mSelectedDeviceId != deviceId) {
         mSelectedDeviceId = deviceId;
         if (mStatus == NO_ERROR) {
@@ -1648,12 +1870,13 @@
 {
     status_t status;
     bool callbackAdded = false;
+    std::string errorMessage;
 
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
-        ALOGE("%s(%d): Could not get audioflinger",
+        errorMessage = StringPrintf("%s(%d): Could not get audioflinger",
                 __func__, mPortId);
-        status = NO_INIT;
+        status = DEAD_OBJECT;
         goto exit;
     }
 
@@ -1730,10 +1953,11 @@
     }
 
     if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
-        ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
+        errorMessage = StringPrintf(
+                "%s(%d): AudioFlinger could not create track, status: %d output %d",
                 __func__, mPortId, status, output.outputId);
         if (status == NO_ERROR) {
-            status = NO_INIT;
+            status = INVALID_OPERATION; // device not ready
         }
         goto exit;
     }
@@ -1764,8 +1988,8 @@
     output.audioTrack->getCblk(&sfr);
     sp<IMemory> iMem = VALUE_OR_FATAL(aidl2legacy_NullableSharedFileRegion_IMemory(sfr));
     if (iMem == 0) {
-        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
-        status = NO_INIT;
+        errorMessage = StringPrintf("%s(%d): Could not get control block", __func__, mPortId);
+        status = FAILED_TRANSACTION;
         goto exit;
     }
     // TODO: Using unsecurePointer() has some associated security pitfalls
@@ -1774,8 +1998,9 @@
     //       issue (e.g. by copying).
     void *iMemPointer = iMem->unsecurePointer();
     if (iMemPointer == NULL) {
-        ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
-        status = NO_INIT;
+        errorMessage = StringPrintf(
+                "%s(%d): Could not get control block pointer", __func__, mPortId);
+        status = FAILED_TRANSACTION;
         goto exit;
     }
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
@@ -1799,7 +2024,7 @@
                 mAwaitBoost = true;
             }
         } else {
-            ALOGD("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
+            ALOGV("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
                   __func__, mPortId, mReqFrameCount, mFrameCount);
         }
     }
@@ -1833,8 +2058,10 @@
         //       issue (e.g. by copying).
         buffers = mSharedBuffer->unsecurePointer();
         if (buffers == NULL) {
-            ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
-            status = NO_INIT;
+            errorMessage = StringPrintf(
+                    "%s(%d): Could not get buffer pointer", __func__, mPortId);
+            ALOGE("%s", errorMessage.c_str());
+            status = FAILED_TRANSACTION;
             goto exit;
         }
     }
@@ -1912,6 +2139,7 @@
         .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)mVolume[AUDIO_INTERLEAVE_LEFT])
         .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)mVolume[AUDIO_INTERLEAVE_RIGHT])
         .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
+        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)NO_ERROR)
         .set(AMEDIAMETRICS_PROP_AUXEFFECTID, (int32_t)mAuxEffectId)
         .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
         .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
@@ -1932,17 +2160,47 @@
     }
 
 exit:
-    if (status != NO_ERROR && callbackAdded) {
-        // note: mOutput is always valid is callbackAdded is true
-        AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+    if (status != NO_ERROR) {
+        if (callbackAdded) {
+            // note: mOutput is always valid if callbackAdded is true
+            AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+        }
+        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
     }
-
     mStatus = status;
 
     // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
     return status;
 }
 
+void AudioTrack::reportError(status_t status, const char *event, const char *message) const
+{
+    if (status == NO_ERROR) return;
+    // We report error on the native side because some callers do not come
+    // from Java.
+    // Ensure these variables are initialized in set().
+    mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR)
+        .set(AMEDIAMETRICS_PROP_EVENT, event)
+        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
+        .set(AMEDIAMETRICS_PROP_STATUSMESSAGE, message)
+        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
+        .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
+        .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
+        .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
+        .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
+        .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
+        .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
+        // the following are NOT immutable
+        // frame count is initially the requested frame count, but may be adjusted
+        // by AudioFlinger after creation.
+        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
+        .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
+        .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
+        .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
+        .record();
+}
+
 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
 {
     if (audioBuffer == NULL) {
@@ -2172,10 +2430,14 @@
 {
     // Currently the AudioTrack thread is not created if there are no callbacks.
     // Would it ever make sense to run the thread, even without callbacks?
-    // If so, then replace this by checks at each use for mCbf != NULL.
+    // If so, then replace this by checks at each use for mCallback != NULL.
     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
-
     mLock.lock();
+    sp<IAudioTrackCallback> callback = mCallback.promote();
+    if (!callback) {
+        mCallback = nullptr;
+        mLock.unlock();
+        return NS_NEVER;
+    }
     if (mAwaitBoost) {
         mAwaitBoost = false;
         mLock.unlock();
@@ -2273,7 +2535,7 @@
     sp<AudioTrackClientProxy> proxy = mProxy;
 
     // Determine the number of new loop callback(s) that will be needed, while locked.
-    int loopCountNotifications = 0;
+    uint32_t loopCountNotifications = 0;
     uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
 
     if (mLoopCount > 0) {
@@ -2295,7 +2557,7 @@
     }
 
     // These fields don't need to be cached, because they are assigned only by set():
-    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
+    // mTransfer, mCallback, mUserData, mFormat, mFrameSize, mFlags
     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
 
     mLock.unlock();
@@ -2320,7 +2582,7 @@
             if (status != DEAD_OBJECT) {
                 // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
                 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
-                mCbf(EVENT_STREAM_END, mUserData, NULL);
+                callback->onStreamEnd();
             }
             {
                 AutoMutex lock(mLock);
@@ -2343,28 +2605,27 @@
 
     // perform callbacks while unlocked
     if (newUnderrun) {
-        mCbf(EVENT_UNDERRUN, mUserData, NULL);
+        callback->onUnderrun();
     }
     while (loopCountNotifications > 0) {
-        mCbf(EVENT_LOOP_END, mUserData, NULL);
         --loopCountNotifications;
+        callback->onLoopEnd(mLoopCount > 0 ? loopCountNotifications + mLoopCountNotified : -1);
     }
     if (flags & CBLK_BUFFER_END) {
-        mCbf(EVENT_BUFFER_END, mUserData, NULL);
+        callback->onBufferEnd();
     }
     if (markerReached) {
-        mCbf(EVENT_MARKER, mUserData, &markerPosition);
+        callback->onMarker(markerPosition.value());
     }
     while (newPosCount > 0) {
-        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
-        mCbf(EVENT_NEW_POS, mUserData, &temp);
+        callback->onNewPos(newPosition.value());
         newPosition += updatePeriod;
         newPosCount--;
     }
 
     if (mObservedSequence != sequence) {
         mObservedSequence = sequence;
-        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
+        callback->onNewIAudioTrack();
         // for offloaded tracks, just wait for the upper layers to recreate the track
         if (isOffloadedOrDirect()) {
             return NS_INACTIVE;
@@ -2502,10 +2763,9 @@
             // written in the next write() call, since it's not passed through the callback
             audioBuffer.size += nonContig;
         }
-        mCbf(mTransfer == TRANSFER_CALLBACK ? EVENT_MORE_DATA : EVENT_CAN_WRITE_MORE_DATA,
-                mUserData, &audioBuffer);
-        size_t writtenSize = audioBuffer.size;
-
+        const size_t writtenSize = (mTransfer == TRANSFER_CALLBACK)
+                                      ? callback->onMoreData(audioBuffer)
+                                      : callback->onCanWriteMoreData(audioBuffer);
         // Validate on returned size
         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
             ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
@@ -2565,6 +2825,9 @@
             return ns;
         }
 
+        // releaseBuffer reads from audioBuffer.size
+        audioBuffer.size = writtenSize;
+
         size_t releasedFrames = writtenSize / mFrameSize;
         audioBuffer.frameCount = releasedFrames;
         mRemainingFrames -= releasedFrames;
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index 35719be..e3b79b2 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -409,7 +409,7 @@
         android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
         // it seems that a FUTEX_WAKE_PRIVATE will not wake a FUTEX_WAIT, even within same process
         (void) syscall(__NR_futex, &cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
-                1);
+                INT_MAX);
     }
 }
 
@@ -419,7 +419,7 @@
     if (!(android_atomic_or(CBLK_INTERRUPT, &cblk->mFlags) & CBLK_INTERRUPT)) {
         android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
         (void) syscall(__NR_futex, &cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
-                1);
+                INT_MAX);
     }
 }
 
@@ -490,6 +490,8 @@
 status_t AudioTrackClientProxy::waitStreamEndDone(const struct timespec *requested)
 {
     struct timespec total;          // total elapsed time spent waiting
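+    // 'before'/'beforeIsValid' track the time actually spent blocked in the futex wait
+    // below, which is accumulated into 'total'.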
+    struct timespec before;
+    bool beforeIsValid = false;
     total.tv_sec = 0;
     total.tv_nsec = 0;
     audio_track_cblk_t* cblk = mCblk;
@@ -570,17 +572,38 @@
         }
         int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);
         if (!(old & CBLK_FUTEX_WAKE)) {
+            if (!beforeIsValid) {
+                clock_gettime(CLOCK_MONOTONIC, &before);
+                beforeIsValid = true;
+            }
             errno = 0;
             (void) syscall(__NR_futex, &cblk->mFutex,
                     mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
-            switch (errno) {
+            status_t error = errno; // clock_gettime can affect errno
+            {
+                struct timespec after;
+                clock_gettime(CLOCK_MONOTONIC, &after);
+                total.tv_sec += after.tv_sec - before.tv_sec;
+                // Use auto instead of long to avoid the google-runtime-int warning.
+                auto deltaNs = after.tv_nsec - before.tv_nsec;
+                if (deltaNs < 0) {
+                    deltaNs += 1000000000;
+                    total.tv_sec--;
+                }
+                if ((total.tv_nsec += deltaNs) >= 1000000000) {
+                    total.tv_nsec -= 1000000000;
+                    total.tv_sec++;
+                }
+                before = after;
+            }
+            switch (error) {
             case 0:            // normal wakeup by server, or by binderDied()
             case EWOULDBLOCK:  // benign race condition with server
             case EINTR:        // wait was interrupted by signal or other spurious wakeup
             case ETIMEDOUT:    // time-out expired
                 break;
             default:
-                status = errno;
+                status = error;
                 ALOGE("%s unexpected error %s", __func__, strerror(status));
                 goto end;
             }
@@ -747,7 +770,7 @@
             int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
             if (!(old & CBLK_FUTEX_WAKE)) {
                 (void) syscall(__NR_futex, &cblk->mFutex,
-                        mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+                        mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, INT_MAX);
             }
         }
         mFlushed += (newFront - front) & mask;
@@ -917,7 +940,7 @@
         int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
         if (!(old & CBLK_FUTEX_WAKE)) {
             (void) syscall(__NR_futex, &cblk->mFutex,
-                    mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+                    mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, INT_MAX);
         }
     }
 
diff --git a/media/libaudioclient/AudioVolumeGroup.cpp b/media/libaudioclient/AudioVolumeGroup.cpp
index 361f7b8..ab95246 100644
--- a/media/libaudioclient/AudioVolumeGroup.cpp
+++ b/media/libaudioclient/AudioVolumeGroup.cpp
@@ -26,11 +26,10 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
+using media::audio::common::AudioStreamType;
+
 status_t AudioVolumeGroup::readFromParcel(const Parcel *parcel)
 {
     media::AudioVolumeGroup aidl;
@@ -55,7 +54,7 @@
                     legacy.getAudioAttributes(),
                     legacy2aidl_audio_attributes_t_AudioAttributesInternal));
     aidl.streams = VALUE_OR_RETURN(
-            convertContainer<std::vector<media::AudioStreamType>>(legacy.getStreamTypes(),
+            convertContainer<std::vector<AudioStreamType>>(legacy.getStreamTypes(),
             legacy2aidl_audio_stream_type_t_AudioStreamType));
     return aidl;
 }
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index cae81f0..88e7396 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -17,6 +17,7 @@
 
 #define LOG_TAG "IAudioFlinger"
 //#define LOG_NDEBUG 0
+
 #include <utils/Log.h>
 
 #include <stdint.h>
@@ -30,6 +31,13 @@
 
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUuid;
 
 #define MAX_ITEMS_PER_LIST 1024
 
@@ -40,12 +48,6 @@
        std::move(_tmp.value()); \
      })
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-       auto _tmp = (x);              \
-       if (_tmp != OK) return _tmp;  \
-    }
-
 #define RETURN_BINDER_IF_ERROR(x)                         \
     {                                                     \
        auto _tmp = (x);                                   \
@@ -55,7 +57,9 @@
 ConversionResult<media::CreateTrackRequest> IAudioFlinger::CreateTrackInput::toAidl() const {
     media::CreateTrackRequest aidl;
     aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
-    aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(config));
+    // Do not be misled by 'Input': this is an input to 'createTrack', which creates output tracks.
+    aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(
+                    config, false /*isInput*/));
     aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
     aidl.sharedBuffer = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(sharedBuffer));
     aidl.notificationsPerBuffer = VALUE_OR_RETURN(convertIntegral<int32_t>(notificationsPerBuffer));
@@ -74,7 +78,9 @@
 IAudioFlinger::CreateTrackInput::fromAidl(const media::CreateTrackRequest& aidl) {
     IAudioFlinger::CreateTrackInput legacy;
     legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
-    legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.config));
+    // Do not be misled by 'Input': this is an input to 'createTrack', which creates output tracks.
+    legacy.config = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfig_audio_config_t(aidl.config, false /*isInput*/));
     legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
     legacy.sharedBuffer = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.sharedBuffer));
     legacy.notificationsPerBuffer = VALUE_OR_RETURN(
@@ -139,7 +145,8 @@
 IAudioFlinger::CreateRecordInput::toAidl() const {
     media::CreateRecordRequest aidl;
     aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
-    aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(config));
+    aidl.config = VALUE_OR_RETURN(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(config, true /*isInput*/));
     aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
     aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(riid));
     aidl.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
@@ -159,7 +166,8 @@
     IAudioFlinger::CreateRecordInput legacy;
     legacy.attr = VALUE_OR_RETURN(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
-    legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
+    legacy.config = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config, true /*isInput*/));
     legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
     legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
     legacy.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
@@ -189,6 +197,8 @@
     aidl.buffers = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(buffers));
     aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(portId));
     aidl.audioRecord = audioRecord;
+    aidl.serverConfig = VALUE_OR_RETURN(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(serverConfig, true /*isInput*/));
     return aidl;
 }
 
@@ -209,6 +219,8 @@
     legacy.buffers = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.buffers));
     legacy.portId = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
     legacy.audioRecord = aidl.audioRecord;
+    legacy.serverConfig = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.serverConfig, true /*isInput*/));
     return legacy;
 }
 
@@ -242,9 +254,9 @@
 audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
     auto result = [&]() -> ConversionResult<audio_format_t> {
         int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
-        media::audio::common::AudioFormat aidlRet;
+        AudioFormatDescription aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
-        return aidl2legacy_AudioFormat_audio_format_t(aidlRet);
+        return aidl2legacy_AudioFormatDescription_audio_format_t(aidlRet);
     }();
     return result.value_or(AUDIO_FORMAT_INVALID);
 }
@@ -309,14 +321,14 @@
 
 status_t AudioFlingerClientAdapter::setStreamVolume(audio_stream_type_t stream, float value,
                                                     audio_io_handle_t output) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
     return statusTFromBinderStatus(mDelegate->setStreamVolume(streamAidl, value, outputAidl));
 }
 
 status_t AudioFlingerClientAdapter::setStreamMute(audio_stream_type_t stream, bool muted) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     return statusTFromBinderStatus(mDelegate->setStreamMute(streamAidl, muted));
 }
@@ -324,7 +336,7 @@
 float AudioFlingerClientAdapter::streamVolume(audio_stream_type_t stream,
                                               audio_io_handle_t output) const {
     auto result = [&]() -> ConversionResult<float> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+        AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
         float aidlRet;
@@ -338,7 +350,7 @@
 
 bool AudioFlingerClientAdapter::streamMute(audio_stream_type_t stream) const {
     auto result = [&]() -> ConversionResult<bool> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+        AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         bool aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -350,7 +362,7 @@
 }
 
 status_t AudioFlingerClientAdapter::setMode(audio_mode_t mode) {
-    media::AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
+    AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
     return statusTFromBinderStatus(mDelegate->setMode(modeAidl));
 }
 
@@ -410,10 +422,10 @@
                                                      audio_channel_mask_t channelMask) const {
     auto result = [&]() -> ConversionResult<size_t> {
         int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
-        media::audio::common::AudioFormat formatAidl = VALUE_OR_RETURN(
-                legacy2aidl_audio_format_t_AudioFormat(format));
-        int32_t channelMaskAidl = VALUE_OR_RETURN(
-                legacy2aidl_audio_channel_mask_t_int32_t(channelMask));
+        AudioFormatDescription formatAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_format_t_AudioFormatDescription(format));
+        AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_channel_mask_t_AudioChannelLayout(channelMask, true /*isInput*/));
         int64_t aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(
                 mDelegate->getInputBufferSize(sampleRateAidl, formatAidl, channelMaskAidl,
@@ -469,7 +481,7 @@
 }
 
 status_t AudioFlingerClientAdapter::invalidateStream(audio_stream_type_t stream) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     return statusTFromBinderStatus(mDelegate->invalidateStream(streamAidl));
 }
@@ -568,9 +580,9 @@
                                                         const effect_uuid_t* pTypeUUID,
                                                         uint32_t preferredTypeFlag,
                                                         effect_descriptor_t* pDescriptor) const {
-    media::AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
+    AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_uuid_t_AudioUuid(*pEffectUUID));
-    media::AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
+    AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_uuid_t_AudioUuid(*pTypeUUID));
     int32_t preferredTypeFlagAidl = VALUE_OR_RETURN_STATUS(
             convertReinterpret<int32_t>(preferredTypeFlag));
@@ -715,6 +727,10 @@
     return statusTFromBinderStatus(mDelegate->systemReady());
 }
 
+status_t AudioFlingerClientAdapter::audioPolicyReady() {
+    return statusTFromBinderStatus(mDelegate->audioPolicyReady());
+}
+
 size_t AudioFlingerClientAdapter::frameCountHAL(audio_io_handle_t ioHandle) const {
     auto result = [&]() -> ConversionResult<size_t> {
         int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
@@ -761,6 +777,32 @@
     return statusTFromBinderStatus(mDelegate->updateSecondaryOutputs(trackSecondaryOutputInfos));
 }
 
+status_t AudioFlingerClientAdapter::getMmapPolicyInfos(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    return statusTFromBinderStatus(mDelegate->getMmapPolicyInfos(policyType, policyInfos));
+}
+
+int32_t AudioFlingerClientAdapter::getAAudioMixerBurstCount() {
+    auto result = [&]() -> ConversionResult<int32_t> {
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->getAAudioMixerBurstCount(&aidlRet)));
+        return convertIntegral<int32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+int32_t AudioFlingerClientAdapter::getAAudioHardwareBurstMinUsec() {
+    auto result = [&]() -> ConversionResult<int32_t> {
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getAAudioHardwareBurstMinUsec(&aidlRet)));
+        return convertIntegral<int32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // AudioFlingerServerAdapter
@@ -806,11 +848,11 @@
 }
 
 Status AudioFlingerServerAdapter::format(int32_t output,
-                                         media::audio::common::AudioFormat* _aidl_return) {
+                                         AudioFormatDescription* _aidl_return) {
     audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_int32_t_audio_io_handle_t(output));
     *_aidl_return = VALUE_OR_RETURN_BINDER(
-            legacy2aidl_audio_format_t_AudioFormat(mDelegate->format(outputLegacy)));
+            legacy2aidl_audio_format_t_AudioFormatDescription(mDelegate->format(outputLegacy)));
     return Status::ok();
 }
 
@@ -856,7 +898,7 @@
     return Status::fromStatusT(mDelegate->getMasterBalance(_aidl_return));
 }
 
-Status AudioFlingerServerAdapter::setStreamVolume(media::AudioStreamType stream, float value,
+Status AudioFlingerServerAdapter::setStreamVolume(AudioStreamType stream, float value,
                                                   int32_t output) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
@@ -865,13 +907,13 @@
     return Status::fromStatusT(mDelegate->setStreamVolume(streamLegacy, value, outputLegacy));
 }
 
-Status AudioFlingerServerAdapter::setStreamMute(media::AudioStreamType stream, bool muted) {
+Status AudioFlingerServerAdapter::setStreamMute(AudioStreamType stream, bool muted) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     return Status::fromStatusT(mDelegate->setStreamMute(streamLegacy, muted));
 }
 
-Status AudioFlingerServerAdapter::streamVolume(media::AudioStreamType stream, int32_t output,
+Status AudioFlingerServerAdapter::streamVolume(AudioStreamType stream, int32_t output,
                                                float* _aidl_return) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
@@ -881,14 +923,14 @@
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::streamMute(media::AudioStreamType stream, bool* _aidl_return) {
+Status AudioFlingerServerAdapter::streamMute(AudioStreamType stream, bool* _aidl_return) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     *_aidl_return = mDelegate->streamMute(streamLegacy);
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::setMode(media::AudioMode mode) {
+Status AudioFlingerServerAdapter::setMode(AudioMode mode) {
     audio_mode_t modeLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioMode_audio_mode_t(mode));
     return Status::fromStatusT(mDelegate->setMode(modeLegacy));
 }
@@ -934,13 +976,14 @@
 }
 
 Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
-                                                     media::audio::common::AudioFormat format,
-                                                     int32_t channelMask, int64_t* _aidl_return) {
+                                                     const AudioFormatDescription& format,
+                                                     const AudioChannelLayout& channelMask,
+                                                     int64_t* _aidl_return) {
     uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
     audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
-            aidl2legacy_AudioFormat_audio_format_t(format));
+            aidl2legacy_AudioFormatDescription_audio_format_t(format));
     audio_channel_mask_t channelMaskLegacy = VALUE_OR_RETURN_BINDER(
-            aidl2legacy_int32_t_audio_channel_mask_t(channelMask));
+            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(channelMask, true /*isInput*/));
     size_t size = mDelegate->getInputBufferSize(sampleRateLegacy, formatLegacy, channelMaskLegacy);
     *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int64_t>(size));
     return Status::ok();
@@ -991,7 +1034,7 @@
     return Status::fromStatusT(mDelegate->closeInput(inputLegacy));
 }
 
-Status AudioFlingerServerAdapter::invalidateStream(media::AudioStreamType stream) {
+Status AudioFlingerServerAdapter::invalidateStream(AudioStreamType stream) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     return Status::fromStatusT(mDelegate->invalidateStream(streamLegacy));
@@ -1066,8 +1109,8 @@
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::getEffectDescriptor(const media::AudioUuid& effectUUID,
-                                                      const media::AudioUuid& typeUUID,
+Status AudioFlingerServerAdapter::getEffectDescriptor(const AudioUuid& effectUUID,
+                                                      const AudioUuid& typeUUID,
                                                       int32_t preferredTypeFlag,
                                                       media::EffectDescriptor* _aidl_return) {
     effect_uuid_t effectUuidLegacy = VALUE_OR_RETURN_BINDER(
@@ -1189,6 +1232,11 @@
     return Status::fromStatusT(mDelegate->systemReady());
 }
 
+Status AudioFlingerServerAdapter::audioPolicyReady() {
+    mDelegate->audioPolicyReady();
+    return Status::ok();
+}
+
 Status AudioFlingerServerAdapter::frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) {
     audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
@@ -1227,4 +1275,21 @@
     return Status::fromStatusT(mDelegate->updateSecondaryOutputs(trackSecondaryOutputs));
 }
 
+Status AudioFlingerServerAdapter::getMmapPolicyInfos(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *_aidl_return) {
+    return Status::fromStatusT(mDelegate->getMmapPolicyInfos(policyType, _aidl_return));
+}
+
+Status AudioFlingerServerAdapter::getAAudioMixerBurstCount(int32_t* _aidl_return) {
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->getAAudioMixerBurstCount()));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) {
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->getAAudioHardwareBurstMinUsec()));
+    return Status::ok();
+}
+
 } // namespace android
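
The new client-adapter methods above (for example getAAudioMixerBurstCount and getAAudioHardwareBurstMinUsec) all follow the same shape: run the conversions and the binder call inside a lambda returning ConversionResult<T>, bail out on the first failure, then collapse failure into a safe default with value_or() — the "Failure is ignored." comments. The following is a minimal, self-contained sketch of that shape only; std::optional stands in for android::ConversionResult, and legacy2aidl_handle/queryBurstCount are hypothetical names, not the AOSP macros or APIs.

```cpp
#include <cstdint>
#include <cstdio>
#include <optional>

// Stand-in for android::ConversionResult<T>: holds a value or signals failure.
template <typename T>
using ConversionResult = std::optional<T>;

// Hypothetical legacy -> AIDL conversion that can fail.
ConversionResult<int32_t> legacy2aidl_handle(int legacy) {
    if (legacy < 0) return std::nullopt;
    return static_cast<int32_t>(legacy);
}

// Mirrors the adapter-method shape: conversions inside a lambda, early exit on
// error, and the caller turning failure into a default value.
int32_t queryBurstCount(int legacyHandle) {
    auto result = [&]() -> ConversionResult<int32_t> {
        ConversionResult<int32_t> aidlHandle = legacy2aidl_handle(legacyHandle);
        if (!aidlHandle) return std::nullopt;  // plays the role of VALUE_OR_RETURN
        int32_t aidlRet = *aidlHandle * 2;     // placeholder for the binder call result
        return aidlRet;
    }();
    // Failure is ignored, as in the adapter methods above.
    return result.value_or(0);
}

int main() {
    std::printf("%d %d\n", static_cast<int>(queryBurstCount(4)),
                static_cast<int>(queryBurstCount(-1)));  // prints "8 0"
    return 0;
}
```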
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index 25fdb49..520f09c 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -25,16 +25,7 @@
 namespace android {
 
 using base::unexpected;
-
-ConversionResult<volume_group_t>
-aidl2legacy_int32_t_volume_group_t(int32_t aidl) {
-    return convertReinterpret<volume_group_t>(aidl);
-}
-
-ConversionResult<int32_t>
-legacy2aidl_volume_group_t_int32_t(volume_group_t legacy) {
-    return convertReinterpret<int32_t>(legacy);
-}
+using media::audio::common::AudioDeviceAddress;
 
 ConversionResult<uint32_t>
 aidl2legacy_AudioMixType_uint32_t(media::AudioMixType aidl) {
@@ -152,7 +143,7 @@
 
         case media::AudioMixMatchCriterionValue::source:
             legacy.mSource = VALUE_OR_RETURN(
-                    aidl2legacy_AudioSourceType_audio_source_t(UNION_GET(aidl, source).value()));
+                    aidl2legacy_AudioSource_audio_source_t(UNION_GET(aidl, source).value()));
             *rule |= RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET;
             return legacy;
 
@@ -184,7 +175,7 @@
 
         case RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET:
             UNION_SET(aidl, source,
-                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.mSource)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.mSource)));
             break;
 
         case RULE_MATCH_UID:
@@ -232,11 +223,14 @@
                                  std::back_inserter(legacy.mCriteria),
                                  aidl2legacy_AudioMixMatchCriterion));
     legacy.mMixType = VALUE_OR_RETURN(aidl2legacy_AudioMixType_uint32_t(aidl.mixType));
-    legacy.mFormat = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.format));
+    // See 'convertAudioMixToNative' in 'android_media_AudioSystem.cpp' -- only
+    // an output mask is expected here.
+    legacy.mFormat = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(
+                    aidl.format, false /*isInput*/));
     legacy.mRouteFlags = VALUE_OR_RETURN(
             aidl2legacy_AudioMixRouteFlag_uint32_t_mask(aidl.routeFlags));
-    legacy.mDeviceType = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
-    legacy.mDeviceAddress = VALUE_OR_RETURN(aidl2legacy_string_view_String8(aidl.device.address));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+                    aidl.device, &legacy.mDeviceType, &legacy.mDeviceAddress));
     legacy.mCbFlags = VALUE_OR_RETURN(aidl2legacy_AudioMixCallbackFlag_uint32_t_mask(aidl.cbFlags));
     legacy.mAllowPrivilegedMediaPlaybackCapture = aidl.allowPrivilegedMediaPlaybackCapture;
     legacy.mVoiceCommunicationCaptureAllowed = aidl.voiceCommunicationCaptureAllowed;
@@ -251,11 +245,15 @@
                     legacy.mCriteria,
                     legacy2aidl_AudioMixMatchCriterion));
     aidl.mixType = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixType(legacy.mMixType));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(legacy.mFormat));
+    // See 'convertAudioMixToNative' in 'android_media_AudioSystem.cpp' -- only
+    // an output mask is expected here.
+    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(
+                    legacy.mFormat, false /*isInput*/));
     aidl.routeFlags = VALUE_OR_RETURN(
             legacy2aidl_uint32_t_AudioMixRouteFlag_mask(legacy.mRouteFlags));
-    aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.mDeviceType));
-    aidl.device.address = VALUE_OR_RETURN(legacy2aidl_String8_string(legacy.mDeviceAddress));
+    aidl.device = VALUE_OR_RETURN(
+            legacy2aidl_audio_device_AudioDevice(
+                    legacy.mDeviceType, legacy.mDeviceAddress));
     aidl.cbFlags = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixCallbackFlag_mask(legacy.mCbFlags));
     aidl.allowPrivilegedMediaPlaybackCapture = legacy.mAllowPrivilegedMediaPlaybackCapture;
     aidl.voiceCommunicationCaptureAllowed = legacy.mVoiceCommunicationCaptureAllowed;
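
The PolicyAidlConversion hunks above pass an explicit false /*isInput*/ when converting AudioMix::format, because the new common AIDL types need a direction to map back to a legacy channel mask: legacy input and output masks use different encodings for the same channel count. The sketch below only illustrates that idea; the constants and the stereoLayoutToLegacyMask helper are illustrative stand-ins, not the real aidl2legacy converters.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative values in the spirit of AUDIO_CHANNEL_OUT_STEREO / AUDIO_CHANNEL_IN_STEREO.
constexpr uint32_t kOutStereo = 0x3;
constexpr uint32_t kInStereo  = 0xC;

// A stereo layout maps to a different legacy constant depending on direction,
// which is why the converters now take an isInput flag.
uint32_t stereoLayoutToLegacyMask(bool isInput) {
    return isInput ? kInStereo : kOutStereo;
}

int main() {
    // AudioMix::format describes an output configuration, hence false /*isInput*/ in the patch.
    std::printf("output mask: 0x%x, input mask: 0x%x\n",
                stereoLayoutToLegacyMask(false), stereoLayoutToLegacyMask(true));
    return 0;
}
```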
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
new file mode 100644
index 0000000..d8c18c0
--- /dev/null
+++ b/media/libaudioclient/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+  "presubmit": [
+    {
+       "name": "audio_aidl_conversion_tests"
+    }
+  ]
+}
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index e5e8496..cd3eacb 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -977,7 +977,7 @@
 //    Method:        ToneGenerator::ToneGenerator()
 //
 //    Description:    Constructor. Initializes the tone sequencer, intantiates required sine wave
-//        generators, instantiates output audio track.
+//        generators, but does not initialize the output audio track.
 //
 //    Input:
 //        streamType:        Type of stream used for tone playback
@@ -1041,6 +1041,23 @@
         mRegion = CEPT;
     }
 
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+//    Method:        ToneGenerator::onFirstRef()
+//
+//    Description:  Called upon first RefBase reference. Initializes the audio
+//                  track with a weak pointer to self as the registered callback.
+//    Input:
+//        none
+//
+//    Output:
+//        none
+//
+////////////////////////////////////////////////////////////////////////////////
+
+void ToneGenerator::onFirstRef() {
     if (initAudioTrack()) {
         ALOGV("ToneGenerator INIT OK, time: %d", (unsigned int)(systemTime()/1000000));
     } else {
@@ -1048,9 +1065,6 @@
     }
 }
 
-
-
-
 ////////////////////////////////////////////////////////////////////////////////
 //
 //    Method:        ToneGenerator::~ToneGenerator()
@@ -1282,8 +1296,7 @@
             AUDIO_CHANNEL_OUT_MONO,
             frameCount,
             AUDIO_OUTPUT_FLAG_FAST,
-            audioCallback,
-            this, // user
+            wp<AudioTrack::IAudioTrackCallback>::fromExisting(this),
             0,    // notificationFrames
             0,    // sharedBuffer
             mThreadCanCallJava,
@@ -1308,50 +1321,47 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 //
-//    Method:        ToneGenerator::audioCallback()
+//    Method:        ToneGenerator::onMoreData()
 //
 //    Description:    AudioTrack callback implementation. Generates a block of
 //        PCM samples
 //        and manages tone generator sequencer: tones pulses, tone duration...
 //
 //    Input:
-//        user    reference (pointer to our ToneGenerator)
-//        info    audio buffer descriptor
+//        buffer  A buffer object containing a pointer which we will fill with
+//                buffer.size bytes.
 //
 //    Output:
-//        returned value: always true.
+//        The number of bytes we successfully wrote.
 //
 ////////////////////////////////////////////////////////////////////////////////
-void ToneGenerator::audioCallback(int event, void* user, void *info) {
+size_t ToneGenerator::onMoreData(const AudioTrack::Buffer& buffer) {
 
-    if (event != AudioTrack::EVENT_MORE_DATA) return;
-
-    AudioTrack::Buffer *buffer = static_cast<AudioTrack::Buffer *>(info);
-    ToneGenerator *lpToneGen = static_cast<ToneGenerator *>(user);
-    int16_t *lpOut = buffer->i16;
-    unsigned int lNumSmp = buffer->size/sizeof(int16_t);
-    const ToneDescriptor *lpToneDesc = lpToneGen->mpToneDesc;
-
-    if (buffer->size == 0) return;
-
+    int16_t *lpOut = buffer.i16;
+    uint32_t lNumSmp = (buffer.size / sizeof(int16_t) < UINT32_MAX) ?
+            buffer.size / sizeof(int16_t) : UINT32_MAX;
+    if (buffer.size == 0) return 0;
+    // We will write to the entire buffer unless we are stopped, in which case
+    // we return 0 at the end of the loop.
+    size_t bytesWritten = lNumSmp * sizeof(int16_t);
 
     // Clear output buffer: WaveGenerator accumulates into lpOut buffer
-    memset(lpOut, 0, buffer->size);
+    memset(lpOut, 0, buffer.size);
 
     while (lNumSmp) {
-        unsigned int lReqSmp = lNumSmp < lpToneGen->mProcessSize*2 ? lNumSmp : lpToneGen->mProcessSize;
+        unsigned int lReqSmp = lNumSmp < mProcessSize*2 ? lNumSmp : mProcessSize;
         unsigned int lGenSmp;
         unsigned int lWaveCmd = WaveGenerator::WAVEGEN_CONT;
         bool lSignal = false;
 
-        lpToneGen->mLock.lock();
+        mLock.lock();
 
 
         // Update pcm frame count and end time (current time at the end of this process)
-        lpToneGen->mTotalSmp += lReqSmp;
+        mTotalSmp += lReqSmp;
 
         // Update tone gen state machine and select wave gen command
-        switch (lpToneGen->mState) {
+        switch (mState) {
         case TONE_PLAYING:
             lWaveCmd = WaveGenerator::WAVEGEN_CONT;
             break;
@@ -1365,7 +1375,7 @@
             ALOGV("Stop/restart Cbk");
 
             lWaveCmd = WaveGenerator::WAVEGEN_STOP;
-            lpToneGen->mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
+            mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
             break;
         case TONE_STOPPED:
             ALOGV("Stopped Cbk");
@@ -1376,20 +1386,20 @@
         }
 
         // Exit if tone sequence is over
-        if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0 ||
-            lpToneGen->mTotalSmp > lpToneGen->mMaxSmp) {
-            if (lpToneGen->mState == TONE_PLAYING) {
-                lpToneGen->mState = TONE_STOPPING;
+        if (mpToneDesc->segments[mCurSegment].duration == 0 ||
+            mTotalSmp > mMaxSmp) {
+            if (mState == TONE_PLAYING) {
+                mState = TONE_STOPPING;
             }
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0) {
+            if (mpToneDesc->segments[mCurSegment].duration == 0) {
                 goto audioCallback_EndLoop;
             }
             // fade out before stopping if maximum duration reached
             lWaveCmd = WaveGenerator::WAVEGEN_STOP;
-            lpToneGen->mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
+            mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
         }
 
-        if (lpToneGen->mTotalSmp > lpToneGen->mNextSegSmp) {
+        if (mTotalSmp > mNextSegSmp) {
             // Time to go to next sequence segment
 
             ALOGV("End Segment, time: %d", (unsigned int)(systemTime()/1000000));
@@ -1397,61 +1407,61 @@
             lGenSmp = lReqSmp;
 
             // If segment,  ON -> OFF transition : ramp volume down
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
+            if (mpToneDesc->segments[mCurSegment].waveFreq[0] != 0) {
                 lWaveCmd = WaveGenerator::WAVEGEN_STOP;
                 unsigned int lFreqIdx = 0;
-                uint16_t lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
+                uint16_t lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[lFreqIdx];
 
                 while (lFrequency != 0) {
-                    WaveGenerator *lpWaveGen = lpToneGen->mWaveGens.valueFor(lFrequency);
+                    WaveGenerator *lpWaveGen = mWaveGens.valueFor(lFrequency);
                     lpWaveGen->getSamples(lpOut, lGenSmp, lWaveCmd);
-                    lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[++lFreqIdx];
+                    lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[++lFreqIdx];
                 }
                 ALOGV("ON->OFF, lGenSmp: %d, lReqSmp: %d", lGenSmp, lReqSmp);
             }
 
             // check if we need to loop and loop for the reqd times
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) {
-                if (lpToneGen->mLoopCounter < lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) {
+            if (mpToneDesc->segments[mCurSegment].loopCnt) {
+                if (mLoopCounter < mpToneDesc->segments[mCurSegment].loopCnt) {
                     ALOGV ("in if loop loopCnt(%d) loopctr(%d), CurSeg(%d)",
-                          lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
-                          lpToneGen->mLoopCounter,
-                          lpToneGen->mCurSegment);
-                    lpToneGen->mCurSegment = lpToneDesc->segments[lpToneGen->mCurSegment].loopIndx;
-                    ++lpToneGen->mLoopCounter;
+                          mpToneDesc->segments[mCurSegment].loopCnt,
+                          mLoopCounter,
+                          mCurSegment);
+                    mCurSegment = mpToneDesc->segments[mCurSegment].loopIndx;
+                    ++mLoopCounter;
                 } else {
                     // completed loop. go to next segment
-                    lpToneGen->mLoopCounter = 0;
-                    lpToneGen->mCurSegment++;
+                    mLoopCounter = 0;
+                    mCurSegment++;
                     ALOGV ("in else loop loopCnt(%d) loopctr(%d), CurSeg(%d)",
-                          lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
-                          lpToneGen->mLoopCounter,
-                          lpToneGen->mCurSegment);
+                          mpToneDesc->segments[mCurSegment].loopCnt,
+                          mLoopCounter,
+                          mCurSegment);
                 }
             } else {
-                lpToneGen->mCurSegment++;
+                mCurSegment++;
                 ALOGV ("Goto next seg loopCnt(%d) loopctr(%d), CurSeg(%d)",
-                      lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
-                      lpToneGen->mLoopCounter,
-                      lpToneGen->mCurSegment);
+                      mpToneDesc->segments[mCurSegment].loopCnt,
+                      mLoopCounter,
+                      mCurSegment);
 
             }
 
             // Handle loop if last segment reached
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0) {
-                ALOGV("Last Seg: %d", lpToneGen->mCurSegment);
+            if (mpToneDesc->segments[mCurSegment].duration == 0) {
+                ALOGV("Last Seg: %d", mCurSegment);
 
                 // Pre increment loop count and restart if total count not reached. Stop sequence otherwise
-                if (++lpToneGen->mCurCount <= lpToneDesc->repeatCnt) {
-                    ALOGV("Repeating Count: %d", lpToneGen->mCurCount);
+                if (++mCurCount <= mpToneDesc->repeatCnt) {
+                    ALOGV("Repeating Count: %d", mCurCount);
 
-                    lpToneGen->mCurSegment = lpToneDesc->repeatSegment;
-                    if (lpToneDesc->segments[lpToneDesc->repeatSegment].waveFreq[0] != 0) {
+                    mCurSegment = mpToneDesc->repeatSegment;
+                    if (mpToneDesc->segments[mpToneDesc->repeatSegment].waveFreq[0] != 0) {
                         lWaveCmd = WaveGenerator::WAVEGEN_START;
                     }
 
-                    ALOGV("New segment %d, Next Time: %lld", lpToneGen->mCurSegment,
-                            ((long long)(lpToneGen->mNextSegSmp)*1000)/lpToneGen->mSamplingRate);
+                    ALOGV("New segment %d, Next Time: %lld", mCurSegment,
+                            ((long long)(mNextSegSmp)*1000)/mSamplingRate);
 
 
                 } else {
@@ -1459,10 +1469,10 @@
                     ALOGV("End repeat, time: %d", (unsigned int)(systemTime()/1000000));
                 }
             } else {
-                ALOGV("New segment %d, Next Time: %lld", lpToneGen->mCurSegment,
-                        ((long long)(lpToneGen->mNextSegSmp)*1000)/lpToneGen->mSamplingRate);
+                ALOGV("New segment %d, Next Time: %lld", mCurSegment,
+                        ((long long)(mNextSegSmp)*1000)/mSamplingRate);
 
-                if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
+                if (mpToneDesc->segments[mCurSegment].waveFreq[0] != 0) {
                     // If next segment is not silent,  OFF -> ON transition : reset wave generator
                     lWaveCmd = WaveGenerator::WAVEGEN_START;
 
@@ -1472,13 +1482,13 @@
                 }
             }
 
-            // Update next segment transition position. No harm to do it also for last segment as lpToneGen->mNextSegSmp won't be used any more
-            lpToneGen->mNextSegSmp
-                    += (lpToneDesc->segments[lpToneGen->mCurSegment].duration * lpToneGen->mSamplingRate) / 1000;
+            // Update next segment transition position. No harm to do it also for last segment as
+            // mNextSegSmp won't be used any more
+            mNextSegSmp += (mpToneDesc->segments[mCurSegment].duration * mSamplingRate) / 1000;
 
         } else {
             // Inside a segment keep tone ON or OFF
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] == 0) {
+            if (mpToneDesc->segments[mCurSegment].waveFreq[0] == 0) {
                 lGenSmp = 0;  // If odd segment, tone is currently OFF
             } else {
                 lGenSmp = lReqSmp;  // If event segment, tone is currently ON
@@ -1488,12 +1498,12 @@
         if (lGenSmp) {
             // If samples must be generated, call all active wave generators and acumulate waves in lpOut
             unsigned int lFreqIdx = 0;
-            uint16_t lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
+            uint16_t lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[lFreqIdx];
 
             while (lFrequency != 0) {
-                WaveGenerator *lpWaveGen = lpToneGen->mWaveGens.valueFor(lFrequency);
+                WaveGenerator *lpWaveGen = mWaveGens.valueFor(lFrequency);
                 lpWaveGen->getSamples(lpOut, lGenSmp, lWaveCmd);
-                lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[++lFreqIdx];
+                lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[++lFreqIdx];
             }
         }
 
@@ -1501,21 +1511,19 @@
         lpOut += lReqSmp;
 
 audioCallback_EndLoop:
-
-        switch (lpToneGen->mState) {
+        switch (mState) {
         case TONE_RESTARTING:
             ALOGV("Cbk restarting track");
-            if (lpToneGen->prepareWave()) {
-                lpToneGen->mState = TONE_STARTING;
-                if (clock_gettime(CLOCK_MONOTONIC, &lpToneGen->mStartTime) != 0) {
-                    lpToneGen->mStartTime.tv_sec = 0;
+            if (prepareWave()) {
+                mState = TONE_STARTING;
+                if (clock_gettime(CLOCK_MONOTONIC, &mStartTime) != 0) {
+                    mStartTime.tv_sec = 0;
                 }
-                // must reload lpToneDesc as prepareWave() may change mpToneDesc
-                lpToneDesc = lpToneGen->mpToneDesc;
+                // prepareWave() may change mpToneDesc; it is now read directly as a member
             } else {
                 ALOGW("Cbk restarting prepareWave() failed");
-                lpToneGen->mState = TONE_IDLE;
-                lpToneGen->mpAudioTrack->stop();
+                mState = TONE_IDLE;
+                mpAudioTrack->stop();
                 // Force loop exit
                 lNumSmp = 0;
             }
@@ -1523,22 +1531,22 @@
             break;
         case TONE_STOPPING:
             ALOGV("Cbk Stopping");
-            lpToneGen->mState = TONE_STOPPED;
+            mState = TONE_STOPPED;
             // Force loop exit
             lNumSmp = 0;
             break;
         case TONE_STOPPED:
-            lpToneGen->mState = TONE_INIT;
+            mState = TONE_INIT;
             ALOGV("Cbk Stopped track");
-            lpToneGen->mpAudioTrack->stop();
+            mpAudioTrack->stop();
             // Force loop exit
             lNumSmp = 0;
-            buffer->size = 0;
+            bytesWritten = 0;
             lSignal = true;
             break;
         case TONE_STARTING:
             ALOGV("Cbk starting track");
-            lpToneGen->mState = TONE_PLAYING;
+            mState = TONE_PLAYING;
             lSignal = true;
             break;
         case TONE_PLAYING:
@@ -1546,14 +1554,15 @@
         default:
             // Force loop exit
             lNumSmp = 0;
-            buffer->size = 0;
+            bytesWritten = 0;
             break;
         }
 
         if (lSignal)
-            lpToneGen->mWaitCbkCond.broadcast();
-        lpToneGen->mLock.unlock();
+            mWaitCbkCond.broadcast();
+        mLock.unlock();
     }
+    return bytesWritten;
 }
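
The ToneGenerator change above replaces the static audioCallback(event, user, info) trampoline with the IAudioTrackCallback interface: the object now registers itself as a weak pointer from onFirstRef(), once it is safely reference-counted, and onMoreData() returns the number of bytes written instead of mutating the buffer descriptor. The following is a self-contained sketch of that pattern using standard-library stand-ins; Buffer, DataCallback, and MiniToneGenerator are hypothetical substitutes for AudioTrack::Buffer, AudioTrack::IAudioTrackCallback, and ToneGenerator.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <memory>
#include <vector>

struct Buffer {        // stand-in for AudioTrack::Buffer
    int16_t* i16;
    size_t size;       // bytes
};

struct DataCallback {  // stand-in for AudioTrack::IAudioTrackCallback
    virtual ~DataCallback() = default;
    virtual size_t onMoreData(const Buffer& buffer) = 0;
};

class MiniToneGenerator : public DataCallback,
                          public std::enable_shared_from_this<MiniToneGenerator> {
  public:
    // Analogous to onFirstRef(): runs only after shared ownership exists, so a
    // weak reference to ourselves can safely be handed to the track.
    void init() { mRegistered = shared_from_this(); }

    size_t onMoreData(const Buffer& buffer) override {
        if (buffer.size == 0) return 0;
        std::memset(buffer.i16, 0, buffer.size);  // silence, standing in for tone synthesis
        return buffer.size;                        // report how many bytes were written
    }

  private:
    std::weak_ptr<DataCallback> mRegistered;       // weak, like wp<IAudioTrackCallback>
};

int main() {
    auto gen = std::make_shared<MiniToneGenerator>();
    gen->init();
    std::vector<int16_t> pcm(256);
    Buffer buf{pcm.data(), pcm.size() * sizeof(int16_t)};
    std::printf("wrote %zu bytes\n", gen->onMoreData(buf));
    return 0;
}
```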
 
 
diff --git a/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl b/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
index 04a02c7..335866f 100644
--- a/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
@@ -17,7 +17,7 @@
 package android.media;
 
 import android.media.AudioAttributesInternal;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * This is the equivalent of the android::AudioAttributes C++ type.
diff --git a/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl b/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
index 699df0a..2e74206 100644
--- a/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioContentType;
-import android.media.AudioSourceType;
-import android.media.AudioUsage;
+import android.media.audio.common.AudioContentType;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioUsage;
 
 /**
  * The "Internal" suffix of this type name is to disambiguate it from the
@@ -28,7 +28,7 @@
 parcelable AudioAttributesInternal {
     AudioContentType contentType;
     AudioUsage usage;
-    AudioSourceType source;
+    AudioSource source;
     // Bitmask, indexed by AudioFlag.
     int flags;
     @utf8InCpp String tags; /* UTF8 */
diff --git a/media/libaudioclient/aidl/android/media/AudioConfig.aidl b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
deleted file mode 100644
index 8dc97d3..0000000
--- a/media/libaudioclient/aidl/android/media/AudioConfig.aidl
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioOffloadInfo;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioConfig {
-    int sampleRate;
-    /**
-     * Interpreted as audio_channel_mask_t.
-     * TODO(ytai): Create a designated type.
-     */
-    int channelMask;
-    AudioFormat format;
-    AudioOffloadInfo offloadInfo;
-    long frameCount;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
deleted file mode 100644
index 8353c0d..0000000
--- a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioConfigBase {
-    int sampleRate;
-    /** Interpreted as audio_channel_mask_t. */
-    int channelMask;
-    AudioFormat format;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioContentType.aidl b/media/libaudioclient/aidl/android/media/AudioContentType.aidl
deleted file mode 100644
index f734fba..0000000
--- a/media/libaudioclient/aidl/android/media/AudioContentType.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-@Backing(type="int")
-enum AudioContentType {
-    UNKNOWN = 0,
-    SPEECH = 1,
-    MUSIC = 2,
-    MOVIE = 3,
-    SONIFICATION = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioDevice.aidl b/media/libaudioclient/aidl/android/media/AudioDevice.aidl
deleted file mode 100644
index b200697..0000000
--- a/media/libaudioclient/aidl/android/media/AudioDevice.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioDevice {
-    /** Interpreted as audio_devices_t. */
-    int type;
-    @utf8InCpp String address;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStandard.aidl b/media/libaudioclient/aidl/android/media/AudioDirectMode.aidl
similarity index 83%
rename from media/libaudioclient/aidl/android/media/AudioStandard.aidl
rename to media/libaudioclient/aidl/android/media/AudioDirectMode.aidl
index e131d0d..0da4721 100644
--- a/media/libaudioclient/aidl/android/media/AudioStandard.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioDirectMode.aidl
@@ -15,13 +15,10 @@
  */
 package android.media;
 
-/**
- * The audio standard that describe audio playback/capture capabilites.
- *
- * {@hide}
- */
 @Backing(type="int")
-enum AudioStandard {
+enum AudioDirectMode {
     NONE = 0,
-    EDID = 1,
+    OFFLOAD = 1,
+    OFFLOAD_GAPLESS = 2,
+    BITSTREAM = 4,
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl b/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl
deleted file mode 100644
index b03adfe..0000000
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationMetadataType {
-    NONE = 0,
-    FRAMEWORK_TUNER = 1,
-    DVB_AD_DESCRIPTOR = 2,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioFlag.aidl b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
index 58b493b..acf4e6d 100644
--- a/media/libaudioclient/aidl/android/media/AudioFlag.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
@@ -34,4 +34,7 @@
     MUTE_HAPTIC = 11,
     NO_SYSTEM_CAPTURE = 12,
     CAPTURE_PRIVATE = 13,
+    CONTENT_SPATIALIZED = 14,
+    NEVER_SPATIALIZE = 15,
+    CALL_REDIRECTION = 16,
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioGain.aidl b/media/libaudioclient/aidl/android/media/AudioGain.aidl
deleted file mode 100644
index 048b295..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGain.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioGain {
-    int index;
-    boolean useInChannelMask;
-    boolean useForVolume;
-    /** Bitmask, indexed by AudioGainMode. */
-    int mode;
-    /** Interpreted as audio_channel_mask_t. */
-    int channelMask;
-    int minValue;
-    int maxValue;
-    int defaultValue;
-    int stepValue;
-    int minRampMs;
-    int maxRampMs;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
deleted file mode 100644
index b93c2dc..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioGainConfig {
-    /** Index of the corresponding audio_gain in the audio_port gains[] table. */
-    int index;
-
-    /** Mode requested for this command. Bitfield indexed by AudioGainMode. */
-    int mode;
-
-    /**
-     * Channels which gain value follows. N/A in joint mode.
-     * Interpreted as audio_channel_mask_t.
-     */
-    int channelMask;
-
-    /**
-     * Gain values in millibels.
-     * For each channel ordered from LSb to MSb in channel mask. The number of values is 1 in joint
-     * mode, otherwise equals the number of bits implied by channelMask.
-     */
-    int[]  values;
-
-    /** Ramp duration in ms. */
-    int rampDurationMs;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl b/media/libaudioclient/aidl/android/media/AudioGainMode.aidl
deleted file mode 100644
index e1b9f0b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioGainMode {
-    JOINT    = 0,
-    CHANNELS = 1,
-    RAMP     = 2,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl b/media/libaudioclient/aidl/android/media/AudioGainSys.aidl
similarity index 81%
rename from media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
rename to media/libaudioclient/aidl/android/media/AudioGainSys.aidl
index 9e04e82..426f4ed 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGainSys.aidl
@@ -13,14 +13,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
+ * Provides additional runtime information for AudioGain, used by the framework.
+ *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioEncapsulationMode {
-     NONE = 0,
-     ELEMENTARY_STREAM = 1,
-     HANDLE = 2,
+parcelable AudioGainSys {
+    int index;
+    boolean isInput;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
deleted file mode 100644
index bfc0eb0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioInputFlags {
-    FAST       = 0,
-    HW_HOTWORD = 1,
-    RAW        = 2,
-    SYNC       = 3,
-    MMAP_NOIRQ = 4,
-    VOIP_TX    = 5,
-    HW_AV_SYNC = 6,
-    DIRECT     = 7,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
index 876ef9b..b01f902 100644
--- a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
@@ -17,7 +17,8 @@
 package android.media;
 
 import android.media.AudioPatch;
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
 
 /**
  * {@hide}
@@ -26,10 +27,10 @@
     /** Interpreted as audio_io_handle_t. */
     int ioHandle;
     AudioPatch patch;
+    boolean isInput;
     int samplingRate;
-    AudioFormat format;
-    /** Interpreted as audio_channel_mask_t. */
-    int channelMask;
+    AudioFormatDescription format;
+    AudioChannelLayout channelMask;
     long frameCount;
     long frameCountHAL;
     /** Only valid for output. */
diff --git a/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl b/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl
deleted file mode 100644
index f9b25bf..0000000
--- a/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-union AudioIoFlags {
-    /** Bitmask indexed by AudioInputFlags. */
-    int input;
-    /** Bitmask indexed by AudioOutputFlags. */
-    int output;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioMix.aidl b/media/libaudioclient/aidl/android/media/AudioMix.aidl
index 7473372..88b0450 100644
--- a/media/libaudioclient/aidl/android/media/AudioMix.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMix.aidl
@@ -16,12 +16,12 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
 import android.media.AudioMixCallbackFlag;
 import android.media.AudioMixMatchCriterion;
 import android.media.AudioMixRouteFlag;
 import android.media.AudioMixType;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl b/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
deleted file mode 100644
index d70b364..0000000
--- a/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioMixLatencyClass {
-    LOW = 0,
-    NORMAL = 1,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
index e26a9e1..921a93a 100644
--- a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
@@ -16,15 +16,15 @@
 
 package android.media;
 
-import android.media.AudioSourceType;
-import android.media.AudioUsage;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioUsage;
 
 /**
  * {@hide}
  */
 union AudioMixMatchCriterionValue {
     AudioUsage usage = AudioUsage.UNKNOWN;
-    AudioSourceType source;
+    AudioSource source;
     /** Interpreted as uid_t. */
     int uid;
     int userId;
diff --git a/media/libaudioclient/aidl/android/media/AudioMode.aidl b/media/libaudioclient/aidl/android/media/AudioMode.aidl
deleted file mode 100644
index 7067dd3..0000000
--- a/media/libaudioclient/aidl/android/media/AudioMode.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioMode {
-    INVALID = -2,
-    CURRENT = -1,
-    NORMAL = 0,
-    RINGTONE = 1,
-    IN_CALL = 2,
-    IN_COMMUNICATION = 3,
-    CALL_SCREEN = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl b/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
deleted file mode 100644
index c86b3f0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioConfigBase;
-import android.media.AudioEncapsulationMode;
-import android.media.AudioStreamType;
-import android.media.AudioUsage;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioOffloadInfo {
-    /** Version of the info structure. Interpreted as a uint16_t version constant. */
-    int version;
-    /** Audio configuration. */
-    AudioConfigBase config;
-    /** Stream type. */
-    AudioStreamType streamType;
-    /** Bit rate in bits per second. */
-    int bitRate;
-    /** Duration in microseconds, -1 if unknown. */
-    long durationUs;
-    /** true if stream is tied to a video stream. */
-    boolean hasVideo;
-    /** true if streaming, false if local playback. */
-    boolean isStreaming;
-    int bitWidth;
-    /** Offload fragment size. */
-    int offloadBufferSize;
-    AudioUsage usage;
-    AudioEncapsulationMode encapsulationMode;
-    /** Content id from tuner HAL (0 if none). */
-    int contentId;
-    /** Sync id from tuner HAL (0 if none). */
-    int syncId;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
deleted file mode 100644
index cebd8f0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioOutputFlags {
-    DIRECT           = 0,
-    PRIMARY          = 1,
-    FAST             = 2,
-    DEEP_BUFFER      = 3,
-    COMPRESS_OFFLOAD = 4,
-    NON_BLOCKING     = 5,
-    HW_AV_SYNC       = 6,
-    TTS              = 7,
-    RAW              = 8,
-    SYNC             = 9,
-    IEC958_NONAUDIO  = 10,
-    DIRECT_PCM       = 11,
-    MMAP_NOIRQ       = 12,
-    VOIP_RX          = 13,
-    INCALL_MUSIC     = 14,
-    GAPLESS_OFFLOAD  = 15,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPort.aidl b/media/libaudioclient/aidl/android/media/AudioPort.aidl
index bf0e5b7..ff177c0 100644
--- a/media/libaudioclient/aidl/android/media/AudioPort.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPort.aidl
@@ -16,35 +16,13 @@
 
 package android.media;
 
-import android.media.AudioGain;
-import android.media.AudioPortConfig;
-import android.media.AudioPortExt;
-import android.media.AudioPortRole;
-import android.media.AudioPortType;
-import android.media.AudioProfile;
-import android.media.ExtraAudioDescriptor;
+import android.media.AudioPortSys;
+import android.media.audio.common.AudioPort;
 
 /**
  * {@hide}
  */
 parcelable AudioPort {
-    /** Port unique ID. Interpreted as audio_port_handle_t. */
-    int id;
-    /** Sink or source. */
-    AudioPortRole role;
-    /** Device, mix ... */
-    AudioPortType type;
-    @utf8InCpp String name;
-    /** AudioProfiles supported by this port (format, Rates, Channels). */
-    AudioProfile[] profiles;
-    /**
-     * ExtraAudioDescriptors supported by this port. The format is not unrecognized to the
-     * platform. The audio capability is described by a hardware descriptor.
-     */
-    ExtraAudioDescriptor[] extraAudioDescriptors;
-    /** Gain controllers. */
-    AudioGain[] gains;
-    /** Current audio port configuration. */
-    AudioPortConfig activeConfig;
-    AudioPortExt ext;
+    AudioPort hal;
+    AudioPortSys sys;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
index 2dd30a4..3a4ca31 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
@@ -16,44 +16,13 @@
 
 package android.media;
 
-import android.media.AudioGainConfig;
-import android.media.AudioIoFlags;
-import android.media.AudioPortConfigExt;
-import android.media.AudioPortConfigType;
-import android.media.AudioPortRole;
-import android.media.AudioPortType;
-import android.media.audio.common.AudioFormat;
+import android.media.AudioPortConfigSys;
+import android.media.audio.common.AudioPortConfig;
 
 /**
  * {@hide}
  */
 parcelable AudioPortConfig {
-    /**
-     * Port unique ID.
-     * Interpreted as audio_port_handle_t.
-     */
-    int id;
-    /** Sink or source. */
-    AudioPortRole role;
-    /** Device, mix ... */
-    AudioPortType type;
-    /** Bitmask, indexed by AudioPortConfigType. */
-    int configMask;
-    /** Sampling rate in Hz. */
-    int sampleRate;
-    /**
-     * Channel mask, if applicable.
-     * Interpreted as audio_channel_mask_t.
-     * TODO: bitmask?
-     */
-    int channelMask;
-    /**
-     * Format, if applicable.
-     */
-    AudioFormat format;
-    /** Gain to apply, if applicable. */
-    AudioGainConfig gain;
-    /** Framework only: HW_AV_SYNC, DIRECT, ... */
-    AudioIoFlags flags;
-    AudioPortConfigExt ext;
+    AudioPortConfig hal;
+    AudioPortConfigSys sys;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
deleted file mode 100644
index a99aa9b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigDeviceExt {
-    /**
-     * Module the device is attached to.
-     * Interpreted as audio_module_handle_t.
-     */
-    int hwModule;
-    /**
-     * Device type (e.g AUDIO_DEVICE_OUT_SPEAKER).
-     * Interpreted as audio_devices_t.
-     * TODO: Convert to a standalone AIDL representation.
-     */
-    int type;
-    /** Device address. "" if N/A. */
-    @utf8InCpp String address;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
deleted file mode 100644
index 5d635b6..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortConfigDeviceExt;
-import android.media.AudioPortConfigMixExt;
-import android.media.AudioPortConfigSessionExt;
-
-/**
- * {@hide}
- */
-union AudioPortConfigExt {
-    /**
-     * This represents an empty union. Value is ignored.
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** Device specific info. */
-    AudioPortConfigDeviceExt device;
-    /** Mix specific info. */
-    AudioPortConfigMixExt mix;
-    /** Session specific info. */
-    AudioPortConfigSessionExt session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl
deleted file mode 100644
index d3226f2..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortConfigMixExtUseCase;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigMixExt {
-    /**
-     * Module the stream is attached to.
-     * Interpreted as audio_module_handle_t.
-     */
-    int hwModule;
-    /**
-     * I/O handle of the input/output stream.
-     * Interpreted as audio_io_handle_t.
-     */
-    int handle;
-    AudioPortConfigMixExtUseCase usecase;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
deleted file mode 100644
index c61f044..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioSourceType;
-import android.media.AudioStreamType;
-
-/**
- * {@hide}
- */
-union AudioPortConfigMixExtUseCase {
-    /**
-     * This to be set if the containing config has the AudioPortRole::NONE role.
-     * This represents an empty value (value is ignored).
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** This to be set if the containing config has the AudioPortRole::SOURCE role. */
-    AudioStreamType stream;
-    /** This to be set if the containing config has the AudioPortRole::SINK role. */
-    AudioSourceType source;
-}
diff --git a/media/libaudioclient/aidl/android/media/Int.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
similarity index 71%
rename from media/libaudioclient/aidl/android/media/Int.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
index 24f4d62..8692848 100644
--- a/media/libaudioclient/aidl/android/media/Int.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
@@ -16,12 +16,17 @@
 
 package android.media;
 
+import android.media.AudioPortExtSys;
+import android.media.AudioPortRole;
+import android.media.AudioPortType;
+
 /**
- * This is a simple wrapper around an 'int', putting it in a parcelable, so it can be used as an
- * inout parameter, be made @nullable, etc.
- *
  * {@hide}
  */
-parcelable Int {
-    int value;
+parcelable AudioPortConfigSys {
+    /** Sink or source. */
+    AudioPortRole role;
+    /** Device, mix ... */
+    AudioPortType type;
+    AudioPortExtSys ext;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl
deleted file mode 100644
index 6e22b8d..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioPortConfigType {
-    SAMPLE_RATE  = 0,
-    CHANNEL_MASK = 1,
-    FORMAT       = 2,
-    GAIN         = 3,
-    FLAGS        = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
similarity index 85%
rename from media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
index b758f23..0f5a9b6 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,15 +16,12 @@
 
 package android.media;
 
-import android.media.AudioDevice;
-
 /**
  * {@hide}
  */
-parcelable AudioPortDeviceExt {
+parcelable AudioPortDeviceExtSys {
     /** Module the device is attached to. Interpreted as audio_module_handle_t. */
     int hwModule;
-    AudioDevice device;
     /** Bitmask, indexed by AudioEncapsulationMode. */
     int encapsulationModes;
     /** Bitmask, indexed by AudioEncapsulationMetadataType. */
diff --git a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
deleted file mode 100644
index 453784b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortDeviceExt;
-import android.media.AudioPortMixExt;
-import android.media.AudioPortSessionExt;
-
-/**
- * {@hide}
- */
-union AudioPortExt {
-    /**
-     * This represents an empty union. Value is ignored.
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** Device specific info. */
-    AudioPortDeviceExt device;
-    /** Mix specific info. */
-    AudioPortMixExt mix;
-    /** Session specific info. */
-    AudioPortSessionExt session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
similarity index 62%
copy from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
copy to media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
index b08a604..2cdf4f6 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
@@ -16,14 +16,19 @@
 
 package android.media;
 
+import android.media.AudioPortDeviceExtSys;
+import android.media.AudioPortMixExtSys;
+
 /**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioEncapsulationType {
-    NONE     = 0,
-    IEC61937 = 1,
-}
\ No newline at end of file
+union AudioPortExtSys {
+    /**
+     * This represents an empty union. Value is ignored.
+     */
+    boolean unspecified;
+    /** System-only parameters when the port is an audio device. */
+    AudioPortDeviceExtSys device;
+    /** System-only parameters when the port is an audio mix. */
+    AudioPortMixExtSys mix;
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
deleted file mode 100644
index 62cdb8e..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioMixLatencyClass;
-
-/**
- * {@hide}
- */
-parcelable AudioPortMixExt {
-    /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
-    int hwModule;
-    /** I/O handle of the input/output stream. Interpreted as audio_io_handle_t. */
-    int handle;
-    /** Latency class */
-    AudioMixLatencyClass latencyClass;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStandard.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
similarity index 81%
copy from media/libaudioclient/aidl/android/media/AudioStandard.aidl
copy to media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
index e131d0d..5999885 100644
--- a/media/libaudioclient/aidl/android/media/AudioStandard.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
@@ -13,15 +13,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
- * The audio standard that describe audio playback/capture capabilites.
- *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioStandard {
-    NONE = 0,
-    EDID = 1,
+parcelable AudioPortMixExtSys {
+    /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
+    int hwModule;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
deleted file mode 100644
index dbca168..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortSessionExt {
-    /** Audio session. Interpreted as audio_session_t. */
-    int session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
new file mode 100644
index 0000000..f3b5c19
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioGainSys;
+import android.media.AudioPortConfig;
+import android.media.AudioPortExtSys;
+import android.media.AudioPortRole;
+import android.media.AudioPortType;
+import android.media.AudioProfileSys;
+
+/**
+ * {@hide}
+ */
+parcelable AudioPortSys {
+    /** Sink or source. */
+    AudioPortRole role;
+    /** Device, mix ... */
+    AudioPortType type;
+    /** System-only parameters for each AudioProfile from 'port.profiles'. */
+    AudioProfileSys[] profiles;
+    /** System-only parameters for each AudioGain from 'port.gains'. */
+    AudioGainSys[] gains;
+    /** Current audio port configuration. */
+    AudioPortConfig activeConfig;
+    /** System-only extra parameters for 'port.ext'. */
+    AudioPortExtSys ext;
+}
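
The arrays in AudioPortSys are documented above as per-element companions of the HAL-side arrays ('port.profiles', 'port.gains'). A short sketch of the consistency check a conversion helper could perform, again assuming the cpp-backend field layout; the helper name is hypothetical.

    #include <android/media/AudioPort.h>

    // The system-only companion arrays must pair up one-to-one with the
    // HAL-side arrays they annotate.
    bool portSysIsConsistent(const android::media::AudioPort& port) {
        return port.sys.profiles.size() == port.hal.profiles.size() &&
               port.sys.gains.size() == port.hal.gains.size();
    }
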
diff --git a/media/libaudioclient/aidl/android/media/AudioProfile.aidl b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
deleted file mode 100644
index afb288f..0000000
--- a/media/libaudioclient/aidl/android/media/AudioProfile.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioEncapsulationType;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioProfile {
-    @utf8InCpp String name;
-    /** The format for an audio profile should only be set when initialized. */
-    AudioFormat format;
-    /** Interpreted as audio_channel_mask_t. */
-    int[] channelMasks;
-    int[] samplingRates;
-    boolean isDynamicFormat;
-    boolean isDynamicChannels;
-    boolean isDynamicRate;
-    AudioEncapsulationType encapsulationType;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl b/media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
similarity index 68%
copy from media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
copy to media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
index 9e04e82..329c9d5 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
@@ -13,14 +13,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
+ * Indicates whether the parameters of the AudioProfiles in the
+ * AudioPort are dynamic. Each instance of AudioProfileSys corresponds
+ * to an instance of AudioProfile.

+ *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioEncapsulationMode {
-     NONE = 0,
-     ELEMENTARY_STREAM = 1,
-     HANDLE = 2,
+parcelable AudioProfileSys {
+    boolean isDynamicFormat;
+    boolean isDynamicChannels;
+    boolean isDynamicRate;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl b/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
deleted file mode 100644
index 8673b92..0000000
--- a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioSourceType {
-    INVALID = -1,
-    DEFAULT = 0,
-    MIC = 1,
-    VOICE_UPLINK = 2,
-    VOICE_DOWNLINK = 3,
-    VOICE_CALL = 4,
-    CAMCORDER = 5,
-    VOICE_RECOGNITION = 6,
-    VOICE_COMMUNICATION = 7,
-    REMOTE_SUBMIX = 8,
-    UNPROCESSED = 9,
-    VOICE_PERFORMANCE = 10,
-    ECHO_REFERENCE = 1997,
-    FM_TUNER = 1998,
-    /**
-     * A low-priority, preemptible audio source for for background software
-     * hotword detection. Same tuning as VOICE_RECOGNITION.
-     * Used only internally by the framework.
-     */
-    HOTWORD = 1999,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStreamType.aidl b/media/libaudioclient/aidl/android/media/AudioStreamType.aidl
deleted file mode 100644
index d777882..0000000
--- a/media/libaudioclient/aidl/android/media/AudioStreamType.aidl
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioStreamType {
-    DEFAULT = -1,
-    VOICE_CALL = 0,
-    SYSTEM = 1,
-    RING = 2,
-    MUSIC = 3,
-    ALARM = 4,
-    NOTIFICATION = 5,
-    BLUETOOTH_SCO = 6,
-    ENFORCED_AUDIBLE = 7,
-    DTMF = 8,
-    TTS = 9,
-    ACCESSIBILITY = 10,
-    ASSISTANT = 11,
-    /** For dynamic policy output mixes. Only used by the audio policy */
-    REROUTING = 12,
-    /** For audio flinger tracks volume. Only used by the audioflinger */
-    PATCH = 13,
-    /** stream for corresponding to AUDIO_USAGE_CALL_ASSISTANT */
-    CALL_ASSISTANT = 14,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioUsage.aidl b/media/libaudioclient/aidl/android/media/AudioUsage.aidl
deleted file mode 100644
index 66c5c30..0000000
--- a/media/libaudioclient/aidl/android/media/AudioUsage.aidl
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioUsage {
-    UNKNOWN = 0,
-    MEDIA = 1,
-    VOICE_COMMUNICATION = 2,
-    VOICE_COMMUNICATION_SIGNALLING = 3,
-    ALARM = 4,
-    NOTIFICATION = 5,
-    NOTIFICATION_TELEPHONY_RINGTONE = 6,
-    NOTIFICATION_COMMUNICATION_REQUEST = 7,
-    NOTIFICATION_COMMUNICATION_INSTANT = 8,
-    NOTIFICATION_COMMUNICATION_DELAYED = 9,
-    NOTIFICATION_EVENT = 10,
-    ASSISTANCE_ACCESSIBILITY = 11,
-    ASSISTANCE_NAVIGATION_GUIDANCE = 12,
-    ASSISTANCE_SONIFICATION = 13,
-    GAME = 14,
-    VIRTUAL_SOURCE = 15,
-    ASSISTANT = 16,
-    CALL_ASSISTANT = 17,
-    EMERGENCY = 1000,
-    SAFETY = 1001,
-    VEHICLE_STATUS = 1002,
-    ANNOUNCEMENT = 1003,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioUuid.aidl b/media/libaudioclient/aidl/android/media/AudioUuid.aidl
deleted file mode 100644
index bba9039..0000000
--- a/media/libaudioclient/aidl/android/media/AudioUuid.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioUuid {
-    int timeLow;
-    int timeMid;
-    int timeHiAndVersion;
-    int clockSeq;
-    byte[] node;  // Length = 6
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
index f88fc3c..8538d8a 100644
--- a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
@@ -24,4 +24,5 @@
     int id;
     float resonantFrequency;
     float qFactor;
+    float maxAmplitude;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl b/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
index 3a29a08..b95a1d3 100644
--- a/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
@@ -17,7 +17,7 @@
 package android.media;
 
 import android.media.AudioAttributesInternal;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
index 2d274f4..bcca04a 100644
--- a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
@@ -16,10 +16,10 @@
 
 package android.media;
 
-import android.media.AudioDevice;
+import android.content.AttributionSourceState;
 import android.media.EffectDescriptor;
 import android.media.IEffectClient;
-import android.content.AttributionSourceState;
+import android.media.audio.common.AudioDevice;
 
 /**
  * Input arguments of the createEffect() method.
@@ -37,4 +37,6 @@
     AudioDevice device;
     AttributionSourceState attributionSource;
     boolean probe;
+    /** true if a callback must be sent each time audio frames are processed */
+    boolean notifyFramesProcessed;
 }
diff --git a/media/libaudioclient/aidl/android/media/CreateEffectResponse.aidl b/media/libaudioclient/aidl/android/media/CreateEffectResponse.aidl
index 0aa640a..e2755dd 100644
--- a/media/libaudioclient/aidl/android/media/CreateEffectResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateEffectResponse.aidl
@@ -29,4 +29,5 @@
     boolean enabled;
     @nullable IEffect effect;
     EffectDescriptor desc;
+    boolean alreadyExists;
 }
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
index 7e3c240..b938a3e 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
@@ -18,7 +18,7 @@
 
 import android.media.AudioAttributesInternal;
 import android.media.AudioClient;
-import android.media.AudioConfigBase;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * CreateRecordRequest contains all input arguments sent by AudioRecord to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl b/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
index d78b3fc..7d159d0 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
@@ -18,6 +18,7 @@
 
 import android.media.IAudioRecord;
 import android.media.SharedFileRegion;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * CreateRecordResponse contains all output arguments returned by AudioFlinger to AudioRecord
@@ -43,4 +44,5 @@
     int portId;
     /** The newly created record. */
     @nullable IAudioRecord audioRecord;
+    AudioConfigBase serverConfig;
 }
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl b/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
index 014b3ca..212221e 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
@@ -18,9 +18,9 @@
 
 import android.media.AudioAttributesInternal;
 import android.media.AudioClient;
-import android.media.AudioConfig;
 import android.media.IAudioTrackCallback;
 import android.media.SharedFileRegion;
+import android.media.audio.common.AudioConfig;
 
 /**
  * CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
index 40473fa..da6f454 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 import android.media.IAudioTrack;
 
 /**
diff --git a/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl b/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
index 35a3d74..e5b5158 100644
--- a/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioUuid;
+import android.media.audio.common.AudioUuid;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl b/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl
deleted file mode 100644
index ec5b67a..0000000
--- a/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioEncapsulationType;
-import android.media.AudioStandard;
-
-/**
- * The audio descriptor that descibes playback/capture capabilities according to
- * a particular standard.
- *
- * {@hide}
- */
-parcelable ExtraAudioDescriptor {
-    AudioStandard standard;
-    byte[] audioDescriptor;
-    AudioEncapsulationType encapsulationType;
-}
diff --git a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
index 164fb9d..963877a 100644
--- a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl
similarity index 72%
rename from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
rename to media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl
index b08a604..25115ac 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl
@@ -16,14 +16,13 @@
 
 package android.media;
 
+import android.media.ISpatializer;
+
 /**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
+ * Used as the return value for the IAudioPolicyService.getSpatializer() method.
  * {@hide}
  */
-@Backing(type="int")
-enum AudioEncapsulationType {
-    NONE     = 0,
-    IEC61937 = 1,
-}
\ No newline at end of file
+parcelable GetSpatializerResponse {
+    /* The ISpatializer interface if successful, null if not */
+    @nullable ISpatializer spatializer;
+}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index d2cae6d..c55c66e 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -16,13 +16,10 @@
 
 package android.media;
 
-import android.media.AudioMode;
 import android.media.AudioPatch;
 import android.media.AudioPort;
 import android.media.AudioPortConfig;
-import android.media.AudioStreamType;
 import android.media.AudioUniqueIdUse;
-import android.media.AudioUuid;
 import android.media.AudioVibratorInfo;
 import android.media.CreateEffectRequest;
 import android.media.CreateEffectResponse;
@@ -41,7 +38,13 @@
 import android.media.MicrophoneInfoData;
 import android.media.RenderPosition;
 import android.media.TrackSecondaryOutputInfo;
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioMMapPolicyInfo;
+import android.media.audio.common.AudioMMapPolicyType;
+import android.media.audio.common.AudioMode;
+import android.media.audio.common.AudioStreamType;
+import android.media.audio.common.AudioUuid;
 
 /**
  * {@hide}
@@ -62,7 +65,7 @@
      */
     int sampleRate(int /* audio_io_handle_t */ ioHandle);
 
-    AudioFormat format(int /* audio_io_handle_t */ output);
+    AudioFormatDescription format(int /* audio_io_handle_t */ output);
 
     long frameCount(int /* audio_io_handle_t */ ioHandle);
 
@@ -115,8 +118,8 @@
     // Retrieve the audio recording buffer size in bytes.
     // FIXME This API assumes a route, and so should be deprecated.
     long getInputBufferSize(int sampleRate,
-                            AudioFormat format,
-                            int /* audio_channel_mask_t */ channelMask);
+                            in AudioFormatDescription format,
+                            in AudioChannelLayout channelMask);
 
     OpenOutputResponse openOutput(in OpenOutputRequest request);
     int /* audio_io_handle_t */ openDuplicateOutput(int /* audio_io_handle_t */ output1,
@@ -197,6 +200,9 @@
     /* Indicate JAVA services are ready (scheduling, power management ...) */
     oneway void systemReady();
 
+    /* Indicate audio policy service is ready */
+    oneway void audioPolicyReady();
+
     // Returns the number of frames per audio HAL buffer.
     long frameCountHAL(int /* audio_io_handle_t */ ioHandle);
 
@@ -213,4 +219,10 @@
     // This usually happens when there is a dynamic policy registered.
     void updateSecondaryOutputs(
             in TrackSecondaryOutputInfo[] trackSecondaryOutputInfos);
+
+    AudioMMapPolicyInfo[] getMmapPolicyInfos(AudioMMapPolicyType policyType);
+
+    int getAAudioMixerBurstCount();
+
+    int getAAudioHardwareBurstMinUsec();
 }
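
The new IAudioFlingerService methods above expose the MMAP policy to AAudio. A hedged sketch of calling one of them through the generated cpp-backend proxy (an array return value becomes a trailing output vector and the binder status is returned); the AudioMMapPolicyType value name is an assumption.

    #include <vector>
    #include <android/media/IAudioFlingerService.h>
    #include <utils/StrongPointer.h>

    using android::binder::Status;
    using android::media::IAudioFlingerService;
    using android::media::audio::common::AudioMMapPolicyInfo;
    using android::media::audio::common::AudioMMapPolicyType;

    // Ask audioflinger for the default MMAP policy entries, given an
    // already-obtained service proxy.
    Status queryMmapPolicy(const android::sp<IAudioFlingerService>& af,
                           std::vector<AudioMMapPolicyInfo>* infos) {
        return af->getMmapPolicyInfos(AudioMMapPolicyType::DEFAULT, infos);
    }
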
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 4c3955a..c3e8dfb 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -18,16 +18,10 @@
 
 import android.content.AttributionSourceState;
 
-import android.media.audio.common.AudioFormat;
-
 import android.media.AudioAttributesEx;
 import android.media.AudioAttributesInternal;
-import android.media.AudioConfig;
-import android.media.AudioConfigBase;
-import android.media.AudioDevice;
+import android.media.AudioDirectMode;
 import android.media.AudioMix;
-import android.media.AudioMode;
-import android.media.AudioOffloadInfo;
 import android.media.AudioOffloadMode;
 import android.media.AudioPatch;
 import android.media.AudioPolicyDeviceState;
@@ -38,19 +32,29 @@
 import android.media.AudioPortRole;
 import android.media.AudioPortType;
 import android.media.AudioProductStrategy;
-import android.media.AudioSourceType;
-import android.media.AudioStreamType;
-import android.media.AudioUsage;
-import android.media.AudioUuid;
 import android.media.AudioVolumeGroup;
 import android.media.DeviceRole;
 import android.media.EffectDescriptor;
 import android.media.GetInputForAttrResponse;
 import android.media.GetOutputForAttrResponse;
+import android.media.GetSpatializerResponse;
 import android.media.IAudioPolicyServiceClient;
 import android.media.ICaptureStateListener;
-import android.media.Int;
+import android.media.INativeSpatializerCallback;
 import android.media.SoundTriggerSession;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioConfigBase;
+import android.media.audio.common.AudioDevice;
+import android.media.audio.common.AudioDeviceDescription;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioMode;
+import android.media.audio.common.AudioProfile;
+import android.media.audio.common.AudioOffloadInfo;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioStreamType;
+import android.media.audio.common.AudioUsage;
+import android.media.audio.common.AudioUuid;
+import android.media.audio.common.Int;
 
 /**
  * IAudioPolicyService interface (see AudioPolicyInterface for method descriptions).
@@ -63,13 +67,13 @@
     void setDeviceConnectionState(in AudioDevice device,
                                   in AudioPolicyDeviceState state,
                                   @utf8InCpp String deviceName,
-                                  in AudioFormat encodedFormat);
+                                  in AudioFormatDescription encodedFormat);
 
     AudioPolicyDeviceState getDeviceConnectionState(in AudioDevice device);
 
     void handleDeviceConfigChange(in AudioDevice device,
                                   @utf8InCpp String deviceName,
-                                  in AudioFormat encodedFormat);
+                                  in AudioFormatDescription encodedFormat);
 
     void setPhoneState(AudioMode state, int /* uid_t */ uid);
 
@@ -114,18 +118,18 @@
                           int indexMax);
 
     void setStreamVolumeIndex(AudioStreamType stream,
-                              int /* audio_devices_t */ device,
+                              in AudioDeviceDescription device,
                               int index);
 
     int getStreamVolumeIndex(AudioStreamType stream,
-                             int /* audio_devices_t */ device);
+                             in AudioDeviceDescription device);
 
     void setVolumeIndexForAttributes(in AudioAttributesInternal attr,
-                                     int /* audio_devices_t */ device,
+                                     in AudioDeviceDescription device,
                                      int index);
 
     int getVolumeIndexForAttributes(in AudioAttributesInternal attr,
-                                    int /* audio_devices_t */ device);
+                                    in AudioDeviceDescription device);
 
     int getMaxVolumeIndexForAttributes(in AudioAttributesInternal attr);
 
@@ -133,7 +137,7 @@
 
     int /* product_strategy_t */ getStrategyForStream(AudioStreamType stream);
 
-    int /* bitmask of audio_devices_t */ getDevicesForStream(AudioStreamType stream);
+    AudioDeviceDescription[] getDevicesForStream(AudioStreamType stream);
 
     AudioDevice[] getDevicesForAttributes(in AudioAttributesEx attr);
 
@@ -155,7 +159,7 @@
 
     boolean isStreamActiveRemotely(AudioStreamType stream, int inPastMs);
 
-    boolean isSourceActive(AudioSourceType source);
+    boolean isSourceActive(AudioSource source);
 
     /**
      * On input, count represents the maximum length of the returned array.
@@ -170,7 +174,7 @@
                                                        @utf8InCpp String opPackageName,
                                                        in AudioUuid uuid,
                                                        int priority,
-                                                       AudioSourceType source);
+                                                       AudioSource source);
 
     int /* audio_unique_id_t */ addStreamDefaultEffect(in AudioUuid type,
                                                        @utf8InCpp String opPackageName,
@@ -268,7 +272,7 @@
 
     boolean getMasterMono();
 
-    float getStreamVolumeDB(AudioStreamType stream, int index, int /* audio_devices_t */ device);
+    float getStreamVolumeDB(AudioStreamType stream, int index, in AudioDeviceDescription device);
 
     /**
      * Populates supported surround formats and their enabled state in formats and formatsEnabled.
@@ -279,7 +283,7 @@
      * number of elements without actually retrieving them.
      */
     void getSurroundFormats(inout Int count,
-                            out AudioFormat[] formats,
+                            out AudioFormatDescription[] formats,
                             out boolean[] formatsEnabled);
 
     /**
@@ -291,11 +295,12 @@
      * number of elements without actually retrieving them.
      */
     void getReportedSurroundFormats(inout Int count,
-                                    out AudioFormat[] formats);
+                                    out AudioFormatDescription[] formats);
 
-    AudioFormat[] getHwOffloadEncodingFormatsSupportedForA2DP();
+    AudioFormatDescription[] getHwOffloadFormatsSupportedForBluetoothMedia(
+                                    in AudioDeviceDescription device);
 
-    void setSurroundFormatEnabled(AudioFormat audioFormat, boolean enabled);
+    void setSurroundFormatEnabled(in AudioFormatDescription audioFormat, boolean enabled);
 
     void setAssistantUid(int /* uid_t */ uid);
 
@@ -307,6 +312,8 @@
 
     boolean isHapticPlaybackSupported();
 
+    boolean isUltrasoundSupported();
+
     AudioProductStrategy[] listAudioProductStrategies();
     int /* product_strategy_t */ getProductStrategyFromAudioAttributes(in AudioAttributesEx aa,
                                                                        boolean fallbackOnDefault);
@@ -329,23 +336,60 @@
     AudioDevice[] getDevicesForRoleAndStrategy(int /* product_strategy_t */ strategy,
                                                DeviceRole role);
 
-    void setDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void setDevicesRoleForCapturePreset(AudioSource audioSource,
                                         DeviceRole role,
                                         in AudioDevice[] devices);
 
-    void addDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void addDevicesRoleForCapturePreset(AudioSource audioSource,
                                         DeviceRole role,
                                         in AudioDevice[] devices);
 
-    void removeDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void removeDevicesRoleForCapturePreset(AudioSource audioSource,
                                            DeviceRole role,
                                            in AudioDevice[] devices);
 
-    void clearDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void clearDevicesRoleForCapturePreset(AudioSource audioSource,
                                           DeviceRole role);
 
-    AudioDevice[] getDevicesForRoleAndCapturePreset(AudioSourceType audioSource,
+    AudioDevice[] getDevicesForRoleAndCapturePreset(AudioSource audioSource,
                                                     DeviceRole role);
 
     boolean registerSoundTriggerCaptureStateListener(ICaptureStateListener listener);
+
+    /** If a spatializer stage effect is present on the platform, this will return an
+     * ISpatializer interface (see GetSpatializerResponse.aidl) to control this
+     * feature.
+     * If no spatializer stage is present, a null interface is returned.
+     * The INativeSpatializerCallback passed must not be null.
+     * Only one ISpatializer interface can exist at a given time. The native audio policy
+     * service will reject the request if an interface was already acquired and the previous
+     * owner did not die or call ISpatializer.release().
+     */
+    GetSpatializerResponse getSpatializer(INativeSpatializerCallback callback);
+
+    /** Queries if some kind of spatialization will be performed if the audio playback context
+     * described by the provided arguments is present.
+     * The context is made of:
+     * - The audio attributes describing the playback use case.
+     * - The audio configuration describing the audio format, channels, sampling rate...
+     * - The devices describing the sink audio device selected for playback.
+     * All arguments are optional and only the specified arguments are used to match against
+     * supported criteria. For instance, supplying no arguments indicates whether spatialization
+     * is supported in general.
+     */
+    boolean canBeSpatialized(in @nullable AudioAttributesInternal attr,
+                             in @nullable AudioConfig config,
+                             in AudioDevice[] devices);
+
+    /**
+     * Query how the direct playback is currently supported on the device.
+     */
+    AudioDirectMode getDirectPlaybackSupport(in AudioAttributesInternal attr,
+                                              in AudioConfig config);
+
+    /**
+     * Query audio profiles available for direct playback on the current output device(s)
+     * for the specified audio attributes.
+     */
+    AudioProfile[] getDirectProfilesForAttributes(in AudioAttributesInternal attr);
 }
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
index a7782b8..d93a59d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
@@ -16,10 +16,10 @@
 
 package android.media;
 
-import android.media.AudioConfigBase;
-import android.media.AudioSourceType;
 import android.media.EffectDescriptor;
 import android.media.RecordClientInfo;
+import android.media.audio.common.AudioConfigBase;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -43,7 +43,7 @@
                                         in AudioConfigBase deviceConfig,
                                         in EffectDescriptor[] effects,
                                         int /* audio_patch_handle_t */ patchHandle,
-                                        AudioSourceType source);
+                                        AudioSource source);
      /** Notifies a change of audio routing */
      void onRoutingUpdated();
 }
diff --git a/media/libaudioclient/aidl/android/media/IEffectClient.aidl b/media/libaudioclient/aidl/android/media/IEffectClient.aidl
index 3b6bcf1..37b442d 100644
--- a/media/libaudioclient/aidl/android/media/IEffectClient.aidl
+++ b/media/libaudioclient/aidl/android/media/IEffectClient.aidl
@@ -43,4 +43,10 @@
      * TODO(ytai): replace opaque byte arrays with strongly typed parameters.
      */
     oneway void commandExecuted(int cmdCode, in byte[] cmdData, in byte[] replyData);
+
+    /**
+     * Called whenever audio frames have been processed by the effect engine.
+     * @param frames number of frames processed.
+     */
+    oneway void framesProcessed(int frames);
 }
diff --git a/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl b/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
new file mode 100644
index 0000000..88b8108
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.SpatializationLevel;
+import android.media.SpatializerHeadTrackingMode;
+
+/**
+ * The INativeSpatializerCallback interface is a callback associated with the
+ * ISpatializer interface. The callback is used by the spatializer
+ * implementation in the native audio server to communicate state changes to
+ * the client controlling the spatializer via the ISpatializer interface.
+ * {@hide}
+ */
+oneway interface INativeSpatializerCallback {
+    /** Called when the spatialization level applied by the spatializer changes
+     * (e.g. when the spatializer is enabled or disabled)
+     */
+    void onLevelChanged(SpatializationLevel level);
+
+    /** Called when the output stream the Spatializer is attached to changes.
+     * Indicates the I/O handle of the new output.
+     */
+    void onOutputChanged(int output);
+}
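
A client passes an INativeSpatializerCallback when acquiring the spatializer; with the cpp backend this is typically done by deriving from the generated Bn class. A hedged sketch (class name and comments are illustrative).

    #include <android/media/BnNativeSpatializerCallback.h>

    using android::binder::Status;

    // Minimal callback implementation; a real client would forward these events
    // to whatever is driving the spatializer through ISpatializer.
    class SpatializerStateListener : public android::media::BnNativeSpatializerCallback {
      public:
        Status onLevelChanged(android::media::SpatializationLevel level) override {
            (void)level;   // e.g. note that spatialization was enabled/disabled
            return Status::ok();
        }
        Status onOutputChanged(int32_t output) override {
            (void)output;  // e.g. remember the I/O handle of the new output
            return Status::ok();
        }
    };
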
diff --git a/media/libaudioclient/aidl/android/media/ISpatializer.aidl b/media/libaudioclient/aidl/android/media/ISpatializer.aidl
new file mode 100644
index 0000000..b871238
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/ISpatializer.aidl
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.ISpatializerHeadTrackingCallback;
+import android.media.SpatializationLevel;
+import android.media.SpatializationMode;
+import android.media.SpatializerHeadTrackingMode;
+
+
+/**
+ * The ISpatializer interface is used to control the native audio service implementation
+ * of the spatializer stage with head tracking when present on a platform.
+ * It is intended for exclusive use by the Java AudioService running in system_server.
+ * It provides APIs to discover feature availability and options, as well as to control and
+ * report the active state and modes of the spatializer and head tracking effect.
+ * {@hide}
+ */
+interface ISpatializer {
+    /** Releases a ISpatializer interface previously acquired. */
+    void release();
+
+    /** Reports the list of supported spatialization levels (see SpatializationLevel.aidl).
+     * The list should never be empty if an ISpatializer interface was successfully
+     * retrieved with IAudioPolicyService.getSpatializer().
+     */
+    SpatializationLevel[] getSupportedLevels();
+
+    /** Selects the desired spatialization level (see SpatializationLevel.aidl). Selecting a level
+     * different from SpatializationLevel.NONE will create the specialized multichannel output
+     * mixer, create and enable the spatializer effect, and let the audio policy attach eligible
+     * AudioTracks to this output stream.
+     */
+    void setLevel(SpatializationLevel level);
+
+    /** Gets the selected spatialization level (see SpatializationLevel.aidl) */
+    SpatializationLevel getLevel();
+
+    /** Reports whether the spatializer engine supports head tracking.
+     * This is a precondition independent of whether a head tracking sensor is
+     * registered.
+     */
+    boolean isHeadTrackingSupported();
+
+    /** Reports the list of supported head tracking modes (see SpatializerHeadTrackingMode.aidl).
+     * The list can be empty if the spatializer implementation does not support head tracking or if
+     * no head tracking sensor is registered (see setHeadSensor() and setScreenSensor()).
+     */
+    SpatializerHeadTrackingMode[] getSupportedHeadTrackingModes();
+
+    /** Selects the desired head tracking mode (see SpatializerHeadTrackingMode.aidl) */
+    void setDesiredHeadTrackingMode(SpatializerHeadTrackingMode mode);
+
+    /** Gets the actual head tracking mode. Can be different from the desired mode if conditions to
+     * enable the desired mode are not met (e.g. if the head tracking device was removed).
+     */
+    SpatializerHeadTrackingMode getActualHeadTrackingMode();
+
+    /** Resets the head tracking algorithm to consider the current head pose as neutral. */
+    void recenterHeadTracker();
+
+    /** Sets the screen-to-stage transform used by the head tracking algorithm.
+     * The screen-to-stage transform is conveyed as a vector of 6 elements,
+     * where the first three are a translation vector and
+     * the last three are a rotation vector.
+     */
+    void setGlobalTransform(in float[] screenToStage);
+
+    /**
+     * Set the sensor that is to be used for head-tracking.
+     * -1 can be used to disable head-tracking.
+     */
+    void setHeadSensor(int sensorHandle);
+
+    /**
+     * Set the sensor that is to be used for screen-tracking.
+     * -1 can be used to disable screen-tracking.
+     */
+    void setScreenSensor(int sensorHandle);
+
+    /**
+     * Sets the display orientation.
+     * Orientation is expressed as the angle of rotation from the physical "up" side of the screen
+     * to the logical "up" side of the content displayed on the screen. Counterclockwise angles,
+     * as viewed while facing the screen, are positive.
+     */
+    void setDisplayOrientation(float physicalToLogicalAngle);
+
+    /**
+     * Sets the hinge angle for foldable devices.
+     */
+    void setHingeAngle(float hingeAngle);
+
+    /** Reports the list of supported spatialization modes (see SpatializationMode.aidl).
+     * The list should never be empty if an ISpatializer interface was successfully
+     * retrieved with IAudioPolicyService.getSpatializer().
+     */
+    SpatializationMode[] getSupportedModes();
+
+    /**
+     * Registers a callback to monitor head tracking functions.
+     * Only one callback can be registered on a Spatializer.
+     * The last callback registered wins, and passing a nullptr unregisters
+     * the last registered callback.
+     */
+    void registerHeadTrackingCallback(@nullable ISpatializerHeadTrackingCallback callback);
+
+    /**
+     * Sets a parameter on the spatializer engine. Used by the effect implementor for
+     * vendor-specific configuration.
+     */
+     void setParameter(int key, in byte[] value);
+
+    /**
+     * Gets a parameter from the spatializer engine. Used by the effect implementor for
+     * vendor-specific configuration.
+     */
+     void getParameter(int key, inout byte[] value);
+
+    /**
+     * Gets the io handle of the output stream the spatializer is connected to.
+     */
+     int getOutput();
+}
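
Reviewer note (not part of the patch): for orientation, a minimal client-side sketch of how this interface could be driven through the standard generated C++ AIDL binding. The spatializer proxy and headSensorHandle are assumed to have been obtained elsewhere (e.g. via IAudioPolicyService.getSpatializer() and the sensor framework); names below are illustrative only.

#include <vector>

#include <android/media/ISpatializer.h>

using android::binder::Status;
using android::media::ISpatializer;
using android::media::SpatializationLevel;
using android::media::SpatializerHeadTrackingMode;

// Picks the last (richest) supported level and enables world-relative head tracking if available.
void configureSpatializer(const android::sp<ISpatializer>& spatializer, int32_t headSensorHandle) {
    std::vector<SpatializationLevel> levels;
    Status status = spatializer->getSupportedLevels(&levels);
    if (!status.isOk() || levels.empty()) return;
    spatializer->setLevel(levels.back());

    bool headTrackingSupported = false;
    spatializer->isHeadTrackingSupported(&headTrackingSupported);
    if (headTrackingSupported) {
        spatializer->setHeadSensor(headSensorHandle);  // -1 would disable head tracking.
        spatializer->setDesiredHeadTrackingMode(SpatializerHeadTrackingMode::RELATIVE_WORLD);
        spatializer->recenterHeadTracker();
    }
}
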
diff --git a/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl b/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl
new file mode 100644
index 0000000..23d5e13
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.SpatializationLevel;
+import android.media.SpatializerHeadTrackingMode;
+
+/**
+ * The ISpatializerHeadTrackingCallback interface is a callback associated to the
+ * Spatializer head tracking function. It can be registered via the ISpatializer
+ * interface to monitor head tracking related states.
+ * {@hide}
+ */
+oneway interface ISpatializerHeadTrackingCallback {
+    /** Called when the head tracking mode has changed.
+     */
+    void onHeadTrackingModeChanged(SpatializerHeadTrackingMode mode);
+
+    /** Called when the head-to-stage pose has been updated.
+     * The head-to-stage pose is conveyed as a vector of 6 elements,
+     * where the first three are a translation vector and
+     * the last three are a rotation vector.
+     */
+    void onHeadToSoundStagePoseUpdated(in float[] headToStage);
+}
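
Reviewer note (not part of the patch): a sketch of a native implementation of this callback, assuming the standard generated BnSpatializerHeadTrackingCallback base class; HeadTrackingLogger is a hypothetical name used only for illustration.

#define LOG_TAG "HeadTrackingLogger"

#include <vector>

#include <android/media/BnSpatializerHeadTrackingCallback.h>
#include <utils/Log.h>

class HeadTrackingLogger : public android::media::BnSpatializerHeadTrackingCallback {
  public:
    android::binder::Status onHeadTrackingModeChanged(
            android::media::SpatializerHeadTrackingMode mode) override {
        ALOGI("head tracking mode changed to %d", static_cast<int>(mode));
        return android::binder::Status::ok();
    }

    android::binder::Status onHeadToSoundStagePoseUpdated(
            const std::vector<float>& headToStage) override {
        if (headToStage.size() == 6) {
            // First three elements: translation vector; last three: rotation vector.
            ALOGV("translation [%f %f %f] rotation [%f %f %f]",
                  headToStage[0], headToStage[1], headToStage[2],
                  headToStage[3], headToStage[4], headToStage[5]);
        }
        return android::binder::Status::ok();
    }
};
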
diff --git a/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
index 2e55526..75ff8e9 100644
--- a/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
-import android.media.AudioSourceType;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -30,7 +30,7 @@
     int input;
     AudioConfig config;
     AudioDevice device;
-    AudioSourceType source;
+    AudioSource source;
     /** Bitmask, indexed by AudioInputFlag. */
     int flags;
 }
diff --git a/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl b/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
index b613ba5..41bc38a 100644
--- a/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
@@ -16,8 +16,8 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
index 06b12e9..90e7ea6 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
@@ -16,8 +16,9 @@
 
 package android.media;
 
-import android.media.AudioConfig;
 import android.media.AudioPort;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * {@hide}
@@ -25,7 +26,8 @@
 parcelable OpenOutputRequest {
     /** Interpreted as audio_module_handle_t. */
     int module;
-    AudioConfig config;
+    AudioConfig halConfig;
+    AudioConfigBase mixerConfig;
     /** Type must be DEVICE. */
     AudioPort device;
     /** Bitmask, indexed by AudioOutputFlag. */
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl b/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
index a051969..451a0bf 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioConfig;
+import android.media.audio.common.AudioConfig;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl b/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
index 3280460..7dad58d 100644
--- a/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioSourceType;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -28,7 +28,7 @@
     int uid;
     /** Interpreted as audio_session_t. */
     int session;
-    AudioSourceType source;
+    AudioSource source;
     /** Interpreted as audio_port_handle_t. */
     int portId;
     boolean silenced;
diff --git a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
index a829e59..4b540a9 100644
--- a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
+++ b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
@@ -16,6 +16,8 @@
 
 package android.media;
 
+import android.media.audio.common.AudioDeviceDescription;
+
 /**
  * {@hide}
  */
@@ -24,6 +26,6 @@
     int session;
     /** Interpreted as audio_io_handle_t. */
     int ioHandle;
-    /** Interpreted as audio_devices_t. */
-    int device;
+    /** Device type. */
+    AudioDeviceDescription device;
 }
diff --git a/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl b/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
new file mode 100644
index 0000000..961c5a1
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The spatialization level supported by the spatializer stage effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializationLevel {
+    /** Spatialization is disabled. */
+    NONE = 0,
+    /** The spatializer accepts audio with positional multichannel masks (e.g. 5.1). */
+    SPATIALIZER_MULTICHANNEL = 1,
+    /** The spatializer accepts audio made of a channel bed of positional multichannels (e.g. 5.1)
+     * and audio objects positioned independently via metadata.
+     */
+    SPATIALIZER_MCHAN_BED_PLUS_OBJECTS = 2,
+}
diff --git a/media/libaudioclient/aidl/android/media/SpatializationMode.aidl b/media/libaudioclient/aidl/android/media/SpatializationMode.aidl
new file mode 100644
index 0000000..5d8fd93
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializationMode.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The spatialization mode supported by the spatializer stage effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializationMode {
+    /** The spatializer supports binaural mode (over headphone-type devices). */
+    SPATIALIZATER_BINAURAL = 0,
+    /** The spatializer supports transaural mode (over speaker-type devices). */
+    SPATIALIZATER_TRANSAURAL = 1,
+}
diff --git a/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl b/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
new file mode 100644
index 0000000..58e0f61
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+
+/**
+ * The head tracking mode supported by the spatializer effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializerHeadTrackingMode {
+    /** Head tracking is active in a mode not listed below (forward compatibility) */
+    OTHER = 0,
+    /** Head tracking is disabled */
+    DISABLED = 1,
+    /** Head tracking is performed relative to the real world environment */
+    RELATIVE_WORLD = 2,
+    /** Head tracking is performed relative to the device's screen */
+    RELATIVE_SCREEN = 3,
+}
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index b290aa8..969e3e6 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -46,6 +46,7 @@
     ],
     shared_libs: [
         "android.hardware.audio.common-util",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index d03c6fa..7667501 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -58,7 +58,8 @@
 
 constexpr audio_mode_t kModes[] = {
     AUDIO_MODE_INVALID, AUDIO_MODE_CURRENT,          AUDIO_MODE_NORMAL,     AUDIO_MODE_RINGTONE,
-    AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN};
+    AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN,
+    AUDIO_MODE_CALL_REDIRECT, AUDIO_MODE_COMMUNICATION_REDIRECT};
 
 constexpr audio_session_t kSessionId[] = {AUDIO_SESSION_NONE, AUDIO_SESSION_OUTPUT_STAGE,
                                           AUDIO_SESSION_DEVICE};
@@ -231,7 +232,7 @@
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
     attributionSource.token = sp<BBinder>::make();
     track->set(AUDIO_STREAM_DEFAULT, sampleRate, format, channelMask, frameCount, flags, nullptr,
-               nullptr, notificationFrames, sharedBuffer, false, sessionId,
+               notificationFrames, sharedBuffer, false, sessionId,
                ((fast && sharedBuffer == 0) || offload) ? AudioTrack::TRANSFER_CALLBACK
                                                         : AudioTrack::TRANSFER_DEFAULT,
                offload ? &offloadInfo : nullptr, attributionSource, &attributes, false, 1.0f,
@@ -314,7 +315,7 @@
     attributionSource.packageName = std::string(mFdp.ConsumeRandomLengthString().c_str());
     attributionSource.token = sp<BBinder>::make();
     sp<AudioRecord> record = new AudioRecord(attributionSource);
-    record->set(AUDIO_SOURCE_DEFAULT, sampleRate, format, channelMask, frameCount, nullptr, nullptr,
+    record->set(AUDIO_SOURCE_DEFAULT, sampleRate, format, channelMask, frameCount, nullptr,
                 notificationFrames, false, sessionId,
                 fast ? AudioRecord::TRANSFER_CALLBACK : AudioRecord::TRANSFER_DEFAULT, flags,
                 getuid(), getpid(), &attributes, AUDIO_PORT_HANDLE_NONE);
@@ -383,6 +384,9 @@
                                    const std::vector<uint8_t> &replyData __unused) override {
         return binder::Status::ok();
     }
+    binder::Status framesProcessed(int32_t frames __unused) override {
+        return binder::Status::ok();
+    }
 };
 
 status_t AudioFlingerFuzzer::invokeAudioEffect() {
@@ -424,6 +428,7 @@
     request.attributionSource.packageName = opPackageName;
     request.attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(getpid()));
     request.probe = false;
+    request.notifyFramesProcessed = false;
 
     media::CreateEffectResponse response{};
     status_t status = af->createEffect(request, &response);
@@ -597,9 +602,10 @@
     media::OpenInputRequest request{};
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
     request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
-    request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+    request.config = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
-    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSource(source));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
 
     media::OpenInputResponse response{};
@@ -648,11 +654,16 @@
     sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(getValue(&mFdp, kDevices));
     audio_output_flags_t flags = getValue(&mFdp, kOutputFlags);
 
+    audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+
     media::OpenOutputRequest request{};
     media::OpenOutputResponse response{};
 
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
-    request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+    request.halConfig = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(config, false /*isInput*/));
+    request.mixerConfig = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(mixerConfig, false /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
 
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 4ec69c7..e769303 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -23,34 +23,43 @@
 
 #include <android/media/AudioAttributesInternal.h>
 #include <android/media/AudioClient.h>
-#include <android/media/AudioConfig.h>
-#include <android/media/AudioConfigBase.h>
+#include <android/media/AudioDirectMode.h>
 #include <android/media/AudioDualMonoMode.h>
-#include <android/media/AudioEncapsulationMode.h>
-#include <android/media/AudioEncapsulationMetadataType.h>
-#include <android/media/AudioEncapsulationType.h>
 #include <android/media/AudioFlag.h>
-#include <android/media/AudioGain.h>
-#include <android/media/AudioGainMode.h>
-#include <android/media/AudioInputFlags.h>
 #include <android/media/AudioIoConfigEvent.h>
 #include <android/media/AudioIoDescriptor.h>
-#include <android/media/AudioMixLatencyClass.h>
-#include <android/media/AudioMode.h>
-#include <android/media/AudioOutputFlags.h>
 #include <android/media/AudioPlaybackRate.h>
 #include <android/media/AudioPort.h>
-#include <android/media/AudioPortConfigType.h>
-#include <android/media/AudioPortDeviceExt.h>
-#include <android/media/AudioPortExt.h>
-#include <android/media/AudioPortMixExt.h>
-#include <android/media/AudioPortSessionExt.h>
-#include <android/media/AudioProfile.h>
+#include <android/media/AudioPortConfig.h>
+#include <android/media/AudioPortDeviceExtSys.h>
 #include <android/media/AudioTimestampInternal.h>
 #include <android/media/AudioUniqueIdUse.h>
 #include <android/media/EffectDescriptor.h>
-#include <android/media/ExtraAudioDescriptor.h>
 #include <android/media/TrackSecondaryOutputInfo.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioConfig.h>
+#include <android/media/audio/common/AudioConfigBase.h>
+#include <android/media/audio/common/AudioContentType.h>
+#include <android/media/audio/common/AudioDeviceDescription.h>
+#include <android/media/audio/common/AudioEncapsulationMetadataType.h>
+#include <android/media/audio/common/AudioEncapsulationMode.h>
+#include <android/media/audio/common/AudioEncapsulationType.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
+#include <android/media/audio/common/AudioGain.h>
+#include <android/media/audio/common/AudioGainConfig.h>
+#include <android/media/audio/common/AudioGainMode.h>
+#include <android/media/audio/common/AudioInputFlags.h>
+#include <android/media/audio/common/AudioMode.h>
+#include <android/media/audio/common/AudioOffloadInfo.h>
+#include <android/media/audio/common/AudioOutputFlags.h>
+#include <android/media/audio/common/AudioPortExt.h>
+#include <android/media/audio/common/AudioPortMixExt.h>
+#include <android/media/audio/common/AudioProfile.h>
+#include <android/media/audio/common/AudioSource.h>
+#include <android/media/audio/common/AudioStandard.h>
+#include <android/media/audio/common/AudioUsage.h>
+#include <android/media/audio/common/AudioUuid.h>
+#include <android/media/audio/common/ExtraAudioDescriptor.h>
 
 #include <android/media/SharedFileRegion.h>
 #include <binder/IMemory.h>
@@ -86,19 +95,9 @@
 ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
 
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
-        media::AudioPortConfigType aidl);
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
-        int32_t legacy);
-
 ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
 
-ConversionResult<audio_channel_mask_t> aidl2legacy_int32_t_audio_channel_mask_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_channel_mask_t_int32_t(audio_channel_mask_t legacy);
-
 ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_pid_t_int32_t(pid_t legacy);
 
@@ -116,10 +115,10 @@
 ConversionResult<std::optional<std::string_view>>
 legacy2aidl_optional_String16_optional_string(std::optional<String16> legacy);
 
-ConversionResult<audio_io_config_event> aidl2legacy_AudioIoConfigEvent_audio_io_config_event(
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
         media::AudioIoConfigEvent aidl);
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
-        audio_io_config_event legacy);
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+        audio_io_config_event_t legacy);
 
 ConversionResult<audio_port_role_t> aidl2legacy_AudioPortRole_audio_port_role_t(
         media::AudioPortRole aidl);
@@ -131,36 +130,59 @@
 ConversionResult<media::AudioPortType> legacy2aidl_audio_port_type_t_AudioPortType(
         audio_port_type_t legacy);
 
-ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
-        media::audio::common::AudioFormat aidl);
-ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
-        audio_format_t legacy);
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+        const media::audio::common::AudioChannelLayout& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioChannelLayout>
+legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+        const media::audio::common::AudioDeviceDescription& aidl);
+ConversionResult<media::audio::common::AudioDeviceDescription>
+legacy2aidl_audio_devices_t_AudioDeviceDescription(audio_devices_t legacy);
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, char* legacyAddress);
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, String8* legacyAddress);
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, std::string* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const char* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const String8& legacyAddress);
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+        const media::audio::common::AudioFormatDescription& aidl);
+ConversionResult<media::audio::common::AudioFormatDescription>
+legacy2aidl_audio_format_t_AudioFormatDescription(audio_format_t legacy);
 
 ConversionResult<audio_gain_mode_t>
-aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl);
-ConversionResult<media::AudioGainMode>
+aidl2legacy_AudioGainMode_audio_gain_mode_t(media::audio::common::AudioGainMode aidl);
+ConversionResult<media::audio::common::AudioGainMode>
 legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
 
-ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy);
-
 ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
-        const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type);
-ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
-        const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type);
+        const media::audio::common::AudioGainConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGainConfig>
+legacy2aidl_audio_gain_config_AudioGainConfig(const audio_gain_config& legacy, bool isInput);
 
-ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
-        media::AudioInputFlags aidl);
-ConversionResult<media::AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
-        audio_input_flags_t legacy);
+ConversionResult<audio_input_flags_t>
+aidl2legacy_AudioInputFlags_audio_input_flags_t(media::audio::common::AudioInputFlags aidl);
+ConversionResult<media::audio::common::AudioInputFlags>
+legacy2aidl_audio_input_flags_t_AudioInputFlags(audio_input_flags_t legacy);
 
-ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
-        media::AudioOutputFlags aidl);
-ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
-        audio_output_flags_t legacy);
+ConversionResult<audio_output_flags_t>
+aidl2legacy_AudioOutputFlags_audio_output_flags_t(media::audio::common::AudioOutputFlags aidl);
+ConversionResult<media::audio::common::AudioOutputFlags>
+legacy2aidl_audio_output_flags_t_AudioOutputFlags(audio_output_flags_t legacy);
 
 ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
         int32_t aidl);
@@ -173,40 +195,43 @@
         audio_output_flags_t legacy);
 
 ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
-        const media::AudioIoFlags& aidl, media::AudioPortRole role, media::AudioPortType type);
-ConversionResult<media::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
-        const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type);
+        const media::audio::common::AudioIoFlags& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
+        const audio_io_flags& legacy, bool isInput);
 
 ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-        const media::AudioPortConfigDeviceExt& aidl);
-ConversionResult<media::AudioPortConfigDeviceExt>
-legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-        const audio_port_config_device_ext& legacy);
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+        const media::audio::common::AudioPortDeviceExt& aidl,
+        const media::AudioPortDeviceExtSys& aidlDeviceExt);
+status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+        const audio_port_config_device_ext& legacy,
+        media::audio::common::AudioPortDeviceExt* aidl,
+        media::AudioPortDeviceExtSys* aidlDeviceExt);
 
 ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
-        media::AudioStreamType aidl);
-ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
-        audio_stream_type_t legacy);
+        media::audio::common::AudioStreamType aidl);
+ConversionResult<media::audio::common::AudioStreamType>
+legacy2aidl_audio_stream_type_t_AudioStreamType(audio_stream_type_t legacy);
 
-ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
-        media::AudioSourceType aidl);
-ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+        media::audio::common::AudioSource aidl);
+ConversionResult<media::audio::common::AudioSource>
+        legacy2aidl_audio_source_t_AudioSource(
         audio_source_t legacy);
 
 ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_session_t_int32_t(audio_session_t legacy);
 
-ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
-        const media::AudioPortConfigMixExt& aidl, media::AudioPortRole role);
-ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
-        const audio_port_config_mix_ext& legacy, audio_port_role_t role);
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
+        const media::audio::common::AudioPortMixExt& aidl, media::AudioPortRole role,
+        const media::AudioPortMixExtSys& aidlMixExt);
+status_t legacy2aidl_AudioPortMixExt(
+        const audio_port_config_mix_ext& legacy, audio_port_role_t role,
+        media::audio::common::AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt);
 
 ConversionResult<audio_port_config_session_ext>
-aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
-        const media::AudioPortConfigSessionExt& aidl);
-ConversionResult<media::AudioPortConfigSessionExt>
-legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+aidl2legacy_int32_t_audio_port_config_session_ext(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
         const audio_port_config_session_ext& legacy);
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
@@ -221,7 +246,6 @@
 
 ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
         const media::AudioIoDescriptor& aidl);
-
 ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
         const sp<AudioIoDescriptor>& legacy);
 
@@ -231,13 +255,14 @@
         const AudioClient& legacy);
 
 ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl);
-ConversionResult<media::AudioContentType>
+aidl2legacy_AudioContentType_audio_content_type_t(
+        media::audio::common::AudioContentType aidl);
+ConversionResult<media::audio::common::AudioContentType>
 legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy);
 
 ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::AudioUsage aidl);
-ConversionResult<media::AudioUsage>
+aidl2legacy_AudioUsage_audio_usage_t(media::audio::common::AudioUsage aidl);
+ConversionResult<media::audio::common::AudioUsage>
 legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy);
 
 ConversionResult<audio_flags_mask_t>
@@ -256,24 +281,27 @@
 legacy2aidl_audio_attributes_t_AudioAttributesInternal(const audio_attributes_t& legacy);
 
 ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(media::AudioEncapsulationMode aidl);
-ConversionResult<media::AudioEncapsulationMode>
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(
+        media::audio::common::AudioEncapsulationMode aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMode>
 legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy);
 
 ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const media::AudioOffloadInfo& aidl);
-ConversionResult<media::AudioOffloadInfo>
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(
+        const media::audio::common::AudioOffloadInfo& aidl);
+ConversionResult<media::audio::common::AudioOffloadInfo>
 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy);
 
 ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl);
-ConversionResult<media::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy);
+aidl2legacy_AudioConfig_audio_config_t(const media::audio::common::AudioConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfig>
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput);
 
 ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl);
-ConversionResult<media::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy);
+aidl2legacy_AudioConfigBase_audio_config_base_t(
+        const media::audio::common::AudioConfigBase& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfigBase>
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput);
 
 ConversionResult<sp<IMemory>>
 aidl2legacy_SharedFileRegion_IMemory(const media::SharedFileRegion& aidl);
@@ -291,8 +319,8 @@
 legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy);
 
 ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl);
-ConversionResult<media::AudioUuid>
+aidl2legacy_AudioUuid_audio_uuid_t(const media::audio::common::AudioUuid& aidl);
+ConversionResult<media::audio::common::AudioUuid>
 legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy);
 
 ConversionResult<effect_descriptor_t>
@@ -302,8 +330,8 @@
 
 ConversionResult<audio_encapsulation_metadata_type_t>
 aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
-        media::AudioEncapsulationMetadataType aidl);
-ConversionResult<media::AudioEncapsulationMetadataType>
+        media::audio::common::AudioEncapsulationMetadataType aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMetadataType>
 legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
         audio_encapsulation_metadata_type_t legacy);
 
@@ -317,37 +345,39 @@
 ConversionResult<int32_t>
 legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy);
 
-ConversionResult<audio_mix_latency_class_t>
-aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
-        media::AudioMixLatencyClass aidl);
-ConversionResult<media::AudioMixLatencyClass>
-legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
-        audio_mix_latency_class_t legacy);
-
 ConversionResult<audio_port_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl);
-ConversionResult<media::AudioPortDeviceExt>
-legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy);
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+        const media::audio::common::AudioPortDeviceExt& aidl,
+        const media::AudioPortDeviceExtSys& aidlDeviceExt);
+status_t legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+        const audio_port_device_ext& legacy,
+        media::audio::common::AudioPortDeviceExt* aidl,
+        media::AudioPortDeviceExtSys* aidlDeviceExt);
 
 ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl);
-ConversionResult<media::AudioPortMixExt>
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy);
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+        const media::audio::common::AudioPortMixExt& aidl,
+        const media::AudioPortMixExtSys& aidlMixExt);
+status_t legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+        const audio_port_mix_ext& legacy,
+        media::audio::common::AudioPortMixExt* aidl,
+        media::AudioPortMixExtSys* aidlMixExt);
 
 ConversionResult<audio_port_session_ext>
-aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl);
-ConversionResult<media::AudioPortSessionExt>
-legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy);
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl);
+ConversionResult<int32_t>
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy);
 
 ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl);
-ConversionResult<media::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy);
+aidl2legacy_AudioProfile_audio_profile(
+        const media::audio::common::AudioProfile& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput);
 
 ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl);
-ConversionResult<media::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy);
+aidl2legacy_AudioGain_audio_gain(const media::audio::common::AudioGain& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput);
 
 ConversionResult<audio_port_v7>
 aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl);
@@ -355,8 +385,8 @@
 legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
 
 ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl);
-ConversionResult<media::AudioMode>
+aidl2legacy_AudioMode_audio_mode_t(media::audio::common::AudioMode aidl);
+ConversionResult<media::audio::common::AudioMode>
 legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy);
 
 ConversionResult<audio_unique_id_use_t>
@@ -390,21 +420,21 @@
 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(const audio_playback_rate_t& legacy);
 
 ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::AudioStandard aidl);
-ConversionResult<media::AudioStandard>
+aidl2legacy_AudioStandard_audio_standard_t(media::audio::common::AudioStandard aidl);
+ConversionResult<media::audio::common::AudioStandard>
 legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy);
 
 ConversionResult<audio_extra_audio_descriptor>
 aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
-        const media::ExtraAudioDescriptor& aidl);
-ConversionResult<media::ExtraAudioDescriptor>
+        const media::audio::common::ExtraAudioDescriptor& aidl);
+ConversionResult<media::audio::common::ExtraAudioDescriptor>
 legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
         const audio_extra_audio_descriptor& legacy);
 
 ConversionResult<audio_encapsulation_type_t>
 aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
-        const media::AudioEncapsulationType& aidl);
-ConversionResult<media::AudioEncapsulationType>
+        const media::audio::common::AudioEncapsulationType& aidl);
+ConversionResult<media::audio::common::AudioEncapsulationType>
 legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
         const audio_encapsulation_type_t & legacy);
 
@@ -416,5 +446,13 @@
 legacy2aidl_TrackSecondaryOutputInfoPair_TrackSecondaryOutputInfo(
         const TrackSecondaryOutputInfoPair& legacy);
 
+ConversionResult<audio_direct_mode_t>
+aidl2legacy_AudioDirectMode_audio_direct_mode_t(media::AudioDirectMode aidl);
+ConversionResult<media::AudioDirectMode>
+legacy2aidl_audio_direct_mode_t_AudioDirectMode(audio_direct_mode_t legacy);
+
+ConversionResult<audio_direct_mode_t> aidl2legacy_int32_t_audio_direct_mode_t_mask(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_direct_mode_t_int32_t_mask(audio_direct_mode_t legacy);
+
 
 }  // namespace android
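
Reviewer note (not part of the patch): many conversions above now take an explicit isInput flag instead of a port role/type pair, because the common AIDL types (channel layouts, configs, profiles, gains) are direction-dependent. A hypothetical round-trip sketch, assuming the VALUE_OR_RETURN_STATUS macro from AidlConversionUtil.h is available through this header:

#include <media/AidlConversion.h>

// Round-trips an output channel mask through the new AIDL representation.
// Returns OK when the conversion is lossless (illustrative check only).
android::status_t roundTripOutputChannelMask(audio_channel_mask_t legacy) {
    using namespace android;
    media::audio::common::AudioChannelLayout aidl = VALUE_OR_RETURN_STATUS(
            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy, false /*isInput*/));
    audio_channel_mask_t back = VALUE_OR_RETURN_STATUS(
            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl, false /*isInput*/));
    return back == legacy ? OK : BAD_VALUE;
}
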
diff --git a/media/libaudioclient/include/media/AidlConversionUtil.h b/media/libaudioclient/include/media/AidlConversionUtil.h
index c1a2be3..227d823 100644
--- a/media/libaudioclient/include/media/AidlConversionUtil.h
+++ b/media/libaudioclient/include/media/AidlConversionUtil.h
@@ -41,6 +41,9 @@
 #define RETURN_IF_ERROR(result) \
     if (status_t _tmp = (result); _tmp != OK) return base::unexpected(_tmp);
 
+#define RETURN_STATUS_IF_ERROR(result) \
+    if (status_t _tmp = (result); _tmp != OK) return _tmp;
+
 #define VALUE_OR_RETURN_STATUS(x)           \
     ({                                      \
        auto _tmp = (x);                     \
@@ -119,6 +122,62 @@
     return output;
 }
 
+/**
+ * A generic template that helps convert containers of convertible types
+ * using an item conversion function with an additional parameter.
+ */
+template<typename OutputContainer, typename InputContainer, typename Func, typename Parameter>
+ConversionResult<OutputContainer>
+convertContainer(const InputContainer& input, const Func& itemConversion, const Parameter& param) {
+    OutputContainer output;
+    auto ins = std::inserter(output, output.begin());
+    for (const auto& item : input) {
+        *ins = VALUE_OR_RETURN(itemConversion(item, param));
+    }
+    return output;
+}
+
+/**
+ * A generic template that helps to "zip" two input containers of the same size
+ * into a single output container of converted types. The conversion function
+ * must thus accept two arguments.
+ */
+template<typename OutputContainer, typename InputContainer1,
+        typename InputContainer2, typename Func>
+ConversionResult<OutputContainer>
+convertContainers(const InputContainer1& input1, const InputContainer2& input2,
+        const Func& itemConversion) {
+    auto iter2 = input2.begin();
+    OutputContainer output;
+    auto ins = std::inserter(output, output.begin());
+    for (const auto& item1 : input1) {
+        RETURN_IF_ERROR(iter2 != input2.end() ? OK : BAD_VALUE);
+        *ins = VALUE_OR_RETURN(itemConversion(item1, *iter2++));
+    }
+    return output;
+}
+
+/**
+ * A generic template that helps to "unzip" an input container into a pair of
+ * output containers, using a per-element conversion. The conversion function
+ * must emit a pair of elements.
+ */
+template<typename OutputContainer1, typename OutputContainer2,
+        typename InputContainer, typename Func>
+ConversionResult<std::pair<OutputContainer1, OutputContainer2>>
+convertContainerSplit(const InputContainer& input, const Func& itemConversion) {
+    OutputContainer1 output1;
+    OutputContainer2 output2;
+    auto ins1 = std::inserter(output1, output1.begin());
+    auto ins2 = std::inserter(output2, output2.begin());
+    for (const auto& item : input) {
+        auto out_pair = VALUE_OR_RETURN(itemConversion(item));
+        *ins1 = out_pair.first;
+        *ins2 = out_pair.second;
+    }
+    return std::make_pair(output1, output2);
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // The code below establishes:
 // IntegralTypeOf<T>, which works for either integral types (in which case it evaluates to T), or
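
Reviewer note (not part of the patch): a self-contained sketch of the new container helpers using trivial item conversions; labelWithIndex, splitPair, and demo are hypothetical names, while the real call sites convert between legacy audio structs and their AIDL counterparts.

#include <string>
#include <utility>
#include <vector>

#include <media/AidlConversionUtil.h>

namespace example {

// Two-argument item conversion for convertContainers ("zip").
android::ConversionResult<std::string> labelWithIndex(const std::string& s, int index) {
    return std::to_string(index) + ":" + s;
}

// Pair-emitting item conversion for convertContainerSplit ("unzip").
android::ConversionResult<std::pair<int, int>> splitPair(int v) {
    return std::make_pair(v / 10, v % 10);
}

void demo() {
    // Zip: walks both containers in lockstep; returns BAD_VALUE if the second runs out first.
    auto zipped = android::convertContainers<std::vector<std::string>>(
            std::vector<std::string>{"a", "b"}, std::vector<int>{0, 1}, labelWithIndex);
    // Unzip: one pass over the input, filling two output containers.
    auto split = android::convertContainerSplit<std::vector<int>, std::vector<int>>(
            std::vector<int>{42, 17}, splitPair);
    (void)zipped; (void)split;
}

}  // namespace example
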
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 5dfe5fc..862a0f9 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -17,9 +17,75 @@
 
 #pragma once
 
+#include <functional>
+
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioDeviceDescription.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
+#include <binder/Parcelable.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
-#include <binder/Parcelable.h>
+
+namespace {
+// see boost::hash_combine
+#if defined(__clang__)
+__attribute__((no_sanitize("unsigned-integer-overflow")))
+#endif
+static size_t hash_combine(size_t seed, size_t v) {
+    return std::hash<size_t>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+}
+
+namespace std {
+
+// Note: when extending the types hashed below we need to account for the
+// possibility of processing instances belonging to different versions of the type,
+// e.g. a HAL may be using a previous version of the AIDL interface.
+
+template<> struct hash<android::media::audio::common::AudioChannelLayout>
+{
+    std::size_t operator()(
+            const android::media::audio::common::AudioChannelLayout& acl) const noexcept {
+        using Tag = android::media::audio::common::AudioChannelLayout::Tag;
+        const size_t seed = std::hash<Tag>{}(acl.getTag());
+        switch (acl.getTag()) {
+            case Tag::none:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::none>()));
+            case Tag::invalid:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::invalid>()));
+            case Tag::indexMask:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::indexMask>()));
+            case Tag::layoutMask:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::layoutMask>()));
+            case Tag::voiceMask:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::voiceMask>()));
+        }
+        return seed;
+    }
+};
+
+template<> struct hash<android::media::audio::common::AudioDeviceDescription>
+{
+    std::size_t operator()(
+            const android::media::audio::common::AudioDeviceDescription& add) const noexcept {
+        return hash_combine(
+                std::hash<android::media::audio::common::AudioDeviceType>{}(add.type),
+                std::hash<std::string>{}(add.connection));
+    }
+};
+
+template<> struct hash<android::media::audio::common::AudioFormatDescription>
+{
+    std::size_t operator()(
+            const android::media::audio::common::AudioFormatDescription& afd) const noexcept {
+        return hash_combine(
+                std::hash<android::media::audio::common::AudioFormatType>{}(afd.type),
+                hash_combine(
+                        std::hash<android::media::audio::common::PcmType>{}(afd.pcm),
+                        std::hash<std::string>{}(afd.encoding)));
+    }
+};
+}  // namespace std
 
 namespace android {
 
@@ -41,8 +107,43 @@
     return !(lhs==rhs);
 }
 
+constexpr bool operator==(const audio_offload_info_t &lhs, const audio_offload_info_t &rhs)
+{
+    return lhs.version == rhs.version && lhs.size == rhs.size &&
+           lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+           lhs.format == rhs.format && lhs.stream_type == rhs.stream_type &&
+           lhs.bit_rate == rhs.bit_rate && lhs.duration_us == rhs.duration_us &&
+           lhs.has_video == rhs.has_video && lhs.is_streaming == rhs.is_streaming &&
+           lhs.bit_width == rhs.bit_width && lhs.offload_buffer_size == rhs.offload_buffer_size &&
+           lhs.usage == rhs.usage && lhs.encapsulation_mode == rhs.encapsulation_mode &&
+           lhs.content_id == rhs.content_id && lhs.sync_id == rhs.sync_id;
+}
+constexpr bool operator!=(const audio_offload_info_t &lhs, const audio_offload_info_t &rhs)
+{
+    return !(lhs==rhs);
+}
+
+constexpr bool operator==(const audio_config_t &lhs, const audio_config_t &rhs)
+{
+    return lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+           lhs.format == rhs.format && lhs.offload_info == rhs.offload_info;
+}
+constexpr bool operator!=(const audio_config_t &lhs, const audio_config_t &rhs)
+{
+    return !(lhs==rhs);
+}
+
+constexpr bool operator==(const audio_config_base_t &lhs, const audio_config_base_t &rhs)
+{
+    return lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+           lhs.format == rhs.format;
+}
+constexpr bool operator!=(const audio_config_base_t &lhs, const audio_config_base_t &rhs)
+{
+    return !(lhs==rhs);
+}
+
 enum volume_group_t : uint32_t;
 static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);
 
 } // namespace android
-
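
Reviewer note (not part of the patch): the std::hash specializations above let the new common AIDL types serve as keys in unordered containers, on the assumption that the generated parcelables provide operator==. A hypothetical sketch:

#include <unordered_map>
#include <unordered_set>

#include <media/AudioCommonTypes.h>

using android::media::audio::common::AudioDeviceDescription;
using android::media::audio::common::AudioFormatDescription;

void trackUsage() {
    std::unordered_set<AudioFormatDescription> seenFormats;
    std::unordered_map<AudioDeviceDescription, int> openCountPerDevice;

    AudioFormatDescription format;  // default-constructed, for illustration only
    AudioDeviceDescription device;  // default-constructed, for illustration only
    seenFormats.insert(format);
    ++openCountPerDevice[device];
}
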
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index 3c19ec1..ee262f3 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -40,7 +40,7 @@
 
 // ----------------------------------------------------------------------------
 
-class AudioEffect : public RefBase
+class AudioEffect : public virtual RefBase
 {
 public:
 
@@ -283,7 +283,8 @@
         EVENT_CONTROL_STATUS_CHANGED = 0,
         EVENT_ENABLE_STATUS_CHANGED = 1,
         EVENT_PARAMETER_CHANGED = 2,
-        EVENT_ERROR = 3
+        EVENT_ERROR = 3,
+        EVENT_FRAMES_PROCESSED = 4,
     };
 
     /* Callback function notifying client application of a change in effect engine state or
@@ -389,7 +390,8 @@
                             audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
                             audio_io_handle_t io = AUDIO_IO_HANDLE_NONE,
                             const AudioDeviceTypeAddr& device = {},
-                            bool probe = false);
+                            bool probe = false,
+                            bool notifyFramesProcessed = false);
     /*
      * Same as above but with type and uuid specified by character strings.
      */
@@ -401,7 +403,8 @@
                             audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
                             audio_io_handle_t io = AUDIO_IO_HANDLE_NONE,
                             const AudioDeviceTypeAddr& device = {},
-                            bool probe = false);
+                            bool probe = false,
+                            bool notifyFramesProcessed = false);
 
     /* Result of constructing the AudioEffect. This must be checked
      * before using any AudioEffect API.
@@ -552,6 +555,7 @@
      virtual void commandExecuted(int32_t cmdCode,
                                   const std::vector<uint8_t>& cmdData,
                                   const std::vector<uint8_t>& replyData);
+     virtual void framesProcessed(int32_t frames);
 
 private:
 
@@ -587,6 +591,14 @@
             }
             return binder::Status::ok();
         }
+        binder::Status framesProcessed(int32_t frames) override {
+            sp<AudioEffect> effect = mEffect.promote();
+            if (effect != 0) {
+                effect->framesProcessed(frames);
+            }
+            return binder::Status::ok();
+        }
+
 
         // IBinder::DeathRecipient
         virtual void binderDied(const wp<IBinder>& /*who*/) {
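
Reviewer note (not part of the patch): EVENT_FRAMES_PROCESSED and framesProcessed() add a per-buffer notification path that is only active when the effect is created with notifyFramesProcessed = true. A hypothetical subclass sketch; CountingEffect is an illustrative name:

#define LOG_TAG "CountingEffect"

#include <media/AudioEffect.h>
#include <utils/Log.h>

class CountingEffect : public android::AudioEffect {
  public:
    using android::AudioEffect::AudioEffect;

    void framesProcessed(int32_t frames) override {
        mTotalFrames += frames;
        ALOGV("processed %d frames, %lld in total", frames, (long long)mTotalFrames);
    }

  private:
    int64_t mTotalFrames = 0;
};
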
diff --git a/media/libaudioclient/include/media/AudioIoDescriptor.h b/media/libaudioclient/include/media/AudioIoDescriptor.h
index 981d33a..405ec7d 100644
--- a/media/libaudioclient/include/media/AudioIoDescriptor.h
+++ b/media/libaudioclient/include/media/AudioIoDescriptor.h
@@ -17,9 +17,15 @@
 #ifndef ANDROID_AUDIO_IO_DESCRIPTOR_H
 #define ANDROID_AUDIO_IO_DESCRIPTOR_H
 
+#include <sstream>
+#include <string>
+
+#include <system/audio.h>
+#include <utils/RefBase.h>
+
 namespace android {
 
-enum audio_io_config_event {
+enum audio_io_config_event_t {
     AUDIO_OUTPUT_REGISTERED,
     AUDIO_OUTPUT_OPENED,
     AUDIO_OUTPUT_CLOSED,
@@ -33,41 +39,70 @@
 
 // audio input/output descriptor used to cache output configurations in client process to avoid
 // frequent calls through IAudioFlinger
-class AudioIoDescriptor : public RefBase {
+class AudioIoDescriptor : public virtual RefBase {
 public:
-    AudioIoDescriptor() :
-        mIoHandle(AUDIO_IO_HANDLE_NONE),
-        mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(AUDIO_CHANNEL_NONE),
-        mFrameCount(0), mFrameCountHAL(0), mLatency(0), mPortId(AUDIO_PORT_HANDLE_NONE)
-    {
-        memset(&mPatch, 0, sizeof(struct audio_patch));
-    }
+    AudioIoDescriptor() = default;
+    // For AUDIO_{INPUT|OUTPUT}_CLOSED events.
+    AudioIoDescriptor(audio_io_handle_t ioHandle) : mIoHandle(ioHandle) {}
+    // For AUDIO_CLIENT_STARTED events.
+    AudioIoDescriptor(
+            audio_io_handle_t ioHandle, const audio_patch& patch, audio_port_handle_t portId) :
+            mIoHandle(ioHandle), mPatch(patch), mPortId(portId) {}
+    // For everything else.
+    AudioIoDescriptor(
+            audio_io_handle_t ioHandle, const audio_patch& patch, bool isInput,
+            uint32_t samplingRate, audio_format_t format, audio_channel_mask_t channelMask,
+            size_t frameCount, size_t frameCountHal, uint32_t latency = 0,
+            audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE) :
+            mIoHandle(ioHandle), mPatch(patch), mIsInput(isInput),
+            mSamplingRate(samplingRate), mFormat(format), mChannelMask(channelMask),
+            mFrameCount(frameCount), mFrameCountHAL(frameCountHal), mLatency(latency),
+            mPortId(portId) {}
 
-    virtual ~AudioIoDescriptor() {}
-
-    audio_port_handle_t getDeviceId() {
+    audio_io_handle_t getIoHandle() const { return mIoHandle; }
+    const audio_patch& getPatch() const { return mPatch; }
+    bool getIsInput() const { return mIsInput; }
+    uint32_t getSamplingRate() const { return mSamplingRate; }
+    audio_format_t getFormat() const { return mFormat; }
+    audio_channel_mask_t getChannelMask() const { return mChannelMask; }
+    size_t getFrameCount() const { return mFrameCount; }
+    size_t getFrameCountHAL() const { return mFrameCountHAL; }
+    uint32_t getLatency() const { return mLatency; }
+    audio_port_handle_t getPortId() const { return mPortId; }
+    audio_port_handle_t getDeviceId() const {
         if (mPatch.num_sources != 0 && mPatch.num_sinks != 0) {
-            if (mPatch.sources[0].type == AUDIO_PORT_TYPE_MIX) {
-                // this is an output mix
-                // FIXME: the API only returns the first device in case of multiple device selection
-                return mPatch.sinks[0].id;
-            } else {
-                // this is an input mix
-                return mPatch.sources[0].id;
-            }
+            // FIXME: the API only returns the first device in case of multiple device selection
+            return mIsInput ? mPatch.sources[0].id : mPatch.sinks[0].id;
         }
         return AUDIO_PORT_HANDLE_NONE;
     }
+    void setPatch(const audio_patch& patch) { mPatch = patch; }
 
-    audio_io_handle_t       mIoHandle;
-    struct audio_patch      mPatch;
-    uint32_t                mSamplingRate;
-    audio_format_t          mFormat;
-    audio_channel_mask_t    mChannelMask;
-    size_t                  mFrameCount;
-    size_t                  mFrameCountHAL;
-    uint32_t                mLatency;   // only valid for output
-    audio_port_handle_t     mPortId;    // valid for event AUDIO_CLIENT_STARTED
+    std::string toDebugString() const {
+        std::ostringstream ss;
+        ss << mIoHandle << ", samplingRate " << mSamplingRate << ", "
+           << audio_format_to_string(mFormat) << ", "
+           << (audio_channel_mask_get_representation(mChannelMask) ==
+                   AUDIO_CHANNEL_REPRESENTATION_INDEX ?
+                   audio_channel_index_mask_to_string(mChannelMask) :
+                   (mIsInput ? audio_channel_in_mask_to_string(mChannelMask) :
+                           audio_channel_out_mask_to_string(mChannelMask)))
+           << ", frameCount " << mFrameCount << ", frameCountHAL " << mFrameCountHAL
+           << ", deviceId " << getDeviceId();
+        return ss.str();
+    }
+
+  private:
+    const audio_io_handle_t    mIoHandle = AUDIO_IO_HANDLE_NONE;
+          struct audio_patch   mPatch = {};
+    const bool                 mIsInput = false;
+    const uint32_t             mSamplingRate = 0;
+    const audio_format_t       mFormat = AUDIO_FORMAT_DEFAULT;
+    const audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+    const size_t               mFrameCount = 0;
+    const size_t               mFrameCountHAL = 0;
+    const uint32_t             mLatency = 0;
+    const audio_port_handle_t  mPortId = AUDIO_PORT_HANDLE_NONE;
 };
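
Reviewer note (not part of the patch): the descriptor is now immutable and fully initialized at construction instead of being filled field by field. A hypothetical construction sketch; the numeric values are placeholders only:

#define LOG_TAG "AudioIoDescriptorExample"

#include <media/AudioIoDescriptor.h>
#include <utils/Log.h>

android::sp<android::AudioIoDescriptor> makeOutputDescriptor(
        audio_io_handle_t handle, const struct audio_patch& patch) {
    auto desc = android::sp<android::AudioIoDescriptor>::make(
            handle, patch, false /*isInput*/, 48000 /*samplingRate*/,
            AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
            960 /*frameCount*/, 960 /*frameCountHal*/, 20 /*latency*/);
    ALOGV("%s", desc->toDebugString().c_str());
    return desc;
}
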
 
 
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 326919a..3cfcbf3 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -47,7 +47,7 @@
 {
 public:
 
-    /* Events used by AudioRecord callback function (callback_t).
+    /* Events used by AudioRecord callback function (legacy_callback_t).
      * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
      */
     enum event_type {
@@ -65,7 +65,7 @@
     };
 
     /* Client should declare a Buffer and pass address to obtainBuffer()
-     * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
+     * and releaseBuffer().  See also legacy_callback_t for EVENT_MORE_DATA.
      */
 
     class Buffer
@@ -117,7 +117,28 @@
      *          - EVENT_NEW_IAUDIORECORD: unused.
      */
 
-    typedef void (*callback_t)(int event, void* user, void *info);
+    typedef void (*legacy_callback_t)(int event, void* user, void *info);
+
+    class IAudioRecordCallback : public virtual RefBase {
+        friend AudioRecord;
+     protected:
+        // Request for client to read newly available data.
+        // Used for TRANSFER_CALLBACK mode.
+        // Parameters:
+        //  - buffer : Buffer to read from
+        // Returns:
+        //  - Number of bytes actually consumed.
+        virtual size_t onMoreData([[maybe_unused]] const AudioRecord::Buffer& buffer) { return 0; }
+        // A buffer overrun occurred.
+        virtual void onOverrun() {}
+        // Record head is at the specified marker (see setMarkerPosition()).
+        virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
+        // Record head is at a new position (see setPositionUpdatePeriod()).
+        virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+        // IAudioRecord was recreated due to re-routing, server invalidation or
+        // server crash.
+        virtual void onNewIAudioRecord() {}
+    };
 
     /* Returns the minimum frame count required for the successful creation of
      * an AudioRecord object.
@@ -182,20 +203,37 @@
      * pAttributes:        If not NULL, supersedes inputSource for use case selection.
      * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
-
                         AudioRecord(audio_source_t inputSource,
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
                                     const android::content::AttributionSourceState& client,
                                     size_t frameCount = 0,
-                                    callback_t cbf = NULL,
-                                    void* user = NULL,
+                                    const wp<IAudioRecordCallback> &callback = nullptr,
                                     uint32_t notificationFrames = 0,
                                     audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                                    audio_microphone_direction_t
+                                        selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+                                    float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
+
+
+                        AudioRecord(audio_source_t inputSource,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    const android::content::AttributionSourceState& client,
+                                    size_t frameCount,
+                                    legacy_callback_t callback,
+                                    void* user,
+                                    uint32_t notificationFrames = 0,
+                                    audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                                     audio_microphone_direction_t
                                         selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
@@ -223,13 +261,12 @@
      *
      * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
      */
-            status_t    set(audio_source_t inputSource,
+            status_t    set(audio_source_t inputSource,
                             uint32_t sampleRate,
                             audio_format_t format,
                             audio_channel_mask_t channelMask,
                             size_t frameCount = 0,
-                            callback_t cbf = NULL,
-                            void* user = NULL,
+                            const wp<IAudioRecordCallback> &callback = nullptr,
                             uint32_t notificationFrames = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
@@ -237,7 +274,28 @@
                             audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
                             uid_t uid = AUDIO_UID_INVALID,
                             pid_t pid = -1,
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
+                            audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                            audio_microphone_direction_t
+                                selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+                            float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT,
+                            int32_t maxSharedAudioHistoryMs = 0);
+
+            status_t    set(audio_source_t inputSource,
+                            uint32_t sampleRate,
+                            audio_format_t format,
+                            audio_channel_mask_t channelMask,
+                            size_t frameCount,
+                            legacy_callback_t callback,
+                            void* user,
+                            uint32_t notificationFrames = 0,
+                            bool threadCanCallJava = false,
+                            audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                            uid_t uid = AUDIO_UID_INVALID,
+                            pid_t pid = -1,
+                            const audio_attributes_t* pAttributes = nullptr,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                             audio_microphone_direction_t
                                 selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
@@ -264,6 +322,7 @@
             size_t      frameCount() const  { return mFrameCount; }
             size_t      frameSize() const   { return mFrameSize; }
             audio_source_t inputSource() const  { return mAttributes.source; }
+            audio_channel_mask_t channelMask() const { return mChannelMask; }
 
     /*
      * Return the period of the notification callback in frames.
@@ -672,8 +731,9 @@
     bool                    mActive;
 
     // for client callback handler
-    callback_t              mCbf;                   // callback handler for events, or NULL
-    void*                   mUserData;
+
+    wp<IAudioRecordCallback> mCallback;
+    sp<IAudioRecordCallback> mLegacyCallbackWrapper;
 
     // for notification APIs
     uint32_t                mNotificationFramesReq; // requested number of frames between each
@@ -759,6 +819,13 @@
     bool                    mTimestampRetrogradePositionReported = false; // reduce log spam
     bool                    mTimestampRetrogradeTimeReported = false;     // reduce log spam
 
+    // Format conversion. May be needed when adding fast tracks whose format differs from the server's.
+    audio_config_base_t     mServerConfig;
+    size_t                  mServerFrameSize;
+    size_t                  mServerSampleSize;
+    std::unique_ptr<uint8_t[]> mFormatConversionBufRaw;
+    Buffer                  mFormatConversionBuffer;
+
 private:
     class DeathNotifier : public IBinder::DeathRecipient {
     public:
@@ -823,6 +890,8 @@
     MediaMetrics mMediaMetrics;
     std::string mMetricsId;  // GUARDED_BY(mLock), could change in createRecord_l().
     std::string mCallerName; // for example "aaudio"
+
+    void reportError(status_t status, const char *event, const char *message) const;
 };
 
 }; // namespace android
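For illustration only (not part of this change): a minimal sketch of how a client could adopt the IAudioRecordCallback interface declared above instead of the legacy function-pointer callback. MyRecordCallback, RecorderHandle and consumeAudio() are hypothetical names, and error handling is omitted; the constructor arguments follow the new declaration shown in this hunk.

#include <media/AudioRecord.h>

using namespace android;

// Hypothetical application sink: consumes the recorded bytes, returns bytes consumed.
size_t consumeAudio(const AudioRecord::Buffer& buffer);

// Client-side callback used in TRANSFER_CALLBACK mode.
class MyRecordCallback : public AudioRecord::IAudioRecordCallback {
  protected:
    size_t onMoreData(const AudioRecord::Buffer& buffer) override {
        return consumeAudio(buffer);  // number of bytes actually consumed
    }
    void onOverrun() override { /* e.g. bump an overrun counter */ }
};

struct RecorderHandle {
    sp<MyRecordCallback> callback;  // must outlive the record: AudioRecord keeps only a wp<>
    sp<AudioRecord> record;
};

RecorderHandle createRecorder(const content::AttributionSourceState& attributionSource) {
    RecorderHandle h;
    h.callback = new MyRecordCallback();
    h.record = new AudioRecord(AUDIO_SOURCE_MIC, 48000 /*sampleRate*/,
                               AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO,
                               attributionSource, 0 /*frameCount*/, h.callback);
    return h;
}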
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 8ba23ad..4280a6a 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -19,24 +19,30 @@
 
 #include <sys/types.h>
 
+#include <set>
+#include <vector>
+
+#include <android/content/AttributionSourceState.h>
 #include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerClient.h>
 #include <android/media/BnAudioPolicyServiceClient.h>
-#include <android/content/AttributionSourceState.h>
+#include <android/media/INativeSpatializerCallback.h>
+#include <android/media/ISpatializer.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/AidlConversionUtil.h>
+#include <media/AudioContainers.h>
 #include <media/AudioDeviceTypeAddr.h>
 #include <media/AudioPolicy.h>
 #include <media/AudioProductStrategy.h>
 #include <media/AudioVolumeGroup.h>
 #include <media/AudioIoDescriptor.h>
 #include <media/MicrophoneInfo.h>
-#include <set>
 #include <system/audio.h>
 #include <system/audio_effect.h>
 #include <system/audio_policy.h>
 #include <utils/Errors.h>
 #include <utils/Mutex.h>
-#include <vector>
 
 using android::content::AttributionSourceState;
 
@@ -225,6 +231,9 @@
     // Indicate JAVA services are ready (scheduling, power management ...)
     static status_t systemReady();
 
+    // Indicate audio policy service is ready
+    static status_t audioPolicyReady();
+
     // Returns the number of frames per audio HAL buffer.
     // Corresponds to audio_stream->get_buffer_size()/audio_stream_in_frame_size() for input.
     // See also getFrameCount().
@@ -318,7 +327,7 @@
     static status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
 
     static product_strategy_t getStrategyForStream(audio_stream_type_t stream);
-    static audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+    static DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
     static status_t getDevicesForAttributes(const AudioAttributes &aa,
                                             AudioDeviceTypeAddrVector *devices);
 
@@ -415,8 +424,8 @@
 
     static status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
-    static status_t getHwOffloadEncodingFormatsSupportedForA2DP(
-                                    std::vector<audio_format_t> *formats);
+    static status_t getHwOffloadFormatsSupportedForBluetoothMedia(
+                                    audio_devices_t device, std::vector<audio_format_t> *formats);
 
     // numSurroundFormats holds the maximum number of formats and bool value allowed in the array.
     // When numSurroundFormats is 0, surroundFormats and surroundFormatsEnabled will not be
@@ -435,6 +444,8 @@
 
     static bool     isHapticPlaybackSupported();
 
+    static bool     isUltrasoundSupported();
+
     static status_t listAudioProductStrategies(AudioProductStrategyVector &strategies);
     static status_t getProductStrategyFromAudioAttributes(
             const AudioAttributes &aa, product_strategy_t &productStrategy,
@@ -485,8 +496,74 @@
     static status_t getDeviceForStrategy(product_strategy_t strategy,
             AudioDeviceTypeAddr &device);
 
+
+    /**
+     * If a spatializer stage effect is present on the platform, this will return an
+     * ISpatializer interface to control this feature.
+     * If no spatializer stage is present, a null interface is returned.
+     * The INativeSpatializerCallback passed must not be null.
+     * Only one ISpatializer interface can exist at a given time. The native audio policy
+     * service will reject the request if an interface was already acquired and the previous
+     * owner has neither died nor called ISpatializer.release().
+     * @param callback in: the callback to receive state updates if the ISpatializer
+     *        interface is acquired.
+     * @param spatializer out: the ISpatializer interface made available to control the
+     *        platform spatializer
+     * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, PERMISSION_DENIED, BAD_VALUE
+     *         in case of error.
+     */
+    static status_t getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+                                        sp<media::ISpatializer>* spatializer);
+
+    /**
+     * Queries if some kind of spatialization will be performed if the audio playback context
+     * described by the provided arguments is present.
+     * The context is made of:
+     * - The audio attributes describing the playback use case.
+     * - The audio configuration describing the audio format, channels, sampling rate ...
+     * - The devices describing the sink audio device selected for playback.
+     * All arguments are optional and only the specified arguments are used to match against
+     * supported criteria. For instance, supplying no arguments reports whether spatialization
+     * is supported in general.
+     * @param attr audio attributes describing the playback use case
+     * @param config audio configuration describing the audio format, channels, sampling rate...
+     * @param devices the sink audio device selected for playback
+     * @param canBeSpatialized out: true if spatialization is enabled for this context,
+     *        false otherwise
+     * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, BAD_VALUE
+     *         in case of error.
+     */
+    static status_t canBeSpatialized(const audio_attributes_t *attr,
+                                     const audio_config_t *config,
+                                     const AudioDeviceTypeAddrVector &devices,
+                                     bool *canBeSpatialized);
+
+    /**
+     * Query how the direct playback is currently supported on the device.
+     * @param attr audio attributes describing the playback use case
+     * @param config audio configuration for the playback
+     * @param directMode out: a set of flags describing how the direct playback is currently
+     *        supported on the device
+     * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, BAD_VALUE, PERMISSION_DENIED
+     *         in case of error.
+     */
+    static status_t getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                             const audio_config_t *config,
+                                             audio_direct_mode_t *directMode);
+
+
+    /**
+     * Query which direct audio profiles are available for the specified audio attributes.
+     * @param attr audio attributes describing the playback use case
+     * @param audioProfiles out: a vector of audio profiles
+     * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, BAD_VALUE, PERMISSION_DENIED
+     *         in case of error.
+     */
+    static status_t getDirectProfilesForAttributes(const audio_attributes_t* attr,
+                                            std::vector<audio_profile>* audioProfiles);
+
     // A listener for capture state changes.
-    class CaptureStateListener : public RefBase {
+    class CaptureStateListener : public virtual RefBase {
     public:
         // Called whenever capture state changes.
         virtual void onStateChanged(bool active) = 0;
@@ -497,11 +574,11 @@
         virtual ~CaptureStateListener() = default;
     };
 
-    // Regiseters a listener for sound trigger capture state changes.
+    // Registers a listener for sound trigger capture state changes.
     // There may only be one such listener registered at any point.
-    // The listener onStateChanged() method will be invoked sychronously from
+    // The listener onStateChanged() method will be invoked synchronously from
     // this call with the initial value.
-    // The listener onServiceDied() method will be invoked sychronously from
+    // The listener onServiceDied() method will be invoked synchronously from
     // this call if initial attempt to register failed.
     // If the audio policy service cannot be reached, this method will return
     // PERMISSION_DENIED and will not invoke the callback, otherwise, it will
@@ -511,7 +588,7 @@
 
     // ----------------------------------------------------------------------------
 
-    class AudioVolumeGroupCallback : public RefBase
+    class AudioVolumeGroupCallback : public virtual RefBase
     {
     public:
 
@@ -526,7 +603,7 @@
     static status_t addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
     static status_t removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
 
-    class AudioPortCallback : public RefBase
+    class AudioPortCallback : public virtual RefBase
     {
     public:
 
@@ -542,7 +619,7 @@
     static status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
     static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
 
-    class AudioDeviceCallback : public RefBase
+    class AudioDeviceCallback : public virtual RefBase
     {
     public:
 
@@ -564,6 +641,14 @@
 
     static status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
 
+    static status_t getMmapPolicyInfo(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+    static int32_t getAAudioMixerBurstCount();
+
+    static int32_t getAAudioHardwareBurstMinUsec();
+
 private:
 
     class AudioFlingerClient: public IBinder::DeathRecipient, public media::BnAudioFlingerClient
@@ -640,12 +725,12 @@
         binder::Status onRecordingConfigurationUpdate(
                 int32_t event,
                 const media::RecordClientInfo& clientInfo,
-                const media::AudioConfigBase& clientConfig,
+                const media::audio::common::AudioConfigBase& clientConfig,
                 const std::vector<media::EffectDescriptor>& clientEffects,
-                const media::AudioConfigBase& deviceConfig,
+                const media::audio::common::AudioConfigBase& deviceConfig,
                 const std::vector<media::EffectDescriptor>& effects,
                 int32_t patchHandle,
-                media::AudioSourceType source) override;
+                media::audio::common::AudioSource source) override;
         binder::Status onRoutingUpdated();
 
     private:
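For illustration only (not part of this change): a minimal sketch of querying the spatialization and direct-playback entry points documented above. The caller is assumed to supply a filled-in audio_attributes_t and audio_config_t; passing an empty device list relies on the "all arguments are optional" matching described in the canBeSpatialized() comment.

#include <media/AudioSystem.h>

using namespace android;

// Returns true if the given playback context can either be spatialized or
// played through a direct output path.
bool supportsSpatialOrDirect(const audio_attributes_t& attr, const audio_config_t& config) {
    bool canBeSpatialized = false;
    // Empty device list: do not constrain the query by sink device.
    if (AudioSystem::canBeSpatialized(&attr, &config, {}, &canBeSpatialized) != NO_ERROR) {
        return false;
    }
    audio_direct_mode_t directMode = AUDIO_DIRECT_NOT_SUPPORTED;
    if (AudioSystem::getDirectPlaybackSupport(&attr, &config, &directMode) != NO_ERROR) {
        return false;
    }
    return canBeSpatialized || directMode != AUDIO_DIRECT_NOT_SUPPORTED;
}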
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index cb00990..16e10b5 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -28,6 +28,7 @@
 #include <utils/threads.h>
 #include <android/content/AttributionSourceState.h>
 
+#include <chrono>
 #include <string>
 
 #include "android/media/BnAudioTrackCallback.h"
@@ -145,7 +146,79 @@
      *          - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
      */
 
-    typedef void (*callback_t)(int event, void* user, void *info);
+    typedef void (*legacy_callback_t)(int event, void* user, void* info);
+    class IAudioTrackCallback : public virtual RefBase {
+      friend AudioTrack;
+      protected:
+       /* Request to write more data to buffer.
+        * This event only occurs for TRANSFER_CALLBACK.
+        * If this event is delivered but the callback handler does not want to write more data,
+        * the handler must ignore the event by returning zero.
+        * This might occur, for example, if the application is waiting for source data or is at
+        * the end of stream.
+        * For data filling, it is preferred that the callback does not block and instead returns
+        * a short count of the amount of data actually delivered.
+        * Parameters:
+        *  - buffer: Buffer to fill
+        * Returns:
+        * Amount of data actually written in bytes.
+        */
+        virtual size_t onMoreData([[maybe_unused]] const AudioTrack::Buffer& buffer) { return 0; }
+
+        // Buffer underrun occurred. This will not occur for static tracks.
+        virtual void onUnderrun() {}
+
+       /* Sample loop end was reached; playback restarted from loop start if loop count was not 0
+        * for a static track.
+        * Parameters:
+        *  - loopsRemaining: Number of loops remaining to be played. -1 if infinite looping.
+        */
+        virtual void onLoopEnd([[maybe_unused]] int32_t loopsRemaining) {}
+
+       /* Playback head is at the specified marker (See setMarkerPosition()).
+        * Parameters:
+        *  - markerPosition: Marker position in frames
+        */
+        virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
+
+       /* Playback head is at a new position (See setPositionUpdatePeriod()).
+        * Parameters:
+        *  - newPos: New position in frames
+        */
+        virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+
+        // Playback has completed for a static track.
+        virtual void onBufferEnd() {}
+
+        // IAudioTrack was re-created due to re-routing, voluntary invalidation by
+        // mediaserver, or a mediaserver crash.
+        virtual void onNewIAudioTrack() {}
+
+        // Sent after all the buffers queued in AF and HW are played back (after stop is called)
+        // for an offloaded track.
+        virtual void onStreamEnd() {}
+
+       /* Delivered periodically and when there's a significant change
+        * in the mapping from frame position to presentation time.
+        * See AudioTimestamp for the information included with event.
+        * TODO not yet implemented.
+        * Parameters:
+        *  - timestamp: New frame position and presentation time mapping.
+        */
+        virtual void onNewTimestamp([[maybe_unused]] AudioTimestamp timestamp) {}
+
+       /* Notification that more data can be given by write()
+        * This event only occurs for TRANSFER_SYNC_NOTIF_CALLBACK.
+        * Similar to onMoreData(), return the amount of data actually written, in bytes.
+        * Parameters:
+        *  - buffer: Buffer to fill
+        * Returns:
+        * Amount of data actually written in bytes.
+        */
+        virtual size_t onCanWriteMoreData([[maybe_unused]] const AudioTrack::Buffer& buffer) {
+            return 0;
+        }
+    };
 
     /* Returns the minimum frame count required for the successful creation of
      * an AudioTrack object.
@@ -256,15 +329,34 @@
                                     audio_channel_mask_t channelMask,
                                     size_t frameCount    = 0,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                    callback_t cbf       = NULL,
-                                    void* user           = NULL,
+                                    const wp<IAudioTrackCallback>& callback = nullptr,
                                     int32_t notificationFrames = 0,
                                     audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
-                                    const audio_offload_info_t *offloadInfo = NULL,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
                                     const AttributionSourceState& attributionSource =
                                         AttributionSourceState(),
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    bool doNotReconnect = false,
+                                    float maxRequiredSpeed = 1.0f,
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
+
+                        AudioTrack( audio_stream_type_t streamType,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    size_t frameCount,
+                                    audio_output_flags_t flags,
+                                    legacy_callback_t cbf,
+                                    void* user = nullptr,
+                                    int32_t notificationFrames = 0,
+                                    audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
+                                    const AttributionSourceState& attributionSource =
+                                        AttributionSourceState(),
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     bool doNotReconnect = false,
                                     float maxRequiredSpeed = 1.0f,
                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
@@ -280,22 +372,39 @@
      * It is recommended to pass a callback function to be notified of playback end by an
      * EVENT_UNDERRUN event.
      */
-
                         AudioTrack( audio_stream_type_t streamType,
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
                                     const sp<IMemory>& sharedBuffer,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                    callback_t cbf      = NULL,
-                                    void* user          = NULL,
+                                    const wp<IAudioTrackCallback>& callback = nullptr,
                                     int32_t notificationFrames = 0,
                                     audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
-                                    const audio_offload_info_t *offloadInfo = NULL,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
                                     const AttributionSourceState& attributionSource =
                                         AttributionSourceState(),
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    bool doNotReconnect = false,
+                                    float maxRequiredSpeed = 1.0f);
+
+
+                        AudioTrack( audio_stream_type_t streamType,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    const sp<IMemory>& sharedBuffer,
+                                    audio_output_flags_t flags,
+                                    legacy_callback_t cbf,
+                                    void* user          = nullptr,
+                                    int32_t notificationFrames = 0,
+                                    audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
+                                    const AttributionSourceState& attributionSource =
+                                        AttributionSourceState(),
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     bool doNotReconnect = false,
                                     float maxRequiredSpeed = 1.0f);
 
@@ -333,20 +442,41 @@
                             audio_channel_mask_t channelMask,
                             size_t frameCount   = 0,
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                            callback_t cbf      = NULL,
-                            void* user          = NULL,
+                            const wp<IAudioTrackCallback>& callback = nullptr,
                             int32_t notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
-                            const audio_offload_info_t *offloadInfo = NULL,
+                            const audio_offload_info_t *offloadInfo = nullptr,
                             const AttributionSourceState& attributionSource =
                                 AttributionSourceState(),
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
                             bool doNotReconnect = false,
                             float maxRequiredSpeed = 1.0f,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
+            status_t    set(audio_stream_type_t streamType,
+                            uint32_t sampleRate,
+                            audio_format_t format,
+                            audio_channel_mask_t channelMask,
+                            size_t frameCount,
+                            audio_output_flags_t flags,
+                            legacy_callback_t callback,
+                            void * user = nullptr,
+                            int32_t notificationFrames = 0,
+                            const sp<IMemory>& sharedBuffer = 0,
+                            bool threadCanCallJava = false,
+                            audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            const audio_offload_info_t *offloadInfo = nullptr,
+                            const AttributionSourceState& attributionSource =
+                                AttributionSourceState(),
+                            const audio_attributes_t* pAttributes = nullptr,
+                            bool doNotReconnect = false,
+                            float maxRequiredSpeed = 1.0f,
+                            audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
     // FIXME(b/169889714): Vendor code depends on the old method signature at link time
             status_t    set(audio_stream_type_t streamType,
                             uint32_t sampleRate,
@@ -354,17 +484,17 @@
                             uint32_t channelMask,
                             size_t frameCount   = 0,
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                            callback_t cbf      = NULL,
-                            void* user          = NULL,
+                            legacy_callback_t cbf = nullptr,
+                            void* user          = nullptr,
                             int32_t notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
-                            const audio_offload_info_t *offloadInfo = NULL,
+                            const audio_offload_info_t *offloadInfo = nullptr,
                             uid_t uid = AUDIO_UID_INVALID,
                             pid_t pid = -1,
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
                             bool doNotReconnect = false,
                             float maxRequiredSpeed = 1.0f,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
@@ -401,6 +531,7 @@
 
             uint32_t    channelCount() const { return mChannelCount; }
             size_t      frameCount() const  { return mFrameCount; }
+            audio_channel_mask_t channelMask() const { return mChannelMask; }
 
     /*
      * Return the period of the notification callback in frames.
@@ -427,8 +558,7 @@
      * less than or equal to the getBufferCapacityInFrames().
      * It may also be adjusted slightly for internal reasons.
      *
-     * Return the final size or a negative error if the track is unitialized
-     * or does not support variable sizes.
+     * Return the final size or a negative value (NO_INIT) if the track is uninitialized.
      */
             ssize_t     setBufferSizeInFrames(size_t size);
 
@@ -510,6 +640,14 @@
      */
             void        pause();
 
+    /* Pause and wait (with timeout) for the audio track to ramp to silence.
+     *
+     * \param timeout is the time limit to wait before returning.
+     *                A negative number is treated as 0.
+     * \return true if the track is ramped to silence, false if the timeout occurred.
+     */
+            bool        pauseAndWait(const std::chrono::milliseconds& timeout);
+
     /* Set volume for this track, mostly used for games' sound effects
      * left and right volumes. Levels must be >= 0.0 and <= 1.0.
      * This is the older API.  New applications should use setVolume(float) when possible.
@@ -1206,9 +1344,8 @@
     }
 
     // for client callback handler
-    callback_t              mCbf;                   // callback handler for events, or NULL
-    void*                   mUserData;
-
+    wp<IAudioTrackCallback> mCallback;                   // callback handler for events; may be null
+    sp<IAudioTrackCallback> mLegacyCallbackWrapper;      // wrapper for legacy callback interface
     // for notification APIs
 
     // next 2 fields are const after constructor or set()
@@ -1380,6 +1517,9 @@
     std::string mMetricsId;  // GUARDED_BY(mLock), could change in createTrack_l().
     std::string mCallerName; // for example "aaudio"
 
+    // report error to mediametrics.
+    void reportError(status_t status, const char *event, const char *message) const;
+
 private:
     class AudioTrackCallback : public media::BnAudioTrackCallback {
     public:
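For illustration only (not part of this change): a minimal sketch of driving playback through the IAudioTrackCallback interface and the new pauseAndWait() declared above. MyTrackCallback and fillFromSource() are hypothetical; note that AudioTrack stores only a wp<> to the callback, so the client must keep a strong reference alive.

#include <chrono>
#include <media/AudioTrack.h>

using namespace android;

// Hypothetical PCM source: fills the buffer, returns bytes written.
size_t fillFromSource(const AudioTrack::Buffer& buffer);

class MyTrackCallback : public AudioTrack::IAudioTrackCallback {
  protected:
    size_t onMoreData(const AudioTrack::Buffer& buffer) override {
        return fillFromSource(buffer);
    }
    void onUnderrun() override { /* e.g. bump an underrun counter */ }
    void onNewIAudioTrack() override { /* e.g. restore volume or playback rate */ }
};

void playExample() {
    // Keep a strong reference to the callback for the lifetime of the track.
    sp<MyTrackCallback> cb = new MyTrackCallback();
    sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 48000 /*sampleRate*/,
                                          AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
                                          0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, cb);
    track->start();
    // ... later: pause and give the output up to 50 ms to ramp to silence.
    if (!track->pauseAndWait(std::chrono::milliseconds(50))) {
        // Timed out before reaching silence.
    }
}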
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 0e059f7..b4ee4dc 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -37,10 +37,12 @@
 #include <string>
 #include <vector>
 
+#include <android/content/AttributionSourceState.h>
 #include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerService.h>
 #include <android/media/BpAudioFlingerService.h>
-#include <android/content/AttributionSourceState.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include "android/media/CreateEffectRequest.h"
 #include "android/media/CreateEffectResponse.h"
 #include "android/media/CreateRecordRequest.h"
@@ -63,7 +65,7 @@
 
 // ----------------------------------------------------------------------------
 
-class IAudioFlinger : public RefBase {
+class IAudioFlinger : public virtual RefBase {
 public:
     static constexpr char DEFAULT_SERVICE_NAME[] = "media.audio_flinger";
 
@@ -166,6 +168,7 @@
         sp<IMemory> buffers;
         audio_port_handle_t portId;
         sp<media::IAudioRecord> audioRecord;
+        audio_config_base_t serverConfig;
 
         ConversionResult<media::CreateRecordResponse> toAidl() const;
         static ConversionResult<CreateRecordOutput>
@@ -329,6 +332,9 @@
     /* Indicate JAVA services are ready (scheduling, power management ...) */
     virtual status_t systemReady() = 0;
 
+    // Indicate audio policy service is ready
+    virtual status_t audioPolicyReady() = 0;
+
     // Returns the number of frames per audio HAL buffer.
     virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
 
@@ -344,6 +350,14 @@
 
     virtual status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
+
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
+
+    virtual int32_t getAAudioMixerBurstCount() = 0;
+
+    virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
 };
 
 /**
@@ -432,6 +446,8 @@
     status_t setAudioPortConfig(const struct audio_port_config* config) override;
     audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) override;
     status_t systemReady() override;
+    status_t audioPolicyReady() override;
+
     size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
     status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
     status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
@@ -439,6 +455,14 @@
     status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs) override;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) override;
+
+    int32_t getAAudioMixerBurstCount() override;
+
+    int32_t getAAudioHardwareBurstMinUsec() override;
+
 private:
     const sp<media::IAudioFlingerService> mDelegate;
 };
@@ -514,6 +538,7 @@
             SET_AUDIO_PORT_CONFIG = media::BnAudioFlingerService::TRANSACTION_setAudioPortConfig,
             GET_AUDIO_HW_SYNC_FOR_SESSION = media::BnAudioFlingerService::TRANSACTION_getAudioHwSyncForSession,
             SYSTEM_READY = media::BnAudioFlingerService::TRANSACTION_systemReady,
+            AUDIO_POLICY_READY = media::BnAudioFlingerService::TRANSACTION_audioPolicyReady,
             FRAME_COUNT_HAL = media::BnAudioFlingerService::TRANSACTION_frameCountHAL,
             GET_MICROPHONES = media::BnAudioFlingerService::TRANSACTION_getMicrophones,
             SET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_setMasterBalance,
@@ -522,6 +547,9 @@
             SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
             SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
             UPDATE_SECONDARY_OUTPUTS = media::BnAudioFlingerService::TRANSACTION_updateSecondaryOutputs,
+            GET_MMAP_POLICY_INFOS = media::BnAudioFlingerService::TRANSACTION_getMmapPolicyInfos,
+            GET_AAUDIO_MIXER_BURST_COUNT = media::BnAudioFlingerService::TRANSACTION_getAAudioMixerBurstCount,
+            GET_AAUDIO_HARDWARE_BURST_MIN_USEC = media::BnAudioFlingerService::TRANSACTION_getAAudioHardwareBurstMinUsec,
         };
 
         /**
@@ -563,7 +591,8 @@
     Status createRecord(const media::CreateRecordRequest& request,
                         media::CreateRecordResponse* _aidl_return) override;
     Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
-    Status format(int32_t output, media::audio::common::AudioFormat* _aidl_return) override;
+    Status format(int32_t output,
+                  media::audio::common::AudioFormatDescription* _aidl_return) override;
     Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
     Status latency(int32_t output, int32_t* _aidl_return) override;
     Status setMasterVolume(float value) override;
@@ -572,12 +601,13 @@
     Status masterMute(bool* _aidl_return) override;
     Status setMasterBalance(float balance) override;
     Status getMasterBalance(float* _aidl_return) override;
-    Status setStreamVolume(media::AudioStreamType stream, float value, int32_t output) override;
-    Status setStreamMute(media::AudioStreamType stream, bool muted) override;
-    Status
-    streamVolume(media::AudioStreamType stream, int32_t output, float* _aidl_return) override;
-    Status streamMute(media::AudioStreamType stream, bool* _aidl_return) override;
-    Status setMode(media::AudioMode mode) override;
+    Status setStreamVolume(media::audio::common::AudioStreamType stream,
+                           float value, int32_t output) override;
+    Status setStreamMute(media::audio::common::AudioStreamType stream, bool muted) override;
+    Status streamVolume(media::audio::common::AudioStreamType stream,
+                        int32_t output, float* _aidl_return) override;
+    Status streamMute(media::audio::common::AudioStreamType stream, bool* _aidl_return) override;
+    Status setMode(media::audio::common::AudioMode mode) override;
     Status setMicMute(bool state) override;
     Status getMicMute(bool* _aidl_return) override;
     Status setRecordSilenced(int32_t portId, bool silenced) override;
@@ -585,8 +615,10 @@
     Status
     getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
     Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
-    Status getInputBufferSize(int32_t sampleRate, media::audio::common::AudioFormat format,
-                              int32_t channelMask, int64_t* _aidl_return) override;
+    Status getInputBufferSize(int32_t sampleRate,
+                              const media::audio::common::AudioFormatDescription& format,
+                              const media::audio::common::AudioChannelLayout& channelMask,
+                              int64_t* _aidl_return) override;
     Status openOutput(const media::OpenOutputRequest& request,
                       media::OpenOutputResponse* _aidl_return) override;
     Status openDuplicateOutput(int32_t output1, int32_t output2, int32_t* _aidl_return) override;
@@ -596,7 +628,7 @@
     Status openInput(const media::OpenInputRequest& request,
                      media::OpenInputResponse* _aidl_return) override;
     Status closeInput(int32_t input) override;
-    Status invalidateStream(media::AudioStreamType stream) override;
+    Status invalidateStream(media::audio::common::AudioStreamType stream) override;
     Status setVoiceVolume(float volume) override;
     Status getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) override;
     Status getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) override;
@@ -605,7 +637,8 @@
     Status releaseAudioSessionId(int32_t audioSession, int32_t pid) override;
     Status queryNumberEffects(int32_t* _aidl_return) override;
     Status queryEffect(int32_t index, media::EffectDescriptor* _aidl_return) override;
-    Status getEffectDescriptor(const media::AudioUuid& effectUUID, const media::AudioUuid& typeUUID,
+    Status getEffectDescriptor(const media::audio::common::AudioUuid& effectUUID,
+                               const media::audio::common::AudioUuid& typeUUID,
                                int32_t preferredTypeFlag,
                                media::EffectDescriptor* _aidl_return) override;
     Status createEffect(const media::CreateEffectRequest& request,
@@ -624,12 +657,18 @@
     Status setAudioPortConfig(const media::AudioPortConfig& config) override;
     Status getAudioHwSyncForSession(int32_t sessionId, int32_t* _aidl_return) override;
     Status systemReady() override;
+    Status audioPolicyReady() override;
     Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
     Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
     Status setAudioHalPids(const std::vector<int32_t>& pids) override;
     Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
     Status updateSecondaryOutputs(
             const std::vector<media::TrackSecondaryOutputInfo>& trackSecondaryOutputInfos) override;
+    Status getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *_aidl_return) override;
+    Status getAAudioMixerBurstCount(int32_t* _aidl_return) override;
+    Status getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) override;
 
 private:
     const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
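For illustration only (not part of this change): a minimal sketch of a client of the new MMAP policy and AAudio burst queries, going through the AudioSystem wrappers shown earlier rather than through IAudioFlinger directly. The use of AudioMMapPolicyType::DEFAULT is an assumption about the AIDL enum value.

#define LOG_TAG "MmapInfoExample"
#include <utils/Log.h>
#include <media/AudioSystem.h>

using namespace android;

void dumpAAudioMmapInfo() {
    std::vector<media::audio::common::AudioMMapPolicyInfo> policies;
    const status_t status = AudioSystem::getMmapPolicyInfo(
            media::audio::common::AudioMMapPolicyType::DEFAULT, &policies);
    if (status != NO_ERROR) {
        ALOGW("getMmapPolicyInfo failed: %d", status);
        return;
    }
    ALOGI("%zu MMAP policy entries, mixer burst count %d, min HW burst %d us",
          policies.size(),
          AudioSystem::getAAudioMixerBurstCount(),
          AudioSystem::getAAudioHardwareBurstMinUsec());
}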
diff --git a/media/libaudioclient/include/media/PolicyAidlConversion.h b/media/libaudioclient/include/media/PolicyAidlConversion.h
index 873f27a..54e778e 100644
--- a/media/libaudioclient/include/media/PolicyAidlConversion.h
+++ b/media/libaudioclient/include/media/PolicyAidlConversion.h
@@ -23,10 +23,8 @@
 
 #include <android/media/AudioMix.h>
 #include <android/media/AudioMixCallbackFlag.h>
-#include <android/media/AudioMixLatencyClass.h>
 #include <android/media/AudioMixRouteFlag.h>
 #include <android/media/AudioMixType.h>
-#include <android/media/AudioMode.h>
 #include <android/media/AudioOffloadMode.h>
 #include <android/media/AudioPolicyForceUse.h>
 #include <android/media/AudioPolicyForcedConfig.h>
@@ -39,11 +37,6 @@
 
 namespace android {
 
-ConversionResult<volume_group_t>
-aidl2legacy_int32_t_volume_group_t(int32_t aidl);
-ConversionResult<int32_t>
-legacy2aidl_volume_group_t_int32_t(volume_group_t legacy);
-
 ConversionResult<product_strategy_t>
 aidl2legacy_int32_t_product_strategy_t(int32_t aidl);
 ConversionResult<int32_t>
diff --git a/media/libaudioclient/include/media/ToneGenerator.h b/media/libaudioclient/include/media/ToneGenerator.h
index a575616..43c0100 100644
--- a/media/libaudioclient/include/media/ToneGenerator.h
+++ b/media/libaudioclient/include/media/ToneGenerator.h
@@ -28,7 +28,7 @@
 
 namespace android {
 
-class ToneGenerator {
+class ToneGenerator : public AudioTrack::IAudioTrackCallback {
 public:
 
     // List of all available tones
@@ -156,6 +156,9 @@
 
     ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava = false,
             std::string opPackageName = {});
+
+    void onFirstRef() override;
+
     ~ToneGenerator();
 
     bool startTone(tone_type toneType, int durationMs = -1);
@@ -311,6 +314,7 @@
     unsigned int mProcessSize;  // Size of audio blocks generated at a time by audioCallback() (in PCM frames).
     struct timespec mStartTime; // tone start time: needed to guaranty actual tone duration
 
+    size_t onMoreData(const AudioTrack::Buffer& buffer) override;
     bool initAudioTrack();
     static void audioCallback(int event, void* user, void *info);
     bool prepareWave();
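For illustration only (not part of this change): ToneGenerator now derives from AudioTrack::IAudioTrackCallback and gains an onFirstRef() override because a RefBase-derived object can only hand a weak pointer to itself to the AudioTrack once it is managed by an sp<>, i.e. not from its constructor. Below is a generic sketch of that pattern, with a hypothetical BeepSource class and synthesize() helper; this is not ToneGenerator's actual implementation.

#include <media/AudioTrack.h>

using namespace android;

class BeepSource : public AudioTrack::IAudioTrackCallback {
  public:
    void onFirstRef() override {
        // Safe here: *this is already owned by at least one sp<>, so passing
        // `this` as the (weak) callback works with the reference counting.
        mTrack = new AudioTrack(AUDIO_STREAM_MUSIC, 48000, AUDIO_FORMAT_PCM_16_BIT,
                                AUDIO_CHANNEL_OUT_MONO, 0 /*frameCount*/,
                                AUDIO_OUTPUT_FLAG_NONE, this);
    }
  protected:
    size_t onMoreData(const AudioTrack::Buffer& buffer) override {
        return synthesize(buffer);  // returns bytes written
    }
  private:
    size_t synthesize(const AudioTrack::Buffer& buffer);  // hypothetical, defined elsewhere
    sp<AudioTrack> mTrack;  // strong ref to the track; the track holds only a wp<> back
};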
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index def7ca6..891293e 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -9,10 +9,35 @@
 
 cc_defaults {
     name: "libaudioclient_tests_defaults",
+    test_suites: ["device-tests"],
     cflags: [
         "-Wall",
         "-Werror",
     ],
+    sanitize: {
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+    },
+}
+
+cc_test {
+    name: "audio_aidl_conversion_tests",
+    defaults: ["libaudioclient_tests_defaults"],
+    srcs: ["audio_aidl_legacy_conversion_tests.cpp"],
+    shared_libs: [
+        "libbinder",
+        "libcutils",
+        "liblog",
+        "libutils",
+    ],
+    static_libs: [
+        "android.media.audio.common.types-V1-cpp",
+        "audioclient-types-aidl-cpp",
+        "libaudioclient_aidl_conversion",
+        "libstagefright_foundation",
+    ],
 }
 
 cc_test {
@@ -30,8 +55,10 @@
 cc_test {
     name: "test_create_audiotrack",
     defaults: ["libaudioclient_tests_defaults"],
-    srcs: ["test_create_audiotrack.cpp",
-           "test_create_utils.cpp"],
+    srcs: [
+        "test_create_audiotrack.cpp",
+        "test_create_utils.cpp",
+    ],
     header_libs: [
         "libmedia_headers",
         "libmediametrics_headers",
@@ -49,8 +76,10 @@
 cc_test {
     name: "test_create_audiorecord",
     defaults: ["libaudioclient_tests_defaults"],
-    srcs: ["test_create_audiorecord.cpp",
-           "test_create_utils.cpp"],
+    srcs: [
+        "test_create_audiorecord.cpp",
+        "test_create_utils.cpp",
+    ],
     header_libs: [
         "libmedia_headers",
         "libmediametrics_headers",
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
new file mode 100644
index 0000000..997f62a
--- /dev/null
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include <media/AudioCommonTypes.h>
+#include <media/AidlConversion.h>
+
+using namespace android;
+using namespace android::aidl_utils;
+
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::PcmType;
+
+namespace {
+
+template<typename T> size_t hash(const T& t) {
+    return std::hash<T>{}(t);
+}
+
+AudioChannelLayout make_ACL_None() {
+    return AudioChannelLayout{};
+}
+
+AudioChannelLayout make_ACL_Invalid() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::invalid>(0);
+}
+
+AudioChannelLayout make_ACL_Stereo() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+            AudioChannelLayout::LAYOUT_STEREO);
+}
+
+AudioChannelLayout make_ACL_LayoutArbitrary() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+            // Use channels that exist both for input and output,
+            // but doesn't form a known layout mask.
+            AudioChannelLayout::CHANNEL_FRONT_LEFT |
+            AudioChannelLayout::CHANNEL_FRONT_RIGHT |
+            AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT |
+            AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT);
+}
+
+AudioChannelLayout make_ACL_ChannelIndex2() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(
+            AudioChannelLayout::INDEX_MASK_2);
+}
+
+AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
+    // Use channels 1 and 3.
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(5);
+}
+
+AudioChannelLayout make_ACL_VoiceCall() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(
+            AudioChannelLayout::VOICE_CALL_MONO);
+}
+
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
+        const std::string& connection = "") {
+    AudioDeviceDescription result;
+    result.type = type;
+    result.connection = connection;
+    return result;
+}
+
+AudioDeviceDescription make_ADD_None() {
+    return AudioDeviceDescription{};
+}
+
+AudioDeviceDescription make_ADD_DefaultIn() {
+    return make_AudioDeviceDescription(AudioDeviceType::IN_DEFAULT);
+}
+
+AudioDeviceDescription make_ADD_DefaultOut() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_DEFAULT);
+}
+
+AudioDeviceDescription make_ADD_WiredHeadset() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
+            AudioDeviceDescription::CONNECTION_ANALOG());
+}
+
+AudioDeviceDescription make_ADD_BtScoHeadset() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
+            AudioDeviceDescription::CONNECTION_BT_SCO());
+}
+
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+    AudioFormatDescription result;
+    result.type = type;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+    auto result = make_AudioFormatDescription(AudioFormatType::PCM);
+    result.pcm = pcm;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+    AudioFormatDescription result;
+    result.encoding = encoding;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
+        const std::string& encoding) {
+    auto result = make_AudioFormatDescription(encoding);
+    result.pcm = transport;
+    return result;
+}
+
+AudioFormatDescription make_AFD_Default() {
+    return AudioFormatDescription{};
+}
+
+AudioFormatDescription make_AFD_Invalid() {
+    return make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID);
+}
+
+AudioFormatDescription make_AFD_Pcm16Bit() {
+    return make_AudioFormatDescription(PcmType::INT_16_BIT);
+}
+
+AudioFormatDescription make_AFD_Bitstream() {
+    return make_AudioFormatDescription("example");
+}
+
+AudioFormatDescription make_AFD_Encap() {
+    return make_AudioFormatDescription(PcmType::INT_16_BIT, "example.encap");
+}
+
+AudioFormatDescription make_AFD_Encap_with_Enc() {
+    auto afd = make_AFD_Encap();
+    afd.encoding += "+example";
+    return afd;
+}
+
+}  // namespace
+
+// Verify that two independently constructed ADDs/AFDs have the same hash.
+// This ensures that regardless of where an ADD/AFD instance originates
+// from, it can be correctly compared to other ADD/AFD instances. Thus,
+// for example, a 16-bit integer format description provided by the HAL
+// is identical to the same format description constructed by the framework.
+class HashIdentityTest : public ::testing::Test {
+  public:
+    template<typename T> void verifyHashIdentity(const std::vector<std::function<T()>>& valueGens) {
+        for (size_t i = 0; i < valueGens.size(); ++i) {
+            for (size_t j = 0; j < valueGens.size(); ++j) {
+                if (i == j) {
+                    EXPECT_EQ(hash(valueGens[i]()), hash(valueGens[i]())) << i;
+                } else {
+                    EXPECT_NE(hash(valueGens[i]()), hash(valueGens[j]())) << i << ", " << j;
+                }
+            }
+        }
+    }
+};
+
+TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
+    verifyHashIdentity<AudioChannelLayout>({
+            make_ACL_None, make_ACL_Invalid, make_ACL_Stereo,
+            make_ACL_LayoutArbitrary, make_ACL_ChannelIndex2,
+            make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
+}
+
+TEST_F(HashIdentityTest, AudioDeviceDescriptionHashIdentity) {
+    verifyHashIdentity<AudioDeviceDescription>({
+            make_ADD_None, make_ADD_DefaultIn, make_ADD_DefaultOut, make_ADD_WiredHeadset,
+            make_ADD_BtScoHeadset});
+}
+
+TEST_F(HashIdentityTest, AudioFormatDescriptionHashIdentity) {
+    verifyHashIdentity<AudioFormatDescription>({
+            make_AFD_Default, make_AFD_Invalid, make_AFD_Pcm16Bit, make_AFD_Bitstream,
+            make_AFD_Encap, make_AFD_Encap_with_Enc});
+}
+
+using ChannelLayoutParam = std::tuple<AudioChannelLayout, bool /*isInput*/>;
+class AudioChannelLayoutRoundTripTest :
+        public testing::TestWithParam<ChannelLayoutParam> {};
+TEST_P(AudioChannelLayoutRoundTripTest, Aidl2Legacy2Aidl) {
+    const auto initial = std::get<0>(GetParam());
+    const bool isInput = std::get<1>(GetParam());
+    auto conv = aidl2legacy_AudioChannelLayout_audio_channel_mask_t(initial, isInput);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(conv.value(), isInput);
+    ASSERT_TRUE(convBack.ok());
+    EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip,
+        AudioChannelLayoutRoundTripTest,
+        testing::Combine(
+                testing::Values(AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
+                        make_ACL_LayoutArbitrary(), make_ACL_ChannelIndex2(),
+                        make_ACL_ChannelIndexArbitrary()),
+                testing::Values(false, true)));
+INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
+        AudioChannelLayoutRoundTripTest,
+        // In legacy constants the voice call is only defined for input.
+        testing::Combine(testing::Values(make_ACL_VoiceCall()), testing::Values(true)));
+
+using ChannelLayoutEdgeCaseParam = std::tuple<int /*legacy*/, bool /*isInput*/, bool /*isValid*/>;
+class AudioChannelLayoutEdgeCaseTest :
+        public testing::TestWithParam<ChannelLayoutEdgeCaseParam> {};
+TEST_P(AudioChannelLayoutEdgeCaseTest, Legacy2Aidl) {
+    const audio_channel_mask_t legacy = static_cast<audio_channel_mask_t>(std::get<0>(GetParam()));
+    const bool isInput = std::get<1>(GetParam());
+    const bool isValid = std::get<2>(GetParam());
+    auto conv = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy, isInput);
+    EXPECT_EQ(isValid, conv.ok());
+}
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutEdgeCase,
+        AudioChannelLayoutEdgeCaseTest,
+        testing::Values(
+                // Valid legacy input masks.
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO, true, true),
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO, true, true),
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_CALL_MONO, true, true),
+                // Valid legacy output masks.
+                std::make_tuple(
+                        // This has the same numerical representation as Mask 'A' below
+                        AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+                        AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT, false, true),
+                std::make_tuple(
+                        // This has the same numerical representation as Mask 'B' below
+                        AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+                        AUDIO_CHANNEL_OUT_TOP_BACK_LEFT, false, true),
+                // Invalid legacy input masks.
+                std::make_tuple(AUDIO_CHANNEL_IN_6, true, false),
+                std::make_tuple(
+                        AUDIO_CHANNEL_IN_6 | AUDIO_CHANNEL_IN_FRONT_PROCESSED, true, false),
+                std::make_tuple(
+                        AUDIO_CHANNEL_IN_PRESSURE | AUDIO_CHANNEL_IN_X_AXIS |
+                        AUDIO_CHANNEL_IN_Y_AXIS | AUDIO_CHANNEL_IN_Z_AXIS, true, false),
+                std::make_tuple(  // Mask 'A'
+                        AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_UPLINK, true, false),
+                std::make_tuple(  // Mask 'B'
+                        AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_DNLINK, true, false)));
+
+class AudioDeviceDescriptionRoundTripTest :
+        public testing::TestWithParam<AudioDeviceDescription> {};
+TEST_P(AudioDeviceDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
+    const auto initial = GetParam();
+    auto conv = aidl2legacy_AudioDeviceDescription_audio_devices_t(initial);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = legacy2aidl_audio_devices_t_AudioDeviceDescription(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioDeviceDescriptionRoundTrip,
+        AudioDeviceDescriptionRoundTripTest,
+        testing::Values(AudioDeviceDescription{}, make_ADD_DefaultIn(),
+                make_ADD_DefaultOut(), make_ADD_WiredHeadset(), make_ADD_BtScoHeadset()));
+
+class AudioFormatDescriptionRoundTripTest :
+        public testing::TestWithParam<AudioFormatDescription> {};
+TEST_P(AudioFormatDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
+    const auto initial = GetParam();
+    auto conv = aidl2legacy_AudioFormatDescription_audio_format_t(initial);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = legacy2aidl_audio_format_t_AudioFormatDescription(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip,
+        AudioFormatDescriptionRoundTripTest,
+        testing::Values(make_AFD_Invalid(), AudioFormatDescription{}, make_AFD_Pcm16Bit()));
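The parameterized tests above all follow the same Aidl2Legacy2Aidl round-trip pattern: convert a value to its legacy representation, convert it back, and expect the original. A minimal standalone sketch of that pattern, using a hypothetical toWire/fromWire pair in place of the real aidl2legacy_*/legacy2aidl_* converters, could look like this:

    #include <gtest/gtest.h>
    #include <optional>
    #include <string>

    namespace {

    // Hypothetical converters standing in for an aidl2legacy_*/legacy2aidl_* pair.
    std::optional<int> toWire(const std::string& s) {
        if (s.empty() || s.find_first_not_of("0123456789") != std::string::npos) {
            return std::nullopt;  // reject values with no wire representation
        }
        return std::stoi(s);
    }

    std::optional<std::string> fromWire(int v) {
        return std::to_string(v);
    }

    }  // namespace

    class RoundTripSketchTest : public testing::TestWithParam<std::string> {};

    TEST_P(RoundTripSketchTest, Aidl2Legacy2Aidl) {
        const std::string initial = GetParam();
        auto conv = toWire(initial);
        ASSERT_TRUE(conv.has_value());
        auto convBack = fromWire(conv.value());
        ASSERT_TRUE(convBack.has_value());
        EXPECT_EQ(initial, convBack.value());
    }

    INSTANTIATE_TEST_SUITE_P(RoundTripSketch, RoundTripSketchTest,
            testing::Values("44100", "48000", "96000"));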
diff --git a/media/libaudioclient/tests/test_create_audiorecord.cpp b/media/libaudioclient/tests/test_create_audiorecord.cpp
index 1cbcb71..2e0883b 100644
--- a/media/libaudioclient/tests/test_create_audiorecord.cpp
+++ b/media/libaudioclient/tests/test_create_audiorecord.cpp
@@ -98,14 +98,14 @@
         attributes.source = inputSource;
 
         sp<AudioRecord> record = new AudioRecord(attributionSource);
+        const auto emptyCallback = sp<AudioRecord::IAudioRecordCallback>::make();
 
         record->set(AUDIO_SOURCE_DEFAULT,
                    sampleRate,
                    format,
                    channelMask,
                    frameCount,
-                   fast ? callback : nullptr,
-                   nullptr,
+                   fast ? emptyCallback : nullptr,
                    notificationFrames,
                    false,
                    sessionId,
diff --git a/media/libaudioclient/tests/test_create_audiotrack.cpp b/media/libaudioclient/tests/test_create_audiotrack.cpp
index cf9b925..e7231d3 100644
--- a/media/libaudioclient/tests/test_create_audiotrack.cpp
+++ b/media/libaudioclient/tests/test_create_audiotrack.cpp
@@ -19,6 +19,7 @@
 #include <string.h>
 #include <unistd.h>
 
+#include <android/content/AttributionSourceState.h>
 #include <binder/MemoryBase.h>
 #include <binder/MemoryDealer.h>
 #include <binder/MemoryHeapBase.h>
@@ -108,17 +109,15 @@
         memset(&attributes, 0, sizeof(attributes));
         attributes.content_type = contentType;
         attributes.usage = usage;
-
         sp<AudioTrack> track = new AudioTrack();
-
+        const auto emptyCallback = sp<AudioTrack::IAudioTrackCallback>::make();
         track->set(AUDIO_STREAM_DEFAULT,
                    sampleRate,
                    format,
                    channelMask,
                    frameCount,
                    flags,
-                   (fast || offload) ? callback : nullptr,
-                   nullptr,
+                   (fast || offload) ? emptyCallback : nullptr,
                    notificationFrames,
                    sharedBuffer,
                    false,
@@ -126,8 +125,7 @@
                    ((fast && sharedBuffer == 0) || offload) ?
                            AudioTrack::TRANSFER_CALLBACK : AudioTrack::TRANSFER_DEFAULT,
                    offload ? &offloadInfo : nullptr,
-                   getuid(),
-                   getpid(),
+                   AttributionSourceState(),
                    &attributes,
                    false,
                    1.0f,
diff --git a/media/libaudioclient/tests/test_create_utils.cpp b/media/libaudioclient/tests/test_create_utils.cpp
index 8aa1f13..caf5227 100644
--- a/media/libaudioclient/tests/test_create_utils.cpp
+++ b/media/libaudioclient/tests/test_create_utils.cpp
@@ -68,10 +68,6 @@
     return true;
 }
 
-void callback(int event __unused, void* user __unused, void *info __unused)
-{
-}
-
 int main(int argc, char **argv, test_func_t testFunc)
 {
     FILE *inputFile = nullptr;
diff --git a/media/libaudioclient/tests/test_create_utils.h b/media/libaudioclient/tests/test_create_utils.h
index 2ad646e..9a6f9fa 100644
--- a/media/libaudioclient/tests/test_create_utils.h
+++ b/media/libaudioclient/tests/test_create_utils.h
@@ -31,7 +31,6 @@
 
 bool checkVersion(FILE *inputFile, const char *version);
 
-void callback(int event, void* user, void *info);
 
 typedef int (*test_func_t)(FILE *inputFile, int outputFileFd);
 
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index 3bef55b..159f898 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -11,6 +11,10 @@
     name: "libaudiofoundation_headers",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+    ],
 
     export_include_dirs: ["include"],
     header_libs: [
@@ -24,9 +28,11 @@
         "libmedia_helper_headers",
     ],
     static_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
     ],
     export_static_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
     ],
     host_supported: true,
@@ -52,6 +58,7 @@
     ],
 
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "libaudioutils",
@@ -63,6 +70,7 @@
     ],
 
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
     ],
diff --git a/media/libaudiofoundation/AudioContainers.cpp b/media/libaudiofoundation/AudioContainers.cpp
index 31257d5..553a319 100644
--- a/media/libaudiofoundation/AudioContainers.cpp
+++ b/media/libaudiofoundation/AudioContainers.cpp
@@ -63,48 +63,46 @@
     return audioDeviceInAllUsbSet;
 }
 
-bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+const DeviceTypeSet& getAudioDeviceOutAllBleSet() {
+    static const DeviceTypeSet audioDeviceOutAllBleSet = DeviceTypeSet(
+            std::begin(AUDIO_DEVICE_OUT_ALL_BLE_ARRAY),
+            std::end(AUDIO_DEVICE_OUT_ALL_BLE_ARRAY));
+    return audioDeviceOutAllBleSet;
+}
+
+std::string deviceTypesToString(const DeviceTypeSet &deviceTypes) {
     if (deviceTypes.empty()) {
-        str = "Empty device types";
-        return true;
+        return "Empty device types";
     }
-    bool ret = true;
-    for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
-        std::string deviceTypeStr;
-        ret = audio_is_output_device(*it) ?
-              OutputDeviceConverter::toString(*it, deviceTypeStr) :
-              InputDeviceConverter::toString(*it, deviceTypeStr);
-        if (!ret) {
-            break;
+    std::stringstream ss;
+    for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+        if (it != deviceTypes.begin()) {
+            ss << ", ";
         }
-        str.append(deviceTypeStr);
-        if (++it != deviceTypes.end()) {
-            str.append(" , ");
+        const char* strType = audio_device_to_string(*it);
+        if (strlen(strType) != 0) {
+            ss << strType;
+        } else {
+            ss << "unknown type:0x" << std::hex << *it;
         }
     }
-    if (!ret) {
-        str = "Unknown values";
-    }
-    return ret;
+    return ss.str();
+}
+
+bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+    str = deviceTypesToString(deviceTypes);
+    return true;
 }
 
 std::string dumpDeviceTypes(const DeviceTypeSet &deviceTypes) {
-    std::string ret;
-    for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
-        std::stringstream ss;
-        ss << "0x" << std::hex << (*it);
-        ret.append(ss.str());
-        if (++it != deviceTypes.end()) {
-            ret.append(" , ");
+    std::stringstream ss;
+    for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+        if (it != deviceTypes.begin()) {
+            ss << ", ";
         }
+        ss << "0x" << std::hex << (*it);
     }
-    return ret;
-}
-
-std::string toString(const DeviceTypeSet& deviceTypes) {
-    std::string ret;
-    deviceTypesToString(deviceTypes, ret);
-    return ret;
+    return ss.str();
 }
 
 } // namespace android
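The rewritten deviceTypesToString and dumpDeviceTypes above share one idiom: accumulate the result in a std::stringstream and emit the ", " separator only between elements. A small self-contained sketch of that join idiom (the element type and hex formatting are stand-ins) is:

    #include <set>
    #include <sstream>
    #include <string>

    // Join a set of values as "0xa, 0xb, ..." without a trailing separator.
    std::string joinHex(const std::set<int>& values) {
        std::stringstream ss;
        for (auto it = values.begin(); it != values.end(); ++it) {
            if (it != values.begin()) {
                ss << ", ";  // separator only between elements, never at the end
            }
            ss << "0x" << std::hex << *it;
        }
        return ss.str();
    }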
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index c5d7da8..4a7e956 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -25,6 +25,9 @@
 
 namespace android {
 
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+
 namespace {
 
 static const std::string SUPPRESSED = "SUPPRESSED";
@@ -97,10 +100,13 @@
 
 std::string AudioDeviceTypeAddr::toString(bool includeSensitiveInfo) const {
     std::stringstream sstream;
-    sstream << "type:0x" << std::hex << mType;
+    sstream << audio_device_to_string(mType);
+    if (sstream.str().empty()) {
+        sstream << "unknown type:0x" << std::hex << mType;
+    }
     // IP and MAC addresses are sensitive information. The sensitive information is suppressed
     // if `includeSensitiveInfo` is false.
-    sstream << ",@:"
+    sstream << ", @:"
             << (!includeSensitiveInfo && mIsAddressSensitive ? SUPPRESSED : mAddress);
     return sstream.str();
 }
@@ -157,17 +163,16 @@
 }
 
 ConversionResult<AudioDeviceTypeAddr>
-aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl) {
-    audio_devices_t type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.type));
-    return AudioDeviceTypeAddr(type, aidl.address);
+aidl2legacy_AudioDeviceTypeAddress(const AudioDevice& aidl) {
+    audio_devices_t type;
+    std::string address;
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(aidl, &type, &address));
+    return AudioDeviceTypeAddr(type, address);
 }
 
-ConversionResult<media::AudioDevice>
+ConversionResult<AudioDevice>
 legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy) {
-    media::AudioDevice aidl;
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.mType));
-    aidl.address = legacy.getAddress();
-    return aidl;
+    return legacy2aidl_audio_device_AudioDevice(legacy.mType, legacy.getAddress());
 }
 
 } // namespace android
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index 1dee938..47e0edb 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -24,22 +24,18 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
+#include <math.h>
+
 #include <algorithm>
 
 #include <android-base/stringprintf.h>
 #include <media/AudioGain.h>
 #include <utils/Log.h>
 
-#include <math.h>
-
 namespace android {
 
-AudioGain::AudioGain(int index, bool useInChannelMask)
-{
-    mIndex = index;
-    mUseInChannelMask = useInChannelMask;
-    memset(&mGain, 0, sizeof(struct audio_gain));
-}
+AudioGain::AudioGain(int index, bool isInput)
+        : mIndex(index), mIsInput(isInput) {}
 
 void AudioGain::getDefaultConfig(struct audio_gain_config *config)
 {
@@ -49,12 +45,9 @@
     if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
         config->values[0] = mGain.default_value;
     } else {
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
-        }
+        const uint32_t numValues = mIsInput ?
+                audio_channel_count_from_in_mask(mGain.channel_mask) :
+                audio_channel_count_from_out_mask(mGain.channel_mask);
         for (size_t i = 0; i < numValues; i++) {
             config->values[i] = mGain.default_value;
         }
@@ -78,12 +71,9 @@
         if ((config->channel_mask & ~mGain.channel_mask) != 0) {
             return BAD_VALUE;
         }
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(config->channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(config->channel_mask);
-        }
+        const uint32_t numValues = mIsInput ?
+                audio_channel_count_from_in_mask(config->channel_mask) :
+                audio_channel_count_from_out_mask(config->channel_mask);
         for (size_t i = 0; i < numValues; i++) {
             if ((config->values[i] < mGain.min_value) ||
                     (config->values[i] > mGain.max_value)) {
@@ -116,7 +106,7 @@
 bool AudioGain::equals(const sp<AudioGain>& other) const
 {
     return other != nullptr &&
-           mUseInChannelMask == other->mUseInChannelMask &&
+           mIsInput == other->mIsInput &&
            mUseForVolume == other->mUseForVolume &&
            // Compare audio gain
            mGain.mode == other->mGain.mode &&
@@ -129,51 +119,24 @@
            mGain.max_ramp_ms == other->mGain.max_ramp_ms;
 }
 
-status_t AudioGain::writeToParcel(android::Parcel *parcel) const {
-    media::AudioGain parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
+ConversionResult<AudioGain::Aidl> AudioGain::toParcelable() const {
+    media::audio::common::AudioGain aidl = VALUE_OR_RETURN(
+            legacy2aidl_audio_gain_AudioGain(mGain, mIsInput));
+    aidl.useForVolume = mUseForVolume;
+    media::AudioGainSys aidlSys;
+    aidlSys.index = VALUE_OR_RETURN(convertIntegral<int32_t>(mIndex));
+    aidlSys.isInput = mIsInput;
+    return std::make_pair(aidl, aidlSys);
 }
 
-status_t AudioGain::writeToParcelable(media::AudioGain* parcelable) const {
-    parcelable->index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mIndex));
-    parcelable->useInChannelMask = mUseInChannelMask;
-    parcelable->useForVolume = mUseForVolume;
-    parcelable->mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
-    parcelable->channelMask = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
-    parcelable->minValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_value));
-    parcelable->maxValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_value));
-    parcelable->defaultValue = VALUE_OR_RETURN_STATUS(
-            convertIntegral<int32_t>(mGain.default_value));
-    parcelable->stepValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.step_value));
-    parcelable->minRampMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_ramp_ms));
-    parcelable->maxRampMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_ramp_ms));
-    return OK;
-}
-
-status_t AudioGain::readFromParcel(const android::Parcel *parcel) {
-    media::AudioGain parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
-status_t AudioGain::readFromParcelable(const media::AudioGain& parcelable) {
-    mIndex = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.index));
-    mUseInChannelMask = parcelable.useInChannelMask;
-    mUseForVolume = parcelable.useForVolume;
-    mGain.mode = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.mode));
-    mGain.channel_mask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
-    mGain.min_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.minValue));
-    mGain.max_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.maxValue));
-    mGain.default_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.defaultValue));
-    mGain.step_value = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.stepValue));
-    mGain.min_ramp_ms = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.minRampMs));
-    mGain.max_ramp_ms = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.maxRampMs));
-    return OK;
+ConversionResult<sp<AudioGain>> AudioGain::fromParcelable(const AudioGain::Aidl& aidl) {
+    const media::audio::common::AudioGain& hal = aidl.first;
+    const media::AudioGainSys& sys = aidl.second;
+    auto index = VALUE_OR_RETURN(convertIntegral<int>(sys.index));
+    sp<AudioGain> legacy = sp<AudioGain>::make(index, sys.isInput);
+    legacy->mGain = VALUE_OR_RETURN(aidl2legacy_AudioGain_audio_gain(hal, sys.isInput));
+    legacy->mUseForVolume = hal.useForVolume;
+    return legacy;
 }
 
 bool AudioGains::equals(const AudioGains &other) const
@@ -184,59 +147,30 @@
                       });
 }
 
-status_t AudioGains::writeToParcel(android::Parcel *parcel) const {
-    status_t status = NO_ERROR;
-    if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
-    for (const auto &audioGain : *this) {
-        if ((status = parcel->writeParcelable(*audioGain)) != NO_ERROR) {
-            break;
-        }
-    }
-    return status;
-}
-
-status_t AudioGains::readFromParcel(const android::Parcel *parcel) {
-    status_t status = NO_ERROR;
-    this->clear();
-    if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
-    for (size_t i = 0; i < this->size(); i++) {
-        this->at(i) = new AudioGain(0, false);
-        if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
-            this->clear();
-            break;
-        }
-    }
-    return status;
-}
-
 ConversionResult<sp<AudioGain>>
-aidl2legacy_AudioGain(const media::AudioGain& aidl) {
-    sp<AudioGain> legacy = new AudioGain(0, false);
-    status_t status = legacy->readFromParcelable(aidl);
-    if (status != OK) {
-        return base::unexpected(status);
-    }
-    return legacy;
+aidl2legacy_AudioGain(const AudioGain::Aidl& aidl) {
+    return AudioGain::fromParcelable(aidl);
 }
 
-ConversionResult<media::AudioGain>
+ConversionResult<AudioGain::Aidl>
 legacy2aidl_AudioGain(const sp<AudioGain>& legacy) {
-    media::AudioGain aidl;
-    status_t status = legacy->writeToParcelable(&aidl);
-    if (status != OK) {
-        return base::unexpected(status);
-    }
-    return aidl;
+    return legacy->toParcelable();
 }
 
 ConversionResult<AudioGains>
-aidl2legacy_AudioGains(const std::vector<media::AudioGain>& aidl) {
-    return convertContainer<AudioGains>(aidl, aidl2legacy_AudioGain);
+aidl2legacy_AudioGains(const AudioGains::Aidl& aidl) {
+    return convertContainers<AudioGains>(aidl.first, aidl.second,
+            [](const media::audio::common::AudioGain& g,
+               const media::AudioGainSys& gs) {
+                return aidl2legacy_AudioGain(std::make_pair(g, gs));
+            });
 }
 
-ConversionResult<std::vector<media::AudioGain>>
+ConversionResult<AudioGains::Aidl>
 legacy2aidl_AudioGains(const AudioGains& legacy) {
-    return convertContainer<std::vector<media::AudioGain>>(legacy, legacy2aidl_AudioGain);
+    return convertContainerSplit<
+            std::vector<media::audio::common::AudioGain>,
+            std::vector<media::AudioGainSys>>(legacy, legacy2aidl_AudioGain);
 }
 
 } // namespace android
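AudioGain::Aidl above pairs a stable parcelable (media::audio::common::AudioGain) with a framework-only companion (media::AudioGainSys). A minimal sketch of that hal/sys split, using stand-in types rather than the real AIDL ones, is:

    #include <utility>

    // Stand-ins for the stable ("hal") and framework-only ("sys") parcelables.
    struct CommonPart { int minValue; int maxValue; };
    struct SysPart { int index; bool isInput; };

    struct Gain {
        int index;
        bool isInput;
        int minValue;
        int maxValue;

        using Aidl = std::pair<CommonPart, SysPart>;

        // Split the legacy object into its stable and system-only halves.
        Aidl toParcelable() const {
            return std::make_pair(CommonPart{minValue, maxValue}, SysPart{index, isInput});
        }

        // Reassemble the legacy object from both halves.
        static Gain fromParcelable(const Aidl& aidl) {
            return Gain{aidl.second.index, aidl.second.isInput,
                        aidl.first.minValue, aidl.first.maxValue};
        }
    };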
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index fafabd9..4513323 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -18,13 +18,28 @@
 #include <algorithm>
 #include <utility>
 
-#include <android/media/ExtraAudioDescriptor.h>
 #include <android-base/stringprintf.h>
 #include <media/AudioPort.h>
 #include <utils/Log.h>
 
 namespace android {
 
+void AudioPort::setFlags(uint32_t flags)
+{
+    // Force the direct flag if the offload flag is set: offloading implies a direct output
+    // stream, and all common behaviors are driven by checking only the direct flag.
+    // This should normally be set appropriately in the policy configuration file.
+    if (mRole == AUDIO_PORT_ROLE_SOURCE &&
+            (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        flags |= AUDIO_OUTPUT_FLAG_DIRECT;
+    }
+    if (useInputChannelMask()) {
+        mFlags.input = static_cast<audio_input_flags_t>(flags);
+    } else {
+        mFlags.output = static_cast<audio_output_flags_t>(flags);
+    }
+}
+
 void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
 {
     for (const auto& profileToImport : port->mProfiles) {
@@ -147,9 +162,16 @@
     }
 }
 
-void AudioPort::dump(std::string *dst, int spaces, bool verbose) const {
+void AudioPort::dump(std::string *dst, int spaces, const char* extraInfo, bool verbose) const {
     if (!mName.empty()) {
-        dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
+        dst->append(base::StringPrintf("\"%s\"%s", mName.c_str(),
+                        extraInfo != nullptr ? "; " : ""));
+    }
+    if (extraInfo != nullptr) {
+        dst->append(base::StringPrintf("%s", extraInfo));
+    }
+    if (!mName.empty() || extraInfo != nullptr) {
+        dst->append("\n");
     }
     if (verbose) {
         std::string profilesStr;
@@ -196,39 +218,59 @@
            mType == other->getType() &&
            mRole == other->getRole() &&
            mProfiles.equals(other->getAudioProfiles()) &&
+           getFlags() == other->getFlags() &&
            mExtraAudioDescriptors == other->getExtraAudioDescriptors();
 }
 
-status_t AudioPort::writeToParcel(Parcel *parcel) const
-{
-    media::AudioPort parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
-}
-
 status_t AudioPort::writeToParcelable(media::AudioPort* parcelable) const {
-    parcelable->name = mName;
-    parcelable->type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_type_t_AudioPortType(mType));
-    parcelable->role = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
-    parcelable->profiles = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioProfileVector(mProfiles));
-    parcelable->extraAudioDescriptors = mExtraAudioDescriptors;
-    parcelable->gains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
+    parcelable->hal.name = mName;
+    parcelable->sys.type = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_port_type_t_AudioPortType(mType));
+    parcelable->sys.role = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
+    auto aidlProfiles = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_AudioProfileVector(mProfiles, useInputChannelMask()));
+    parcelable->hal.profiles = aidlProfiles.first;
+    parcelable->sys.profiles = aidlProfiles.second;
+    parcelable->hal.flags = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_io_flags_AudioIoFlags(mFlags, useInputChannelMask()));
+    parcelable->hal.extraAudioDescriptors = mExtraAudioDescriptors;
+    auto aidlGains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
+    parcelable->hal.gains = aidlGains.first;
+    parcelable->sys.gains = aidlGains.second;
+    if (mType == AUDIO_PORT_TYPE_MIX) {
+        media::audio::common::AudioPortMixExt mixExt{};
+        mixExt.maxOpenStreamCount = maxOpenCount;
+        mixExt.maxActiveStreamCount = maxActiveCount;
+        mixExt.recommendedMuteDurationMs = recommendedMuteDurationMs;
+        parcelable->hal.ext = media::audio::common::AudioPortExt::make<
+                media::audio::common::AudioPortExt::mix>(mixExt);
+    }
     return OK;
 }
 
-status_t AudioPort::readFromParcel(const Parcel *parcel) {
-    media::AudioPort parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
 status_t AudioPort::readFromParcelable(const media::AudioPort& parcelable) {
-    mName = parcelable.name;
-    mType = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortType_audio_port_type_t(parcelable.type));
-    mRole = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.role));
-    mProfiles = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioProfileVector(parcelable.profiles));
-    mExtraAudioDescriptors = parcelable.extraAudioDescriptors;
-    mGains = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioGains(parcelable.gains));
+    mName = parcelable.hal.name;
+    mType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioPortType_audio_port_type_t(parcelable.sys.type));
+    mRole = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.sys.role));
+    mProfiles = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioProfileVector(
+                    std::make_pair(parcelable.hal.profiles, parcelable.sys.profiles),
+                    useInputChannelMask()));
+    mFlags = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioIoFlags_audio_io_flags(parcelable.hal.flags, useInputChannelMask()));
+    mExtraAudioDescriptors = parcelable.hal.extraAudioDescriptors;
+    mGains = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioGains(std::make_pair(parcelable.hal.gains, parcelable.sys.gains)));
+    if (mType == AUDIO_PORT_TYPE_MIX) {
+        const media::audio::common::AudioPortMixExt& mixExt =
+                parcelable.hal.ext.get<media::audio::common::AudioPortExt::mix>();
+        maxOpenCount = mixExt.maxOpenStreamCount;
+        maxActiveCount = mixExt.maxActiveStreamCount;
+        recommendedMuteDurationMs = mixExt.recommendedMuteDurationMs;
+    }
     return OK;
 }
 
@@ -250,6 +292,9 @@
     if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
         mGain = config->gain;
     }
+    if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+        mFlags = config->flags;
+    }
 
     return NO_ERROR;
 }
@@ -303,6 +348,9 @@
     } else {
         dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
     }
+
+    updateField(mFlags, &audio_port_config::flags,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
 }
 
 bool AudioPortConfig::hasGainController(bool canUseForVolume) const
@@ -315,12 +363,14 @@
                            : audioport->getGains().size() > 0;
 }
 
-bool AudioPortConfig::equals(const sp<AudioPortConfig> &other) const
+bool AudioPortConfig::equals(const sp<AudioPortConfig> &other, bool isInput) const
 {
     return other != nullptr &&
            mSamplingRate == other->getSamplingRate() &&
            mFormat == other->getFormat() &&
            mChannelMask == other->getChannelMask() &&
+           (isInput ? mFlags.input == other->getFlags().input :
+                   mFlags.output == other->getFlags().output) &&
            // Compare audio gain config
            mGain.index == other->mGain.index &&
            mGain.mode == other->mGain.mode &&
@@ -330,54 +380,47 @@
            mGain.ramp_duration_ms == other->mGain.ramp_duration_ms;
 }
 
-status_t AudioPortConfig::writeToParcel(Parcel *parcel) const {
-    media::AudioPortConfig parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
-}
-
-status_t AudioPortConfig::writeToParcelable(media::AudioPortConfig* parcelable) const {
-    parcelable->sampleRate = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
-    parcelable->format = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(mFormat));
+status_t AudioPortConfig::writeToParcelable(
+        media::audio::common::AudioPortConfig* parcelable, bool isInput) const {
+    media::audio::common::Int aidl_sampleRate;
+    aidl_sampleRate.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
+    parcelable->sampleRate = aidl_sampleRate;
+    parcelable->format = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
     parcelable->channelMask = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_channel_mask_t_int32_t(mChannelMask));
+            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(mChannelMask, isInput));
     parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
-    parcelable->gain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.index));
-    parcelable->gain.mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
-    parcelable->gain.channelMask = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
-    parcelable->gain.rampDurationMs = VALUE_OR_RETURN_STATUS(
-            convertIntegral<int32_t>(mGain.ramp_duration_ms));
-    parcelable->gain.values = VALUE_OR_RETURN_STATUS(convertContainer<std::vector<int32_t>>(
-            mGain.values, convertIntegral<int32_t, int>));
+    media::audio::common::AudioGainConfig aidl_gain = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_gain_config_AudioGainConfig(mGain, isInput));
+    parcelable->gain = aidl_gain;
+    parcelable->flags = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_io_flags_AudioIoFlags(mFlags, isInput));
     return OK;
 }
 
-status_t AudioPortConfig::readFromParcel(const Parcel *parcel) {
-    media::AudioPortConfig parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
-status_t AudioPortConfig::readFromParcelable(const media::AudioPortConfig& parcelable) {
-    mSamplingRate = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.sampleRate));
-    mFormat = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioFormat_audio_format_t(parcelable.format));
-    mChannelMask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
-    mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
-    mGain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.index));
-    mGain.mode = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.gain.mode));
-    mGain.channel_mask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_channel_mask_t(parcelable.gain.channelMask));
-    mGain.ramp_duration_ms = VALUE_OR_RETURN_STATUS(
-            convertIntegral<unsigned int>(parcelable.gain.rampDurationMs));
-    if (parcelable.gain.values.size() > std::size(mGain.values)) {
-        return BAD_VALUE;
+status_t AudioPortConfig::readFromParcelable(
+        const media::audio::common::AudioPortConfig& parcelable, bool isInput) {
+    if (parcelable.sampleRate.has_value()) {
+        mSamplingRate = VALUE_OR_RETURN_STATUS(
+                convertIntegral<unsigned int>(parcelable.sampleRate.value().value));
     }
-    for (size_t i = 0; i < parcelable.gain.values.size(); ++i) {
-        mGain.values[i] = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.values[i]));
+    if (parcelable.format.has_value()) {
+        mFormat = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format.value()));
+    }
+    if (parcelable.channelMask.has_value()) {
+        mChannelMask = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+                        parcelable.channelMask.value(), isInput));
+    }
+    mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
+    if (parcelable.gain.has_value()) {
+        mGain = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioGainConfig_audio_gain_config(parcelable.gain.value(), isInput));
+    }
+    if (parcelable.flags.has_value()) {
+        mFlags = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioIoFlags_audio_io_flags(parcelable.flags.value(), isInput));
     }
     return OK;
 }
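The new AudioPortConfig::readFromParcelable above treats most parcelable fields as optional and only overwrites the legacy fields that are actually present. A small sketch of that apply-if-present pattern, with hypothetical WireConfig/LegacyConfig types, is:

    #include <optional>

    // Hypothetical wire-side config: every field may be absent.
    struct WireConfig {
        std::optional<int> sampleRate;
        std::optional<int> channelMask;
    };

    // Hypothetical legacy config with plain fields.
    struct LegacyConfig {
        unsigned int samplingRate = 0;
        int channelMask = 0;
    };

    // Overwrite only the fields that the wire config actually carries.
    void applyWireConfig(const WireConfig& wire, LegacyConfig* legacy) {
        if (wire.sampleRate.has_value()) {
            legacy->samplingRate = static_cast<unsigned int>(*wire.sampleRate);
        }
        if (wire.channelMask.has_value()) {
            legacy->channelMask = *wire.channelMask;
        }
    }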
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 8ac3f73..734fa9c 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -27,6 +27,8 @@
 
 namespace android {
 
+using media::audio::common::AudioChannelLayout;
+
 bool operator == (const AudioProfile &left, const AudioProfile &right)
 {
     return (left.getFormat() == right.getFormat()) &&
@@ -97,18 +99,14 @@
 
 void AudioProfile::dump(std::string *dst, int spaces) const
 {
-    dst->append(base::StringPrintf("%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
+    dst->append(base::StringPrintf("\"%s\"; ", mName.c_str()));
+    dst->append(base::StringPrintf("%s%s%s%s", mIsDynamicFormat ? "[dynamic format]" : "",
              mIsDynamicChannels ? "[dynamic channels]" : "",
-             mIsDynamicRate ? "[dynamic rates]" : ""));
-    if (mName.length() != 0) {
-        dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
-    }
-    std::string formatLiteral;
-    if (FormatConverter::toString(mFormat, formatLiteral)) {
-        dst->append(base::StringPrintf("%*s- format: %s\n", spaces, "", formatLiteral.c_str()));
-    }
+             mIsDynamicRate ? "[dynamic rates]" : "", isDynamic() ? "; " : ""));
+    dst->append(base::StringPrintf("%s (0x%x)\n", audio_format_to_string(mFormat), mFormat));
+
     if (!mSamplingRates.empty()) {
-        dst->append(base::StringPrintf("%*s- sampling rates:", spaces, ""));
+        dst->append(base::StringPrintf("%*ssampling rates: ", spaces, ""));
         for (auto it = mSamplingRates.begin(); it != mSamplingRates.end();) {
             dst->append(base::StringPrintf("%d", *it));
             dst->append(++it == mSamplingRates.end() ? "" : ", ");
@@ -117,7 +115,7 @@
     }
 
     if (!mChannelMasks.empty()) {
-        dst->append(base::StringPrintf("%*s- channel masks:", spaces, ""));
+        dst->append(base::StringPrintf("%*schannel masks: ", spaces, ""));
         for (auto it = mChannelMasks.begin(); it != mChannelMasks.end();) {
             dst->append(base::StringPrintf("0x%04x", *it));
             dst->append(++it == mChannelMasks.end() ? "" : ", ");
@@ -126,7 +124,7 @@
     }
 
     dst->append(base::StringPrintf(
-            "%*s- encapsulation type: %#x\n", spaces, "", mEncapsulationType));
+             "%*s%s\n", spaces, "", audio_encapsulation_type_to_string(mEncapsulationType)));
 }
 
 bool AudioProfile::equals(const sp<AudioProfile>& other) const
@@ -154,67 +152,88 @@
     return *this;
 }
 
-status_t AudioProfile::writeToParcel(Parcel *parcel) const {
-    media::AudioProfile parcelable = VALUE_OR_RETURN_STATUS(toParcelable());
-    return parcelable.writeToParcel(parcel);
- }
+ConversionResult<AudioProfile::Aidl>
+AudioProfile::toParcelable(bool isInput) const {
+    media::audio::common::AudioProfile parcelable = VALUE_OR_RETURN(toCommonParcelable(isInput));
+    media::AudioProfileSys parcelableSys;
+    parcelableSys.isDynamicFormat = mIsDynamicFormat;
+    parcelableSys.isDynamicChannels = mIsDynamicChannels;
+    parcelableSys.isDynamicRate = mIsDynamicRate;
+    return std::make_pair(parcelable, parcelableSys);
+}
 
-ConversionResult<media::AudioProfile>
-AudioProfile::toParcelable() const {
-    media::AudioProfile parcelable;
+ConversionResult<sp<AudioProfile>> AudioProfile::fromParcelable(
+        const AudioProfile::Aidl& aidl, bool isInput) {
+    sp<AudioProfile> legacy = VALUE_OR_RETURN(fromCommonParcelable(aidl.first, isInput));
+    const auto& parcelableSys = aidl.second;
+    legacy->mIsDynamicFormat = parcelableSys.isDynamicFormat;
+    legacy->mIsDynamicChannels = parcelableSys.isDynamicChannels;
+    legacy->mIsDynamicRate = parcelableSys.isDynamicRate;
+    return legacy;
+}
+
+ConversionResult<media::audio::common::AudioProfile>
+AudioProfile::toCommonParcelable(bool isInput) const {
+    media::audio::common::AudioProfile parcelable;
     parcelable.name = mName;
-    parcelable.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(mFormat));
+    parcelable.format = VALUE_OR_RETURN(
+            legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
+    // Note: legacy 'audio_profile' imposes a limit on the number of
+    // channel masks and sampling rates. That's why it's not used here
+    // and conversions are performed directly on the fields instead
+    // of using 'legacy2aidl_audio_profile_AudioProfile' from AidlConversion.
     parcelable.channelMasks = VALUE_OR_RETURN(
-            convertContainer<std::vector<int32_t>>(mChannelMasks,
-                                                   legacy2aidl_audio_channel_mask_t_int32_t));
-    parcelable.samplingRates = VALUE_OR_RETURN(
+            convertContainer<std::vector<AudioChannelLayout>>(
+            mChannelMasks,
+            [isInput](audio_channel_mask_t m) {
+                return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+            }));
+    parcelable.sampleRates = VALUE_OR_RETURN(
             convertContainer<std::vector<int32_t>>(mSamplingRates,
                                                    convertIntegral<int32_t, uint32_t>));
-    parcelable.isDynamicFormat = mIsDynamicFormat;
-    parcelable.isDynamicChannels = mIsDynamicChannels;
-    parcelable.isDynamicRate = mIsDynamicRate;
     parcelable.encapsulationType = VALUE_OR_RETURN(
             legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(mEncapsulationType));
     return parcelable;
 }
 
-status_t AudioProfile::readFromParcel(const Parcel *parcel) {
-    media::AudioProfile parcelable;
-    if (status_t status = parcelable.readFromParcel(parcel); status != OK) {
-        return status;
-    }
-    *this = *VALUE_OR_RETURN_STATUS(fromParcelable(parcelable));
-    return OK;
-}
-
-ConversionResult<sp<AudioProfile>>
-AudioProfile::fromParcelable(const media::AudioProfile& parcelable) {
+ConversionResult<sp<AudioProfile>> AudioProfile::fromCommonParcelable(
+        const media::audio::common::AudioProfile& aidl, bool isInput) {
     sp<AudioProfile> legacy = new AudioProfile();
-    legacy->mName = parcelable.name;
-    legacy->mFormat = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(parcelable.format));
+    legacy->mName = aidl.name;
+    legacy->mFormat = VALUE_OR_RETURN(
+            aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
     legacy->mChannelMasks = VALUE_OR_RETURN(
-            convertContainer<ChannelMaskSet>(parcelable.channelMasks,
-                                             aidl2legacy_int32_t_audio_channel_mask_t));
+            convertContainer<ChannelMaskSet>(aidl.channelMasks,
+            [isInput](const AudioChannelLayout& l) {
+                return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+            }));
     legacy->mSamplingRates = VALUE_OR_RETURN(
-            convertContainer<SampleRateSet>(parcelable.samplingRates,
+            convertContainer<SampleRateSet>(aidl.sampleRates,
                                             convertIntegral<uint32_t, int32_t>));
-    legacy->mIsDynamicFormat = parcelable.isDynamicFormat;
-    legacy->mIsDynamicChannels = parcelable.isDynamicChannels;
-    legacy->mIsDynamicRate = parcelable.isDynamicRate;
     legacy->mEncapsulationType = VALUE_OR_RETURN(
             aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
-                    parcelable.encapsulationType));
+                    aidl.encapsulationType));
     return legacy;
 }
 
 ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl) {
-    return AudioProfile::fromParcelable(aidl);
+aidl2legacy_AudioProfile(const AudioProfile::Aidl& aidl, bool isInput) {
+    return AudioProfile::fromParcelable(aidl, isInput);
 }
 
-ConversionResult<media::AudioProfile>
-legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy) {
-    return legacy->toParcelable();
+ConversionResult<AudioProfile::Aidl>
+legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput) {
+    return legacy->toParcelable(isInput);
+}
+
+ConversionResult<sp<AudioProfile>>
+aidl2legacy_AudioProfile_common(const media::audio::common::AudioProfile& aidl, bool isInput) {
+    return AudioProfile::fromCommonParcelable(aidl, isInput);
+}
+
+ConversionResult<media::audio::common::AudioProfile>
+legacy2aidl_AudioProfile_common(const sp<AudioProfile>& legacy, bool isInput) {
+    return legacy->toCommonParcelable(isInput);
 }
 
 ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
@@ -319,42 +338,16 @@
 
 void AudioProfileVector::dump(std::string *dst, int spaces) const
 {
-    dst->append(base::StringPrintf("%*s- Profiles:\n", spaces, ""));
+    dst->append(base::StringPrintf("%*s- Profiles (%zu):\n", spaces - 2, "", size()));
     for (size_t i = 0; i < size(); i++) {
-        dst->append(base::StringPrintf("%*sProfile %zu:", spaces + 4, "", i));
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+        dst->append(prefix);
         std::string profileStr;
-        at(i)->dump(&profileStr, spaces + 8);
+        at(i)->dump(&profileStr, prefix.size());
         dst->append(profileStr);
     }
 }
 
-status_t AudioProfileVector::writeToParcel(Parcel *parcel) const
-{
-    status_t status = NO_ERROR;
-    if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
-    for (const auto &audioProfile : *this) {
-        if ((status = parcel->writeParcelable(*audioProfile)) != NO_ERROR) {
-            break;
-        }
-    }
-    return status;
-}
-
-status_t AudioProfileVector::readFromParcel(const Parcel *parcel)
-{
-    status_t status = NO_ERROR;
-    this->clear();
-    if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
-    for (size_t i = 0; i < this->size(); ++i) {
-        this->at(i) = new AudioProfile(AUDIO_FORMAT_DEFAULT, AUDIO_CHANNEL_NONE, 0 /*sampleRate*/);
-        if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
-            this->clear();
-            break;
-        }
-    }
-    return status;
-}
-
 bool AudioProfileVector::equals(const AudioProfileVector& other) const
 {
     return std::equal(begin(), end(), other.begin(), other.end(),
@@ -364,13 +357,22 @@
 }
 
 ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl) {
-    return convertContainer<AudioProfileVector>(aidl, aidl2legacy_AudioProfile);
+aidl2legacy_AudioProfileVector(const AudioProfileVector::Aidl& aidl, bool isInput) {
+    return convertContainers<AudioProfileVector>(aidl.first, aidl.second,
+            [isInput](const media::audio::common::AudioProfile& p,
+                      const media::AudioProfileSys& ps) {
+                return aidl2legacy_AudioProfile(std::make_pair(p, ps), isInput);
+            });
 }
 
-ConversionResult<std::vector<media::AudioProfile>>
-legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy) {
-    return convertContainer<std::vector<media::AudioProfile>>(legacy, legacy2aidl_AudioProfile);
+ConversionResult<AudioProfileVector::Aidl>
+legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput) {
+    return convertContainerSplit<
+            std::vector<media::audio::common::AudioProfile>,
+            std::vector<media::AudioProfileSys>>(legacy,
+            [isInput](const sp<AudioProfile>& p) {
+                return legacy2aidl_AudioProfile(p, isInput);
+            });
 }
 
 AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 5cfea81..5ffbffc 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -30,16 +30,20 @@
 {
 }
 
-DeviceDescriptorBase::DeviceDescriptorBase(audio_devices_t type, const std::string& address) :
-        DeviceDescriptorBase(AudioDeviceTypeAddr(type, address))
+DeviceDescriptorBase::DeviceDescriptorBase(
+        audio_devices_t type, const std::string& address,
+        const FormatVector &encodedFormats) :
+        DeviceDescriptorBase(AudioDeviceTypeAddr(type, address), encodedFormats)
 {
 }
 
-DeviceDescriptorBase::DeviceDescriptorBase(const AudioDeviceTypeAddr &deviceTypeAddr) :
+DeviceDescriptorBase::DeviceDescriptorBase(
+        const AudioDeviceTypeAddr &deviceTypeAddr, const FormatVector &encodedFormats) :
         AudioPort("", AUDIO_PORT_TYPE_DEVICE,
                   audio_is_output_device(deviceTypeAddr.mType) ? AUDIO_PORT_ROLE_SINK :
                                          AUDIO_PORT_ROLE_SOURCE),
-        mDeviceTypeAddr(deviceTypeAddr)
+        mDeviceTypeAddr(deviceTypeAddr),
+        mEncodedFormats(encodedFormats)
 {
     if (mDeviceTypeAddr.address().empty() && audio_is_remote_submix_device(mDeviceTypeAddr.mType)) {
         mDeviceTypeAddr.setAddress("0");
@@ -106,32 +110,23 @@
     return NO_ERROR;
 }
 
-void DeviceDescriptorBase::dump(std::string *dst, int spaces, int index,
+void DeviceDescriptorBase::dump(std::string *dst, int spaces,
                                 const char* extraInfo, bool verbose) const
 {
-    dst->append(base::StringPrintf("%*sDevice %d:\n", spaces, "", index + 1));
     if (mId != 0) {
-        dst->append(base::StringPrintf("%*s- id: %2d\n", spaces, "", mId));
+        dst->append(base::StringPrintf("Port ID: %d; ", mId));
     }
-
     if (extraInfo != nullptr) {
-        dst->append(extraInfo);
+        dst->append(base::StringPrintf("%s; ", extraInfo));
     }
-
-    dst->append(base::StringPrintf("%*s- type: %-48s\n",
-            spaces, "", ::android::toString(mDeviceTypeAddr.mType).c_str()));
+    dst->append(base::StringPrintf("{%s}\n",
+                    mDeviceTypeAddr.toString(true /*includeSensitiveInfo*/).c_str()));
 
     dst->append(base::StringPrintf(
-            "%*s- supported encapsulation modes: %u\n", spaces, "", mEncapsulationModes));
-    dst->append(base::StringPrintf(
-            "%*s- supported encapsulation metadata types: %u\n",
-            spaces, "", mEncapsulationMetadataTypes));
+                    "%*sEncapsulation modes: %u, metadata types: %u\n", spaces, "",
+                    mEncapsulationModes, mEncapsulationMetadataTypes));
 
-    if (mDeviceTypeAddr.address().size() != 0) {
-        dst->append(base::StringPrintf(
-                "%*s- address: %-32s\n", spaces, "", mDeviceTypeAddr.getAddress()));
-    }
-    AudioPort::dump(dst, spaces, verbose);
+    AudioPort::dump(dst, spaces, nullptr, verbose);
 }
 
 std::string DeviceDescriptorBase::toString(bool includeSensitiveInfo) const
@@ -148,60 +143,83 @@
     AudioPort::log("  ");
 }
 
+template<typename T>
+bool checkEqual(const T& f1, const T& f2)
+{
+    std::set<typename T::value_type> s1(f1.begin(), f1.end());
+    std::set<typename T::value_type> s2(f2.begin(), f2.end());
+    return s1 == s2;
+}
+
 bool DeviceDescriptorBase::equals(const sp<DeviceDescriptorBase> &other) const
 {
     return other != nullptr &&
            static_cast<const AudioPort*>(this)->equals(other) &&
-           static_cast<const AudioPortConfig*>(this)->equals(other) &&
-           mDeviceTypeAddr.equals(other->mDeviceTypeAddr);
+           static_cast<const AudioPortConfig*>(this)->equals(other, useInputChannelMask()) &&
+           mDeviceTypeAddr.equals(other->mDeviceTypeAddr) &&
+           checkEqual(mEncodedFormats, other->mEncodedFormats);
 }
 
-
-status_t DeviceDescriptorBase::writeToParcel(Parcel *parcel) const
+bool DeviceDescriptorBase::supportsFormat(audio_format_t format)
 {
-    media::AudioPort parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
+    if (mEncodedFormats.empty()) {
+        return true;
+    }
+
+    for (const auto& devFormat : mEncodedFormats) {
+        if (devFormat == format) {
+            return true;
+        }
+    }
+    return false;
 }
 
 status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
     AudioPort::writeToParcelable(parcelable);
-    AudioPortConfig::writeToParcelable(&parcelable->activeConfig);
-    parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+    AudioPortConfig::writeToParcelable(&parcelable->sys.activeConfig.hal, useInputChannelMask());
+    parcelable->hal.id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+    parcelable->sys.activeConfig.hal.portId = parcelable->hal.id;
 
-    media::AudioPortDeviceExt ext;
-    ext.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
-    ext.encapsulationModes = VALUE_OR_RETURN_STATUS(
+    media::audio::common::AudioPortDeviceExt deviceExt;
+    deviceExt.device = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
+    deviceExt.encodedFormats = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<media::audio::common::AudioFormatDescription>>(
+                    mEncodedFormats, legacy2aidl_audio_format_t_AudioFormatDescription));
+    UNION_SET(parcelable->hal.ext, device, deviceExt);
+    media::AudioPortDeviceExtSys deviceSys;
+    deviceSys.encapsulationModes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMode_mask(mEncapsulationModes));
-    ext.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
+    deviceSys.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMetadataType_mask(mEncapsulationMetadataTypes));
-    UNION_SET(parcelable->ext, device, std::move(ext));
+    UNION_SET(parcelable->sys.ext, device, deviceSys);
     return OK;
 }
 
-status_t DeviceDescriptorBase::readFromParcel(const Parcel *parcel) {
-    media::AudioPort parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
 status_t DeviceDescriptorBase::readFromParcelable(const media::AudioPort& parcelable) {
-    if (parcelable.type != media::AudioPortType::DEVICE) {
+    if (parcelable.sys.type != media::AudioPortType::DEVICE) {
         return BAD_VALUE;
     }
     status_t status = AudioPort::readFromParcelable(parcelable)
-                      ?: AudioPortConfig::readFromParcelable(parcelable.activeConfig);
+            ?: AudioPortConfig::readFromParcelable(
+                    parcelable.sys.activeConfig.hal, useInputChannelMask());
     if (status != OK) {
         return status;
     }
 
-    media::AudioPortDeviceExt ext = VALUE_OR_RETURN_STATUS(UNION_GET(parcelable.ext, device));
+    media::audio::common::AudioPortDeviceExt deviceExt = VALUE_OR_RETURN_STATUS(
+            UNION_GET(parcelable.hal.ext, device));
     mDeviceTypeAddr = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioDeviceTypeAddress(ext.device));
+            aidl2legacy_AudioDeviceTypeAddress(deviceExt.device));
+    mEncodedFormats = VALUE_OR_RETURN_STATUS(
+            convertContainer<FormatVector>(deviceExt.encodedFormats,
+                    aidl2legacy_AudioFormatDescription_audio_format_t));
+    media::AudioPortDeviceExtSys deviceSys = VALUE_OR_RETURN_STATUS(
+            UNION_GET(parcelable.sys.ext, device));
     mEncapsulationModes = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioEncapsulationMode_mask(ext.encapsulationModes));
+            aidl2legacy_AudioEncapsulationMode_mask(deviceSys.encapsulationModes));
     mEncapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioEncapsulationMetadataType_mask(ext.encapsulationMetadataTypes));
+            aidl2legacy_AudioEncapsulationMetadataType_mask(deviceSys.encapsulationMetadataTypes));
     return OK;
 }
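DeviceDescriptorBase::supportsFormat above follows the convention that an empty encoded-format list means the device accepts any format. A self-contained sketch of that check (FormatVector and audio_format_t are replaced by stand-ins here) is:

    #include <algorithm>
    #include <vector>

    using Format = int;  // stand-in for audio_format_t

    // Empty list means "no restriction"; otherwise the format must be listed.
    bool supportsFormat(const std::vector<Format>& encodedFormats, Format format) {
        if (encodedFormats.empty()) {
            return true;
        }
        return std::find(encodedFormats.begin(), encodedFormats.end(), format)
                != encodedFormats.end();
    }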
 
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 204b365..a9c7824 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -40,6 +40,7 @@
 const DeviceTypeSet& getAudioDeviceOutAllUsbSet();
 const DeviceTypeSet& getAudioDeviceInAllSet();
 const DeviceTypeSet& getAudioDeviceInAllUsbSet();
+const DeviceTypeSet& getAudioDeviceOutAllBleSet();
 
 template<typename T>
 static std::vector<T> Intersection(const std::set<T>& a, const std::set<T>& b) {
@@ -110,25 +111,7 @@
     return types;
 }
 
-// FIXME: This is temporary helper function. Remove this when getting rid of all
-//  bit mask usages of audio device types.
-static inline DeviceTypeSet deviceTypesFromBitMask(audio_devices_t types) {
-    DeviceTypeSet deviceTypes;
-    if ((types & AUDIO_DEVICE_BIT_IN) == 0) {
-        for (auto deviceType : AUDIO_DEVICE_OUT_ALL_ARRAY) {
-            if ((types & deviceType) == deviceType) {
-                deviceTypes.insert(deviceType);
-            }
-        }
-    } else {
-        for (auto deviceType : AUDIO_DEVICE_IN_ALL_ARRAY) {
-            if ((types & deviceType) == deviceType) {
-                deviceTypes.insert(deviceType);
-            }
-        }
-    }
-    return deviceTypes;
-}
+std::string deviceTypesToString(const DeviceTypeSet& deviceTypes);
 
 bool deviceTypesToString(const DeviceTypeSet& deviceTypes, std::string &str);
 
@@ -137,7 +120,9 @@
 /**
  * Return human readable string for device types.
  */
-std::string toString(const DeviceTypeSet& deviceTypes);
+inline std::string toString(const DeviceTypeSet& deviceTypes) {
+    return deviceTypesToString(deviceTypes);
+}
 
 
 } // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
index 8edcc58..11aa222 100644
--- a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
+++ b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
@@ -19,7 +19,7 @@
 #include <string>
 #include <vector>
 
-#include <android/media/AudioDevice.h>
+#include <android/media/audio/common/AudioDevice.h>
 #include <binder/Parcelable.h>
 #include <binder/Parcel.h>
 #include <media/AudioContainers.h>
@@ -32,6 +32,7 @@
 class AudioDeviceTypeAddr : public Parcelable {
 public:
     AudioDeviceTypeAddr() = default;
+    AudioDeviceTypeAddr(const AudioDeviceTypeAddr&) = default;
 
     AudioDeviceTypeAddr(audio_devices_t type, const std::string& address);
 
@@ -88,8 +89,8 @@
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioDeviceTypeAddr>
-aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl);
-ConversionResult<media::AudioDevice>
+aidl2legacy_AudioDeviceTypeAddress(const media::audio::common::AudioDevice& aidl);
+ConversionResult<media::audio::common::AudioDevice>
 legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy);
 
 } // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
index a06b686..10088f2 100644
--- a/media/libaudiofoundation/include/media/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -16,23 +16,23 @@
 
 #pragma once
 
-#include <android/media/AudioGain.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android/media/AudioGainSys.h>
 #include <media/AidlConversion.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <system/audio.h>
-#include <string>
-#include <vector>
 
 namespace android {
 
-class AudioGain: public RefBase, public Parcelable
+class AudioGain: public RefBase
 {
 public:
-    AudioGain(int index, bool useInChannelMask);
-    virtual ~AudioGain() {}
+    AudioGain(int index, bool isInput);
+    virtual ~AudioGain() = default;
 
     void setMode(audio_gain_mode_t mode) { mGain.mode = mode; }
     const audio_gain_mode_t &getMode() const { return mGain.mode; }
@@ -71,26 +71,24 @@
 
     bool equals(const sp<AudioGain>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-
-    status_t writeToParcelable(media::AudioGain* parcelable) const;
-    status_t readFromParcelable(const media::AudioGain& parcelable);
+    using Aidl = std::pair<media::audio::common::AudioGain, media::AudioGainSys>;
+    ConversionResult<Aidl> toParcelable() const;
+    static ConversionResult<sp<AudioGain>> fromParcelable(const Aidl& aidl);
 
 private:
     int               mIndex;
-    struct audio_gain mGain;
-    bool              mUseInChannelMask;
+    bool              mIsInput;
+    struct audio_gain mGain = {};
     bool              mUseForVolume = false;
 };
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<sp<AudioGain>>
-aidl2legacy_AudioGain(const media::AudioGain& aidl);
-ConversionResult<media::AudioGain>
+aidl2legacy_AudioGain(const AudioGain::Aidl& aidl);
+ConversionResult<AudioGain::Aidl>
 legacy2aidl_AudioGain(const sp<AudioGain>& legacy);
 
-class AudioGains : public std::vector<sp<AudioGain> >, public Parcelable
+class AudioGains : public std::vector<sp<AudioGain>>
 {
 public:
     bool canUseForVolume() const
@@ -103,7 +101,7 @@
         return false;
     }
 
-    int32_t add(const sp<AudioGain> gain)
+    int32_t add(const sp<AudioGain>& gain)
     {
         push_back(gain);
         return 0;
@@ -111,14 +109,15 @@
 
     bool equals(const AudioGains& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
+    using Aidl = std::pair<
+            std::vector<media::audio::common::AudioGain>,
+            std::vector<media::AudioGainSys>>;
 };
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioGains>
-aidl2legacy_AudioGains(const std::vector<media::AudioGain>& aidl);
-ConversionResult<std::vector<media::AudioGain>>
+aidl2legacy_AudioGains(const AudioGains::Aidl& aidl);
+ConversionResult<AudioGains::Aidl>
 legacy2aidl_AudioGains(const AudioGains& legacy);
 
 } // namespace android
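The AudioGain change above drops Parcelable inheritance in favor of explicit conversion to a pair of AIDL parcelables (media::audio::common::AudioGain plus the framework-side media::AudioGainSys). A minimal round-trip sketch, assuming only the declarations in this header and mirroring the test added later in this patch:

    // Sketch only: build a gain, convert it to the split AIDL pair, and convert back.
    sp<AudioGain> gain = new AudioGain(0 /*index*/, false /*isInput*/);
    gain->setMode(AUDIO_GAIN_MODE_JOINT);
    ConversionResult<AudioGain::Aidl> aidl = legacy2aidl_AudioGain(gain);
    if (aidl.ok()) {
        // aidl.value().first is the common AudioGain part,
        // aidl.value().second is the AudioGainSys (system-side) part.
        ConversionResult<sp<AudioGain>> roundTrip = aidl2legacy_AudioGain(aidl.value());
        // On success, roundTrip.value()->equals(gain) is expected to hold.
    }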
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
index 1cee1c9..d6a098f 100644
--- a/media/libaudiofoundation/include/media/AudioPort.h
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -21,7 +21,7 @@
 
 #include <android/media/AudioPort.h>
 #include <android/media/AudioPortConfig.h>
-#include <android/media/ExtraAudioDescriptor.h>
+#include <android/media/audio/common/ExtraAudioDescriptor.h>
 #include <binder/Parcel.h>
 #include <binder/Parcelable.h>
 #include <media/AudioGain.h>
@@ -33,7 +33,7 @@
 
 namespace android {
 
-class AudioPort : public virtual RefBase, public virtual Parcelable
+class AudioPort : public virtual RefBase
 {
 public:
     AudioPort(const std::string& name, audio_port_type_t type,  audio_port_role_t role) :
@@ -47,6 +47,9 @@
     audio_port_type_t getType() const { return mType; }
     audio_port_role_t getRole() const { return mRole; }
 
+    virtual void setFlags(uint32_t flags);
+    uint32_t getFlags() const { return useInputChannelMask() ? mFlags.input : mFlags.output; }
+
     void setGains(const AudioGains &gains) { mGains = gains; }
     const AudioGains &getGains() const { return mGains; }
 
@@ -69,10 +72,10 @@
     AudioProfileVector &getAudioProfiles() { return mProfiles; }
 
     void setExtraAudioDescriptors(
-            const std::vector<media::ExtraAudioDescriptor> extraAudioDescriptors) {
+            const std::vector<media::audio::common::ExtraAudioDescriptor> extraAudioDescriptors) {
         mExtraAudioDescriptors = extraAudioDescriptors;
     }
-    std::vector<media::ExtraAudioDescriptor> &getExtraAudioDescriptors() {
+    std::vector<media::audio::common::ExtraAudioDescriptor> &getExtraAudioDescriptors() {
         return mExtraAudioDescriptors;
     }
 
@@ -93,19 +96,47 @@
                 ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SINK));
     }
 
-    void dump(std::string *dst, int spaces, bool verbose = true) const;
+    bool isDirectOutput() const
+    {
+        return (mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+                ((mFlags.output & AUDIO_OUTPUT_FLAG_DIRECT) != 0);
+    }
+
+    bool isMmap() const
+    {
+        return (mType == AUDIO_PORT_TYPE_MIX)
+                && (((mRole == AUDIO_PORT_ROLE_SOURCE) &&
+                        ((mFlags.output & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0))
+                    || ((mRole == AUDIO_PORT_ROLE_SINK) &&
+                        ((mFlags.input & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0)));
+    }
+
+    void dump(std::string *dst, int spaces,
+              const char* extraInfo = nullptr, bool verbose = true) const;
 
     void log(const char* indent) const;
 
     bool equals(const sp<AudioPort>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-
     status_t writeToParcelable(media::AudioPort* parcelable) const;
     status_t readFromParcelable(const media::AudioPort& parcelable);
 
     AudioGains mGains; // gain controllers
+    // Maximum number of input or output streams that can be simultaneously
+    // opened for this profile. By convention 0 means no limit. To respect
+    // legacy behavior, initialized to 1 for output profiles and 0 for input
+    // profiles.
+    // FIXME: IOProfile code used the same value for both cases.
+    uint32_t maxOpenCount = 1;
+    // Maximum number of input or output streams that can be simultaneously
+    // active for this profile. By convention 0 means no limit. To respect
+    // legacy behavior, initialized to 0 for output profiles and 1 for input
+    // profiles.
+    // FIXME: IOProfile code used the same value for both cases.
+    uint32_t maxActiveCount = 1;
+    // Mute duration while changing device on this output profile.
+    uint32_t recommendedMuteDurationMs = 0;
+
 protected:
     std::string  mName;
     audio_port_type_t mType;
@@ -114,7 +145,8 @@
 
     // Audio capabilities that are defined by hardware descriptors when the format is unrecognized
     // by the platform, e.g. short audio descriptor in EDID for HDMI.
-    std::vector<media::ExtraAudioDescriptor> mExtraAudioDescriptors;
+    std::vector<media::audio::common::ExtraAudioDescriptor> mExtraAudioDescriptors;
+    union audio_io_flags mFlags = { .output = AUDIO_OUTPUT_FLAG_NONE };
 private:
     template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
                                         || std::is_same<T, struct audio_port_v7>::value, int> = 0>
@@ -130,7 +162,7 @@
 };
 
 
-class AudioPortConfig : public virtual RefBase, public virtual Parcelable
+class AudioPortConfig : public virtual RefBase
 {
 public:
     virtual ~AudioPortConfig() = default;
@@ -147,15 +179,16 @@
     audio_format_t getFormat() const { return mFormat; }
     audio_channel_mask_t getChannelMask() const { return mChannelMask; }
     audio_port_handle_t getId() const { return mId; }
+    audio_io_flags getFlags() const { return mFlags; }
 
     bool hasGainController(bool canUseForVolume = false) const;
 
-    bool equals(const sp<AudioPortConfig>& other) const;
+    bool equals(const sp<AudioPortConfig>& other, bool isInput) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-    status_t writeToParcelable(media::AudioPortConfig* parcelable) const;
-    status_t readFromParcelable(const media::AudioPortConfig& parcelable);
+    status_t writeToParcelable(
+            media::audio::common::AudioPortConfig* parcelable, bool isInput) const;
+    status_t readFromParcelable(
+            const media::audio::common::AudioPortConfig& parcelable, bool isInput);
 
 protected:
     unsigned int mSamplingRate = 0u;
@@ -163,6 +196,7 @@
     audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
     audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
     struct audio_gain_config mGain = { .index = -1 };
+    union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
 };
 
 } // namespace android
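The new audio_io_flags union and the isDirectOutput()/isMmap() helpers above let callers reason about a port's direction-specific flags without touching the union directly. A hedged usage sketch; the port name and flag value are illustrative, and it assumes setFlags() stores output flags for a mix source, as getFlags() above implies:

    // Sketch only: tag a mix/source port as direct output and query the helpers.
    sp<AudioPort> port = new AudioPort("illustrative output", AUDIO_PORT_TYPE_MIX,
                                       AUDIO_PORT_ROLE_SOURCE);
    port->setFlags(AUDIO_OUTPUT_FLAG_DIRECT);  // mix/source => treated as output flags
    if (port->isDirectOutput()) {
        // Bypasses the software mixer; an MMAP_NOIRQ port would be caught by isMmap().
    }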
diff --git a/media/libaudiofoundation/include/media/AudioProfile.h b/media/libaudiofoundation/include/media/AudioProfile.h
index 6a36e78..c3a0fb2 100644
--- a/media/libaudiofoundation/include/media/AudioProfile.h
+++ b/media/libaudiofoundation/include/media/AudioProfile.h
@@ -17,11 +17,10 @@
 #pragma once
 
 #include <string>
+#include <utility>
 #include <vector>
 
-#include <android/media/AudioProfile.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <android/media/AudioProfileSys.h>
 #include <media/AidlConversion.h>
 #include <media/AudioContainers.h>
 #include <system/audio.h>
@@ -29,7 +28,7 @@
 
 namespace android {
 
-class AudioProfile final : public RefBase, public Parcelable
+class AudioProfile final : public RefBase
 {
 public:
     static sp<AudioProfile> createFullDynamic(audio_format_t dynamicFormat = AUDIO_FORMAT_DEFAULT);
@@ -70,7 +69,7 @@
     void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
     bool isDynamicFormat() const { return mIsDynamicFormat; }
 
-    bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
+    bool isDynamic() const { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
 
     audio_encapsulation_type_t getEncapsulationType() const { return mEncapsulationType; }
     void setEncapsulationType(audio_encapsulation_type_t encapsulationType) {
@@ -81,11 +80,15 @@
 
     bool equals(const sp<AudioProfile>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
+    using Aidl = std::pair<media::audio::common::AudioProfile, media::AudioProfileSys>;
+    ConversionResult<Aidl> toParcelable(bool isInput) const;
+    static ConversionResult<sp<AudioProfile>> fromParcelable(
+            const Aidl& aidl, bool isInput);
 
-    ConversionResult<media::AudioProfile> toParcelable() const;
-    static ConversionResult<sp<AudioProfile>> fromParcelable(const media::AudioProfile& parcelable);
+    ConversionResult<media::audio::common::AudioProfile>
+            toCommonParcelable(bool isInput) const;
+    static ConversionResult<sp<AudioProfile>> fromCommonParcelable(
+        const media::audio::common::AudioProfile& aidl, bool isInput);
 
 private:
 
@@ -106,11 +109,16 @@
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl);
-ConversionResult<media::AudioProfile>
-legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy);
+aidl2legacy_AudioProfile(const AudioProfile::Aidl& aidl, bool isInput);
+ConversionResult<AudioProfile::Aidl>
+legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput);
 
-class AudioProfileVector : public std::vector<sp<AudioProfile>>, public Parcelable
+ConversionResult<sp<AudioProfile>>
+aidl2legacy_AudioProfile_common(const media::audio::common::AudioProfile& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioProfile>
+legacy2aidl_AudioProfile_common(const sp<AudioProfile>& legacy, bool isInput);
+
+class AudioProfileVector : public std::vector<sp<AudioProfile>>
 {
 public:
     virtual ~AudioProfileVector() = default;
@@ -137,17 +145,18 @@
 
     bool equals(const AudioProfileVector& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
+    using Aidl = std::pair<
+            std::vector<media::audio::common::AudioProfile>,
+            std::vector<media::AudioProfileSys>>;
 };
 
 bool operator == (const AudioProfile &left, const AudioProfile &right);
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl);
-ConversionResult<std::vector<media::AudioProfile>>
-legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy);
+aidl2legacy_AudioProfileVector(const AudioProfileVector::Aidl& aidl, bool isInput);
+ConversionResult<AudioProfileVector::Aidl>
+legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput);
 
 AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
                                           const AudioProfileVector& profiles2);
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
index 140ce36..1f0c768 100644
--- a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -36,16 +36,21 @@
 public:
      // Note that empty name refers by convention to a generic device.
     explicit DeviceDescriptorBase(audio_devices_t type);
-    DeviceDescriptorBase(audio_devices_t type, const std::string& address);
-    explicit DeviceDescriptorBase(const AudioDeviceTypeAddr& deviceTypeAddr);
+    DeviceDescriptorBase(audio_devices_t type, const std::string& address,
+            const FormatVector &encodedFormats = FormatVector{});
+    DeviceDescriptorBase(const AudioDeviceTypeAddr& deviceTypeAddr,
+            const FormatVector &encodedFormats = FormatVector{});
 
-    virtual ~DeviceDescriptorBase() {}
+    virtual ~DeviceDescriptorBase() = default;
 
     audio_devices_t type() const { return mDeviceTypeAddr.mType; }
     const std::string& address() const { return mDeviceTypeAddr.address(); }
     void setAddress(const std::string &address);
     const AudioDeviceTypeAddr& getDeviceTypeAddr() const { return mDeviceTypeAddr; }
 
+    const FormatVector& encodedFormats() const { return mEncodedFormats; }
+    bool supportsFormat(audio_format_t format);
+
     // AudioPortConfig
     virtual sp<AudioPort> getAudioPort() const {
         return static_cast<AudioPort*>(const_cast<DeviceDescriptorBase*>(this));
@@ -60,7 +65,7 @@
     status_t setEncapsulationModes(uint32_t encapsulationModes);
     status_t setEncapsulationMetadataTypes(uint32_t encapsulationMetadataTypes);
 
-    void dump(std::string *dst, int spaces, int index,
+    void dump(std::string *dst, int spaces,
               const char* extraInfo = nullptr, bool verbose = true) const;
     void log() const;
 
@@ -74,14 +79,12 @@
 
     bool equals(const sp<DeviceDescriptorBase>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-
     status_t writeToParcelable(media::AudioPort* parcelable) const;
     status_t readFromParcelable(const media::AudioPort& parcelable);
 
 protected:
     AudioDeviceTypeAddr mDeviceTypeAddr;
+    FormatVector        mEncodedFormats;
     uint32_t mEncapsulationModes = 0;
     uint32_t mEncapsulationMetadataTypes = 0;
 private:
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
index bb9a5f2..3f1fbea 100644
--- a/media/libaudiofoundation/tests/Android.bp
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -11,12 +11,20 @@
     name: "audiofoundation_parcelable_test",
 
     shared_libs: [
-        "libaudiofoundation",
+        "libbase",
         "libbinder",
         "liblog",
         "libutils",
     ],
 
+    static_libs: [
+        "android.media.audio.common.types-V1-cpp",
+        "audioclient-types-aidl-cpp",
+        "libaudioclient_aidl_conversion",
+        "libaudiofoundation",
+        "libstagefright_foundation",
+    ],
+
     header_libs: [
         "libaudio_system_headers",
     ],
diff --git a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
index 068b5d8..50d8dc8 100644
--- a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
+++ b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
@@ -53,7 +53,7 @@
 
 AudioGains getAudioGainsForTest() {
     AudioGains audioGains;
-    sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*useInChannelMask*/);
+    sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*isInput*/);
     audioGain->setMode(AUDIO_GAIN_MODE_JOINT);
     audioGain->setChannelMask(AUDIO_CHANNEL_OUT_STEREO);
     audioGain->setMinValueInMb(-3200);
@@ -75,57 +75,74 @@
     return audioProfiles;
 }
 
-TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
-    Parcel data;
-    AudioGains audioGains = getAudioGainsForTest();
-
-    ASSERT_EQ(data.writeParcelable(audioGains), NO_ERROR);
-    data.setDataPosition(0);
-    AudioGains audioGainsFromParcel;
-    ASSERT_EQ(data.readParcelable(&audioGainsFromParcel), NO_ERROR);
-    ASSERT_TRUE(audioGainsFromParcel.equals(audioGains));
+TEST(AudioFoundationParcelableTest, ParcelingAudioProfile) {
+    sp<AudioProfile> profile = getAudioProfileVectorForTest()[0];
+    auto conv = legacy2aidl_AudioProfile(profile, false /*isInput*/);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioProfile(conv.value(), false /*isInput*/);
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(profile->equals(convBack.value()));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingAudioProfileVector) {
-    Parcel data;
-    AudioProfileVector audioProfiles = getAudioProfileVectorForTest();
+    AudioProfileVector profiles = getAudioProfileVectorForTest();
+    auto conv = legacy2aidl_AudioProfileVector(profiles, false /*isInput*/);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioProfileVector(conv.value(), false /*isInput*/);
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(profiles.equals(convBack.value()));
+}
 
-    ASSERT_EQ(data.writeParcelable(audioProfiles), NO_ERROR);
-    data.setDataPosition(0);
-    AudioProfileVector audioProfilesFromParcel;
-    ASSERT_EQ(data.readParcelable(&audioProfilesFromParcel), NO_ERROR);
-    ASSERT_TRUE(audioProfilesFromParcel.equals(audioProfiles));
+TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
+    sp<AudioGain> audioGain = getAudioGainsForTest()[0];
+    auto conv = legacy2aidl_AudioGain(audioGain);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioGain(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(audioGain->equals(convBack.value()));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioGains) {
+    AudioGains audioGains = getAudioGainsForTest();
+    auto conv = legacy2aidl_AudioGains(audioGains);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioGains(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(audioGains.equals(convBack.value()));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingAudioPort) {
-    Parcel data;
     sp<AudioPort> audioPort = new AudioPort(
             "AudioPortName", AUDIO_PORT_TYPE_DEVICE, AUDIO_PORT_ROLE_SINK);
     audioPort->setGains(getAudioGainsForTest());
     audioPort->setAudioProfiles(getAudioProfileVectorForTest());
 
-    ASSERT_EQ(data.writeParcelable(*audioPort), NO_ERROR);
-    data.setDataPosition(0);
+    media::AudioPort parcelable;
+    ASSERT_EQ(NO_ERROR, audioPort->writeToParcelable(&parcelable));
     sp<AudioPort> audioPortFromParcel = new AudioPort(
             "", AUDIO_PORT_TYPE_NONE, AUDIO_PORT_ROLE_NONE);
-    ASSERT_EQ(data.readParcelable(audioPortFromParcel.get()), NO_ERROR);
+    ASSERT_EQ(NO_ERROR, audioPortFromParcel->readFromParcelable(parcelable));
     ASSERT_TRUE(audioPortFromParcel->equals(audioPort));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingAudioPortConfig) {
+    const bool isInput = false;
     Parcel data;
     sp<AudioPortConfig> audioPortConfig = new AudioPortConfigTestStub();
     audioPortConfig->applyAudioPortConfig(&TEST_AUDIO_PORT_CONFIG);
-
-    ASSERT_EQ(data.writeParcelable(*audioPortConfig), NO_ERROR);
+    media::audio::common::AudioPortConfig parcelable{};
+    ASSERT_EQ(NO_ERROR, audioPortConfig->writeToParcelable(&parcelable, isInput));
+    ASSERT_EQ(NO_ERROR, data.writeParcelable(parcelable));
     data.setDataPosition(0);
+    media::audio::common::AudioPortConfig parcelableFromParcel{};
+    ASSERT_EQ(NO_ERROR, data.readParcelable(&parcelableFromParcel));
     sp<AudioPortConfig> audioPortConfigFromParcel = new AudioPortConfigTestStub();
-    ASSERT_EQ(data.readParcelable(audioPortConfigFromParcel.get()), NO_ERROR);
-    ASSERT_TRUE(audioPortConfigFromParcel->equals(audioPortConfig));
+    ASSERT_EQ(NO_ERROR, audioPortConfigFromParcel->readFromParcelable(
+                    parcelableFromParcel, isInput));
+    ASSERT_TRUE(audioPortConfigFromParcel->equals(audioPortConfig, isInput));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingDeviceDescriptorBase) {
-    Parcel data;
     sp<DeviceDescriptorBase> desc = new DeviceDescriptorBase(AUDIO_DEVICE_OUT_SPEAKER);
     desc->setGains(getAudioGainsForTest());
     desc->setAudioProfiles(getAudioProfileVectorForTest());
@@ -135,10 +152,10 @@
     ASSERT_EQ(desc->setEncapsulationMetadataTypes(
             AUDIO_ENCAPSULATION_METADATA_TYPE_ALL_POSITION_BITS), NO_ERROR);
 
-    ASSERT_EQ(data.writeParcelable(*desc), NO_ERROR);
-    data.setDataPosition(0);
+    media::AudioPort parcelable;
+    ASSERT_EQ(NO_ERROR, desc->writeToParcelable(&parcelable));
     sp<DeviceDescriptorBase> descFromParcel = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
-    ASSERT_EQ(data.readParcelable(descFromParcel.get()), NO_ERROR);
+    ASSERT_EQ(NO_ERROR, descFromParcel->readFromParcelable(parcelable));
     ASSERT_TRUE(descFromParcel->equals(desc));
 }
 
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index bd24c84..5f63e8d 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -27,9 +27,11 @@
         "libaudiohal@5.0",
         "libaudiohal@6.0",
         "libaudiohal@7.0",
+        "libaudiohal@7.1",
     ],
 
     shared_libs: [
+        "audioclient-types-aidl-cpp",
         "libdl",
         "libhidlbase",
         "liblog",
diff --git a/media/libaudiohal/FactoryHalHidl.cpp b/media/libaudiohal/FactoryHalHidl.cpp
index e420d07..804edcc 100644
--- a/media/libaudiohal/FactoryHalHidl.cpp
+++ b/media/libaudiohal/FactoryHalHidl.cpp
@@ -31,6 +31,7 @@
 /** Supported HAL versions, in order of preference.
  */
 const char* sAudioHALVersions[] = {
+    "7.1",
     "7.0",
     "6.0",
     "5.0",
@@ -94,7 +95,7 @@
 }  // namespace
 
 void* createPreferredImpl(const std::string& package, const std::string& interface) {
-    for (auto version = detail::sAudioHALVersions; version != nullptr; ++version) {
+    for (auto version = detail::sAudioHALVersions; *version != nullptr; ++version) {
         void* rawInterface = nullptr;
         if (hasHalService(package, *version, interface)
                 && createHalService(*version, interface, &rawInterface)) {
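The single-character fix above matters: sAudioHALVersions is a nullptr-terminated array of version strings, so the loop has to dereference the cursor to find the sentinel; comparing the cursor itself against nullptr would never stop at the terminating entry and would walk past the end of the table. A minimal sketch of the corrected idiom, using a hypothetical table:

    // Sketch only: walk a nullptr-terminated table the way the fixed loop does.
    const char* kVersions[] = {"7.1", "7.0", nullptr};  // hypothetical contents
    for (const char** version = kVersions; *version != nullptr; ++version) {
        // *version is a valid C string here; the nullptr entry stops the loop
        // before it reads past the end of the array.
    }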
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index a2c6e8a..ed7e50b 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -7,22 +7,33 @@
     default_applicable_licenses: ["frameworks_av_license"],
 }
 
-cc_defaults {
-    name: "libaudiohal_default",
-
+filegroup {
+    name: "audio_core_hal_client_sources",
     srcs: [
         "DeviceHalLocal.cpp",
         "DevicesFactoryHalHybrid.cpp",
         "DevicesFactoryHalLocal.cpp",
-        "StreamHalLocal.cpp",
-
-        "ConversionHelperHidl.cpp",
         "DeviceHalHidl.cpp",
         "DevicesFactoryHalHidl.cpp",
+        "StreamHalLocal.cpp",
+        "StreamHalHidl.cpp",
+    ],
+}
+
+filegroup {
+    name: "audio_effect_hal_client_sources",
+    srcs: [
         "EffectBufferHalHidl.cpp",
         "EffectHalHidl.cpp",
         "EffectsFactoryHalHidl.cpp",
-        "StreamHalHidl.cpp",
+    ],
+}
+
+cc_defaults {
+    name: "libaudiohal_default",
+
+    srcs: [
+        "ConversionHelperHidl.cpp",
     ],
 
     cflags: [
@@ -50,6 +61,7 @@
         "libmedia_helper",
         "libmediautils",
         "libutils",
+        "audioclient-types-aidl-cpp",
     ],
     header_libs: [
         "android.hardware.audio.common.util@all-versions",
@@ -65,6 +77,10 @@
 cc_library_shared {
     name: "libaudiohal@4.0",
     defaults: ["libaudiohal_default"],
+    srcs: [
+        ":audio_core_hal_client_sources",
+        ":audio_effect_hal_client_sources",
+    ],
     shared_libs: [
         "android.hardware.audio.common@4.0",
         "android.hardware.audio.common@4.0-util",
@@ -83,6 +99,10 @@
 cc_library_shared {
     name: "libaudiohal@5.0",
     defaults: ["libaudiohal_default"],
+    srcs: [
+        ":audio_core_hal_client_sources",
+        ":audio_effect_hal_client_sources",
+    ],
     shared_libs: [
         "android.hardware.audio.common@5.0",
         "android.hardware.audio.common@5.0-util",
@@ -101,6 +121,10 @@
 cc_library_shared {
     name: "libaudiohal@6.0",
     defaults: ["libaudiohal_default"],
+    srcs: [
+        ":audio_core_hal_client_sources",
+        ":audio_effect_hal_client_sources",
+    ],
     shared_libs: [
         "android.hardware.audio.common@6.0",
         "android.hardware.audio.common@6.0-util",
@@ -119,6 +143,10 @@
 cc_library_shared {
     name: "libaudiohal@7.0",
     defaults: ["libaudiohal_default"],
+    srcs: [
+        ":audio_core_hal_client_sources",
+        ":audio_effect_hal_client_sources",
+    ],
     shared_libs: [
         "android.hardware.audio.common@7.0",
         "android.hardware.audio.common@7.0-util",
@@ -133,3 +161,25 @@
         "-include common/all-versions/VersionMacro.h",
     ]
 }
+
+cc_library_shared {
+    name: "libaudiohal@7.1",
+    defaults: ["libaudiohal_default"],
+    srcs: [
+        ":audio_core_hal_client_sources",
+    ],
+    shared_libs: [
+        "android.hardware.audio.common@7.0",
+        "android.hardware.audio.common@7.0-util",
+        "android.hardware.audio@7.0",
+        "android.hardware.audio@7.1",
+        "android.hardware.audio@7.0-util",
+    ],
+    cflags: [
+        "-DMAJOR_VERSION=7",
+        "-DMINOR_VERSION=1",
+        "-DCOMMON_TYPES_MINOR_VERSION=0",
+        "-DCORE_TYPES_MINOR_VERSION=0",
+        "-include common/all-versions/VersionMacro.h",
+    ]
+}
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
index 32eaa31..1d34814 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -24,10 +24,9 @@
 #include "ConversionHelperHidl.h"
 
 namespace android {
-namespace CPP_VERSION {
 
-using namespace ::android::hardware::audio::common::CPP_VERSION;
-using namespace ::android::hardware::audio::CPP_VERSION;
+using namespace ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION;
+using namespace ::android::hardware::audio::CORE_TYPES_CPP_VERSION;
 
 // static
 status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
@@ -105,6 +104,15 @@
 }
 
 // static
+void ConversionHelperHidl::argsFromHal(
+        const Vector<String16>& args, hidl_vec<hidl_string> *hidlArgs) {
+    hidlArgs->resize(args.size());
+    for (size_t i = 0; i < args.size(); ++i) {
+        (*hidlArgs)[i] = String8(args[i]).c_str();
+    }
+}
+
+// static
 status_t ConversionHelperHidl::analyzeResult(const Result& result) {
     switch (result) {
         case Result::OK: return OK;
@@ -120,5 +128,4 @@
     ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
 }
 
-}  // namespace CPP_VERSION
 }  // namespace android
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
index 59122c7..9368551 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -17,26 +17,28 @@
 #ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
 #define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
 
-#include PATH(android/hardware/audio/FILE_VERSION/types.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/types.h)
 #include <hidl/HidlSupport.h>
 #include <system/audio.h>
 #include <utils/String8.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
 
-using ::android::hardware::audio::CPP_VERSION::ParameterValue;
-using CoreResult = ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::ParameterValue;
+using CoreResult = ::android::hardware::audio::CORE_TYPES_CPP_VERSION::Result;
 
 using ::android::hardware::Return;
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 
 namespace android {
-namespace CPP_VERSION {
 
 class ConversionHelperHidl {
   protected:
     static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
     static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
     static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+    static void argsFromHal(const Vector<String16>& args, hidl_vec<hidl_string> *hidlArgs);
 
     ConversionHelperHidl(const char* className);
 
@@ -82,7 +84,6 @@
     void emitError(const char* funcName, const char* description);
 };
 
-}  // namespace CPP_VERSION
 }  // namespace android
 
 #endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 02d66ae..8b09d76 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -20,6 +20,7 @@
 //#define LOG_NDEBUG 0
 
 #include <cutils/native_handle.h>
+#include <cutils/properties.h>
 #include <hwbinder/IPCThreadState.h>
 #include <media/AudioContainers.h>
 #include <utils/Log.h>
@@ -30,27 +31,40 @@
 #include <util/CoreUtils.h>
 
 #include "DeviceHalHidl.h"
-#include "EffectHalHidl.h"
 #include "ParameterUtils.h"
 #include "StreamHalHidl.h"
 
-using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::implementation::HidlUtils;
 using ::android::hardware::audio::common::utils::EnumBitfield;
-using ::android::hardware::audio::CPP_VERSION::implementation::CoreUtils;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::implementation::CoreUtils;
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 
 namespace android {
-namespace CPP_VERSION {
 
-using namespace ::android::hardware::audio::common::CPP_VERSION;
-using namespace ::android::hardware::audio::CPP_VERSION;
+using namespace ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION;
+using namespace ::android::hardware::audio::CORE_TYPES_CPP_VERSION;
 
-using EffectHalHidl = ::android::effect::CPP_VERSION::EffectHalHidl;
+DeviceHalHidl::DeviceHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IDevice>& device)
+        : ConversionHelperHidl("Device"), mDevice(device) {
+}
 
-DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
-        : ConversionHelperHidl("Device"), mDevice(device),
-          mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
+DeviceHalHidl::DeviceHalHidl(
+        const sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice>& device)
+        : ConversionHelperHidl("Device"),
+#if MAJOR_VERSION <= 6 || (MAJOR_VERSION == 7 && MINOR_VERSION == 0)
+          mDevice(device),
+#endif
+          mPrimaryDevice(device) {
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+    auto getDeviceRet = mPrimaryDevice->getDevice();
+    if (getDeviceRet.isOk()) {
+        mDevice = getDeviceRet;
+    } else {
+        ALOGE("Call to IPrimaryDevice.getDevice has failed: %s",
+                getDeviceRet.description().c_str());
+    }
+#endif
 }
 
 DeviceHalHidl::~DeviceHalHidl() {
@@ -204,17 +218,32 @@
             status != OK) {
         return status;
     }
+
+#if !(MAJOR_VERSION == 7 && MINOR_VERSION == 1)
+    //TODO: b/193496180 use spatializer flag at audio HAL when available
+    if ((flags & AUDIO_OUTPUT_FLAG_SPATIALIZER) != 0) {
+        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_SPATIALIZER);
+        flags = (audio_output_flags_t)
+                (flags | AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
+    }
+#endif
+
     CoreUtils::AudioOutputFlags hidlFlags;
     if (status_t status = CoreUtils::audioOutputFlagsFromHal(flags, &hidlFlags); status != OK) {
         return status;
     }
     Result retval = Result::NOT_INITIALIZED;
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+    Return<void> ret = mDevice->openOutputStream_7_1(
+#else
     Return<void> ret = mDevice->openOutputStream(
+#endif
             handle, hidlDevice, hidlConfig, hidlFlags,
 #if MAJOR_VERSION >= 4
             {} /* metadata */,
 #endif
-            [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
+            [&](Result r, const sp<::android::hardware::audio::CPP_VERSION::IStreamOut>& result,
+                    const AudioConfig& suggestedConfig) {
                 retval = r;
                 if (retval == Result::OK) {
                     *outStream = new StreamOutHalHidl(result);
@@ -282,9 +311,14 @@
         sinkMetadata.tracks[0].destination.device(std::move(hidlOutputDevice));
     }
 #endif
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+    Return<void> ret = mDevice->openInputStream_7_1(
+#else
     Return<void> ret = mDevice->openInputStream(
+#endif
             handle, hidlDevice, hidlConfig, hidlFlags, sinkMetadata,
-            [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
+            [&](Result r, const sp<::android::hardware::audio::CPP_VERSION::IStreamIn>& result,
+                    const AudioConfig& suggestedConfig) {
                 retval = r;
                 if (retval == Result::OK) {
                     *inStream = new StreamInHalHidl(result);
@@ -432,8 +466,7 @@
         audio_port_handle_t device, sp<EffectHalInterface> effect) {
     if (mDevice == 0) return NO_INIT;
     return processReturn("addDeviceEffect", mDevice->addDeviceEffect(
-            static_cast<AudioPortHandle>(device),
-            static_cast<EffectHalHidl*>(effect.get())->effectId()));
+            static_cast<AudioPortHandle>(device), effect->effectId()));
 }
 #else
 status_t DeviceHalHidl::addDeviceEffect(
@@ -447,8 +480,7 @@
         audio_port_handle_t device, sp<EffectHalInterface> effect) {
     if (mDevice == 0) return NO_INIT;
     return processReturn("removeDeviceEffect", mDevice->removeDeviceEffect(
-            static_cast<AudioPortHandle>(device),
-            static_cast<EffectHalHidl*>(effect.get())->effectId()));
+            static_cast<AudioPortHandle>(device), effect->effectId()));
 }
 #else
 status_t DeviceHalHidl::removeDeviceEffect(
@@ -457,11 +489,13 @@
 }
 #endif
 
-status_t DeviceHalHidl::dump(int fd) {
+status_t DeviceHalHidl::dump(int fd, const Vector<String16>& args) {
     if (mDevice == 0) return NO_INIT;
     native_handle_t* hidlHandle = native_handle_create(1, 0);
     hidlHandle->data[0] = fd;
-    Return<void> ret = mDevice->debug(hidlHandle, {} /* options */);
+    hidl_vec<hidl_string> hidlArgs;
+    argsFromHal(args, &hidlArgs);
+    Return<void> ret = mDevice->debug(hidlHandle, hidlArgs);
     native_handle_delete(hidlHandle);
 
     // TODO(b/111997867, b/177271958)  Workaround - remove when fixed.
@@ -478,5 +512,4 @@
     return processReturn("dump", ret);
 }
 
-} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index 2c847cf..104db40 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -20,15 +20,11 @@
 #include PATH(android/hardware/audio/FILE_VERSION/IDevice.h)
 #include PATH(android/hardware/audio/FILE_VERSION/IPrimaryDevice.h)
 #include <media/audiohal/DeviceHalInterface.h>
+#include <media/audiohal/EffectHalInterface.h>
 
 #include "ConversionHelperHidl.h"
 
-using ::android::hardware::audio::CPP_VERSION::IDevice;
-using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
-using ::android::hardware::Return;
-
 namespace android {
-namespace CPP_VERSION {
 
 class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
 {
@@ -119,15 +115,35 @@
     status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
     status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
 
-    virtual status_t dump(int fd);
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType __unused,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos __unused) override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioMixerBurstCount() override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioHardwareBurstMinUsec() override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    status_t dump(int fd, const Vector<String16>& args) override;
 
   private:
     friend class DevicesFactoryHalHidl;
-    sp<IDevice> mDevice;
-    sp<IPrimaryDevice> mPrimaryDevice;  // Null if it's not a primary device.
+    sp<::android::hardware::audio::CPP_VERSION::IDevice> mDevice;
+    sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice> mPrimaryDevice;
+    // Null if it's not a primary device.
 
     // Can not be constructed directly by clients.
-    explicit DeviceHalHidl(const sp<IDevice>& device);
+    explicit DeviceHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IDevice>& device);
+    explicit DeviceHalHidl(
+            const sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice>& device);
 
     // The destructor automatically closes the device.
     virtual ~DeviceHalHidl();
@@ -135,7 +151,6 @@
     template <typename HalPort> status_t getAudioPortImpl(HalPort *port);
 };
 
-} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index af7dc1a..1384c1e 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -23,7 +23,6 @@
 #include "StreamHalLocal.h"
 
 namespace android {
-namespace CPP_VERSION {
 
 DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
         : mDev(dev) {
@@ -233,7 +232,7 @@
     return INVALID_OPERATION;
 }
 
-status_t DeviceHalLocal::dump(int fd) {
+status_t DeviceHalLocal::dump(int fd, const Vector<String16>& /* args */) {
     return mDev->dump(mDev, fd);
 }
 
@@ -245,5 +244,4 @@
     mDev->close_input_stream(mDev, stream_in);
 }
 
-} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
index 46b510b..b06e253 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -21,7 +21,6 @@
 #include <media/audiohal/DeviceHalInterface.h>
 
 namespace android {
-namespace CPP_VERSION {
 
 class DeviceHalLocal : public DeviceHalInterface
 {
@@ -112,7 +111,24 @@
     status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
     status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
 
-    virtual status_t dump(int fd);
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType __unused,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos __unused) override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioMixerBurstCount() override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioHardwareBurstMinUsec() override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    status_t dump(int fd, const Vector<String16>& args) override;
 
     void closeOutputStream(struct audio_stream_out *stream_out);
     void closeInputStream(struct audio_stream_in *stream_in);
@@ -131,7 +147,6 @@
     virtual ~DeviceHalLocal();
 };
 
-} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
index 1c0eacb..f475729 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -31,14 +31,13 @@
 #include "DevicesFactoryHalHidl.h"
 
 using ::android::hardware::audio::CPP_VERSION::IDevice;
-using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::Result;
 using ::android::hardware::Return;
 using ::android::hardware::Void;
 using ::android::hidl::manager::V1_0::IServiceManager;
 using ::android::hidl::manager::V1_0::IServiceNotification;
 
 namespace android {
-namespace CPP_VERSION {
 
 class ServiceNotificationListener : public IServiceNotification {
   public:
@@ -115,14 +114,37 @@
     if (status != OK) return status;
     Result retval = Result::NOT_INITIALIZED;
     for (const auto& factory : factories) {
-        Return<void> ret = factory->openDevice(
-                hidlId,
-                [&](Result r, const sp<IDevice>& result) {
-                    retval = r;
-                    if (retval == Result::OK) {
-                        *device = new DeviceHalHidl(result);
-                    }
-                });
+        Return<void> ret;
+        if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
+            // In V7.1 it's not possible to cast IDevice back to IPrimaryDevice,
+            // thus openPrimaryDevice must be used.
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+            ret = factory->openPrimaryDevice_7_1(
+#else
+            ret = factory->openPrimaryDevice(
+#endif
+                    [&](Result r,
+                        const sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice>& result) {
+                        retval = r;
+                        if (retval == Result::OK) {
+                            *device = new DeviceHalHidl(result);
+                        }
+                    });
+        } else {
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+            ret = factory->openDevice_7_1(
+#else
+            ret = factory->openDevice(
+#endif
+                    hidlId,
+                    [&](Result r,
+                        const sp<::android::hardware::audio::CPP_VERSION::IDevice>& result) {
+                        retval = r;
+                        if (retval == Result::OK) {
+                            *device = new DeviceHalHidl(result);
+                        }
+                    });
+        }
         if (!ret.isOk()) return FAILED_TRANSACTION;
         switch (retval) {
             // Device was found and was initialized successfully.
@@ -178,7 +200,8 @@
     return NO_ERROR;
 }
 
-void DevicesFactoryHalHidl::addDeviceFactory(sp<IDevicesFactory> factory, bool needToNotify) {
+void DevicesFactoryHalHidl::addDeviceFactory(
+        sp<::android::hardware::audio::CPP_VERSION::IDevicesFactory> factory, bool needToNotify) {
     // It is assumed that the DevicesFactoryHalInterface instance is owned
     // by AudioFlinger and thus have the same lifespan.
     factory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
@@ -198,10 +221,10 @@
     }
 }
 
-std::vector<sp<IDevicesFactory>> DevicesFactoryHalHidl::copyDeviceFactories() {
+std::vector<sp<::android::hardware::audio::CPP_VERSION::IDevicesFactory>>
+        DevicesFactoryHalHidl::copyDeviceFactories() {
     std::lock_guard<std::mutex> lock(mLock);
     return mDeviceFactories;
 }
 
-} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 6f84efe..ffd229d 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -30,7 +30,6 @@
 using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
 
 namespace android {
-namespace CPP_VERSION {
 
 class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
 {
@@ -46,6 +45,8 @@
 
     status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
 
+    float getHalVersion() const override { return MAJOR_VERSION + (float)MINOR_VERSION / 10; }
+
   private:
     friend class ServiceNotificationListener;
     void addDeviceFactory(sp<IDevicesFactory> factory, bool needToNotify);
@@ -59,7 +60,6 @@
     virtual ~DevicesFactoryHalHidl() = default;
 };
 
-} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
index cde8d85..d684c27 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
@@ -22,7 +22,6 @@
 #include "DevicesFactoryHalLocal.h"
 
 namespace android {
-namespace CPP_VERSION {
 
 DevicesFactoryHalHybrid::DevicesFactoryHalHybrid(sp<IDevicesFactory> hidlFactory)
         : mLocalFactory(new DevicesFactoryHalLocal()),
@@ -51,11 +50,9 @@
     return INVALID_OPERATION;
 }
 
-} // namespace CPP_VERSION
-
 extern "C" __attribute__((visibility("default"))) void* createIDevicesFactory() {
     auto service = hardware::audio::CPP_VERSION::IDevicesFactory::getService();
-    return service ? new CPP_VERSION::DevicesFactoryHalHybrid(service) : nullptr;
+    return service ? new DevicesFactoryHalHybrid(service) : nullptr;
 }
 
 } // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index 568a1fb..221584c 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -25,7 +25,6 @@
 using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
 
 namespace android {
-namespace CPP_VERSION {
 
 class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
 {
@@ -40,12 +39,15 @@
 
             status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
 
+            float getHalVersion() const override {
+                return MAJOR_VERSION + (float)MINOR_VERSION / 10;
+            }
+
   private:
     sp<DevicesFactoryHalInterface> mLocalFactory;
     sp<DevicesFactoryHalInterface> mHidlFactory;
 };
 
-} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
index af67ff5..13a9acd 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
@@ -26,7 +26,6 @@
 #include "DevicesFactoryHalLocal.h"
 
 namespace android {
-namespace CPP_VERSION {
 
 static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
 {
@@ -67,5 +66,4 @@
     return rc;
 }
 
-} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalLocal.h b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
index 32bf362..a0da125 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalLocal.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
@@ -24,7 +24,6 @@
 #include "DeviceHalLocal.h"
 
 namespace android {
-namespace CPP_VERSION {
 
 class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
 {
@@ -41,6 +40,10 @@
                 return INVALID_OPERATION;
             }
 
+            float getHalVersion() const override {
+                return MAJOR_VERSION + (float)MINOR_VERSION / 10;
+            }
+
   private:
     friend class DevicesFactoryHalHybrid;
 
@@ -50,7 +53,6 @@
     virtual ~DevicesFactoryHalLocal() {}
 };
 
-} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/EffectBufferHalHidl.cpp b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
index 5367972..65297af 100644
--- a/media/libaudiohal/impl/EffectBufferHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
@@ -31,7 +31,6 @@
 
 namespace android {
 namespace effect {
-namespace CPP_VERSION {
 
 // static
 uint64_t EffectBufferHalHidl::makeUniqueId() {
@@ -144,5 +143,4 @@
 }
 
 } // namespace effect
-} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/impl/EffectBufferHalHidl.h b/media/libaudiohal/impl/EffectBufferHalHidl.h
index 4826813..a9df68b 100644
--- a/media/libaudiohal/impl/EffectBufferHalHidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.h
@@ -28,7 +28,6 @@
 
 namespace android {
 namespace effect {
-namespace CPP_VERSION {
 
 using namespace ::android::hardware::audio::effect::CPP_VERSION;
 
@@ -74,7 +73,6 @@
     status_t init();
 };
 
-} // namespace CPP_VERSION
 } // namespace effect
 } // namespace android
 
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index 51ad146..1bb1e5f 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -36,7 +36,6 @@
 
 namespace android {
 namespace effect {
-namespace CPP_VERSION {
 
 using namespace ::android::hardware::audio::common::CPP_VERSION;
 using namespace ::android::hardware::audio::effect::CPP_VERSION;
@@ -310,6 +309,5 @@
     return result;
 }
 
-} // namespace CPP_VERSION
 } // namespace effect
 } // namespace android
diff --git a/media/libaudiohal/impl/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
index 8e46638..07745db 100644
--- a/media/libaudiohal/impl/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -28,7 +28,6 @@
 
 namespace android {
 namespace effect {
-namespace CPP_VERSION {
 
 using namespace ::android::hardware::audio::effect::CPP_VERSION;
 
@@ -63,7 +62,7 @@
 
     virtual status_t dump(int fd);
 
-    uint64_t effectId() const { return mEffectId; }
+    virtual uint64_t effectId() const { return mEffectId; }
 
   private:
     friend class EffectsFactoryHalHidl;
@@ -96,7 +95,6 @@
     status_t setProcessBuffers();
 };
 
-} // namespace CPP_VERSION
 } // namespace effect
 } // namespace android
 
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index f042b92..90954b2 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -33,7 +33,6 @@
 
 namespace android {
 namespace effect {
-namespace CPP_VERSION {
 
 using namespace ::android::hardware::audio::common::CPP_VERSION;
 using namespace ::android::hardware::audio::effect::CPP_VERSION;
@@ -73,7 +72,9 @@
         uint32_t index, effect_descriptor_t *pDescriptor) {
     // TODO: We need somehow to track the changes on the server side
     // or figure out how to convert everybody to query all the descriptors at once.
-    // TODO: check for nullptr
+    if (pDescriptor == nullptr) {
+        return BAD_VALUE;
+    }
     if (mLastDescriptors.size() == 0) {
         status_t queryResult = queryAllDescriptors();
         if (queryResult != OK) return queryResult;
@@ -85,7 +86,9 @@
 
 status_t EffectsFactoryHalHidl::getDescriptor(
         const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
-    // TODO: check for nullptr
+    if (pDescriptor == nullptr || pEffectUuid == nullptr) {
+        return BAD_VALUE;
+    }
     if (mEffectsFactory == 0) return NO_INIT;
     Uuid hidlUuid;
     UuidUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
@@ -105,6 +108,33 @@
     return processReturn(__FUNCTION__, ret);
 }
 
+status_t EffectsFactoryHalHidl::getDescriptors(const effect_uuid_t *pEffectType,
+                                               std::vector<effect_descriptor_t> *descriptors) {
+    if (pEffectType == nullptr || descriptors == nullptr) {
+        return BAD_VALUE;
+    }
+
+    uint32_t numEffects = 0;
+    status_t status = queryNumberEffects(&numEffects);
+    if (status != NO_ERROR) {
+        ALOGW("%s error %d from FactoryHal queryNumberEffects", __func__, status);
+        return status;
+    }
+
+    for (uint32_t i = 0; i < numEffects; i++) {
+        effect_descriptor_t descriptor;
+        status = getDescriptor(i, &descriptor);
+        if (status != NO_ERROR) {
+            ALOGW("%s error %d from FactoryHal getDescriptor", __func__, status);
+            continue;
+        }
+        if (memcmp(&descriptor.type, pEffectType, sizeof(effect_uuid_t)) == 0) {
+            descriptors->push_back(descriptor);
+        }
+    }
+    return descriptors->empty() ? NAME_NOT_FOUND : NO_ERROR;
+}
+
 status_t EffectsFactoryHalHidl::createEffect(
         const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
         int32_t deviceId __unused, sp<EffectHalInterface> *effect) {
@@ -173,12 +203,11 @@
     return EffectBufferHalHidl::mirror(external, size, buffer);
 }
 
-} // namespace CPP_VERSION
 } // namespace effect
 
 extern "C" __attribute__((visibility("default"))) void* createIEffectsFactory() {
     auto service = hardware::audio::effect::CPP_VERSION::IEffectsFactory::getService();
-    return service ? new effect::CPP_VERSION::EffectsFactoryHalHidl(service) : nullptr;
+    return service ? new effect::EffectsFactoryHalHidl(service) : nullptr;
 }
 
 } // namespace android
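The getDescriptors() method added above enumerates every installed effect and keeps the descriptors whose type UUID matches, returning NAME_NOT_FOUND when nothing matches. A hedged usage sketch; the wrapper name and the assumption that the same method is declared on EffectsFactoryHalInterface are not taken from this excerpt:

    // Sketch only: collect all effects of one type through the new query.
    status_t listEffectsOfType(const sp<EffectsFactoryHalInterface>& factory,
                               const effect_uuid_t* typeUuid,
                               std::vector<effect_descriptor_t>* out) {
        const status_t status = factory->getDescriptors(typeUuid, out);
        // NAME_NOT_FOUND means the enumeration worked but nothing matched;
        // any other non-OK value indicates a query failure.
        return status == NAME_NOT_FOUND ? NO_ERROR : status;
    }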
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 5fa85e7..7491133 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -24,10 +24,9 @@
 
 namespace android {
 namespace effect {
-namespace CPP_VERSION {
 
 using ::android::hardware::hidl_vec;
-using ::android::CPP_VERSION::ConversionHelperHidl;
+using ::android::ConversionHelperHidl;
 using namespace ::android::hardware::audio::effect::CPP_VERSION;
 
 class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
@@ -45,6 +44,9 @@
     virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
             effect_descriptor_t *pDescriptor);
 
+    virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
+                                    std::vector<effect_descriptor_t> *descriptors);
+
     // Creates an effect engine of the specified type.
     // To release the effect engine, it is necessary to release references
     // to the returned effect object.
@@ -67,7 +69,6 @@
     status_t queryAllDescriptors();
 };
 
-} // namespace CPP_VERSION
 } // namespace effect
 } // namespace android
 
diff --git a/media/libaudiohal/impl/ParameterUtils.h b/media/libaudiohal/impl/ParameterUtils.h
index 9cab72e..b5dcb9d 100644
--- a/media/libaudiohal/impl/ParameterUtils.h
+++ b/media/libaudiohal/impl/ParameterUtils.h
@@ -16,17 +16,16 @@
 
 #pragma once
 
-#include PATH(android/hardware/audio/FILE_VERSION/types.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/types.h)
 #include <hidl/HidlSupport.h>
 
-using ::android::hardware::audio::CPP_VERSION::ParameterValue;
-using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::Result;
 using ::android::hardware::Return;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::hidl_string;
 
 namespace android {
-namespace CPP_VERSION {
 namespace utils {
 
 #if MAJOR_VERSION == 2
@@ -56,5 +55,4 @@
 #endif
 
 } // namespace utils
-} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 129b1c1..703d302 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -23,29 +23,26 @@
 #include <mediautils/SchedulingPolicyService.h>
 #include <utils/Log.h>
 
-#include PATH(android/hardware/audio/FILE_VERSION/IStreamOutCallback.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/IStreamOutCallback.h)
 #include <HidlUtils.h>
 #include <util/CoreUtils.h>
 
 #include "DeviceHalHidl.h"
-#include "EffectHalHidl.h"
 #include "ParameterUtils.h"
 #include "StreamHalHidl.h"
 
-using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
-using ::android::hardware::audio::CPP_VERSION::implementation::CoreUtils;
+using ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::implementation::CoreUtils;
 using ::android::hardware::MQDescriptorSync;
 using ::android::hardware::Return;
 using ::android::hardware::Void;
 
 namespace android {
-namespace CPP_VERSION {
 
-using EffectHalHidl = ::android::effect::CPP_VERSION::EffectHalHidl;
 using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
 
-using namespace ::android::hardware::audio::common::CPP_VERSION;
-using namespace ::android::hardware::audio::CPP_VERSION;
+using namespace ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION;
+using namespace ::android::hardware::audio::CORE_TYPES_CPP_VERSION;
 
 StreamHalHidl::StreamHalHidl(IStream *stream)
         : ConversionHelperHidl("Stream"),
@@ -137,14 +134,12 @@
 
 status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
     if (!mStream) return NO_INIT;
-    return processReturn("addEffect", mStream->addEffect(
-                    static_cast<EffectHalHidl*>(effect.get())->effectId()));
+    return processReturn("addEffect", mStream->addEffect(effect->effectId()));
 }
 
 status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
     if (!mStream) return NO_INIT;
-    return processReturn("removeEffect", mStream->removeEffect(
-                    static_cast<EffectHalHidl*>(effect.get())->effectId()));
+    return processReturn("removeEffect", mStream->removeEffect(effect->effectId()));
 }
 
 status_t StreamHalHidl::standby() {
@@ -152,11 +147,13 @@
     return processReturn("standby", mStream->standby());
 }
 
-status_t StreamHalHidl::dump(int fd) {
+status_t StreamHalHidl::dump(int fd, const Vector<String16>& args) {
     if (!mStream) return NO_INIT;
     native_handle_t* hidlHandle = native_handle_create(1, 0);
     hidlHandle->data[0] = fd;
-    Return<void> ret = mStream->debug(hidlHandle, {} /* options */);
+    hidl_vec<hidl_string> hidlArgs;
+    argsFromHal(args, &hidlArgs);
+    Return<void> ret = mStream->debug(hidlHandle, hidlArgs);
     native_handle_delete(hidlHandle);
 
     // TODO(b/111997867, b/177271958)  Workaround - remove when fixed.
@@ -326,7 +323,8 @@
 
 }  // namespace
 
-StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
+StreamOutHalHidl::StreamOutHalHidl(
+        const sp<::android::hardware::audio::CPP_VERSION::IStreamOut>& stream)
         : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
 }
 
@@ -642,7 +640,11 @@
 #elif MAJOR_VERSION >= 4
 status_t StreamOutHalHidl::updateSourceMetadata(
         const StreamOutHalInterface::SourceMetadata& sourceMetadata) {
-    CPP_VERSION::SourceMetadata hidlMetadata;
+#if MAJOR_VERSION == 4
+    ::android::hardware::audio::CORE_TYPES_CPP_VERSION::SourceMetadata hidlMetadata;
+#else
+    ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::SourceMetadata hidlMetadata;
+#endif
     if (status_t status = CoreUtils::sourceMetadataFromHalV7(
                     sourceMetadata.tracks, true /*ignoreNonVendorTags*/, &hidlMetadata);
             status != OK) {
@@ -753,7 +755,7 @@
                     static_cast<TimestretchFallbackMode>(playbackRate.mFallbackMode)}));
 }
 
-#include PATH(android/hardware/audio/FILE_VERSION/IStreamOutEventCallback.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/IStreamOutEventCallback.h)
 
 namespace {
 
@@ -818,7 +820,8 @@
 }
 
 
-StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
+StreamInHalHidl::StreamInHalHidl(
+        const sp<::android::hardware::audio::CPP_VERSION::IStreamIn>& stream)
         : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
 }
 
@@ -1031,7 +1034,11 @@
 
 status_t StreamInHalHidl::updateSinkMetadata(const
         StreamInHalInterface::SinkMetadata& sinkMetadata) {
-    CPP_VERSION::SinkMetadata hidlMetadata;
+#if MAJOR_VERSION == 4
+    ::android::hardware::audio::CORE_TYPES_CPP_VERSION::SinkMetadata hidlMetadata;
+#else
+    ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::SinkMetadata hidlMetadata;
+#endif
     if (status_t status = CoreUtils::sinkMetadataFromHalV7(
                     sinkMetadata.tracks, true /*ignoreNonVendorTags*/, &hidlMetadata);
             status != OK) {
@@ -1066,5 +1073,4 @@
 }
 #endif
 
-} // namespace CPP_VERSION
 } // namespace android
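
A hedged caller-side sketch (not in the patch) of the widened dump() signature: StreamHalHidl::dump() now wraps the fd in a native handle and forwards the caller's arguments to IStream::debug() via argsFromHal(). The option string below is purely illustrative; which arguments a given HAL honors is implementation-defined.

    #include <media/audiohal/StreamHalInterface.h>
    #include <utils/String16.h>
    #include <utils/Vector.h>

    namespace android {

    void dumpStreamWithArgs(const sp<StreamHalInterface>& stream, int fd) {
        Vector<String16> args;
        args.add(String16("--verbose"));  // hypothetical option, passed through to the HAL.
        (void)stream->dump(fd, args);
        // Existing call sites keep compiling because dump(fd) defaults args to {}.
    }

    }  // namespace android
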
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 970903b..97ad87d 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -19,20 +19,19 @@
 
 #include <atomic>
 
-#include PATH(android/hardware/audio/FILE_VERSION/IStream.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/IStream.h)
 #include PATH(android/hardware/audio/FILE_VERSION/IStreamIn.h)
 #include PATH(android/hardware/audio/FILE_VERSION/IStreamOut.h)
 #include <fmq/EventFlag.h>
 #include <fmq/MessageQueue.h>
+#include <media/audiohal/EffectHalInterface.h>
 #include <media/audiohal/StreamHalInterface.h>
 #include <mediautils/Synchronization.h>
 
 #include "ConversionHelperHidl.h"
 #include "StreamPowerLog.h"
 
-using ::android::hardware::audio::CPP_VERSION::IStream;
-using ::android::hardware::audio::CPP_VERSION::IStreamIn;
-using ::android::hardware::audio::CPP_VERSION::IStreamOut;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStream;
 using ::android::hardware::EventFlag;
 using ::android::hardware::MessageQueue;
 using ::android::hardware::Return;
@@ -42,7 +41,6 @@
 using WriteStatus = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteStatus;
 
 namespace android {
-namespace CPP_VERSION {
 
 class DeviceHalHidl;
 
@@ -71,7 +69,7 @@
     // Put the audio hardware input/output into standby mode.
     virtual status_t standby();
 
-    virtual status_t dump(int fd);
+    virtual status_t dump(int fd, const Vector<String16>& args) override;
 
     // Start a stream operating in mmap mode.
     virtual status_t start();
@@ -199,7 +197,7 @@
 
     mediautils::atomic_wp<StreamOutHalInterfaceCallback> mCallback;
     mediautils::atomic_wp<StreamOutHalInterfaceEventCallback> mEventCallback;
-    const sp<IStreamOut> mStream;
+    const sp<::android::hardware::audio::CPP_VERSION::IStreamOut> mStream;
     std::unique_ptr<CommandMQ> mCommandMQ;
     std::unique_ptr<DataMQ> mDataMQ;
     std::unique_ptr<StatusMQ> mStatusMQ;
@@ -207,7 +205,7 @@
     EventFlag* mEfGroup;
 
     // Can not be constructed directly by clients.
-    StreamOutHalHidl(const sp<IStreamOut>& stream);
+    StreamOutHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IStreamOut>& stream);
 
     virtual ~StreamOutHalHidl();
 
@@ -255,7 +253,7 @@
     typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
     typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
 
-    const sp<IStreamIn> mStream;
+    const sp<::android::hardware::audio::CPP_VERSION::IStreamIn> mStream;
     std::unique_ptr<CommandMQ> mCommandMQ;
     std::unique_ptr<DataMQ> mDataMQ;
     std::unique_ptr<StatusMQ> mStatusMQ;
@@ -263,7 +261,7 @@
     EventFlag* mEfGroup;
 
     // Can not be constructed directly by clients.
-    StreamInHalHidl(const sp<IStreamIn>& stream);
+    StreamInHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IStreamIn>& stream);
 
     virtual ~StreamInHalHidl();
 
@@ -273,7 +271,6 @@
     status_t prepareForReading(size_t bufferSize);
 };
 
-} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index 34bd5df..477f510 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -27,7 +27,6 @@
 #include "StreamHalLocal.h"
 
 namespace android {
-namespace CPP_VERSION {
 
 StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
         : mDevice(device),
@@ -87,7 +86,8 @@
     return mStream->standby(mStream);
 }
 
-status_t StreamHalLocal::dump(int fd) {
+status_t StreamHalLocal::dump(int fd, const Vector<String16>& args) {
+    (void) args;
     status_t status = mStream->dump(mStream, fd);
     mStreamPowerLog.dump(fd);
     return status;
@@ -517,7 +517,4 @@
 }
 #endif
 
-} // namespace CPP_VERSION
 } // namespace android
-
-
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index b260495..e6e5037 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -21,7 +21,6 @@
 #include "StreamPowerLog.h"
 
 namespace android {
-namespace CPP_VERSION {
 
 class DeviceHalLocal;
 
@@ -50,7 +49,7 @@
     // Put the audio hardware input/output into standby mode.
     virtual status_t standby();
 
-    virtual status_t dump(int fd);
+    virtual status_t dump(int fd, const Vector<String16>& args) override;
 
     // Start a stream operating in mmap mode.
     virtual status_t start() = 0;
@@ -246,7 +245,6 @@
     void doUpdateSinkMetadataV7(const SinkMetadata& sinkMetadata);
 };
 
-} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/StreamPowerLog.h b/media/libaudiohal/impl/StreamPowerLog.h
index f6a554b..c08ee47 100644
--- a/media/libaudiohal/impl/StreamPowerLog.h
+++ b/media/libaudiohal/impl/StreamPowerLog.h
@@ -24,7 +24,6 @@
 #include <system/audio.h>
 
 namespace android {
-namespace CPP_VERSION {
 
 class StreamPowerLog {
 public:
@@ -99,7 +98,6 @@
     size_t mFrameSize;
 };
 
-} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_STREAM_POWER_LOG_H
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index 29ef011..70c3199 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
 #define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
 
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/audiohal/EffectHalInterface.h>
 #include <media/MicrophoneInfo.h>
 #include <system/audio.h>
@@ -120,7 +122,13 @@
     virtual status_t removeDeviceEffect(
             audio_port_handle_t device, sp<EffectHalInterface> effect) = 0;
 
-    virtual status_t dump(int fd) = 0;
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos)  = 0;
+    virtual int32_t getAAudioMixerBurstCount() = 0;
+    virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
+
+    virtual status_t dump(int fd, const Vector<String16>& args) = 0;
 
   protected:
     // Subclasses can not be constructed directly by clients.
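
A hedged sketch (not in the patch) of a client querying the new MMAP accessors declared above. It assumes the AIDL AudioMMapPolicyType enum exposes a DEFAULT value and that the usual Android logging macros are available; the helper name is hypothetical.

    #define LOG_TAG "DeviceHalMmapExample"
    #include <vector>
    #include <log/log.h>
    #include <media/audiohal/DeviceHalInterface.h>

    namespace android {

    status_t logMmapPolicies(const sp<DeviceHalInterface>& device) {
        std::vector<media::audio::common::AudioMMapPolicyInfo> policyInfos;
        // AudioMMapPolicyType::DEFAULT is assumed from the AIDL enum.
        const status_t status = device->getMmapPolicyInfos(
                media::audio::common::AudioMMapPolicyType::DEFAULT, &policyInfos);
        if (status != NO_ERROR) return status;
        ALOGI("%zu MMAP policy infos, mixer burst count %d, min hardware burst %d us",
              policyInfos.size(), device->getAAudioMixerBurstCount(),
              device->getAAudioHardwareBurstMinUsec());
        return NO_ERROR;
    }

    }  // namespace android
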
diff --git a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
index 5091558..17010e6 100644
--- a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
@@ -43,6 +43,8 @@
     // The callback can be only set once.
     virtual status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) = 0;
 
+    virtual float getHalVersion() const = 0;
+
     static sp<DevicesFactoryHalInterface> create();
 
   protected:
diff --git a/media/libaudiohal/include/media/audiohal/EffectHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
index 03165bd..2969c92 100644
--- a/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
@@ -57,6 +57,9 @@
 
     virtual status_t dump(int fd) = 0;
 
+    // Unique effect ID to use with the core HAL.
+    virtual uint64_t effectId() const = 0;
+
   protected:
     // Subclasses can not be constructed directly by clients.
     EffectHalInterface() {}
diff --git a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
index 9fb56ae..3e505bd 100644
--- a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
@@ -37,6 +37,9 @@
     virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
             effect_descriptor_t *pDescriptor) = 0;
 
+    virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
+                                    std::vector<effect_descriptor_t> *descriptors) = 0;
+
     // Creates an effect engine of the specified type.
     // To release the effect engine, it is necessary to release references
     // to the returned effect object.
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 2be12fb..2b5b2db 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -25,6 +25,7 @@
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <utils/String8.h>
+#include <utils/Vector.h>
 
 namespace android {
 
@@ -69,7 +70,7 @@
     // Put the audio hardware input/output into standby mode.
     virtual status_t standby() = 0;
 
-    virtual status_t dump(int fd) = 0;
+    virtual status_t dump(int fd, const Vector<String16>& args = {}) = 0;
 
     // Start a stream operating in mmap mode.
     virtual status_t start() = 0;
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index d85e2e9..e6fdb1d 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -108,15 +108,11 @@
 
     if (track->mHapticChannelCount > 0) {
         track->mAdjustInChannelCount = track->channelCount + track->mHapticChannelCount;
-        track->mAdjustOutChannelCount = track->channelCount + track->mMixerHapticChannelCount;
-        track->mAdjustNonDestructiveInChannelCount = track->mAdjustOutChannelCount;
-        track->mAdjustNonDestructiveOutChannelCount = track->channelCount;
+        track->mAdjustOutChannelCount = track->channelCount;
         track->mKeepContractedChannels = track->mHapticPlaybackEnabled;
     } else {
         track->mAdjustInChannelCount = 0;
         track->mAdjustOutChannelCount = 0;
-        track->mAdjustNonDestructiveInChannelCount = 0;
-        track->mAdjustNonDestructiveOutChannelCount = 0;
         track->mKeepContractedChannels = false;
     }
 
@@ -131,8 +127,7 @@
     // do it after downmix since track format may change!
     track->prepareForReformat();
 
-    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
-    track->prepareForAdjustChannels();
+    track->prepareForAdjustChannels(mFrameCount);
 
     // Resampler channels may have changed.
     track->recreateResampler(mSampleRate);
@@ -193,6 +188,24 @@
         // mDownmixerBufferProvider reset below.
     }
 
+    // See if we should use our built-in non-effect downmixer.
+    if (mMixerInFormat == AUDIO_FORMAT_PCM_FLOAT
+            && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO
+            && audio_channel_mask_get_representation(channelMask)
+                    == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+        mDownmixerBufferProvider.reset(new ChannelMixBufferProvider(channelMask,
+                mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
+        if (static_cast<ChannelMixBufferProvider *>(mDownmixerBufferProvider.get())
+                ->isValid()) {
+            mDownmixRequiresFormat = mMixerInFormat;
+            reconfigureBufferProviders();
+            ALOGD("%s: Fallback using ChannelMix", __func__);
+            return NO_ERROR;
+        } else {
+            ALOGD("%s: ChannelMix not supported for channel mask %#x", __func__, channelMask);
+        }
+    }
+
     // Effect downmixer does not accept the channel conversion.  Let's use our remixer.
     mDownmixerBufferProvider.reset(new RemixBufferProvider(channelMask,
             mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
@@ -265,48 +278,20 @@
     }
 }
 
-status_t AudioMixer::Track::prepareForAdjustChannels()
+status_t AudioMixer::Track::prepareForAdjustChannels(size_t frames)
 {
     ALOGV("AudioMixer::prepareForAdjustChannels(%p) with inChannelCount: %u, outChannelCount: %u",
             this, mAdjustInChannelCount, mAdjustOutChannelCount);
     unprepareForAdjustChannels();
     if (mAdjustInChannelCount != mAdjustOutChannelCount) {
-        mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
-                mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, kCopyBufferFrameCount));
-        reconfigureBufferProviders();
-    }
-    return NO_ERROR;
-}
-
-void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
-{
-    ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
-    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
-        reconfigureBufferProviders();
-    }
-}
-
-status_t AudioMixer::Track::prepareForAdjustChannelsNonDestructive(size_t frames)
-{
-    ALOGV("AudioMixer::prepareForAdjustChannelsNonDestructive(%p) with inChannelCount: %u, "
-          "outChannelCount: %u, keepContractedChannels: %d",
-            this, mAdjustNonDestructiveInChannelCount, mAdjustNonDestructiveOutChannelCount,
-            mKeepContractedChannels);
-    unprepareForAdjustChannelsNonDestructive();
-    if (mAdjustNonDestructiveInChannelCount != mAdjustNonDestructiveOutChannelCount) {
         uint8_t* buffer = mKeepContractedChannels
                 ? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
                         mMixerChannelCount, mMixerFormat)
-                : NULL;
-        mContractChannelsNonDestructiveBufferProvider.reset(
-                new AdjustChannelsBufferProvider(
-                        mFormat,
-                        mAdjustNonDestructiveInChannelCount,
-                        mAdjustNonDestructiveOutChannelCount,
-                        frames,
-                        mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
-                        buffer));
+                : nullptr;
+        mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
+                mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, frames,
+                mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
+                buffer, mMixerHapticChannelCount));
         reconfigureBufferProviders();
     }
     return NO_ERROR;
@@ -314,9 +299,9 @@
 
 void AudioMixer::Track::clearContractedBuffer()
 {
-    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+    if (mAdjustChannelsBufferProvider.get() != nullptr) {
         static_cast<AdjustChannelsBufferProvider*>(
-                mContractChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+                mAdjustChannelsBufferProvider.get())->clearContractedFrames();
     }
 }
 
@@ -328,10 +313,6 @@
         mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
         bufferProvider = mAdjustChannelsBufferProvider.get();
     }
-    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        mContractChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
-        bufferProvider = mContractChannelsNonDestructiveBufferProvider.get();
-    }
     if (mReformatBufferProvider.get() != nullptr) {
         mReformatBufferProvider->setBufferProvider(bufferProvider);
         bufferProvider = mReformatBufferProvider.get();
@@ -377,7 +358,7 @@
                 track->mainBuffer = valueBuf;
                 ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
                 if (track->mKeepContractedChannels) {
-                    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+                    track->prepareForAdjustChannels(mFrameCount);
                 }
                 invalidate();
             }
@@ -405,7 +386,7 @@
                 track->mMixerFormat = format;
                 ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
                 if (track->mKeepContractedChannels) {
-                    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+                    track->prepareForAdjustChannels(mFrameCount);
                 }
             }
             } break;
@@ -424,8 +405,7 @@
             if (track->mHapticPlaybackEnabled != hapticPlaybackEnabled) {
                 track->mHapticPlaybackEnabled = hapticPlaybackEnabled;
                 track->mKeepContractedChannels = hapticPlaybackEnabled;
-                track->prepareForAdjustChannelsNonDestructive(mFrameCount);
-                track->prepareForAdjustChannels();
+                track->prepareForAdjustChannels(mFrameCount);
             }
             } break;
         case HAPTIC_INTENSITY: {
@@ -434,6 +414,12 @@
                 track->mHapticIntensity = hapticIntensity;
             }
             } break;
+        case HAPTIC_MAX_AMPLITUDE: {
+            const float hapticMaxAmplitude = *reinterpret_cast<float*>(value);
+            if (track->mHapticMaxAmplitude != hapticMaxAmplitude) {
+                track->mHapticMaxAmplitude = hapticMaxAmplitude;
+            }
+            } break;
         default:
             LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
         }
@@ -512,8 +498,6 @@
         track->mDownmixerBufferProvider->reset();
     } else if (track->mReformatBufferProvider.get() != nullptr) {
         track->mReformatBufferProvider->reset();
-    } else if (track->mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        track->mContractChannelsNonDestructiveBufferProvider->reset();
     } else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
         track->mAdjustChannelsBufferProvider->reset();
     }
@@ -553,12 +537,11 @@
     // haptic
     t->mHapticPlaybackEnabled = false;
     t->mHapticIntensity = os::HapticScale::NONE;
+    t->mHapticMaxAmplitude = NAN;
     t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
     t->mMixerHapticChannelCount = 0;
     t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
-    t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
-    t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
-    t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+    t->mAdjustOutChannelCount = t->channelCount;
     t->mKeepContractedChannels = false;
     // Check the downmixing (or upmixing) requirements.
     status_t status = t->prepareForDownmix();
@@ -569,8 +552,7 @@
     // prepareForDownmix() may change mDownmixRequiresFormat
     ALOGVV("mMixerFormat:%#x  mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
     t->prepareForReformat();
-    t->prepareForAdjustChannelsNonDestructive(mFrameCount);
-    t->prepareForAdjustChannels();
+    t->prepareForAdjustChannels(mFrameCount);
     return OK;
 }
 
@@ -602,7 +584,8 @@
                 switch (t->mMixerFormat) {
                 // Mixer format should be AUDIO_FORMAT_PCM_FLOAT.
                 case AUDIO_FORMAT_PCM_FLOAT: {
-                    os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity);
+                    os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity,
+                                        t->mHapticMaxAmplitude);
                 } break;
                 default:
                     LOG_ALWAYS_FATAL("bad mMixerFormat: %#x", t->mMixerFormat);
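
A hedged sketch (not in the patch) of how a mixer client might set the new HAPTIC_MAX_AMPLITUDE track parameter. The setParameter() case above reads the value through reinterpret_cast<float*>, so the caller passes the address of a float; the trackId, amplitude, and helper name below are illustrative, and the setParameter(name, target, param, value) signature is the one used elsewhere in this file.

    #include <media/AudioMixer.h>

    namespace android {

    // Caps the haptic amplitude for one mixer track.
    void limitHapticAmplitude(AudioMixer* mixer, int trackId, float maxAmplitude) {
        mixer->setParameter(trackId, AudioMixer::TRACK, AudioMixer::HAPTIC_MAX_AMPLITUDE,
                            &maxAmplitude);
    }

    }  // namespace android
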
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index cd47dc6..ab6a8b6 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_AUDIO_MIXER_OPS_H
 #define ANDROID_AUDIO_MIXER_OPS_H
 
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
 #include <system/audio.h>
 
 namespace android {
@@ -229,15 +231,26 @@
  * complexity of working on interleaved streams is now getting
  * too high, and likely limits compiler optimization.
  */
-template <int MIXTYPE, int NCHAN,
+
+// compile-time function.
+constexpr inline bool usesCenterChannel(audio_channel_mask_t mask) {
+    using namespace audio_utils::channels;
+    for (size_t i = 0; i < std::size(kSideFromChannelIdx); ++i) {
+        if ((mask & (1 << i)) != 0 && kSideFromChannelIdx[i] == AUDIO_GEOMETRY_SIDE_CENTER) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/*
+ * Applies stereo volume to the audio data based on the left/right affinity of each
+ * channel position (templated channel MASK parameter).
+ */
+template <int MIXTYPE, audio_channel_mask_t MASK,
         typename TO, typename TI, typename TV,
         typename F>
-void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
-    static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
-    static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
-            || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
-            || MIXTYPE == MIXTYPE_STEREOEXPAND
-            || MIXTYPE == MIXTYPE_MONOEXPAND);
+void stereoVolumeHelperWithChannelMask(TO*& out, const TI*& in, const TV *vol, F f) {
     auto proc = [](auto& a, const auto& b) {
         if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                 || MIXTYPE == MIXTYPE_STEREOEXPAND
@@ -250,59 +263,113 @@
     auto inp = [&in]() -> const TI& {
         if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND
                 || MIXTYPE == MIXTYPE_MONOEXPAND) {
-            return *in;
+            return *in; // note STEREOEXPAND assumes replicated L/R channels (see doc below).
         } else {
             return *in++;
         }
     };
 
-    // HALs should only expose the canonical channel masks.
-    proc(*out++, f(inp(), vol[0])); // front left
-    if constexpr (NCHAN == 1) return;
-    proc(*out++, f(inp(), vol[1])); // front right
-    if constexpr (NCHAN == 2)  return;
-    if constexpr (NCHAN == 4) {
-        proc(*out++, f(inp(), vol[0])); // back left
-        proc(*out++, f(inp(), vol[1])); // back right
-        return;
-    }
-
-    // TODO: Precompute center volume if not ramping.
     std::decay_t<TV> center;
-    if constexpr (std::is_floating_point_v<TV>) {
-        center = (vol[0] + vol[1]) * 0.5;       // do not use divide
-    } else {
-        center = (vol[0] >> 1) + (vol[1] >> 1); // rounds to 0.
-    }
-    proc(*out++, f(inp(), center)); // center (or 2.1 LFE)
-    if constexpr (NCHAN == 3) return;
-    if constexpr (NCHAN == 5) {
-        proc(*out++, f(inp(), vol[0]));  // back left
-        proc(*out++, f(inp(), vol[1]));  // back right
-        return;
-    }
-
-    proc(*out++, f(inp(), center)); // lfe
-    proc(*out++, f(inp(), vol[0])); // back left
-    proc(*out++, f(inp(), vol[1])); // back right
-    if constexpr (NCHAN == 6) return;
-    if constexpr (NCHAN == 7) {
-        proc(*out++, f(inp(), center)); // back center
-        return;
-    }
-    // NCHAN == 8
-    proc(*out++, f(inp(), vol[0])); // side left
-    proc(*out++, f(inp(), vol[1])); // side right
-    if constexpr (NCHAN > FCC_8) {
-        // Mutes to zero extended surround channels.
-        // 7.1.4 has the correct behavior.
-        // 22.2 has the behavior that FLC and FRC will be mixed instead
-        // of SL and SR and LFE will be center, not left.
-        for (int i = 8; i < NCHAN; ++i) {
-            // TODO: Consider using android::audio_utils::channels::kSideFromChannelIdx
-            proc(*out++, f(inp(), 0.f));
+    constexpr bool USES_CENTER_CHANNEL = usesCenterChannel(MASK);
+    if constexpr (USES_CENTER_CHANNEL) {
+        if constexpr (std::is_floating_point_v<TV>) {
+            center = (vol[0] + vol[1]) * 0.5;       // do not use divide
+        } else {
+            center = (vol[0] >> 1) + (vol[1] >> 1); // rounds to 0.
         }
     }
+
+    using namespace audio_utils::channels;
+
+    // if LFE and LFE2 are both present, they take left and right volume respectively.
+    constexpr unsigned LFE_LFE2 = \
+             AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2;
+    constexpr bool has_LFE_LFE2 = (MASK & LFE_LFE2) == LFE_LFE2;
+
+#pragma push_macro("DO_CHANNEL_POSITION")
+#undef DO_CHANNEL_POSITION
+#define DO_CHANNEL_POSITION(BIT_INDEX) \
+    if constexpr ((MASK & (1 << BIT_INDEX)) != 0) { \
+        constexpr auto side = kSideFromChannelIdx[BIT_INDEX]; \
+        if constexpr (side == AUDIO_GEOMETRY_SIDE_LEFT || \
+               has_LFE_LFE2 && (1 << BIT_INDEX) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) { \
+            proc(*out++, f(inp(), vol[0])); \
+        } else if constexpr (side == AUDIO_GEOMETRY_SIDE_RIGHT || \
+               has_LFE_LFE2 && (1 << BIT_INDEX) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) { \
+            proc(*out++, f(inp(), vol[1])); \
+        } else /* constexpr */ { \
+            proc(*out++, f(inp(), center)); \
+        } \
+    }
+
+    DO_CHANNEL_POSITION(0);
+    DO_CHANNEL_POSITION(1);
+    DO_CHANNEL_POSITION(2);
+    DO_CHANNEL_POSITION(3);
+    DO_CHANNEL_POSITION(4);
+    DO_CHANNEL_POSITION(5);
+    DO_CHANNEL_POSITION(6);
+    DO_CHANNEL_POSITION(7);
+
+    DO_CHANNEL_POSITION(8);
+    DO_CHANNEL_POSITION(9);
+    DO_CHANNEL_POSITION(10);
+    DO_CHANNEL_POSITION(11);
+    DO_CHANNEL_POSITION(12);
+    DO_CHANNEL_POSITION(13);
+    DO_CHANNEL_POSITION(14);
+    DO_CHANNEL_POSITION(15);
+
+    DO_CHANNEL_POSITION(16);
+    DO_CHANNEL_POSITION(17);
+    DO_CHANNEL_POSITION(18);
+    DO_CHANNEL_POSITION(19);
+    DO_CHANNEL_POSITION(20);
+    DO_CHANNEL_POSITION(21);
+    DO_CHANNEL_POSITION(22);
+    DO_CHANNEL_POSITION(23);
+    DO_CHANNEL_POSITION(24);
+    DO_CHANNEL_POSITION(25);
+    static_assert(FCC_LIMIT <= FCC_26); // Note: this may need to change.
+#pragma pop_macro("DO_CHANNEL_POSITION")
+}
+
+// These are the channel position masks we expect from the HAL.
+// See audio_channel_out_mask_from_count(); this version is constexpr.
+constexpr inline audio_channel_mask_t canonicalChannelMaskFromCount(size_t channelCount) {
+    constexpr audio_channel_mask_t canonical[] = {
+        [0] = AUDIO_CHANNEL_NONE,
+        [1] = AUDIO_CHANNEL_OUT_MONO,
+        [2] = AUDIO_CHANNEL_OUT_STEREO,
+        [3] = AUDIO_CHANNEL_OUT_2POINT1,
+        [4] = AUDIO_CHANNEL_OUT_QUAD,
+        [5] = AUDIO_CHANNEL_OUT_PENTA,
+        [6] = AUDIO_CHANNEL_OUT_5POINT1,
+        [7] = AUDIO_CHANNEL_OUT_6POINT1,
+        [8] = AUDIO_CHANNEL_OUT_7POINT1,
+        [12] = AUDIO_CHANNEL_OUT_7POINT1POINT4,
+        [14] = AUDIO_CHANNEL_OUT_9POINT1POINT4,
+        [16] = AUDIO_CHANNEL_OUT_9POINT1POINT6,
+        [24] = AUDIO_CHANNEL_OUT_22POINT2,
+    };
+    return channelCount < std::size(canonical) ? canonical[channelCount] : AUDIO_CHANNEL_NONE;
+}
+
+template <int MIXTYPE, int NCHAN,
+        typename TO, typename TI, typename TV,
+        typename F>
+void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
+    static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
+    static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
+            || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+            || MIXTYPE == MIXTYPE_STEREOEXPAND
+            || MIXTYPE == MIXTYPE_MONOEXPAND);
+    constexpr audio_channel_mask_t MASK{canonicalChannelMaskFromCount(NCHAN)};
+    if constexpr (MASK == AUDIO_CHANNEL_NONE) {
+        ALOGE("%s: Invalid position count %d", __func__, NCHAN);
+        return; // not a valid system mask, ignore.
+    }
+    stereoVolumeHelperWithChannelMask<MIXTYPE, MASK, TO, TI, TV, F>(out, in, vol, f);
 }
 
 /*
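
A hedged compile-time check (not in the patch) of the helpers introduced above, assuming the standard output channel mask definitions from <system/audio.h>: plain stereo routes only to left and right, while 5.1 contains FRONT_CENTER (and LFE), so usesCenterChannel() reports true for it.

    // These asserts only restate the canonical mask table and side mapping above; illustrative only.
    static_assert(canonicalChannelMaskFromCount(2) == AUDIO_CHANNEL_OUT_STEREO);
    static_assert(canonicalChannelMaskFromCount(6) == AUDIO_CHANNEL_OUT_5POINT1);
    static_assert(!usesCenterChannel(AUDIO_CHANNEL_OUT_STEREO));
    static_assert(usesCenterChannel(AUDIO_CHANNEL_OUT_5POINT1));
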
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index 6d31c12..4658db8 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -364,6 +364,29 @@
             src, mInputChannels, mIdxAry, mSampleSize, frames);
 }
 
+ChannelMixBufferProvider::ChannelMixBufferProvider(audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(inputChannelMask),
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(outputChannelMask),
+                bufferFrameCount)
+{
+    ALOGV("ChannelMixBufferProvider(%p)(%#x, %#x, %#x)",
+            this, format, inputChannelMask, outputChannelMask);
+    if (outputChannelMask == AUDIO_CHANNEL_OUT_STEREO && format == AUDIO_FORMAT_PCM_FLOAT) {
+        mIsValid = mChannelMix.setInputChannelMask(inputChannelMask);
+    }
+}
+
+void ChannelMixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    mChannelMix.process(static_cast<const float *>(src), static_cast<float *>(dst),
+            frames, false /* accumulate */);
+}
+
 ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
         audio_format_t inputFormat, audio_format_t outputFormat,
         size_t bufferFrameCount) :
@@ -630,7 +653,8 @@
 
 AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(
         audio_format_t format, size_t inChannelCount, size_t outChannelCount,
-        size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer) :
+        size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer,
+        size_t contractedOutChannelCount) :
         CopyBufferProvider(
                 audio_bytes_per_frame(inChannelCount, format),
                 audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
@@ -640,15 +664,22 @@
         mOutChannelCount(outChannelCount),
         mSampleSizeInBytes(audio_bytes_per_sample(format)),
         mFrameCount(frameCount),
-        mContractedChannelCount(inChannelCount - outChannelCount),
-        mContractedFormat(contractedFormat),
+        mContractedFormat(inChannelCount > outChannelCount
+                ? contractedFormat : AUDIO_FORMAT_INVALID),
+        mContractedInChannelCount(inChannelCount > outChannelCount
+                ? inChannelCount - outChannelCount : 0),
+        mContractedOutChannelCount(contractedOutChannelCount),
+        mContractedSampleSizeInBytes(audio_bytes_per_sample(contractedFormat)),
+        mContractedInputFrameSize(mContractedInChannelCount * mContractedSampleSizeInBytes),
         mContractedBuffer(contractedBuffer),
         mContractedWrittenFrames(0)
 {
-    ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p)", this, format,
-            inChannelCount, outChannelCount, frameCount, contractedFormat, contractedBuffer);
+    ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p, %zu)",
+          this, format, inChannelCount, outChannelCount, frameCount, contractedFormat,
+          contractedBuffer, contractedOutChannelCount);
     if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
-        mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
+        mContractedOutputFrameSize =
+                audio_bytes_per_frame(mContractedOutChannelCount, mContractedFormat);
     }
 }
 
@@ -667,25 +698,39 @@
 
 void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
 {
-    if (mInChannelCount > mOutChannelCount) {
-        // For case multi to mono, adjust_channels has special logic that will mix first two input
-        // channels into a single output channel. In that case, use adjust_channels_non_destructive
-        // to keep only one channel data even when contracting to mono.
-        adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
-                mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
-        if (mContractedFormat != AUDIO_FORMAT_INVALID
-            && mContractedBuffer != nullptr) {
-            const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+    // For the multi-to-mono case, adjust_channels has special logic that mixes the first two
+    // input channels into a single output channel. In that case, use
+    // adjust_channels_non_destructive to keep only one channel's data even when contracting
+    // to mono.
+    adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
+            mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+    if (mContractedFormat != AUDIO_FORMAT_INVALID
+        && mContractedBuffer != nullptr) {
+        const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+        uint8_t* oriBuf = (uint8_t*) dst + contractedIdx;
+        uint8_t* buf = (uint8_t*) mContractedBuffer
+                + mContractedWrittenFrames * mContractedOutputFrameSize;
+        if (mContractedInChannelCount > mContractedOutChannelCount) {
+            // Adjust the channels first, as the contracted buffer may not have enough
+            // space for the data.
+            // Use adjust_channels_non_destructive to avoid mixing the first two channels into
+            // a single output channel in the multi-to-mono case.
+            adjust_channels_non_destructive(
+                    oriBuf, mContractedInChannelCount, oriBuf, mContractedOutChannelCount,
+                    mSampleSizeInBytes, frames * mContractedInChannelCount * mSampleSizeInBytes);
             memcpy_by_audio_format(
-                    (uint8_t*) mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
-                    mContractedFormat, (uint8_t*) dst + contractedIdx, mFormat,
-                    mContractedChannelCount * frames);
-            mContractedWrittenFrames += frames;
+                    buf, mContractedFormat, oriBuf, mFormat, mContractedOutChannelCount * frames);
+        } else {
+            // Copy the data first, as the dst buffer may not have enough space for the
+            // extra channel(s).
+            memcpy_by_audio_format(
+                buf, mContractedFormat, oriBuf, mFormat, mContractedInChannelCount * frames);
+            // Note that if the contracted data goes from MONO to MULTICHANNEL, the first two
+            // channels will each carry a copy of the original single input channel and all other
+            // channels will be zero-filled.
+            adjust_channels(
+                    buf, mContractedInChannelCount, buf, mContractedOutChannelCount,
+                    mContractedSampleSizeInBytes, mContractedInputFrameSize * frames);
         }
-    } else {
-        // Prefer expanding data from the end of each audio frame.
-        adjust_channels(src, mInChannelCount, dst, mOutChannelCount,
-                mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+        mContractedWrittenFrames += frames;
     }
 }
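
A hedged construction sketch (not in the patch) for the new ChannelMixBufferProvider: only a float, stereo-output configuration sets mIsValid, and AudioMixer falls back to RemixBufferProvider or the effect downmixer when isValid() is false. The frame count and helper name are illustrative.

    #include <memory>
    #include <media/BufferProviders.h>

    namespace android {

    std::unique_ptr<ChannelMixBufferProvider> makeStereoDownmixer(audio_channel_mask_t inMask) {
        auto provider = std::make_unique<ChannelMixBufferProvider>(
                inMask, AUDIO_CHANNEL_OUT_STEREO, AUDIO_FORMAT_PCM_FLOAT,
                256 /* bufferFrameCount, illustrative */);
        if (!provider->isValid()) {
            return nullptr;  // caller should fall back to another buffer provider.
        }
        return provider;
    }

    }  // namespace android
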
 
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
index 70eafe3..2993a60 100644
--- a/media/libaudioprocessing/include/media/AudioMixer.h
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -50,6 +50,7 @@
         // for haptic
         HAPTIC_ENABLED  = 0x4007, // Set haptic data from this track should be played or not.
         HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
+        HAPTIC_MAX_AMPLITUDE = 0x4009, // Set the max amplitude allowed for haptic data.
         // for target TIMESTRETCH
         PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
                                   // parameter 'value' is a pointer to the new playback rate.
@@ -79,7 +80,6 @@
             mPostDownmixReformatBufferProvider.reset(nullptr);
             mDownmixerBufferProvider.reset(nullptr);
             mReformatBufferProvider.reset(nullptr);
-            mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
             mAdjustChannelsBufferProvider.reset(nullptr);
         }
 
@@ -94,10 +94,8 @@
         void        unprepareForDownmix();
         status_t    prepareForReformat();
         void        unprepareForReformat();
-        status_t    prepareForAdjustChannels();
+        status_t    prepareForAdjustChannels(size_t frames);
         void        unprepareForAdjustChannels();
-        status_t    prepareForAdjustChannelsNonDestructive(size_t frames);
-        void        unprepareForAdjustChannelsNonDestructive();
         void        clearContractedBuffer();
         bool        setPlaybackRate(const AudioPlaybackRate &playbackRate);
         void        reconfigureBufferProviders();
@@ -113,24 +111,18 @@
          * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
          *    channel format to another. Expanded channels are filled with zeros and put at the end
          *    of each audio frame. Contracted channels are copied to the end of the buffer.
-         * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
-         *    This is currently using at audio-haptic coupled playback to separate audio and haptic
-         *    data. Contracted channels could be written to given buffer.
-         * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
+         * 3) mReformatBufferProvider: If not NULL, performs the audio reformat to
          *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
          *    requires reformat. For example, it may convert floating point input to
          *    PCM_16_bit if that's required by the downmixer.
-         * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+         * 4) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
          *    the number of channels required by the mixer sink.
-         * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+         * 5) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
          *    the downmixer requirements to the mixer engine input requirements.
-         * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
+         * 6) mTimestretchBufferProvider: Adds timestretching for playback rate
          */
         AudioBufferProvider* mInputBufferProvider;    // externally provided buffer provider.
-        // TODO: combine mAdjustChannelsBufferProvider and
-        // mContractChannelsNonDestructiveBufferProvider
         std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
@@ -145,14 +137,13 @@
         // Haptic
         bool                 mHapticPlaybackEnabled;
         os::HapticScale      mHapticIntensity;
+        float                mHapticMaxAmplitude;
         audio_channel_mask_t mHapticChannelMask;
         uint32_t             mHapticChannelCount;
         audio_channel_mask_t mMixerHapticChannelMask;
         uint32_t             mMixerHapticChannelCount;
         uint32_t             mAdjustInChannelCount;
         uint32_t             mAdjustOutChannelCount;
-        uint32_t             mAdjustNonDestructiveInChannelCount;
-        uint32_t             mAdjustNonDestructiveOutChannelCount;
         bool                 mKeepContractedChannels;
     };
 
diff --git a/media/libaudioprocessing/include/media/BufferProviders.h b/media/libaudioprocessing/include/media/BufferProviders.h
index b038854..b3ab8a5 100644
--- a/media/libaudioprocessing/include/media/BufferProviders.h
+++ b/media/libaudioprocessing/include/media/BufferProviders.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <sys/types.h>
 
+#include <audio_utils/ChannelMix.h>
 #include <media/AudioBufferProvider.h>
 #include <media/AudioResamplerPublic.h>
 #include <system/audio.h>
@@ -129,6 +130,23 @@
     static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
 };
 
+// ChannelMixBufferProvider derives from CopyBufferProvider to perform an
+// downmix to the proper channel count and mask.
+class ChannelMixBufferProvider : public CopyBufferProvider {
+public:
+    ChannelMixBufferProvider(audio_channel_mask_t inputChannelMask,
+            audio_channel_mask_t outputChannelMask, audio_format_t format,
+            size_t bufferFrameCount);
+
+    void copyFrames(void *dst, const void *src, size_t frames) override;
+
+    bool isValid() const { return mIsValid; }
+
+protected:
+    audio_utils::channels::ChannelMix mChannelMix;
+    bool mIsValid = false;
+};
+
 // RemixBufferProvider derives from CopyBufferProvider to perform an
 // upmix or downmix to the proper channel count and mask.
 class RemixBufferProvider : public CopyBufferProvider {
@@ -223,17 +241,22 @@
 // Extra expanded channels are filled with zeros and put at the end of each audio frame.
 // Contracted channels are copied to the end of the output buffer(storage should be
 // allocated appropriately).
-// Contracted channels could be written to output buffer.
+// Contracted channels may be written to the contracted buffer and adjusted there. When the
+// contracted channels are adjusted, the input channel count is computed as
+// `inChannelCount - outChannelCount` and the output channel count is supplied by the caller as
+// `contractedOutChannelCount`. Currently, adjusting contracted channels is used for audio coupled
+// haptic playback: if the device supports two haptic channels while the app provides only one,
+// the second haptic channel duplicates the first haptic channel's data; if the device supports a
+// single haptic channel while the app provides two, the second channel is contracted.
 class AdjustChannelsBufferProvider : public CopyBufferProvider {
 public:
-    AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
-            size_t outChannelCount, size_t frameCount) : AdjustChannelsBufferProvider(
-                    format, inChannelCount, outChannelCount,
-                    frameCount, AUDIO_FORMAT_INVALID, nullptr) { }
     // Contracted data is converted to contractedFormat and put into contractedBuffer.
     AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
-            size_t outChannelCount, size_t frameCount, audio_format_t contractedFormat,
-            void* contractedBuffer);
+            size_t outChannelCount, size_t frameCount,
+            audio_format_t contractedFormat = AUDIO_FORMAT_INVALID,
+            void* contractedBuffer = nullptr,
+            size_t contractedOutChannelCount = 0);
     //Overrides
     status_t getNextBuffer(Buffer* pBuffer) override;
     void copyFrames(void *dst, const void *src, size_t frames) override;
@@ -247,11 +270,14 @@
     const size_t         mOutChannelCount;
     const size_t         mSampleSizeInBytes;
     const size_t         mFrameCount;
-    const size_t         mContractedChannelCount;
     const audio_format_t mContractedFormat;
+    const size_t         mContractedInChannelCount;
+    const size_t         mContractedOutChannelCount;
+    const size_t         mContractedSampleSizeInBytes;
+    const size_t         mContractedInputFrameSize; // contracted input frame size
     void                *mContractedBuffer;
     size_t               mContractedWrittenFrames;
-    size_t               mContractedFrameSize;
+    size_t               mContractedOutputFrameSize; // contracted output frame size
 };
 // ----------------------------------------------------------------------------
 } // namespace android
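
A hedged configuration sketch (not in the patch) matching the haptic use case described above: a (2 audio + 1 haptic)-channel float stream is contracted to 2 audio channels, while the haptic data is written into a caller-provided buffer and expanded to the 2 haptic channels the device is assumed to expose. The buffer, frame count, and helper name are illustrative.

    #include <memory>
    #include <media/BufferProviders.h>

    namespace android {

    std::unique_ptr<AdjustChannelsBufferProvider> makeHapticSplitter(
            void* hapticBuffer, size_t frameCount) {
        return std::make_unique<AdjustChannelsBufferProvider>(
                AUDIO_FORMAT_PCM_FLOAT,
                3 /* inChannelCount: 2 audio + 1 haptic */,
                2 /* outChannelCount: audio only */,
                frameCount,
                AUDIO_FORMAT_PCM_FLOAT /* contractedFormat */,
                hapticBuffer,
                2 /* contractedOutChannelCount: device assumed to expose 2 haptic channels */);
    }

    }  // namespace android
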
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index 3856817..ad402db 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -76,6 +76,7 @@
 //
 cc_binary {
     name: "mixerops_objdump",
+    header_libs: ["libaudioutils_headers"],
     srcs: ["mixerops_objdump.cpp"],
 }
 
@@ -84,6 +85,16 @@
 //
 cc_benchmark {
     name: "mixerops_benchmark",
+    header_libs: ["libaudioutils_headers"],
     srcs: ["mixerops_benchmark.cpp"],
     static_libs: ["libgoogle-benchmark"],
 }
+
+//
+// mixerops unit test
+//
+cc_test {
+    name: "mixerops_tests",
+    defaults: ["libaudioprocessing_test_defaults"],
+    srcs: ["mixerops_tests.cpp"],
+}
diff --git a/media/libaudioprocessing/tests/mixerops_benchmark.cpp b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
index 7a4c5c7..f866b1a 100644
--- a/media/libaudioprocessing/tests/mixerops_benchmark.cpp
+++ b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
@@ -16,11 +16,9 @@
 
 #include <inttypes.h>
 #include <type_traits>
-#include "../../../../system/media/audio_utils/include/audio_utils/primitives.h"
 #define LOG_ALWAYS_FATAL(...)
 
 #include <../AudioMixerOps.h>
-
 #include <benchmark/benchmark.h>
 
 using namespace android;
diff --git a/media/libaudioprocessing/tests/mixerops_tests.cpp b/media/libaudioprocessing/tests/mixerops_tests.cpp
new file mode 100644
index 0000000..2500ba9
--- /dev/null
+++ b/media/libaudioprocessing/tests/mixerops_tests.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "mixerop_tests"
+#include <log/log.h>
+
+#include <inttypes.h>
+#include <type_traits>
+
+#include <../AudioMixerOps.h>
+#include <gtest/gtest.h>
+
+using namespace android;
+
+// Note: gtest templated tests require typenames, not integers.
+template <int MIXTYPE, int NCHAN>
+class MixerOpsBasicTest {
+public:
+    static void testStereoVolume() {
+        using namespace android::audio_utils::channels;
+
+        constexpr size_t FRAME_COUNT = 1000;
+        constexpr size_t SAMPLE_COUNT = FRAME_COUNT * NCHAN;
+
+        const float in[SAMPLE_COUNT] = {[0 ... (SAMPLE_COUNT - 1)] = 1.f};
+
+        AUDIO_GEOMETRY_SIDE sides[NCHAN];
+        size_t i = 0;
+        unsigned channel = canonicalChannelMaskFromCount(NCHAN);
+        constexpr unsigned LFE_LFE2 =
+                AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2;
+        bool has_LFE_LFE2 = (channel & LFE_LFE2) == LFE_LFE2;
+        while (channel != 0) {
+            const int index = __builtin_ctz(channel);
+            if (has_LFE_LFE2 && (1 << index) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+                sides[i++] = AUDIO_GEOMETRY_SIDE_LEFT; // special case
+            } else if (has_LFE_LFE2 && (1 << index) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+                sides[i++] = AUDIO_GEOMETRY_SIDE_RIGHT; // special case
+            } else {
+                sides[i++] = sideFromChannelIdx(index);
+            }
+            channel &= ~(1 << index);
+        }
+
+        float vola[2] = {1.f, 0.f}; // left volume at max.
+        float out[SAMPLE_COUNT]{};
+        float aux[FRAME_COUNT]{};
+        float volaux = 0.5;
+        {
+            volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, vola, volaux);
+            const float *outp = out;
+            const float *auxp = aux;
+            const float left = vola[0];
+            const float center = (vola[0] + vola[1]) * 0.5;
+            const float right = vola[1];
+            for (size_t i = 0; i < FRAME_COUNT; ++i) {
+                for (size_t j = 0; j < NCHAN; ++j) {
+                    const float audio = *outp++;
+                    if (sides[j] == AUDIO_GEOMETRY_SIDE_LEFT) {
+                        EXPECT_EQ(left, audio);
+                    } else if (sides[j] == AUDIO_GEOMETRY_SIDE_CENTER) {
+                        EXPECT_EQ(center, audio);
+                    } else {
+                        EXPECT_EQ(right, audio);
+                    }
+                }
+                EXPECT_EQ(volaux, *auxp++);  // works if all channels contain 1.f
+            }
+        }
+        float volb[2] = {0.f, 0.5f}; // right volume at half max.
+        {
+            // this accumulates into out, aux.
+            // float out[SAMPLE_COUNT]{};
+            // float aux[FRAME_COUNT]{};
+            volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, volb, volaux);
+            const float *outp = out;
+            const float *auxp = aux;
+            const float left = vola[0] + volb[0];
+            const float center = (vola[0] + vola[1] + volb[0] + volb[1]) * 0.5;
+            const float right = vola[1] + volb[1];
+            for (size_t i = 0; i < FRAME_COUNT; ++i) {
+                for (size_t j = 0; j < NCHAN; ++j) {
+                    const float audio = *outp++;
+                    if (sides[j] == AUDIO_GEOMETRY_SIDE_LEFT) {
+                        EXPECT_EQ(left, audio);
+                    } else if (sides[j] == AUDIO_GEOMETRY_SIDE_CENTER) {
+                        EXPECT_EQ(center, audio);
+                    } else {
+                        EXPECT_EQ(right, audio);
+                    }
+                }
+                // aux is accumulated so 2x the amplitude
+                EXPECT_EQ(volaux * 2.f, *auxp++);  // works if all channels contain 1.f
+            }
+        }
+
+        { // test aux as derived from out.
+            // AUX channel is the weighted sum of all of the output channels prior to volume
+            // adjustment.  We must set L and R to the same volume to allow computation
+            // of AUX from the output values.
+            const float volmono = 0.25f;
+            const float vollr[2] = {volmono, volmono}; // all the same.
+            float out[SAMPLE_COUNT]{};
+            float aux[FRAME_COUNT]{};
+            volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, vollr, volaux);
+            const float *outp = out;
+            const float *auxp = aux;
+            for (size_t i = 0; i < FRAME_COUNT; ++i) {
+                float accum = 0.f;
+                for (size_t j = 0; j < NCHAN; ++j) {
+                    accum += *outp++;
+                }
+                EXPECT_EQ(accum / NCHAN * volaux / volmono, *auxp++);
+            }
+        }
+    }
+};
+
+TEST(mixerops, stereovolume_1) { // Note: mono not used for output sinks yet.
+    MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 1>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_2) {
+    MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 2>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_3) {
+    MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 3>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_4) {
+    MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 4>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_5) {
+    MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 5>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_6) {
+    MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 6>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_7) {
+    MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 7>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_8) {
+    MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 8>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_12) {
+    if constexpr (FCC_LIMIT >= 12) { // NOTE: FCC_LIMIT is an enum, so can't #if
+        MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 12>::testStereoVolume();
+    }
+}
+TEST(mixerops, stereovolume_24) {
+    if constexpr (FCC_LIMIT >= 24) {
+        MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 24>::testStereoVolume();
+    }
+}
+TEST(mixerops, channel_equivalence) {
+    // The constexpr canonicalChannelMaskFromCount() must agree with the system-determined
+    // audio_channel_out_mask_from_count() for every supported channel count.
+    for (size_t i = 0; i < FCC_LIMIT; ++i) {
+        const audio_channel_mask_t actual = canonicalChannelMaskFromCount(i);
+        const audio_channel_mask_t system = audio_channel_out_mask_from_count(i);
+        if (system == AUDIO_CHANNEL_INVALID) continue;
+        EXPECT_EQ(system, actual);
+    }
+}
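
The checks in testStereoVolume() above reduce to a simple per-channel rule for a unit input: a
left-positioned channel carries the left volume, a right-positioned channel the right volume, and a
center-positioned channel the average of the two. A minimal standalone sketch of that expectation
(names are illustrative, not part of this change):

    #include <cassert>

    enum class Side { Left, Right, Center };

    // Expected post-volume sample for an input sample of 1.0f, mirroring the
    // EXPECT_EQ checks in MixerOpsBasicTest::testStereoVolume above.
    constexpr float expectedSample(Side side, float volLeft, float volRight) {
        return side == Side::Left  ? volLeft
             : side == Side::Right ? volRight
             : (volLeft + volRight) * 0.5f;
    }

    int main() {
        assert(expectedSample(Side::Left,   1.f, 0.f) == 1.f);   // left at max
        assert(expectedSample(Side::Center, 1.f, 0.f) == 0.5f);  // center is the mean
        assert(expectedSample(Side::Right,  1.f, 0.f) == 0.f);   // right muted
        return 0;
    }
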
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index b26d028..abe622d 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -24,6 +24,10 @@
     vendor: true,
     srcs: ["EffectDownmix.cpp"],
 
+    export_include_dirs: [
+        ".",
+    ],
+
     shared_libs: [
         "libaudioutils",
         "libcutils",
diff --git a/media/libeffects/downmix/EffectDownmix.cpp b/media/libeffects/downmix/EffectDownmix.cpp
index f500bc3..d8f5787 100644
--- a/media/libeffects/downmix/EffectDownmix.cpp
+++ b/media/libeffects/downmix/EffectDownmix.cpp
@@ -19,7 +19,7 @@
 #include <log/log.h>
 
 #include "EffectDownmix.h"
-#include <math.h>
+#include <audio_utils/ChannelMix.h>
 
 // Do not submit with DOWNMIX_TEST_CHANNEL_INDEX defined, strictly for testing
 //#define DOWNMIX_TEST_CHANNEL_INDEX 0
@@ -35,12 +35,13 @@
 } downmix_state_t;
 
 /* parameters for each downmixer */
-typedef struct {
+struct downmix_object_t {
     downmix_state_t state;
     downmix_type_t type;
     bool apply_volume_correction;
     uint8_t input_channel_count;
-} downmix_object_t;
+    android::audio_utils::channels::ChannelMix channelMix;
+};
 
 typedef struct downmix_module_s {
     const struct effect_interface_s *itfe;
@@ -77,11 +78,6 @@
         downmix_object_t *pDownmixer, int32_t param, uint32_t size, void *pValue);
 static int Downmix_getParameter(
         downmix_object_t *pDownmixer, int32_t param, uint32_t *pSize, void *pValue);
-static void Downmix_foldFromQuad(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static void Downmix_foldFrom5Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static void Downmix_foldFrom7Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static bool Downmix_foldGeneric(
-        uint32_t mask, float *pSrc, float *pDst, size_t numFrames, bool accumulate);
 
 // effect_handle_t interface implementation for downmix effect
 const struct effect_interface_s gDownmixInterface = {
@@ -192,9 +188,11 @@
     if (!mask) {
         return false;
     }
-    // check against unsupported channels
-    if (mask & ~AUDIO_CHANNEL_OUT_22POINT2) {
-        ALOGE("Unsupported channels in %u", mask & ~AUDIO_CHANNEL_OUT_22POINT2);
+    // check against unsupported channels (up to FCC_26)
+    constexpr uint32_t MAXIMUM_CHANNEL_MASK = AUDIO_CHANNEL_OUT_22POINT2
+            | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT;
+    if (mask & ~MAXIMUM_CHANNEL_MASK) {
+        ALOGE("Unsupported channels in %#x", mask & ~MAXIMUM_CHANNEL_MASK);
         return false;
     }
     return true;
@@ -315,7 +313,8 @@
         audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
 
     downmix_object_t *pDownmixer;
-    float *pSrc, *pDst;
+    const float *pSrc;
+    float *pDst;
     downmix_module_t *pDwmModule = (downmix_module_t *)self;
 
     if (pDwmModule == NULL) {
@@ -344,7 +343,8 @@
 
     const bool accumulate =
             (pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
-    const uint32_t downmixInputChannelMask = pDwmModule->config.inputCfg.channels;
+    const audio_channel_mask_t downmixInputChannelMask =
+            (audio_channel_mask_t)pDwmModule->config.inputCfg.channels;
 
     switch(pDownmixer->type) {
 
@@ -368,38 +368,13 @@
           }
           break;
 
-      case DOWNMIX_TYPE_FOLD:
-#ifdef DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER
-          // bypass the optimized downmix routines for the common formats
-          if (!Downmix_foldGeneric(
-                  downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
-              ALOGE("Multichannel configuration %#x is not supported",
-                    downmixInputChannelMask);
-              return -EINVAL;
-          }
-          break;
-#endif
-        // optimize for the common formats
-        switch (downmixInputChannelMask) {
-        case AUDIO_CHANNEL_OUT_QUAD_BACK:
-        case AUDIO_CHANNEL_OUT_QUAD_SIDE:
-            Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
-            break;
-        case AUDIO_CHANNEL_OUT_5POINT1_BACK:
-        case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
-            Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
-            break;
-        case AUDIO_CHANNEL_OUT_7POINT1:
-            Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
-            break;
-        default:
-            if (!Downmix_foldGeneric(
-                    downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
+      case DOWNMIX_TYPE_FOLD: {
+            if (!pDownmixer->channelMix.process(
+                    pSrc, pDst, numFrames, accumulate, downmixInputChannelMask)) {
                 ALOGE("Multichannel configuration %#x is not supported",
                       downmixInputChannelMask);
                 return -EINVAL;
             }
-            break;
         }
         break;
 
@@ -674,6 +649,12 @@
         ALOGE("Downmix_Configure error: invalid config");
         return -EINVAL;
     }
+    // when configuring the effect, do not allow a blank or unsupported channel mask
+    if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
+        ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
+                                                    pConfig->inputCfg.channels);
+        return -EINVAL;
+    }
 
     if (&pDwmModule->config != pConfig) {
         memcpy(&pDwmModule->config, pConfig, sizeof(effect_config_t));
@@ -684,12 +665,6 @@
         pDownmixer->apply_volume_correction = false;
         pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
     } else {
-        // when configuring the effect, do not allow a blank or unsupported channel mask
-        if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
-            ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
-                                                        pConfig->inputCfg.channels);
-            return -EINVAL;
-        }
         pDownmixer->input_channel_count =
                 audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
     }
@@ -780,7 +755,6 @@
     return 0;
 } /* end Downmix_setParameter */
 
-
 /*----------------------------------------------------------------------------
  * Downmix_getParameter()
  *----------------------------------------------------------------------------
@@ -829,299 +803,3 @@
     return 0;
 } /* end Downmix_getParameter */
 
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFromQuad()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a quad signal to stereo
- *
- * Inputs:
- *  pSrc       quad audio samples to downmix
- *  numFrames  the number of quad frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFromQuad(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is RL
-    // sample at index 3 is RR
-    if (accumulate) {
-        while (numFrames) {
-            // FL + RL
-            pDst[0] = clamp_float(pDst[0] + ((pSrc[0] + pSrc[2]) / 2.0f));
-            // FR + RR
-            pDst[1] = clamp_float(pDst[1] + ((pSrc[1] + pSrc[3]) / 2.0f));
-            pSrc += 4;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // FL + RL
-            pDst[0] = clamp_float((pSrc[0] + pSrc[2]) / 2.0f);
-            // FR + RR
-            pDst[1] = clamp_float((pSrc[1] + pSrc[3]) / 2.0f);
-            pSrc += 4;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFrom5Point1()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a 5.1 signal to stereo
- *
- * Inputs:
- *  pSrc       5.1 audio samples to downmix
- *  numFrames  the number of 5.1 frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFrom5Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-    float lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is FC
-    // sample at index 3 is LFE
-    // sample at index 4 is RL
-    // sample at index 5 is RR
-    // code is mostly duplicated between the two values of accumulate to avoid repeating the test
-    // for every sample
-    if (accumulate) {
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
-                    + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
-            // FL + centerPlusLfeContrib + RL
-            lt = pSrc[0] + centerPlusLfeContrib + pSrc[4];
-            // FR + centerPlusLfeContrib + RR
-            rt = pSrc[1] + centerPlusLfeContrib + pSrc[5];
-            // accumulate in destination
-            pDst[0] = clamp_float(pDst[0] + (lt / 2.0f));
-            pDst[1] = clamp_float(pDst[1] + (rt / 2.0f));
-            pSrc += 6;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
-                    + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
-            // FL + centerPlusLfeContrib + RL
-            lt = pSrc[0] + centerPlusLfeContrib + pSrc[4];
-            // FR + centerPlusLfeContrib + RR
-            rt = pSrc[1] + centerPlusLfeContrib + pSrc[5];
-            // store in destination
-            pDst[0] = clamp_float(lt / 2.0f); // differs from when accumulate is true above
-            pDst[1] = clamp_float(rt / 2.0f); // differs from when accumulate is true above
-            pSrc += 6;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFrom7Point1()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a 7.1 signal to stereo
- *
- * Inputs:
- *  pSrc       7.1 audio samples to downmix
- *  numFrames  the number of 7.1 frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFrom7Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-    float lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is FC
-    // sample at index 3 is LFE
-    // sample at index 4 is RL
-    // sample at index 5 is RR
-    // sample at index 6 is SL
-    // sample at index 7 is SR
-    // code is mostly duplicated between the two values of accumulate to avoid repeating the test
-    // for every sample
-    if (accumulate) {
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
-                    + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
-            // FL + centerPlusLfeContrib + SL + RL
-            lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
-            // FR + centerPlusLfeContrib + SR + RR
-            rt = pSrc[1] + centerPlusLfeContrib + pSrc[7] + pSrc[5];
-            //accumulate in destination
-            pDst[0] = clamp_float(pDst[0] + (lt / 2.0f));
-            pDst[1] = clamp_float(pDst[1] + (rt / 2.0f));
-            pSrc += 8;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
-                    + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
-            // FL + centerPlusLfeContrib + SL + RL
-            lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
-            // FR + centerPlusLfeContrib + SR + RR
-            rt = pSrc[1] + centerPlusLfeContrib + pSrc[7] + pSrc[5];
-            // store in destination
-            pDst[0] = clamp_float(lt / 2.0f); // differs from when accumulate is true above
-            pDst[1] = clamp_float(rt / 2.0f); // differs from when accumulate is true above
-            pSrc += 8;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldGeneric()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix to stereo a multichannel signal of arbitrary channel position mask.
- *
- * Inputs:
- *  mask       the channel mask of pSrc
- *  pSrc       multichannel audio buffer to downmix
- *  numFrames  the number of multichannel frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- * Returns: false if multichannel format is not supported
- *
- *----------------------------------------------------------------------------
- */
-bool Downmix_foldGeneric(
-        uint32_t mask, float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-
-    if (!Downmix_validChannelMask(mask)) {
-        return false;
-    }
-    const int numChan = audio_channel_count_from_out_mask(mask);
-
-    // compute at what index each channel is: samples will be in the following order:
-    //   FL  FR  FC    LFE   BL  BR  BC    SL  SR
-    //
-    //  (transfer matrix)
-    //   FL  FR  FC    LFE   BL  BR  BC    SL  SR
-    //   0.5     0.353 0.353 0.5     0.353 0.5
-    //       0.5 0.353 0.353     0.5 0.353     0.5
-
-    // derive the indices for the transfer matrix columns that have non-zero values.
-    int indexFL = -1;
-    int indexFR = -1;
-    int indexFC = -1;
-    int indexLFE = -1;
-    int indexBL = -1;
-    int indexBR = -1;
-    int indexBC = -1;
-    int indexSL = -1;
-    int indexSR = -1;
-    int index = 0;
-    for (unsigned tmp = mask;
-         (tmp & (AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER)) != 0;
-         ++index) {
-        const unsigned lowestBit = tmp & -(signed)tmp;
-        switch (lowestBit) {
-        case AUDIO_CHANNEL_OUT_FRONT_LEFT:
-            indexFL = index;
-            break;
-        case AUDIO_CHANNEL_OUT_FRONT_RIGHT:
-            indexFR = index;
-            break;
-        case AUDIO_CHANNEL_OUT_FRONT_CENTER:
-            indexFC = index;
-            break;
-        case AUDIO_CHANNEL_OUT_LOW_FREQUENCY:
-            indexLFE = index;
-            break;
-        case AUDIO_CHANNEL_OUT_BACK_LEFT:
-            indexBL = index;
-            break;
-        case AUDIO_CHANNEL_OUT_BACK_RIGHT:
-            indexBR = index;
-            break;
-        case AUDIO_CHANNEL_OUT_BACK_CENTER:
-            indexBC = index;
-            break;
-        case AUDIO_CHANNEL_OUT_SIDE_LEFT:
-            indexSL = index;
-            break;
-        case AUDIO_CHANNEL_OUT_SIDE_RIGHT:
-            indexSR = index;
-            break;
-        }
-        tmp ^= lowestBit;
-    }
-
-    // With good branch prediction, this should run reasonably fast.
-    // Also consider using a transfer matrix form.
-    while (numFrames) {
-        // compute contribution of FC, BC and LFE
-        float centersLfeContrib = 0;
-        if (indexFC >= 0) centersLfeContrib = pSrc[indexFC];
-        if (indexLFE >= 0) centersLfeContrib += pSrc[indexLFE];
-        if (indexBC >= 0) centersLfeContrib += pSrc[indexBC];
-        centersLfeContrib *= MINUS_3_DB_IN_FLOAT;
-
-        float ch[2];
-        ch[0] = centersLfeContrib;
-        ch[1] = centersLfeContrib;
-
-        // mix in left / right channels
-        if (indexFL >= 0) ch[0] += pSrc[indexFL];
-        if (indexFR >= 0) ch[1] += pSrc[indexFR];
-
-        if (indexSL >= 0) ch[0] += pSrc[indexSL];
-        if (indexSR >= 0) ch[1] += pSrc[indexSR]; // note pair checks enforce this if indexSL != 0
-
-        if (indexBL >= 0) ch[0] += pSrc[indexBL];
-        if (indexBR >= 0) ch[1] += pSrc[indexBR]; // note pair checks enforce this if indexBL != 0
-
-        // scale to prevent overflow.
-        ch[0] *= 0.5f;
-        ch[1] *= 0.5f;
-
-        if (accumulate) {
-            ch[0] += pDst[0];
-            ch[1] += pDst[1];
-        }
-
-        pDst[0] = clamp_float(ch[0]);
-        pDst[1] = clamp_float(ch[1]);
-        pSrc += numChan;
-        pDst += 2;
-        numFrames--;
-    }
-    return true;
-}
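
With the hand-rolled fold routines removed above, the stereo fold for DOWNMIX_TYPE_FOLD is delegated
entirely to the shared audio_utils ChannelMix helper, whose headers become visible through the
export_include_dirs addition in the Android.bp change. A minimal sketch of the new call pattern,
assuming only what the replacement code itself uses:

    #include <audio_utils/ChannelMix.h>
    #include <system/audio.h>

    // Sketch: fold an arbitrary positional input mask down to stereo, optionally
    // accumulating into dst, as the rewritten DOWNMIX_TYPE_FOLD case does.
    static bool foldToStereo(android::audio_utils::channels::ChannelMix& mix,
                             const float* src, float* dst, size_t frames,
                             bool accumulate, audio_channel_mask_t inputMask) {
        // Returns false for unsupported masks, which Downmix_Process maps to -EINVAL.
        return mix.process(src, dst, frames, accumulate, inputMask);
    }

One ChannelMix instance is kept per downmix_object_t, so whatever state the helper keeps persists
across process() calls for the lifetime of the effect.
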
diff --git a/media/libeffects/downmix/benchmark/downmix_benchmark.cpp b/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
index ee169c2..d9d40ed 100644
--- a/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
+++ b/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
@@ -35,16 +35,14 @@
     AUDIO_CHANNEL_OUT_STEREO,
     AUDIO_CHANNEL_OUT_2POINT1,
     AUDIO_CHANNEL_OUT_2POINT0POINT2,
-    AUDIO_CHANNEL_OUT_QUAD,
-    AUDIO_CHANNEL_OUT_QUAD_BACK,
+    AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
     AUDIO_CHANNEL_OUT_QUAD_SIDE,
     AUDIO_CHANNEL_OUT_SURROUND,
     AUDIO_CHANNEL_OUT_2POINT1POINT2,
     AUDIO_CHANNEL_OUT_3POINT0POINT2,
     AUDIO_CHANNEL_OUT_PENTA,
     AUDIO_CHANNEL_OUT_3POINT1POINT2,
-    AUDIO_CHANNEL_OUT_5POINT1,
-    AUDIO_CHANNEL_OUT_5POINT1_BACK,
+    AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
     AUDIO_CHANNEL_OUT_5POINT1_SIDE,
     AUDIO_CHANNEL_OUT_6POINT1,
     AUDIO_CHANNEL_OUT_5POINT1POINT2,
@@ -62,58 +60,34 @@
 static constexpr size_t kFrameCount = 1000;
 
 /*
-Pixel 3XL
-downmix_benchmark:
-  #BM_Downmix/0     4723 ns    4708 ns       148694
-  #BM_Downmix/1     4717 ns    4702 ns       148873
-  #BM_Downmix/2     4803 ns    4788 ns       145893
-  #BM_Downmix/3     5056 ns    5041 ns       139110
-  #BM_Downmix/4     4710 ns    4696 ns       149625
-  #BM_Downmix/5     1514 ns    1509 ns       463694
-  #BM_Downmix/6     1513 ns    1509 ns       463451
-  #BM_Downmix/7     1516 ns    1511 ns       463899
-  #BM_Downmix/8     4445 ns    4431 ns       157831
-  #BM_Downmix/9     5081 ns    5065 ns       138412
-  #BM_Downmix/10    4354 ns    4341 ns       161247
-  #BM_Downmix/11    4411 ns    4397 ns       158893
-  #BM_Downmix/12    4434 ns    4420 ns       157992
-  #BM_Downmix/13    4845 ns    4830 ns       144873
-  #BM_Downmix/14    4851 ns    4835 ns       144954
-  #BM_Downmix/15    4884 ns    4870 ns       144233
-  #BM_Downmix/16    5832 ns    5813 ns       120565
-  #BM_Downmix/17    5241 ns    5224 ns       133927
-  #BM_Downmix/18    5044 ns    5028 ns       139131
-  #BM_Downmix/19    5244 ns    5227 ns       132315
-  #BM_Downmix/20    5943 ns    5923 ns       117759
-  #BM_Downmix/21    5990 ns    5971 ns       117263
-  #BM_Downmix/22    4468 ns    4454 ns       156689
-  #BM_Downmix/23    7306 ns    7286 ns        95911
---
-downmix_benchmark: (generic fold)
-  #BM_Downmix/0     4722 ns    4707 ns       149847
-  #BM_Downmix/1     4714 ns    4698 ns       148748
-  #BM_Downmix/2     4794 ns    4779 ns       145661
-  #BM_Downmix/3     5053 ns    5035 ns       139172
-  #BM_Downmix/4     4695 ns    4678 ns       149762
-  #BM_Downmix/5     4381 ns    4368 ns       159675
-  #BM_Downmix/6     4387 ns    4373 ns       160267
-  #BM_Downmix/7     4732 ns    4717 ns       148514
-  #BM_Downmix/8     4430 ns    4415 ns       158133
-  #BM_Downmix/9     5101 ns    5084 ns       138353
-  #BM_Downmix/10    4356 ns    4343 ns       160821
-  #BM_Downmix/11    4397 ns    4383 ns       159995
-  #BM_Downmix/12    4438 ns    4424 ns       158117
-  #BM_Downmix/13    5243 ns    5226 ns       133863
-  #BM_Downmix/14    5259 ns    5242 ns       131855
-  #BM_Downmix/15    5245 ns    5228 ns       133686
-  #BM_Downmix/16    5829 ns    5809 ns       120543
-  #BM_Downmix/17    5245 ns    5228 ns       133533
-  #BM_Downmix/18    5935 ns    5916 ns       118282
-  #BM_Downmix/19    5263 ns    5245 ns       133657
-  #BM_Downmix/20    5998 ns    5978 ns       114693
-  #BM_Downmix/21    5989 ns    5969 ns       117450
-  #BM_Downmix/22    4442 ns    4431 ns       157913
-  #BM_Downmix/23    7309 ns    7290 ns        95797
+Pixel 4XL
+$ adb shell /data/benchmarktest/downmix_benchmark/vendor/downmix_benchmark
+
+--------------------------------------------------------
+Benchmark              Time             CPU   Iterations
+--------------------------------------------------------
+BM_Downmix/0        3638 ns         3624 ns       197517 AUDIO_CHANNEL_OUT_MONO
+BM_Downmix/1        4040 ns         4024 ns       178766
+BM_Downmix/2        4759 ns         4740 ns       134741 AUDIO_CHANNEL_OUT_STEREO
+BM_Downmix/3        6042 ns         6017 ns       129546 AUDIO_CHANNEL_OUT_2POINT1
+BM_Downmix/4        6897 ns         6868 ns        96316 AUDIO_CHANNEL_OUT_2POINT0POINT2
+BM_Downmix/5        2117 ns         2109 ns       331705 AUDIO_CHANNEL_OUT_QUAD
+BM_Downmix/6        2097 ns         2088 ns       335421 AUDIO_CHANNEL_OUT_QUAD_SIDE
+BM_Downmix/7        7291 ns         7263 ns        96256 AUDIO_CHANNEL_OUT_SURROUND
+BM_Downmix/8        8246 ns         8206 ns        84318 AUDIO_CHANNEL_OUT_2POINT1POINT2
+BM_Downmix/9        8341 ns         8303 ns        84298 AUDIO_CHANNEL_OUT_3POINT0POINT2
+BM_Downmix/10       7549 ns         7517 ns        84293 AUDIO_CHANNEL_OUT_PENTA
+BM_Downmix/11       9395 ns         9354 ns        75209 AUDIO_CHANNEL_OUT_3POINT1POINT2
+BM_Downmix/12       3267 ns         3253 ns       215596 AUDIO_CHANNEL_OUT_5POINT1
+BM_Downmix/13       3178 ns         3163 ns       220132 AUDIO_CHANNEL_OUT_5POINT1_SIDE
+BM_Downmix/14      10245 ns        10199 ns        67486 AUDIO_CHANNEL_OUT_6POINT1
+BM_Downmix/15      10975 ns        10929 ns        61359 AUDIO_CHANNEL_OUT_5POINT1POINT2
+BM_Downmix/16       3796 ns         3780 ns       184728 AUDIO_CHANNEL_OUT_7POINT1
+BM_Downmix/17      13562 ns        13503 ns        51823 AUDIO_CHANNEL_OUT_5POINT1POINT4
+BM_Downmix/18      13573 ns        13516 ns        51800 AUDIO_CHANNEL_OUT_7POINT1POINT2
+BM_Downmix/19      15502 ns        15435 ns        47147 AUDIO_CHANNEL_OUT_7POINT1POINT4
+BM_Downmix/20      16693 ns        16624 ns        42109 AUDIO_CHANNEL_OUT_13POINT_360RA
+BM_Downmix/21      28267 ns        28116 ns        24982 AUDIO_CHANNEL_OUT_22POINT2
 */
 
 static void BM_Downmix(benchmark::State& state) {
@@ -125,7 +99,7 @@
     std::minstd_rand gen(channelMask);
     std::uniform_real_distribution<> dis(-1.0f, 1.0f);
     std::vector<float> input(kFrameCount * channelCount);
-    std::vector<float> output(kFrameCount * 2);
+    std::vector<float> output(kFrameCount * FCC_2);
     for (auto& in : input) {
         in = dis(gen);
     }
@@ -187,7 +161,8 @@
         benchmark::ClobberMemory();
     }
 
-    state.SetComplexityN(state.range(0));
+    state.SetComplexityN(channelCount);
+    state.SetLabel(audio_channel_out_mask_to_string(channelMask));
 
     if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle); status != 0) {
         ALOGE("release_effect returned an error = %d\n", status);
diff --git a/media/libeffects/downmix/tests/Android.bp b/media/libeffects/downmix/tests/Android.bp
index 4940117..392a6fa 100644
--- a/media/libeffects/downmix/tests/Android.bp
+++ b/media/libeffects/downmix/tests/Android.bp
@@ -18,9 +18,6 @@
     gtest: true,
     host_supported: true,
     vendor: true,
-    include_dirs: [
-        "frameworks/av/media/libeffects/downmix",
-    ],
     header_libs: [
         "libaudioeffects",
     ],
@@ -51,9 +48,6 @@
     name:"downmixtest",
     host_supported: false,
     proprietary: true,
-    include_dirs: [
-        "frameworks/av/media/libeffects/downmix",
-    ],
 
     header_libs: [
         "libaudioeffects",
diff --git a/media/libeffects/downmix/tests/downmix_tests.cpp b/media/libeffects/downmix/tests/downmix_tests.cpp
index d4b7a3a..20e19a3 100644
--- a/media/libeffects/downmix/tests/downmix_tests.cpp
+++ b/media/libeffects/downmix/tests/downmix_tests.cpp
@@ -33,16 +33,14 @@
     AUDIO_CHANNEL_OUT_STEREO,
     AUDIO_CHANNEL_OUT_2POINT1,
     AUDIO_CHANNEL_OUT_2POINT0POINT2,
-    AUDIO_CHANNEL_OUT_QUAD,
-    AUDIO_CHANNEL_OUT_QUAD_BACK,
+    AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
     AUDIO_CHANNEL_OUT_QUAD_SIDE,
     AUDIO_CHANNEL_OUT_SURROUND,
     AUDIO_CHANNEL_OUT_2POINT1POINT2,
     AUDIO_CHANNEL_OUT_3POINT0POINT2,
     AUDIO_CHANNEL_OUT_PENTA,
     AUDIO_CHANNEL_OUT_3POINT1POINT2,
-    AUDIO_CHANNEL_OUT_5POINT1,
-    AUDIO_CHANNEL_OUT_5POINT1_BACK,
+    AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
     AUDIO_CHANNEL_OUT_5POINT1_SIDE,
     AUDIO_CHANNEL_OUT_6POINT1,
     AUDIO_CHANNEL_OUT_5POINT1POINT2,
@@ -52,10 +50,72 @@
     AUDIO_CHANNEL_OUT_7POINT1POINT4,
     AUDIO_CHANNEL_OUT_13POINT_360RA,
     AUDIO_CHANNEL_OUT_22POINT2,
+    audio_channel_mask_t(AUDIO_CHANNEL_OUT_22POINT2
+            | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT),
 };
 
-static constexpr audio_channel_mask_t kConsideredChannels =
-    (audio_channel_mask_t)(AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER);
+constexpr float COEF_25 = 0.2508909536f;
+constexpr float COEF_35 = 0.3543928915f;
+constexpr float COEF_36 = 0.3552343859f;
+constexpr float COEF_61 = 0.6057043428f;
+
+constexpr inline float kScaleFromChannelIdxLeft[] = {
+    1.f,       // AUDIO_CHANNEL_OUT_FRONT_LEFT            = 0x1u,
+    0.f,       // AUDIO_CHANNEL_OUT_FRONT_RIGHT           = 0x2u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER          = 0x4u,
+    0.5f,      // AUDIO_CHANNEL_OUT_LOW_FREQUENCY         = 0x8u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_LEFT             = 0x10u,
+    0.f,       // AUDIO_CHANNEL_OUT_BACK_RIGHT            = 0x20u,
+    COEF_61,   // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER  = 0x40u,
+    COEF_25,   // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+    0.5f,      // AUDIO_CHANNEL_OUT_BACK_CENTER           = 0x100u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_LEFT             = 0x200u,
+    0.f,       // AUDIO_CHANNEL_OUT_SIDE_RIGHT            = 0x400u,
+    COEF_36,   // AUDIO_CHANNEL_OUT_TOP_CENTER            = 0x800u,
+    1.f,       // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT        = 0x1000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER      = 0x2000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT       = 0x4000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT         = 0x8000u,
+    COEF_35,   // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER       = 0x10000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT        = 0x20000u,
+    COEF_61,   // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT         = 0x40000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT        = 0x80000u,
+    1.f,       // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT     = 0x100000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER   = 0x200000u,
+    0.f,       // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT    = 0x400000u,
+    0.f,       // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2       = 0x800000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT       = 0x1000000u,
+    0.f,       // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT      = 0x2000000u,
+};
+
+constexpr inline float kScaleFromChannelIdxRight[] = {
+    0.f,       // AUDIO_CHANNEL_OUT_FRONT_LEFT            = 0x1u,
+    1.f,       // AUDIO_CHANNEL_OUT_FRONT_RIGHT           = 0x2u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER          = 0x4u,
+    0.5f,      // AUDIO_CHANNEL_OUT_LOW_FREQUENCY         = 0x8u,
+    0.f,       // AUDIO_CHANNEL_OUT_BACK_LEFT             = 0x10u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_RIGHT            = 0x20u,
+    COEF_25,   // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER  = 0x40u,
+    COEF_61,   // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+    0.5f,      // AUDIO_CHANNEL_OUT_BACK_CENTER           = 0x100u,
+    0.f,       // AUDIO_CHANNEL_OUT_SIDE_LEFT             = 0x200u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_RIGHT            = 0x400u,
+    COEF_36,   // AUDIO_CHANNEL_OUT_TOP_CENTER            = 0x800u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT        = 0x1000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER      = 0x2000u,
+    1.f,       // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT       = 0x4000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT         = 0x8000u,
+    COEF_35,   // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER       = 0x10000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT        = 0x20000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT         = 0x40000u,
+    COEF_61,   // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT        = 0x80000u,
+    0.f,       // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT     = 0x100000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER   = 0x200000u,
+    1.f,       // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT    = 0x400000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2       = 0x800000u,
+    0.f,       // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT       = 0x1000000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT      = 0x2000000u,
+};
 
 // Downmix doesn't change with sample rate
 static constexpr size_t kSampleRates[] = {
@@ -93,8 +153,8 @@
     void testBalance(int sampleRate, audio_channel_mask_t channelMask) {
         using namespace ::android::audio_utils::channels;
 
-        size_t frames = 100;
-        unsigned outChannels = 2;
+        size_t frames = 100; // must be even (2, 4, 6, ...): the test stream alternates +1, -1.
+        constexpr unsigned outChannels = 2;
         unsigned inChannels = audio_channel_count_from_out_mask(channelMask);
         std::vector<float> input(frames * inChannels);
         std::vector<float> output(frames * outChannels);
@@ -102,7 +162,7 @@
         double savedPower[32][2]{};
         for (unsigned i = 0, channel = channelMask; channel != 0; ++i) {
             const int index = __builtin_ctz(channel);
-            ASSERT_LT(index, FCC_24);
+            ASSERT_LT(index, FCC_26);
             const int pairIndex = pairIdxFromChannelIdx(index);
             const AUDIO_GEOMETRY_SIDE side = sideFromChannelIdx(index);
             const int channelBit = 1 << index;
@@ -119,7 +179,7 @@
 
             auto stats = channelStatistics(output, 2 /* channels */);
             // printf("power: %s %s\n", stats[0].toString().c_str(), stats[1].toString().c_str());
-            double power[2] = { stats[0].getVariance(), stats[1].getVariance() };
+            double power[2] = { stats[0].getPopVariance(), stats[1].getPopVariance() };
 
             // Check symmetric power for pair channels on exchange of left/right position.
             // to do this, we save previous power measurements.
@@ -130,28 +190,39 @@
             savedPower[index][0] = power[0];
             savedPower[index][1] = power[1];
 
-            // Confirm exactly the mix amount prescribed by the existing downmix effect.
-            // For future changes to the downmix effect, the nearness needs to be relaxed
-            // to compare behavior S or earlier.
-            if ((channelBit & kConsideredChannels) == 0) {
-                // for channels not considered, expect 0 power for legacy downmix
-                EXPECT_EQ(0.f, power[0]);
-                EXPECT_EQ(0.f, power[1]);
-                continue;
-            }
-            constexpr float POWER_TOLERANCE = 0.01;  // for variance sum error.
+            constexpr float POWER_TOLERANCE = 0.001;
+            const float expectedPower =
+                    kScaleFromChannelIdxLeft[index] * kScaleFromChannelIdxLeft[index]
+                    + kScaleFromChannelIdxRight[index] * kScaleFromChannelIdxRight[index];
+            EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
             switch (side) {
             case AUDIO_GEOMETRY_SIDE_LEFT:
-                EXPECT_NEAR(0.25f, power[0], POWER_TOLERANCE);
+                if (channelBit == AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) {
+                    break;
+                }
                 EXPECT_EQ(0.f, power[1]);
                 break;
             case AUDIO_GEOMETRY_SIDE_RIGHT:
+                if (channelBit == AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) {
+                    break;
+                }
                 EXPECT_EQ(0.f, power[0]);
-                EXPECT_NEAR(0.25f, power[1], POWER_TOLERANCE);
                 break;
             case AUDIO_GEOMETRY_SIDE_CENTER:
-                EXPECT_NEAR(0.125f, power[0], POWER_TOLERANCE);
-                EXPECT_NEAR(0.125f, power[1], POWER_TOLERANCE);
+                if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+                    if (channelMask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+                        EXPECT_EQ(0.f, power[1]);
+                        break;
+                    } else {
+                        EXPECT_NEAR_EPSILON(power[0], power[1]); // always true
+                        EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
+                        break;
+                    }
+                } else if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+                    EXPECT_EQ(0.f, power[0]);
+                    EXPECT_NEAR(expectedPower, power[1], POWER_TOLERANCE);
+                    break;
+                }
                 EXPECT_NEAR_EPSILON(power[0], power[1]);
                 break;
             }
@@ -178,6 +249,7 @@
                 handle_, EFFECT_CMD_SET_CONFIG,
                 sizeof(effect_config_t), &config_, &replySize, &reply);
         ASSERT_EQ(0, err);
+        ASSERT_EQ(0, reply);
         err = (downmixApi->command)(
                 handle_, EFFECT_CMD_ENABLE,
                 0, nullptr, &replySize, &reply);
@@ -188,6 +260,27 @@
         ASSERT_EQ(0, err);
     }
 
+    // Helper: expects EFFECT_CMD_SET_CONFIG to be rejected for the given (invalid) channel mask.
+    void testInvalidChannelMask(audio_channel_mask_t invalidChannelMask) {
+        reconfig(48000 /* sampleRate */, invalidChannelMask);
+        const int32_t sessionId = 0;
+        const int32_t ioId = 0;
+        int32_t err = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(
+                &downmix_uuid_, sessionId, ioId,  &handle_);
+        ASSERT_EQ(0, err);
+
+        const struct effect_interface_s * const downmixApi = *handle_;
+        int32_t reply = 0;
+        uint32_t replySize = (uint32_t)sizeof(reply);
+        err = (downmixApi->command)(
+                handle_, EFFECT_CMD_SET_CONFIG,
+                sizeof(effect_config_t), &config_, &replySize, &reply);
+        ASSERT_EQ(0, err);
+        ASSERT_NE(0, reply);  // error has occurred.
+        err = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(handle_);
+        ASSERT_EQ(0, err);
+    }
+
 private:
     void reconfig(int sampleRate, audio_channel_mask_t channelMask) {
         config_.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
@@ -234,6 +327,16 @@
     int inputChannelCount_{};
 };
 
+TEST(DownmixTestSimple, invalidChannelMask) {
+    // Provide a stub TestBody() so DownmixTest can be instantiated outside a parameterized test.
+    class DownmixTestComplete : public DownmixTest {
+        void TestBody() override {}
+    } downmixtest;
+
+    constexpr auto INVALID_CHANNEL_MASK = audio_channel_mask_t(1 << 31);
+    downmixtest.testInvalidChannelMask(INVALID_CHANNEL_MASK);
+}
+
 TEST_P(DownmixTest, basic) {
     testBalance(kSampleRates[std::get<0>(GetParam())],
             kChannelPositionMasks[std::get<1>(GetParam())]);
@@ -244,10 +347,11 @@
         ::testing::Combine(
                 ::testing::Range(0, (int)std::size(kSampleRates)),
                 ::testing::Range(0, (int)std::size(kChannelPositionMasks))
-                ));
-
-int main(int argc, /* const */ char** argv) {
-    ::testing::InitGoogleTest(&argc, argv);
-    int status = RUN_ALL_TESTS();
-    return status;
-}
+                ),
+        [](const testing::TestParamInfo<DownmixTest::ParamType>& info) {
+            const int index = std::get<1>(info.param);
+            const audio_channel_mask_t channelMask = kChannelPositionMasks[index];
+            const std::string name = std::string(audio_channel_out_mask_to_string(channelMask))
+                + "_" + std::to_string(std::get<0>(info.param)) + "_" + std::to_string(index);
+            return name;
+        });
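
In testBalance() the per-channel pass criterion is now derived from the coefficient tables added
above: for each input channel, the total output power must equal the sum of the squared left and
right downmix gains for that channel, within a 0.001 tolerance. A worked example for FRONT_CENTER,
which maps to 1/sqrt(2) on both sides (standalone sketch, not part of the test):

    #include <cmath>
    #include <cstdio>

    int main() {
        // kScaleFromChannelIdxLeft/Right both give M_SQRT1_2 for FRONT_CENTER.
        const double left = 1.0 / std::sqrt(2.0);
        const double right = 1.0 / std::sqrt(2.0);
        const double expectedPower = left * left + right * right;  // 0.5 + 0.5 = 1.0
        std::printf("expected FRONT_CENTER output power: %f\n", expectedPower);
        return 0;
    }

The EXPECT_NEAR on power[0] + power[1] in the test checks exactly this quantity.
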
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index a660957..03ce329 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -45,6 +45,7 @@
 
     shared_libs: [
         "libaudioutils",
+        "libbase",
         "libbinder",
         "liblog",
         "libutils",
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index 65a20a7..3137e13 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -22,12 +22,15 @@
 
 #include <algorithm>
 #include <memory>
+#include <string>
 #include <utility>
 
 #include <errno.h>
 #include <inttypes.h>
 #include <math.h>
 
+#include <android-base/parsedouble.h>
+#include <android-base/properties.h>
 #include <audio_effects/effect_hapticgenerator.h>
 #include <audio_utils/format.h>
 #include <system/audio.h>
@@ -35,6 +38,7 @@
 static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
 static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
 static constexpr float DEFAULT_BSF_POLE_Q = 4.0f;
+static constexpr float DEFAULT_DISTORTION_OUTPUT_GAIN = 1.5f;
 
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
@@ -81,6 +85,15 @@
 
 namespace {
 
+float getFloatProperty(const std::string& key, float defaultValue) {
+    float result;
+    std::string value = android::base::GetProperty(key, "");
+    if (!value.empty() && android::base::ParseFloat(value, &result)) {
+        return result;
+    }
+    return defaultValue;
+}
+
 int HapticGenerator_Init(struct HapticGeneratorContext *context) {
     context->itfe = &gHapticGeneratorInterface;
 
@@ -114,7 +127,9 @@
     context->param.distortionCornerFrequency = 300.0f;
     context->param.distortionInputGain = 0.3f;
     context->param.distortionCubeThreshold = 0.1f;
-    context->param.distortionOutputGain = 1.5f;
+    context->param.distortionOutputGain = getFloatProperty(
+            "vendor.audio.hapticgenerator.distortion.output.gain", DEFAULT_DISTORTION_OUTPUT_GAIN);
+    ALOGD("Using distortion output gain of %f", context->param.distortionOutputGain);
 
     context->state = HAPTICGENERATOR_STATE_INITIALIZED;
     return 0;
@@ -287,15 +302,17 @@
         break;
     }
     case HG_PARAM_VIBRATOR_INFO: {
-        if (value == nullptr || size != 2 * sizeof(float)) {
+        if (value == nullptr || size != 3 * sizeof(float)) {
             return -EINVAL;
         }
         const float resonantFrequency = *(float*) value;
         const float qFactor = *((float *) value + 1);
+        const float maxAmplitude = *((float *) value + 2);
         context->param.resonantFrequency =
                 isnan(resonantFrequency) ? DEFAULT_RESONANT_FREQUENCY : resonantFrequency;
         context->param.bsfZeroQ = isnan(qFactor) ? DEFAULT_BSF_POLE_Q : qFactor;
         context->param.bsfPoleQ = context->param.bsfZeroQ / 2.0f;
+        context->param.maxHapticAmplitude = maxAmplitude;
 
         if (context->processorsRecord.bpf != nullptr) {
             context->processorsRecord.bpf->setCoefficients(
@@ -448,7 +465,8 @@
     float* hapticOutBuffer = HapticGenerator_runProcessingChain(
             context->processingChain, context->inputBuffer.data(),
             context->outputBuffer.data(), inBuffer->frameCount);
-    os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity);
+    os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity,
+                        context->param.maxHapticAmplitude);
 
     // For haptic data, the haptic playback thread will copy the data from effect input buffer,
     // which contains haptic data at the end of the buffer, directly to sink buffer.
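
The new getFloatProperty() hook above lets the distortion output gain be tuned per device through a
vendor system property instead of a recompile. A condensed sketch of the same read-with-fallback
pattern (mirroring the patch; only the helper name is illustrative):

    #include <string>
    #include <android-base/parsedouble.h>
    #include <android-base/properties.h>

    // Returns the property value as a float, or defaultValue when the property is
    // unset or does not parse, matching getFloatProperty() in the change above.
    static float floatPropertyOr(const std::string& key, float defaultValue) {
        const std::string value = android::base::GetProperty(key, "");
        float parsed;
        if (!value.empty() && android::base::ParseFloat(value, &parsed)) {
            return parsed;
        }
        return defaultValue;
    }

With this in place, the gain can be overridden at effect-initialization time by setting
vendor.audio.hapticgenerator.distortion.output.gain, for example via adb shell setprop on a
debuggable build, and the ALOGD line confirms which gain was picked up.
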
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index 96b744a..85e961f 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -51,6 +51,7 @@
     // A map from track id to haptic intensity.
     std::map<int, os::HapticScale> id2Intensity;
     os::HapticScale maxHapticIntensity; // max intensity will be used to scale haptic data.
+    float maxHapticAmplitude; // max amplitude will be used to limit haptic data absolute values.
 
     float resonantFrequency;
     float bpfQ;
diff --git a/media/libeffects/lvm/benchmarks/Android.bp b/media/libeffects/lvm/benchmarks/Android.bp
index 8a25b85..c21c5f2 100644
--- a/media/libeffects/lvm/benchmarks/Android.bp
+++ b/media/libeffects/lvm/benchmarks/Android.bp
@@ -29,9 +29,6 @@
     name: "reverb_benchmark",
     vendor: true,
     host_supported: true,
-    include_dirs: [
-        "frameworks/av/media/libeffects/lvm/wrapper/Reverb",
-    ],
     srcs: ["reverb_benchmark.cpp"],
     static_libs: [
         "libreverb",
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
index 7e5caed..ccef5ab 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
@@ -135,7 +135,6 @@
     LVM_UINT32 fs =
             (LVM_UINT32)LVEQNB_SampleRateTab[(LVM_UINT16)pParams->SampleRate]; /* Sample rate */
     LVM_UINT32 fc;     /* Filter centre frequency */
-    LVM_INT16 QFactor; /* Filter Q factor */
 
     pInstance->NBands = pParams->NBands;
 
@@ -144,7 +143,6 @@
          * Get the filter settings
          */
         fc = (LVM_UINT32)pParams->pBandDefinition[i].Frequency; /* Get the band centre frequency */
-        QFactor = (LVM_INT16)pParams->pBandDefinition[i].QFactor; /* Get the band Q factor */
 
         pInstance->pBiquadType[i] = LVEQNB_SinglePrecision_Float; /* Default to single precision */
 
@@ -313,9 +311,9 @@
      */
     pInstance->eqBiquad.resize(pParams->NBands,
                                android::audio_utils::BiquadFilter<LVM_FLOAT>(pParams->NrChannels));
-    LVEQNB_ClearFilterHistory(pInstance);
 
     if (bChange || modeChange) {
+        LVEQNB_ClearFilterHistory(pInstance);
         /*
          * If the sample rate has changed clear the history
          */
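
The LVEQNB change above only alters when filter state is reset: LVEQNB_ClearFilterHistory() now runs
inside the (bChange || modeChange) branch, so repeated control calls with unchanged band settings no
longer wipe the biquad history mid-stream. A tiny sketch of the guarded-reset pattern, with
hypothetical names:

    #include <algorithm>
    #include <vector>

    struct EqState {
        std::vector<float> history;   // per-band biquad delay lines
        int configVersion = -1;

        // Reset history only when the configuration actually changes; redundant
        // calls with the same configuration leave the running filter state intact.
        void control(int newConfigVersion) {
            if (newConfigVersion != configVersion) {
                std::fill(history.begin(), history.end(), 0.f);
                configVersion = newConfigVersion;
            }
        }
    };
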
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
index 8e63502..ffed6d4 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
@@ -421,7 +421,6 @@
      * Intermediate variables and temporary values
      */
     LVM_FLOAT T0;
-    LVM_FLOAT D;
     LVM_FLOAT A0;
     LVM_FLOAT B1;
     LVM_FLOAT B2;
@@ -444,9 +443,6 @@
      * Calculating the intermediate values
      */
     T0 = Frequency * LVPSA_Float_TwoPiOnFsTable[Fs]; /* T0 = 2 * Pi * Fc / Fs */
-    D = 3200;                                        /* Floating point value 1.000000 (1*100*2^5) */
-    /* Force D = 1 : the function was originally used for a peaking filter.
-       The D parameter do not exist for a BandPass filter coefficients */
 
     /*
      * Calculate the B2 coefficient
@@ -535,7 +531,6 @@
      * Intermediate variables and temporary values
      */
     LVM_FLOAT T0;
-    LVM_FLOAT D;
     LVM_FLOAT A0;
     LVM_FLOAT B1;
     LVM_FLOAT B2;
@@ -558,9 +553,6 @@
      * Calculating the intermediate values
      */
     T0 = Frequency * LVPSA_Float_TwoPiOnFsTable[Fs]; /* T0 = 2 * Pi * Fc / Fs */
-    D = 3200;                                        /* Floating point value 1.000000 (1*100*2^5) */
-    /* Force D = 1 : the function was originally used for a peaking filter.
-       The D parameter do not exist for a BandPass filter coefficients */
 
     /*
      * Calculate the B2 coefficient
diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp
index 9939ed1..7d7f8b9 100644
--- a/media/libeffects/lvm/tests/Android.bp
+++ b/media/libeffects/lvm/tests/Android.bp
@@ -18,10 +18,6 @@
         "EffectReverbTest.cpp",
         "EffectTestHelper.cpp",
     ],
-    include_dirs: [
-        "frameworks/av/media/libeffects/lvm/lib/Common/lib",
-        "frameworks/av/media/libeffects/lvm/wrapper/Reverb",
-    ],
     static_libs: [
         "libaudioutils",
         "libreverb",
@@ -108,10 +104,6 @@
     proprietary: true,
     gtest: false,
 
-    include_dirs: [
-        "frameworks/av/media/libeffects/lvm/wrapper/Reverb",
-    ],
-
     header_libs: [
         "libaudioeffects",
     ],
diff --git a/media/libeffects/lvm/tests/EffectBundleTest.cpp b/media/libeffects/lvm/tests/EffectBundleTest.cpp
index 881ffb1..018cb7c 100644
--- a/media/libeffects/lvm/tests/EffectBundleTest.cpp
+++ b/media/libeffects/lvm/tests/EffectBundleTest.cpp
@@ -14,29 +14,39 @@
  * limitations under the License.
  */
 
+#include <system/audio_effects/effect_bassboost.h>
+#include <system/audio_effects/effect_equalizer.h>
+#include <system/audio_effects/effect_virtualizer.h>
 #include "EffectTestHelper.h"
-using namespace android;
 
-// Update isBassBoost, if the order of effects is updated
-constexpr effect_uuid_t kEffectUuids[] = {
+using namespace android;
+typedef enum {
+    EFFECT_BASS_BOOST,
+    EFFECT_EQUALIZER,
+    EFFECT_VIRTUALIZER,
+    EFFECT_VOLUME
+} effect_type_t;
+
+const std::map<effect_type_t, effect_uuid_t> kEffectUuids = {
         // NXP SW BassBoost
-        {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
-        // NXP SW Virtualizer
-        {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        {EFFECT_BASS_BOOST,
+         {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
         // NXP SW Equalizer
-        {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        {EFFECT_EQUALIZER,
+         {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
+        // NXP SW Virtualizer
+        {EFFECT_VIRTUALIZER,
+         {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
         // NXP SW Volume
-        {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        {EFFECT_VOLUME, {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
 };
 
-static bool isBassBoost(const effect_uuid_t* uuid) {
-    // Update this, if the order of effects in kEffectUuids is updated
-    return uuid == &kEffectUuids[0];
-}
+const size_t kNumEffectUuids = std::size(kEffectUuids);
 
-constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
+constexpr float kMinAmplitude = -1.0f;
+constexpr float kMaxAmplitude = 1.0f;
 
-typedef std::tuple<int, int, int, int, int> SingleEffectTestParam;
+using SingleEffectTestParam = std::tuple<int, int, int, int, int>;
 class SingleEffectTest : public ::testing::TestWithParam<SingleEffectTestParam> {
   public:
     SingleEffectTest()
@@ -46,7 +56,8 @@
           mFrameCount(EffectTestHelper::kFrameCounts[std::get<2>(GetParam())]),
           mLoopCount(EffectTestHelper::kLoopCounts[std::get<3>(GetParam())]),
           mTotalFrameCount(mFrameCount * mLoopCount),
-          mUuid(&kEffectUuids[std::get<4>(GetParam())]) {}
+          mEffectType((effect_type_t)std::get<4>(GetParam())),
+          mUuid(kEffectUuids.at(mEffectType)) {}
 
     const size_t mChMask;
     const size_t mChannelCount;
@@ -54,7 +65,8 @@
     const size_t mFrameCount;
     const size_t mLoopCount;
     const size_t mTotalFrameCount;
-    const effect_uuid_t* mUuid;
+    const effect_type_t mEffectType;
+    const effect_uuid_t mUuid;
 };
 
 // Tests applying a single effect
@@ -63,7 +75,7 @@
                  << "chMask: " << mChMask << " sampleRate: " << mSampleRate
                  << " frameCount: " << mFrameCount << " loopCount: " << mLoopCount);
 
-    EffectTestHelper effect(mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+    EffectTestHelper effect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
 
     ASSERT_NO_FATAL_FAILURE(effect.createEffect());
     ASSERT_NO_FATAL_FAILURE(effect.setConfig());
@@ -72,7 +84,7 @@
     std::vector<float> input(mTotalFrameCount * mChannelCount);
     std::vector<float> output(mTotalFrameCount * mChannelCount);
     std::minstd_rand gen(mChMask);
-    std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+    std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
     for (auto& in : input) {
         in = dis(gen);
     }
@@ -88,7 +100,7 @@
                            ::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
                            ::testing::Range(0, (int)kNumEffectUuids)));
 
-typedef std::tuple<int, int, int, int> SingleEffectComparisonTestParam;
+using SingleEffectComparisonTestParam = std::tuple<int, int, int, int>;
 class SingleEffectComparisonTest
     : public ::testing::TestWithParam<SingleEffectComparisonTestParam> {
   public:
@@ -97,13 +109,15 @@
           mFrameCount(EffectTestHelper::kFrameCounts[std::get<1>(GetParam())]),
           mLoopCount(EffectTestHelper::kLoopCounts[std::get<2>(GetParam())]),
           mTotalFrameCount(mFrameCount * mLoopCount),
-          mUuid(&kEffectUuids[std::get<3>(GetParam())]) {}
+          mEffectType((effect_type_t)std::get<3>(GetParam())),
+          mUuid(kEffectUuids.at(mEffectType)) {}
 
     const size_t mSampleRate;
     const size_t mFrameCount;
     const size_t mLoopCount;
     const size_t mTotalFrameCount;
-    const effect_uuid_t* mUuid;
+    const effect_type_t mEffectType;
+    const effect_uuid_t mUuid;
 };
 
 // Compares first two channels in multi-channel output to stereo output when same effect is applied
@@ -115,7 +129,7 @@
     std::vector<float> monoInput(mTotalFrameCount);
 
     std::minstd_rand gen(mSampleRate);
-    std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+    std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
     for (auto& in : monoInput) {
         in = dis(gen);
     }
@@ -126,7 +140,7 @@
                     mTotalFrameCount * sizeof(float) * FCC_1);
 
     // Apply effect on stereo channels
-    EffectTestHelper stereoEffect(mUuid, AUDIO_CHANNEL_OUT_STEREO, AUDIO_CHANNEL_OUT_STEREO,
+    EffectTestHelper stereoEffect(&mUuid, AUDIO_CHANNEL_OUT_STEREO, AUDIO_CHANNEL_OUT_STEREO,
                                   mSampleRate, mFrameCount, mLoopCount);
 
     ASSERT_NO_FATAL_FAILURE(stereoEffect.createEffect());
@@ -142,7 +156,7 @@
 
     for (size_t chMask : EffectTestHelper::kChMasks) {
         size_t channelCount = audio_channel_count_from_out_mask(chMask);
-        EffectTestHelper testEffect(mUuid, chMask, chMask, mSampleRate, mFrameCount, mLoopCount);
+        EffectTestHelper testEffect(&mUuid, chMask, chMask, mSampleRate, mFrameCount, mLoopCount);
 
         ASSERT_NO_FATAL_FAILURE(testEffect.createEffect());
         ASSERT_NO_FATAL_FAILURE(testEffect.setConfig());
@@ -170,7 +184,7 @@
         memcpy_to_i16_from_float(stereoTestI16.data(), stereoTestOutput.data(),
                                  mTotalFrameCount * FCC_2);
 
-        if (isBassBoost(mUuid)) {
+        if (EFFECT_BASS_BOOST == mEffectType) {
             // SNR must be above the threshold
             float snr = computeSnr<int16_t>(stereoRefI16.data(), stereoTestI16.data(),
                                             mTotalFrameCount * FCC_2);
@@ -191,6 +205,135 @@
                            ::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
                            ::testing::Range(0, (int)kNumEffectUuids)));
 
+using SingleEffectDefaultSetParamTestParam = std::tuple<int, int, int>;
+class SingleEffectDefaultSetParamTest
+    : public ::testing::TestWithParam<SingleEffectDefaultSetParamTestParam> {
+  public:
+    SingleEffectDefaultSetParamTest()
+        : mChMask(EffectTestHelper::kChMasks[std::get<0>(GetParam())]),
+          mChannelCount(audio_channel_count_from_out_mask(mChMask)),
+          mSampleRate(16000),
+          mFrameCount(EffectTestHelper::kFrameCounts[std::get<1>(GetParam())]),
+          mLoopCount(1),
+          mTotalFrameCount(mFrameCount * mLoopCount),
+          mEffectType((effect_type_t)std::get<2>(GetParam())),
+          mUuid(kEffectUuids.at(mEffectType)) {}
+
+    const size_t mChMask;
+    const size_t mChannelCount;
+    const size_t mSampleRate;
+    const size_t mFrameCount;
+    const size_t mLoopCount;
+    const size_t mTotalFrameCount;
+    const effect_type_t mEffectType;
+    const effect_uuid_t mUuid;
+};
+
+// Tests verifying that redundant setParam calls do not alter output
+TEST_P(SingleEffectDefaultSetParamTest, SimpleProcess) {
+    SCOPED_TRACE(testing::Message()
+                 << "chMask: " << mChMask << " sampleRate: " << mSampleRate
+                 << " frameCount: " << mFrameCount << " loopCount: " << mLoopCount);
+    // effect.process() handles mTotalFrameCount * mChannelCount samples in each call.
+    // This test calls process() twice per effect, so the input and output vectors are
+    // allocated with twice the number of samples processed in one call.
+    size_t totalNumSamples = 2 * mTotalFrameCount * mChannelCount;
+    // Initialize input buffer with deterministic pseudo-random values
+    std::vector<float> input(totalNumSamples);
+    std::minstd_rand gen(mChMask);
+    std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
+    for (auto& in : input) {
+        in = dis(gen);
+    }
+
+    uint32_t key;
+    int32_t value1, value2;
+    switch (mEffectType) {
+        case EFFECT_BASS_BOOST:
+            key = BASSBOOST_PARAM_STRENGTH;
+            value1 = 1;
+            value2 = 14;
+            break;
+        case EFFECT_VIRTUALIZER:
+            key = VIRTUALIZER_PARAM_STRENGTH;
+            value1 = 0;
+            value2 = 100;
+            break;
+        case EFFECT_EQUALIZER:
+            key = EQ_PARAM_CUR_PRESET;
+            value1 = 0;
+            value2 = 1;
+            break;
+        case EFFECT_VOLUME:
+            key = 0 /* VOLUME_PARAM_LEVEL */;
+            value1 = 0;
+            value2 = -100;
+            break;
+        default:
+            FAIL() << "Unsupported effect type: " << mEffectType;
+    }
+
+    EffectTestHelper refEffect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+
+    ASSERT_NO_FATAL_FAILURE(refEffect.createEffect());
+    ASSERT_NO_FATAL_FAILURE(refEffect.setConfig());
+
+    if (EFFECT_BASS_BOOST == mEffectType) {
+        ASSERT_NO_FATAL_FAILURE(refEffect.setParam<int16_t>(key, value1));
+    } else {
+        ASSERT_NO_FATAL_FAILURE(refEffect.setParam<int32_t>(key, value1));
+    }
+    std::vector<float> refOutput(totalNumSamples);
+    float* pInput = input.data();
+    float* pOutput = refOutput.data();
+    ASSERT_NO_FATAL_FAILURE(refEffect.process(pInput, pOutput));
+
+    pInput += totalNumSamples / 2;
+    pOutput += totalNumSamples / 2;
+    ASSERT_NO_FATAL_FAILURE(refEffect.process(pInput, pOutput));
+    ASSERT_NO_FATAL_FAILURE(refEffect.releaseEffect());
+
+    EffectTestHelper testEffect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+
+    ASSERT_NO_FATAL_FAILURE(testEffect.createEffect());
+    ASSERT_NO_FATAL_FAILURE(testEffect.setConfig());
+
+    if (EFFECT_BASS_BOOST == mEffectType) {
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value1));
+    } else {
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value1));
+    }
+
+    std::vector<float> testOutput(totalNumSamples);
+    pInput = input.data();
+    pOutput = testOutput.data();
+    ASSERT_NO_FATAL_FAILURE(testEffect.process(pInput, pOutput));
+
+    // Call setParam once to change the parameter, then call setParam again to
+    // restore it to its initial value, so the pair of calls should leave the
+    // effect state unchanged.
+    if (EFFECT_BASS_BOOST == mEffectType) {
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value2));
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value1));
+    } else {
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value2));
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value1));
+    }
+
+    pInput += totalNumSamples / 2;
+    pOutput += totalNumSamples / 2;
+    ASSERT_NO_FATAL_FAILURE(testEffect.process(pInput, pOutput));
+    ASSERT_NO_FATAL_FAILURE(testEffect.releaseEffect());
+    ASSERT_TRUE(areNearlySame(refOutput.data(), testOutput.data(), totalNumSamples))
+            << "Outputs do not match with default setParam calls";
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        EffectBundleTestAll, SingleEffectDefaultSetParamTest,
+        ::testing::Combine(::testing::Range(0, (int)EffectTestHelper::kNumChMasks),
+                           ::testing::Range(0, (int)EffectTestHelper::kNumFrameCounts),
+                           ::testing::Range(0, (int)kNumEffectUuids)));
+
 int main(int argc, char** argv) {
     ::testing::InitGoogleTest(&argc, argv);
     int status = RUN_ALL_TESTS();
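For readers less familiar with the parameterization used above: ::testing::Combine() instantiates the test once for every element of the Cartesian product of its ranges. A minimal, self-contained sketch of the same GoogleTest idiom (the fixture and suite names here are illustrative only, not part of this patch):

    #include <gtest/gtest.h>
    #include <tuple>

    using GridParam = std::tuple<int, int>;

    class GridTest : public ::testing::TestWithParam<GridParam> {};

    TEST_P(GridTest, IndicesAreInRange) {
        const auto [first, second] = GetParam();
        EXPECT_GE(first, 0);
        EXPECT_GE(second, 0);
    }

    // Range(0, 3) x Range(0, 2) -> 6 instantiations of GridTest.IndicesAreInRange.
    INSTANTIATE_TEST_SUITE_P(Demo, GridTest,
                             ::testing::Combine(::testing::Range(0, 3), ::testing::Range(0, 2)));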
diff --git a/media/libeffects/lvm/tests/EffectTestHelper.cpp b/media/libeffects/lvm/tests/EffectTestHelper.cpp
index 625c15a..ec727c7 100644
--- a/media/libeffects/lvm/tests/EffectTestHelper.cpp
+++ b/media/libeffects/lvm/tests/EffectTestHelper.cpp
@@ -50,23 +50,6 @@
     ASSERT_EQ(reply, 0) << "cmd_enable reply non zero " << reply;
 }
 
-void EffectTestHelper::setParam(uint32_t type, uint32_t value) {
-    int reply = 0;
-    uint32_t replySize = sizeof(reply);
-    uint32_t paramData[2] = {type, value};
-    auto effectParam = new effect_param_t[sizeof(effect_param_t) + sizeof(paramData)];
-    memcpy(&effectParam->data[0], &paramData[0], sizeof(paramData));
-    effectParam->psize = sizeof(paramData[0]);
-    effectParam->vsize = sizeof(paramData[1]);
-    int status = (*mEffectHandle)
-                         ->command(mEffectHandle, EFFECT_CMD_SET_PARAM,
-                                   sizeof(effect_param_t) + sizeof(paramData), effectParam,
-                                   &replySize, &reply);
-    delete[] effectParam;
-    ASSERT_EQ(status, 0) << "set_param returned an error " << status;
-    ASSERT_EQ(reply, 0) << "set_param reply non zero " << reply;
-}
-
 void EffectTestHelper::process(float* input, float* output) {
     audio_buffer_t inBuffer = {.frameCount = mFrameCount, .f32 = input};
     audio_buffer_t outBuffer = {.frameCount = mFrameCount, .f32 = output};
diff --git a/media/libeffects/lvm/tests/EffectTestHelper.h b/media/libeffects/lvm/tests/EffectTestHelper.h
index 3854d46..bcee84e 100644
--- a/media/libeffects/lvm/tests/EffectTestHelper.h
+++ b/media/libeffects/lvm/tests/EffectTestHelper.h
@@ -50,6 +50,23 @@
     return snr;
 }
 
+template <typename T>
+static bool areNearlySame(const T* ref, const T* tst, size_t count) {
+    T delta;
+    if constexpr (std::is_floating_point_v<T>) {
+        delta = std::numeric_limits<T>::epsilon();
+    } else {
+        delta = 1;
+    }
+    for (size_t i = 0; i < count; ++i) {
+        const double diff(tst[i] - ref[i]);
+        if (std::abs(diff) > delta) {
+            return false;
+        }
+    }
+    return true;
+}
+
 class EffectTestHelper {
   public:
     EffectTestHelper(const effect_uuid_t* uuid, size_t inChMask, size_t outChMask,
@@ -65,7 +82,25 @@
     void createEffect();
     void releaseEffect();
     void setConfig();
-    void setParam(uint32_t type, uint32_t val);
+    template <typename VALUE_DTYPE>
+    void setParam(uint32_t type, VALUE_DTYPE const value) {
+        int reply = 0;
+        uint32_t replySize = sizeof(reply);
+
+        uint8_t paramData[sizeof(effect_param_t) + sizeof(type) + sizeof(value)];
+        auto effectParam = (effect_param_t*)paramData;
+
+        memcpy(&effectParam->data[0], &type, sizeof(type));
+        memcpy(&effectParam->data[sizeof(type)], &value, sizeof(value));
+        effectParam->psize = sizeof(type);
+        effectParam->vsize = sizeof(value);
+        int status = (*mEffectHandle)
+                             ->command(mEffectHandle, EFFECT_CMD_SET_PARAM,
+                                       sizeof(effect_param_t) + sizeof(type) + sizeof(value),
+                                       effectParam, &replySize, &reply);
+        ASSERT_EQ(status, 0) << "set_param returned an error " << status;
+        ASSERT_EQ(reply, 0) << "set_param reply non zero " << reply;
+    }
     void process(float* input, float* output);
 
     // Corresponds to SNR for 1 bit difference between two int16_t signals
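A minimal usage sketch of the templated setParam() helper above, written as a hypothetical helper function; it assumes the headers already used by these tests are included and that a valid bass-boost UUID is passed in. The channel mask, sample rate, frame count, and strength value are illustrative only:

    // Hypothetical helper: applies a bass-boost strength to the effect identified by `uuid`.
    void setBassBoostStrength(const effect_uuid_t& uuid) {
        EffectTestHelper effect(&uuid, AUDIO_CHANNEL_OUT_STEREO, AUDIO_CHANNEL_OUT_STEREO,
                                44100 /* sampleRate */, 256 /* frameCount */, 1 /* loopCount */);
        ASSERT_NO_FATAL_FAILURE(effect.createEffect());
        ASSERT_NO_FATAL_FAILURE(effect.setConfig());
        // BASSBOOST_PARAM_STRENGTH carries a 16-bit value, so the template argument matters:
        // the packed effect_param_t ends up with psize = 4 and vsize = 2.
        ASSERT_NO_FATAL_FAILURE(effect.setParam<int16_t>(BASSBOOST_PARAM_STRENGTH, 500));
        ASSERT_NO_FATAL_FAILURE(effect.releaseEffect());
    }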
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index e169e3c..1287514 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -89,6 +89,8 @@
 
     local_include_dirs: ["Reverb"],
 
+    export_include_dirs: ["Reverb"],
+
     header_libs: [
         "libhardware_headers",
         "libaudioeffects",
diff --git a/media/libeffects/preprocessing/.clang-format b/media/libeffects/preprocessing/.clang-format
deleted file mode 120000
index f1b4f69..0000000
--- a/media/libeffects/preprocessing/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 19a8b2f..61a2bf5 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -150,6 +150,7 @@
 
 bool sHasAuxChannels[PREPROC_NUM_EFFECTS] = {
         false,  // PREPROC_AGC
+        false,  // PREPROC_AGC2
         true,   // PREPROC_AEC
         true,   // PREPROC_NS
 };
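The added entry keeps sHasAuxChannels[] in step with the preprocessing effect enum that indexes it; a small illustration of that invariant follows (the enum and array names are generic placeholders, not the real PREPROC_* identifiers):

    enum PreprocEffect { EFFECT_A, EFFECT_B, EFFECT_C, NUM_EFFECTS };
    static const bool kHasAuxChannels[NUM_EFFECTS] = {
            false,  // EFFECT_A
            true,   // EFFECT_B
            true,   // EFFECT_C
    };
    static_assert(sizeof(kHasAuxChannels) / sizeof(kHasAuxChannels[0]) == NUM_EFFECTS,
                  "every effect needs an entry");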
diff --git a/media/libeffects/preprocessing/tests/correlation.cpp b/media/libeffects/preprocessing/tests/correlation.cpp
index eb56fc3..0853673 100644
--- a/media/libeffects/preprocessing/tests/correlation.cpp
+++ b/media/libeffects/preprocessing/tests/correlation.cpp
@@ -36,7 +36,7 @@
                                                                    const int16_t* sigY, int len,
                                                                    int16_t enableCrossCorr) {
     float maxCorrVal = 0.f, prevCorrVal = 0.f;
-    int delay = 0, peakIndex = 0, flag = 0;
+    int peakIndex = 0, flag = 0;
     int loopLim = (1 == enableCrossCorr) ? len : kMinLoopLimitValue;
     std::vector<int> peakIndexVect(kNumPeaks, 0);
     std::vector<float> peakValueVect(kNumPeaks, 0.f);
@@ -47,7 +47,6 @@
         }
         corrVal /= len - i;
         if (corrVal > maxCorrVal) {
-            delay = i;
             maxCorrVal = corrVal;
         }
         // Correlation peaks are expected to be observed at equal intervals. The interval length is
diff --git a/media/libeffects/testlibs/Android.bp b/media/libeffects/testlibs/Android.bp
new file mode 100644
index 0000000..5ba56bb
--- /dev/null
+++ b/media/libeffects/testlibs/Android.bp
@@ -0,0 +1,77 @@
+// Test Reverb library
+package {
+    default_applicable_licenses: [
+        "frameworks_av_media_libeffects_testlibs_license",
+    ],
+}
+
+license {
+    name: "frameworks_av_media_libeffects_testlibs_license",
+    visibility: [":__subpackages__"],
+    license_kinds: [
+        "SPDX-license-identifier-Apache-2.0",
+    ],
+    license_text: [
+        "NOTICE",
+    ],
+}
+
+cc_library {
+    name: "libreverbtest",
+    host_supported: true,
+    vendor: true,
+    srcs: [
+        "EffectReverb.c",
+        "EffectsMath.c",
+    ],
+
+    shared_libs: [
+        "libcutils",
+        "liblog",
+    ],
+
+    relative_install_path: "soundfx",
+
+    cflags: [
+        "-fvisibility=hidden",
+        "-Wall",
+        "-Werror",
+        "-Wno-address-of-packed-member",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+    ],
+}
+
+cc_library {
+    name: "libequalizertest",
+    host_supported: true,
+    vendor: true,
+    srcs: [
+        "AudioBiquadFilter.cpp",
+        "AudioCoefInterpolator.cpp",
+        "AudioEqualizer.cpp",
+        "AudioPeakingFilter.cpp",
+        "AudioShelvingFilter.cpp",
+        "EffectEqualizer.cpp",
+        "EffectsMath.c",
+    ],
+
+    shared_libs: [
+        "libcutils",
+        "liblog",
+    ],
+
+    relative_install_path: "soundfx",
+
+    cflags: [
+        "-fvisibility=hidden",
+        "-Wall",
+        "-Werror",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+    ],
+}
diff --git a/media/libeffects/testlibs/Android.mk_ b/media/libeffects/testlibs/Android.mk_
deleted file mode 100644
index 14c373f..0000000
--- a/media/libeffects/testlibs/Android.mk_
+++ /dev/null
@@ -1,55 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-# Test Reverb library
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
-	EffectReverb.c.arm \
-	EffectsMath.c.arm
-
-LOCAL_CFLAGS := -O2
-
-LOCAL_SHARED_LIBRARIES := \
-	libcutils \
-	libdl
-
-LOCAL_MODULE_RELATIVE_PATH := soundfx
-LOCAL_MODULE := libreverbtest
-
-LOCAL_C_INCLUDES := \
-	$(call include-path-for, audio-effects) \
-	$(call include-path-for, graphics corecg)
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-
-# Test Equalizer library
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
-	EffectsMath.c.arm \
-	EffectEqualizer.cpp \
-	AudioBiquadFilter.cpp.arm \
-	AudioCoefInterpolator.cpp.arm \
-	AudioPeakingFilter.cpp.arm \
-	AudioShelvingFilter.cpp.arm \
-	AudioEqualizer.cpp.arm
-
-LOCAL_CFLAGS := -O2
-
-LOCAL_SHARED_LIBRARIES := \
-	libcutils \
-	libdl
-
-LOCAL_MODULE_RELATIVE_PATH := soundfx
-LOCAL_MODULE := libequalizertest
-
-LOCAL_C_INCLUDES := \
-	$(call include-path-for, graphics corecg) \
-	$(call include-path-for, audio-effects)
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-
diff --git a/media/libeffects/testlibs/AudioEqualizer.cpp b/media/libeffects/testlibs/AudioEqualizer.cpp
index 4f3a308..141750b 100644
--- a/media/libeffects/testlibs/AudioEqualizer.cpp
+++ b/media/libeffects/testlibs/AudioEqualizer.cpp
@@ -19,7 +19,7 @@
 #include <assert.h>
 #include <stdlib.h>
 #include <new>
-#include <utils/Log.h>
+#include <log/log.h>
 
 #include "AudioEqualizer.h"
 #include "AudioPeakingFilter.h"
diff --git a/media/libeffects/testlibs/AudioPeakingFilter.cpp b/media/libeffects/testlibs/AudioPeakingFilter.cpp
index 99323ac..4257eca 100644
--- a/media/libeffects/testlibs/AudioPeakingFilter.cpp
+++ b/media/libeffects/testlibs/AudioPeakingFilter.cpp
@@ -87,9 +87,9 @@
 void AudioPeakingFilter::commit(bool immediate) {
     audio_coef_t coefs[5];
     int intCoord[3] = {
-        mFrequency >> FREQ_PRECISION_BITS,
+        (int)(mFrequency >> FREQ_PRECISION_BITS),
         mGain >> GAIN_PRECISION_BITS,
-        mBandwidth >> BANDWIDTH_PRECISION_BITS
+        (int)(mBandwidth >> BANDWIDTH_PRECISION_BITS)
     };
     uint32_t fracCoord[3] = {
         mFrequency << (32 - FREQ_PRECISION_BITS),
diff --git a/media/libeffects/testlibs/AudioShelvingFilter.cpp b/media/libeffects/testlibs/AudioShelvingFilter.cpp
index e031287..ad43c5a 100644
--- a/media/libeffects/testlibs/AudioShelvingFilter.cpp
+++ b/media/libeffects/testlibs/AudioShelvingFilter.cpp
@@ -89,8 +89,8 @@
 void AudioShelvingFilter::commit(bool immediate) {
     audio_coef_t coefs[5];
     int intCoord[2] = {
-        mFrequency >> FREQ_PRECISION_BITS,
-        mGain >> GAIN_PRECISION_BITS
+        (int)(mFrequency >> FREQ_PRECISION_BITS),
+        (int)(mGain >> GAIN_PRECISION_BITS)
     };
     uint32_t fracCoord[2] = {
         mFrequency << (32 - FREQ_PRECISION_BITS),
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index db4d009..72b530d 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -131,7 +131,8 @@
                             int32_t ioId,
                             effect_handle_t *pHandle) {
     int ret;
-    int i;
+    (void)sessionId;
+    (void)ioId;
 
     ALOGV("EffectLibCreateEffect start");
 
@@ -160,7 +161,7 @@
     pContext->state = EQUALIZER_STATE_INITIALIZED;
 
     ALOGV("EffectLibCreateEffect %p, size %d",
-         pContext, AudioEqualizer::GetInstanceSize(kNumBands)+sizeof(EqualizerContext));
+         pContext, (int)(AudioEqualizer::GetInstanceSize(kNumBands)+sizeof(EqualizerContext)));
 
     return 0;
 
@@ -294,7 +295,6 @@
 
 int Equalizer_init(EqualizerContext *pContext)
 {
-    int status;
 
     ALOGV("Equalizer_init start");
 
@@ -630,7 +630,6 @@
         void *pCmdData, uint32_t *replySize, void *pReplyData) {
 
     android::EqualizerContext * pContext = (android::EqualizerContext *) self;
-    int retsize;
 
     if (pContext == NULL || pContext->state == EQUALIZER_STATE_UNINITIALIZED) {
         return -EINVAL;
@@ -750,13 +749,13 @@
         NULL
 };
 
-
+__attribute__ ((visibility ("default")))
 audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
-    tag : AUDIO_EFFECT_LIBRARY_TAG,
-    version : EFFECT_LIBRARY_API_VERSION,
-    name : "Test Equalizer Library",
-    implementor : "The Android Open Source Project",
-    create_effect : android::EffectCreate,
-    release_effect : android::EffectRelease,
-    get_descriptor : android::EffectGetDescriptor,
+    .tag = AUDIO_EFFECT_LIBRARY_TAG,
+    .version = EFFECT_LIBRARY_API_VERSION,
+    .name = "Test Equalizer Library",
+    .implementor = "The Android Open Source Project",
+    .create_effect = android::EffectCreate,
+    .release_effect = android::EffectRelease,
+    .get_descriptor = android::EffectGetDescriptor,
 };
diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c
index fce9bed..efba4f4 100644
--- a/media/libeffects/testlibs/EffectReverb.c
+++ b/media/libeffects/testlibs/EffectReverb.c
@@ -107,6 +107,8 @@
     const effect_descriptor_t *desc;
     int aux = 0;
     int preset = 0;
+    (void)sessionId;
+    (void)ioId;
 
     ALOGV("EffectLibCreateEffect start");
 
@@ -149,7 +151,7 @@
 
     module->context.mState = REVERB_STATE_INITIALIZED;
 
-    ALOGV("EffectLibCreateEffect %p ,size %d", module, sizeof(reverb_module_t));
+    ALOGV("EffectLibCreateEffect %p ,size %zu", module, sizeof(reverb_module_t));
 
     return 0;
 }
@@ -283,7 +285,6 @@
         void *pCmdData, uint32_t *replySize, void *pReplyData) {
     reverb_module_t *pRvbModule = (reverb_module_t *) self;
     reverb_object_t *pReverb;
-    int retsize;
 
     if (pRvbModule == NULL ||
             pRvbModule->context.mState == REVERB_STATE_UNINITIALIZED) {
@@ -758,7 +759,6 @@
     int32_t *pValue32;
     int16_t *pValue16;
     t_reverb_settings *pProperties;
-    int32_t i;
     int32_t temp;
     int32_t temp2;
     uint32_t size;
@@ -1654,7 +1654,6 @@
     int32_t nApOut;
 
     int32_t j;
-    int32_t nEarlyOut;
 
     int32_t tempValue;
 
@@ -2203,6 +2202,7 @@
     return 0;
 }
 
+__attribute__ ((visibility ("default")))
 audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
     .tag = AUDIO_EFFECT_LIBRARY_TAG,
     .version = EFFECT_LIBRARY_API_VERSION,
diff --git a/media/libeffects/testlibs/EffectReverb.h b/media/libeffects/testlibs/EffectReverb.h
index 756c5ea..8f405d4 100644
--- a/media/libeffects/testlibs/EffectReverb.h
+++ b/media/libeffects/testlibs/EffectReverb.h
@@ -443,7 +443,4 @@
 */
 static int ReverbUpdateRoom(reverb_object_t* pReverbData, bool fullUpdate);
 
-
-static int ReverbComputeConstants(reverb_object_t *pReverbData, uint32_t samplingRate);
-
 #endif /*ANDROID_EFFECTREVERB_H_*/
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
new file mode 100644
index 0000000..1d41889
--- /dev/null
+++ b/media/libheadtracking/Android.bp
@@ -0,0 +1,82 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_library {
+    name: "libheadtracking",
+    host_supported: true,
+    srcs: [
+      "HeadTrackingProcessor.cpp",
+      "ModeSelector.cpp",
+      "Pose.cpp",
+      "PoseBias.cpp",
+      "PoseDriftCompensator.cpp",
+      "PoseRateLimiter.cpp",
+      "QuaternionUtil.cpp",
+      "ScreenHeadFusion.cpp",
+      "StillnessDetector.cpp",
+      "Twist.cpp",
+    ],
+    export_include_dirs: [
+        "include",
+    ],
+    header_libs: [
+        "libeigen",
+    ],
+    export_header_lib_headers: [
+        "libeigen",
+    ],
+}
+
+cc_library {
+    name: "libheadtracking-binding",
+    srcs: [
+      "SensorPoseProvider.cpp",
+    ],
+    shared_libs: [
+        "libheadtracking",
+        "liblog",
+        "libsensor",
+        "libutils",
+    ],
+    export_shared_lib_headers: [
+        "libheadtracking",
+    ],
+}
+
+cc_binary {
+    name: "SensorPoseProvider-example",
+    srcs: [
+        "SensorPoseProvider-example.cpp",
+    ],
+    shared_libs: [
+        "libheadtracking",
+        "libheadtracking-binding",
+        "libsensor",
+        "libutils",
+    ],
+}
+
+cc_test_host {
+    name: "libheadtracking-test",
+    srcs: [
+        "HeadTrackingProcessor-test.cpp",
+        "ModeSelector-test.cpp",
+        "Pose-test.cpp",
+        "PoseBias-test.cpp",
+        "PoseDriftCompensator-test.cpp",
+        "PoseRateLimiter-test.cpp",
+        "QuaternionUtil-test.cpp",
+        "ScreenHeadFusion-test.cpp",
+        "StillnessDetector-test.cpp",
+        "Twist-test.cpp",
+    ],
+    shared_libs: [
+        "libheadtracking",
+    ],
+}
diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp b/media/libheadtracking/HeadTrackingProcessor-test.cpp
new file mode 100644
index 0000000..299192f
--- /dev/null
+++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/HeadTrackingProcessor.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = HeadTrackingProcessor::Options;
+
+TEST(HeadTrackingProcessor, Initial) {
+    for (auto mode : {HeadTrackingMode::STATIC, HeadTrackingMode::WORLD_RELATIVE,
+                      HeadTrackingMode::SCREEN_RELATIVE}) {
+        std::unique_ptr<HeadTrackingProcessor> processor =
+                createHeadTrackingProcessor(Options{}, mode);
+        processor->calculate(0);
+        EXPECT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+        EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+    }
+}
+
+TEST(HeadTrackingProcessor, BasicComposition) {
+    const Pose3f worldToHead{{1, 2, 3}, Quaternionf::UnitRandom()};
+    const Pose3f worldToScreen{{4, 5, 6}, Quaternionf::UnitRandom()};
+    const Pose3f screenToStage{{7, 8, 9}, Quaternionf::UnitRandom()};
+    const float physicalToLogical = M_PI_2;
+
+    std::unique_ptr<HeadTrackingProcessor> processor =
+            createHeadTrackingProcessor(Options{}, HeadTrackingMode::SCREEN_RELATIVE);
+
+    // Establish a baseline for the drift compensators.
+    processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+    processor->setWorldToScreenPose(0, Pose3f());
+
+    processor->setDisplayOrientation(physicalToLogical);
+    processor->setWorldToHeadPose(0, worldToHead, Twist3f());
+    processor->setWorldToScreenPose(0, worldToScreen);
+    processor->setScreenToStagePose(screenToStage);
+    processor->calculate(0);
+    ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
+    EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * worldToScreen *
+                                                       Pose3f(rotateY(-physicalToLogical)) *
+                                                       screenToStage);
+
+    processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+    processor->calculate(0);
+    ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::WORLD_RELATIVE);
+    EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+
+    processor->setDesiredMode(HeadTrackingMode::STATIC);
+    processor->calculate(0);
+    ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+    EXPECT_EQ(processor->getHeadToStagePose(), screenToStage);
+}
+
+TEST(HeadTrackingProcessor, Prediction) {
+    const Pose3f worldToHead{{1, 2, 3}, Quaternionf::UnitRandom()};
+    const Twist3f headTwist{{4, 5, 6}, quaternionToRotationVector(Quaternionf::UnitRandom()) / 10};
+    const Pose3f worldToScreen{{4, 5, 6}, Quaternionf::UnitRandom()};
+
+    std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
+            Options{.predictionDuration = 2.f}, HeadTrackingMode::WORLD_RELATIVE);
+
+    // Establish a baseline for the drift compensators.
+    processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+    processor->setWorldToScreenPose(0, Pose3f());
+
+    processor->setWorldToHeadPose(0, worldToHead, headTwist);
+    processor->setWorldToScreenPose(0, worldToScreen);
+    processor->calculate(0);
+    ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::WORLD_RELATIVE);
+    EXPECT_EQ(processor->getHeadToStagePose(), (worldToHead * integrate(headTwist, 2.f)).inverse());
+
+    processor->setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+    processor->calculate(0);
+    ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
+    EXPECT_EQ(processor->getHeadToStagePose(),
+              (worldToHead * integrate(headTwist, 2.f)).inverse() * worldToScreen);
+
+    processor->setDesiredMode(HeadTrackingMode::STATIC);
+    processor->calculate(0);
+    ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+    EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+}
+
+TEST(HeadTrackingProcessor, SmoothModeSwitch) {
+    const Pose3f targetHeadToWorld = Pose3f({4, 0, 0}, rotateZ(M_PI / 2));
+
+    std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
+            Options{.maxTranslationalVelocity = 1}, HeadTrackingMode::STATIC);
+
+    // Establish a baseline for the drift compensators.
+    processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+    processor->setWorldToScreenPose(0, Pose3f());
+
+    processor->calculate(0);
+
+    processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+    processor->setWorldToHeadPose(0, targetHeadToWorld.inverse(), Twist3f());
+
+    // We're expecting a gradual move to the target.
+    processor->calculate(0);
+    EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+    EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+
+    processor->calculate(2);
+    EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+    EXPECT_EQ(processor->getHeadToStagePose(), Pose3f({2, 0, 0}, rotateZ(M_PI / 4)));
+
+    processor->calculate(4);
+    EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+    EXPECT_EQ(processor->getHeadToStagePose(), targetHeadToWorld);
+
+    // Now that we've reached the target, we should no longer be rate limiting.
+    processor->setWorldToHeadPose(4, Pose3f(), Twist3f());
+    processor->calculate(5);
+    EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+    EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
new file mode 100644
index 0000000..71fae8a
--- /dev/null
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/HeadTrackingProcessor.h"
+
+#include "ModeSelector.h"
+#include "PoseBias.h"
+#include "QuaternionUtil.h"
+#include "ScreenHeadFusion.h"
+#include "StillnessDetector.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+class HeadTrackingProcessorImpl : public HeadTrackingProcessor {
+  public:
+    HeadTrackingProcessorImpl(const Options& options, HeadTrackingMode initialMode)
+        : mOptions(options),
+          mHeadStillnessDetector(StillnessDetector::Options{
+                  .defaultValue = false,
+                  .windowDuration = options.autoRecenterWindowDuration,
+                  .translationalThreshold = options.autoRecenterTranslationalThreshold,
+                  .rotationalThreshold = options.autoRecenterRotationalThreshold,
+          }),
+          mScreenStillnessDetector(StillnessDetector::Options{
+                  .defaultValue = true,
+                  .windowDuration = options.screenStillnessWindowDuration,
+                  .translationalThreshold = options.screenStillnessTranslationalThreshold,
+                  .rotationalThreshold = options.screenStillnessRotationalThreshold,
+          }),
+          mModeSelector(ModeSelector::Options{.freshnessTimeout = options.freshnessTimeout},
+                        initialMode),
+          mRateLimiter(PoseRateLimiter::Options{
+                  .maxTranslationalVelocity = options.maxTranslationalVelocity,
+                  .maxRotationalVelocity = options.maxRotationalVelocity}) {}
+
+    void setDesiredMode(HeadTrackingMode mode) override { mModeSelector.setDesiredMode(mode); }
+
+    void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
+                            const Twist3f& headTwist) override {
+        Pose3f predictedWorldToHead =
+                worldToHead * integrate(headTwist, mOptions.predictionDuration);
+        mHeadPoseBias.setInput(predictedWorldToHead);
+        mHeadStillnessDetector.setInput(timestamp, predictedWorldToHead);
+        mWorldToHeadTimestamp = timestamp;
+    }
+
+    void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) override {
+        if (mPhysicalToLogicalAngle != mPendingPhysicalToLogicalAngle) {
+            // We're introducing an artificial discontinuity. Enable the rate limiter.
+            mRateLimiter.enable();
+            mPhysicalToLogicalAngle = mPendingPhysicalToLogicalAngle;
+        }
+
+        Pose3f worldToLogicalScreen = worldToScreen * Pose3f(rotateY(-mPhysicalToLogicalAngle));
+        mScreenPoseBias.setInput(worldToLogicalScreen);
+        mScreenStillnessDetector.setInput(timestamp, worldToLogicalScreen);
+        mWorldToScreenTimestamp = timestamp;
+    }
+
+    void setScreenToStagePose(const Pose3f& screenToStage) override {
+        mModeSelector.setScreenToStagePose(screenToStage);
+    }
+
+    void setDisplayOrientation(float physicalToLogicalAngle) override {
+        mPendingPhysicalToLogicalAngle = physicalToLogicalAngle;
+    }
+
+    void calculate(int64_t timestamp) override {
+        // Handle the screen first, since it might trigger a recentering of the head.
+        if (mWorldToScreenTimestamp.has_value()) {
+            const Pose3f worldToLogicalScreen = mScreenPoseBias.getOutput();
+            bool screenStable = mScreenStillnessDetector.calculate(timestamp);
+            mModeSelector.setScreenStable(mWorldToScreenTimestamp.value(), screenStable);
+            // Whenever the screen is unstable, recenter the head pose.
+            if (!screenStable) {
+                recenter(true, false);
+            }
+            mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
+                                                   worldToLogicalScreen);
+        }
+
+        // Handle head.
+        if (mWorldToHeadTimestamp.has_value()) {
+            Pose3f worldToHead = mHeadPoseBias.getOutput();
+            // Auto-recenter.
+            if (mHeadStillnessDetector.calculate(timestamp)) {
+                recenter(true, false);
+                worldToHead = mHeadPoseBias.getOutput();
+            }
+
+            mScreenHeadFusion.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+            mModeSelector.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+        }
+
+        auto maybeScreenToHead = mScreenHeadFusion.calculate();
+        if (maybeScreenToHead.has_value()) {
+            mModeSelector.setScreenToHeadPose(maybeScreenToHead->timestamp,
+                                              maybeScreenToHead->pose);
+        } else {
+            mModeSelector.setScreenToHeadPose(timestamp, std::nullopt);
+        }
+
+        HeadTrackingMode prevMode = mModeSelector.getActualMode();
+        mModeSelector.calculate(timestamp);
+        if (mModeSelector.getActualMode() != prevMode) {
+            // Mode has changed, enable rate limiting.
+            mRateLimiter.enable();
+        }
+        mRateLimiter.setTarget(mModeSelector.getHeadToStagePose());
+        mHeadToStagePose = mRateLimiter.calculatePose(timestamp);
+    }
+
+    Pose3f getHeadToStagePose() const override { return mHeadToStagePose; }
+
+    HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); }
+
+    void recenter(bool recenterHead, bool recenterScreen) override {
+        if (recenterHead) {
+            mHeadPoseBias.recenter();
+            mHeadStillnessDetector.reset();
+        }
+        if (recenterScreen) {
+            mScreenPoseBias.recenter();
+            mScreenStillnessDetector.reset();
+        }
+
+        // If a sensor being recentered is included in the current mode, apply rate limiting to
+        // avoid discontinuities.
+        HeadTrackingMode mode = mModeSelector.getActualMode();
+        if ((recenterHead && (mode == HeadTrackingMode::WORLD_RELATIVE ||
+                              mode == HeadTrackingMode::SCREEN_RELATIVE)) ||
+            (recenterScreen && mode == HeadTrackingMode::SCREEN_RELATIVE)) {
+            mRateLimiter.enable();
+        }
+    }
+
+  private:
+    const Options mOptions;
+    float mPhysicalToLogicalAngle = 0;
+    // We store the physical-to-logical angle as "pending" until the next world-to-screen sample
+    // to which it applies arrives.
+    float mPendingPhysicalToLogicalAngle = 0;
+    std::optional<int64_t> mWorldToHeadTimestamp;
+    std::optional<int64_t> mWorldToScreenTimestamp;
+    Pose3f mHeadToStagePose;
+    PoseBias mHeadPoseBias;
+    PoseBias mScreenPoseBias;
+    StillnessDetector mHeadStillnessDetector;
+    StillnessDetector mScreenStillnessDetector;
+    ScreenHeadFusion mScreenHeadFusion;
+    ModeSelector mModeSelector;
+    PoseRateLimiter mRateLimiter;
+};
+
+}  // namespace
+
+std::unique_ptr<HeadTrackingProcessor> createHeadTrackingProcessor(
+        const HeadTrackingProcessor::Options& options, HeadTrackingMode initialMode) {
+    return std::make_unique<HeadTrackingProcessorImpl>(options, initialMode);
+}
+
+}  // namespace media
+}  // namespace android
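The setters-then-calculate() contract above mirrors the test file earlier in this patch; a minimal caller-side sketch, with placeholder timestamps, identity poses, and an illustrative option value:

    #include "media/HeadTrackingProcessor.h"

    using namespace android::media;

    Pose3f computeHeadToStage() {
        auto processor = createHeadTrackingProcessor(
                HeadTrackingProcessor::Options{.maxTranslationalVelocity = 1.f},
                HeadTrackingMode::WORLD_RELATIVE);
        // Per sample: feed the latest sensor poses, then recompute the output.
        processor->setWorldToHeadPose(/*timestamp=*/0, Pose3f(), Twist3f());
        processor->setWorldToScreenPose(/*timestamp=*/0, Pose3f());
        processor->calculate(/*timestamp=*/0);
        return processor->getHeadToStagePose();
    }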
diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp
new file mode 100644
index 0000000..a136e6b
--- /dev/null
+++ b/media/libheadtracking/ModeSelector-test.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModeSelector.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+TEST(ModeSelector, Initial) {
+    ModeSelector::Options options;
+    ModeSelector selector(options);
+
+    selector.calculate(0);
+    EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), Pose3f());
+}
+
+TEST(ModeSelector, InitialWorldRelative) {
+    const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options;
+    ModeSelector selector(options, HeadTrackingMode::WORLD_RELATIVE);
+
+    selector.setWorldToHeadPose(0, worldToHead);
+    selector.setScreenStable(0, true);
+    selector.calculate(0);
+    EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse());
+}
+
+TEST(ModeSelector, InitialScreenRelative) {
+    const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options;
+    ModeSelector selector(options, HeadTrackingMode::SCREEN_RELATIVE);
+
+    selector.setScreenToHeadPose(0, screenToHead);
+    selector.calculate(0);
+    EXPECT_EQ(HeadTrackingMode::SCREEN_RELATIVE, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), screenToHead.inverse());
+}
+
+TEST(ModeSelector, WorldRelative) {
+    const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options;
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+    selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+    selector.setWorldToHeadPose(0, worldToHead);
+    selector.setScreenStable(0, true);
+    selector.calculate(0);
+    EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, WorldRelativeUnstable) {
+    const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options{.freshnessTimeout = 100};
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+    selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+    selector.setWorldToHeadPose(0, worldToHead);
+    selector.setScreenStable(0, false);
+    selector.calculate(10);
+    EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), screenToStage);
+}
+
+TEST(ModeSelector, WorldRelativeStableStale) {
+    const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options{.freshnessTimeout = 100};
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+    selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+    selector.setWorldToHeadPose(100, worldToHead);
+    selector.setScreenStable(0, true);
+    selector.calculate(101);
+    EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), screenToStage);
+}
+
+TEST(ModeSelector, WorldRelativeStale) {
+    const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options{.freshnessTimeout = 100};
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+    selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+    selector.setWorldToHeadPose(0, worldToHead);
+    selector.calculate(101);
+    EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelative) {
+    const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options;
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+    selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+    selector.setScreenToHeadPose(0, screenToHead);
+    selector.calculate(0);
+    EXPECT_EQ(HeadTrackingMode::SCREEN_RELATIVE, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), screenToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelativeStaleToWorldRelative) {
+    const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+    const Pose3f worldToHead({7, 8, 9}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options{.freshnessTimeout = 100};
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+    selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+    selector.setScreenToHeadPose(0, screenToHead);
+    selector.setWorldToHeadPose(50, worldToHead);
+    selector.setScreenStable(50, true);
+    selector.calculate(101);
+    EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelativeInvalidToWorldRelative) {
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+    const Pose3f worldToHead({7, 8, 9}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options;
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+
+    selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+    selector.setScreenToHeadPose(50, std::nullopt);
+    selector.setWorldToHeadPose(50, worldToHead);
+    selector.setScreenStable(50, true);
+    selector.calculate(101);
+    EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/ModeSelector.cpp b/media/libheadtracking/ModeSelector.cpp
new file mode 100644
index 0000000..cb3a27f
--- /dev/null
+++ b/media/libheadtracking/ModeSelector.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModeSelector.h"
+
+namespace android {
+namespace media {
+
+ModeSelector::ModeSelector(const Options& options, HeadTrackingMode initialMode)
+    : mOptions(options), mDesiredMode(initialMode), mActualMode(initialMode) {}
+
+void ModeSelector::setDesiredMode(HeadTrackingMode mode) {
+    mDesiredMode = mode;
+}
+
+void ModeSelector::setScreenToStagePose(const Pose3f& screenToStage) {
+    mScreenToStage = screenToStage;
+}
+
+void ModeSelector::setScreenToHeadPose(int64_t timestamp,
+                                       const std::optional<Pose3f>& screenToHead) {
+    mScreenToHead = screenToHead;
+    mScreenToHeadTimestamp = timestamp;
+}
+
+void ModeSelector::setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead) {
+    mWorldToHead = worldToHead;
+    mWorldToHeadTimestamp = timestamp;
+}
+
+void ModeSelector::setScreenStable(int64_t timestamp, bool stable) {
+    mScreenStable = stable;
+    mScreenStableTimestamp = timestamp;
+}
+
+void ModeSelector::calculateActualMode(int64_t timestamp) {
+    bool isValidScreenToHead = mScreenToHead.has_value() &&
+                               timestamp - mScreenToHeadTimestamp < mOptions.freshnessTimeout;
+    bool isValidWorldToHead = mWorldToHead.has_value() &&
+                              timestamp - mWorldToHeadTimestamp < mOptions.freshnessTimeout;
+    bool isValidScreenStable = mScreenStable.has_value() &&
+                               timestamp - mScreenStableTimestamp < mOptions.freshnessTimeout;
+
+    HeadTrackingMode mode = mDesiredMode;
+
+    // Optional downgrade from screen-relative to world-relative.
+    if (mode == HeadTrackingMode::SCREEN_RELATIVE) {
+        if (!isValidScreenToHead) {
+            mode = HeadTrackingMode::WORLD_RELATIVE;
+        }
+    }
+
+    // Optional downgrade from world-relative to static.
+    if (mode == HeadTrackingMode::WORLD_RELATIVE) {
+        if (!isValidWorldToHead || !isValidScreenStable || !mScreenStable.value()) {
+            mode = HeadTrackingMode::STATIC;
+        }
+    }
+
+    mActualMode = mode;
+}
+
+void ModeSelector::calculate(int64_t timestamp) {
+    calculateActualMode(timestamp);
+
+    switch (mActualMode) {
+        case HeadTrackingMode::STATIC:
+            mHeadToStage = mScreenToStage;
+            break;
+
+        case HeadTrackingMode::WORLD_RELATIVE:
+            mHeadToStage = mWorldToHead.value().inverse() * mScreenToStage;
+            break;
+
+        case HeadTrackingMode::SCREEN_RELATIVE:
+            mHeadToStage = mScreenToHead.value().inverse() * mScreenToStage;
+            break;
+    }
+}
+
+Pose3f ModeSelector::getHeadToStagePose() const {
+    return mHeadToStage;
+}
+
+HeadTrackingMode ModeSelector::getActualMode() const {
+    return mActualMode;
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/ModeSelector.h b/media/libheadtracking/ModeSelector.h
new file mode 100644
index 0000000..e537040
--- /dev/null
+++ b/media/libheadtracking/ModeSelector.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/HeadTrackingMode.h"
+#include "media/Pose.h"
+
+#include "PoseRateLimiter.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Head-tracking mode selector.
+ *
+ * This class is responsible for producing the pose that determines audio virtualization, based on
+ * a number of available sources and a selectable mode.
+ *
+ * Typical flow is:
+ * ModeSelector selector(...);
+ * while (...) {
+ *     // Set inputs.
+ *     selector.setFoo(...);
+ *     selector.setBar(...);
+ *
+ *     // Update outputs based on inputs.
+ *     selector.calculate(...);
+ *
+ *     // Get outputs.
+ *     Pose3f pose = selector.getHeadToStagePose();
+ * }
+ *
+ * This class is not thread-safe, but thread-compatible.
+ *
+ * For details on the frames of reference involved, their composition and the definitions to the
+ * different modes, refer to:
+ * go/immersive-audio-frames
+ *
+ * The actual mode may deviate from the desired mode in the following cases:
+ * - When we cannot get a valid and fresh estimate of the screen-to-head pose, we will fall back
+ *   from screen-relative to world-relative.
+ * - When we cannot get a fresh estimate of the world-to-head pose, we will fall back from
+ *   world-relative to static.
+ * - In world-relative mode, if the screen is unstable, we will fall back to static.
+ *
+ * All the timestamps used here are of arbitrary units and origin. They just need to be consistent
+ * between all the calls and with the Options provided for determining freshness and rate limiting.
+ */
+class ModeSelector {
+  public:
+    struct Options {
+        int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
+    };
+
+    ModeSelector(const Options& options, HeadTrackingMode initialMode = HeadTrackingMode::STATIC);
+
+    /** Sets the desired head-tracking mode. */
+    void setDesiredMode(HeadTrackingMode mode);
+
+    /**
+     * Set the screen-to-stage pose, used in all modes.
+     */
+    void setScreenToStagePose(const Pose3f& screenToStage);
+
+    /**
+     * Set the screen-to-head pose, used in screen-relative mode.
+     * The timestamp needs to reflect how fresh the sample is (not necessarily which point in time
+     * it applies to). nullopt can be used if it is determined that the listener is not in front of
+     * the screen.
+     */
+    void setScreenToHeadPose(int64_t timestamp, const std::optional<Pose3f>& screenToHead);
+
+    /**
+     * Set the world-to-head pose, used in world-relative mode.
+     * The timestamp needs to reflect how fresh the sample is (not necessarily which point in time
+     * it applies to).
+     */
+    void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
+
+    /**
+     * Set whether the screen is considered stable.
+     * The timestamp needs to reflect how fresh the sample is.
+     */
+    void setScreenStable(int64_t timestamp, bool stable);
+
+    /**
+     * Process all the previous inputs and update the outputs.
+     */
+    void calculate(int64_t timestamp);
+
+    /**
+     * Get the aggregate head-to-stage pose (primary output of this module).
+     */
+    Pose3f getHeadToStagePose() const;
+
+    /**
+     * Get the actual head-tracking mode (which may deviate from the desired one as mentioned in the
+     * class documentation above).
+     */
+    HeadTrackingMode getActualMode() const;
+
+  private:
+    const Options mOptions;
+
+    HeadTrackingMode mDesiredMode;
+    Pose3f mScreenToStage;
+    std::optional<Pose3f> mScreenToHead;
+    int64_t mScreenToHeadTimestamp;
+    std::optional<Pose3f> mWorldToHead;
+    int64_t mWorldToHeadTimestamp;
+    std::optional<bool> mScreenStable;
+    int64_t mScreenStableTimestamp;
+
+    HeadTrackingMode mActualMode;
+    Pose3f mHeadToStage;
+
+    void calculateActualMode(int64_t timestamp);
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/OWNERS b/media/libheadtracking/OWNERS
new file mode 100644
index 0000000..e5d0370
--- /dev/null
+++ b/media/libheadtracking/OWNERS
@@ -0,0 +1,2 @@
+ytai@google.com
+elaurent@google.com
diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp
new file mode 100644
index 0000000..a9e18ce
--- /dev/null
+++ b/media/libheadtracking/Pose-test.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Pose.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using android::media::Pose3f;
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(Pose, CtorDefault) {
+    Pose3f pose;
+    EXPECT_EQ(pose.translation(), Vector3f::Zero());
+    EXPECT_EQ(pose.rotation(), Quaternionf::Identity());
+}
+
+TEST(Pose, CtorRotation) {
+    Quaternionf rot = Quaternionf::UnitRandom();
+    Pose3f pose(rot);
+    EXPECT_EQ(pose.translation(), Vector3f::Zero());
+    EXPECT_EQ(pose.rotation(), rot);
+}
+
+TEST(Pose, CtorTranslation) {
+    Vector3f trans{1, 2, 3};
+    Pose3f pose(trans);
+    EXPECT_EQ(pose.translation(), trans);
+    EXPECT_EQ(pose.rotation(), Quaternionf::Identity());
+}
+
+TEST(Pose, CtorTranslationRotation) {
+    Quaternionf rot = Quaternionf::UnitRandom();
+    Vector3f trans{1, 2, 3};
+    Pose3f pose(trans, rot);
+    EXPECT_EQ(pose.translation(), trans);
+    EXPECT_EQ(pose.rotation(), rot);
+}
+
+TEST(Pose, Inverse) {
+    Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+    EXPECT_EQ(pose.inverse() * pose, Pose3f());
+    EXPECT_EQ(pose * pose.inverse(), Pose3f());
+}
+
+TEST(Pose, IsApprox) {
+    constexpr float eps = std::numeric_limits<float>::epsilon();
+
+    EXPECT_EQ(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+              Pose3f({1 + eps, 2 + eps, 3 + eps},
+                     rotationVectorToQuaternion({4 + eps, 5 + eps, 6 + eps})));
+
+    EXPECT_NE(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+              Pose3f({1.01, 2, 3}, rotationVectorToQuaternion({4, 5, 6})));
+
+    EXPECT_NE(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+              Pose3f({1, 2, 3}, rotationVectorToQuaternion({4.01, 5, 6})));
+}
+
+TEST(Pose, Compose) {
+    Pose3f p1({1, 2, 3}, rotateZ(M_PI_2));
+    Pose3f p2({4, 5, 6}, rotateX(M_PI_2));
+    Pose3f p3({-4, 6, 9}, p1.rotation() * p2.rotation());
+    EXPECT_EQ(p1 * p2, p3);
+}
+
+TEST(Pose, MoveWithRateLimit_NoLimit) {
+    Pose3f from({1, 1, 1}, Quaternionf::Identity());
+    Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+    auto result = moveWithRateLimit(from, to, 1, 10, 10);
+    EXPECT_EQ(std::get<0>(result), to);
+    EXPECT_FALSE(std::get<1>(result));
+}
+
+TEST(Pose, MoveWithRateLimit_TranslationLimit) {
+    Pose3f from({1, 1, 1}, Quaternionf::Identity());
+    Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+    auto result = moveWithRateLimit(from, to, 1, 0.5f, 10);
+    Pose3f expected({1, 1, 1.5f}, rotateZ(M_PI_4));
+    EXPECT_EQ(std::get<0>(result), expected);
+    EXPECT_TRUE(std::get<1>(result));
+}
+
+TEST(Pose, MoveWithRateLimit_RotationLimit) {
+    Pose3f from({1, 1, 1}, Quaternionf::Identity());
+    Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+    auto result = moveWithRateLimit(from, to, 1, 10, M_PI_4);
+    Pose3f expected({1, 1, 1.5f}, rotateZ(M_PI_4));
+    EXPECT_EQ(std::get<0>(result), expected);
+    EXPECT_TRUE(std::get<1>(result));
+}
+
+TEST(Pose, FloatVectorRoundTrip1) {
+    // Rotation vector magnitude must be less than Pi.
+    std::vector<float> vec = { 1, 2, 3, 0.4, 0.5, 0.6};
+    std::optional<Pose3f> pose = Pose3f::fromVector(vec);
+    ASSERT_TRUE(pose.has_value());
+    std::vector<float> reconstructed = pose->toVector();
+    EXPECT_EQ(vec, reconstructed);
+}
+
+TEST(Pose, FloatVectorRoundTrip2) {
+    Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+    std::vector<float> vec = pose.toVector();
+    std::optional<Pose3f> reconstructed = Pose3f::fromVector(vec);
+    ASSERT_TRUE(reconstructed.has_value());
+    EXPECT_EQ(pose, reconstructed.value());
+}
+
+TEST(Pose, FloatVectorInvalid) {
+    EXPECT_FALSE(Pose3f::fromVector({}).has_value());
+    EXPECT_FALSE(Pose3f::fromVector({1, 2, 3, 4, 5}).has_value());
+    EXPECT_FALSE(Pose3f::fromVector({1, 2, 3, 4, 5, 6, 7}).has_value());
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
new file mode 100644
index 0000000..ae39512
--- /dev/null
+++ b/media/libheadtracking/Pose.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Pose.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "media/Twist.h"
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+using Eigen::Vector3f;
+
+std::optional<Pose3f> Pose3f::fromVector(const std::vector<float>& vec) {
+    if (vec.size() != 6) {
+        return std::nullopt;
+    }
+    return Pose3f({vec[0], vec[1], vec[2]}, rotationVectorToQuaternion({vec[3], vec[4], vec[5]}));
+}
+
+std::vector<float> Pose3f::toVector() const {
+    Eigen::Vector3f rot = quaternionToRotationVector(mRotation);
+    return {mTranslation[0], mTranslation[1], mTranslation[2], rot[0], rot[1], rot[2]};
+}
+
+std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
+                                           float maxTranslationalVelocity,
+                                           float maxRotationalVelocity) {
+    // Never rate limit if both limits are set to infinity.
+    if (std::isinf(maxTranslationalVelocity) && std::isinf(maxRotationalVelocity)) {
+        return {to, false};
+    }
+    // Always rate limit if t is 0 (required to avoid division by 0).
+    if (t == 0 || maxTranslationalVelocity == 0 || maxRotationalVelocity == 0) {
+        return {from, true};
+    }
+
+    Pose3f fromToTo = from.inverse() * to;
+    Twist3f twist = differentiate(fromToTo, t);
+    float angularRotationalRatio = twist.scalarRotationalVelocity() / maxRotationalVelocity;
+    float translationalVelocityRatio =
+            twist.scalarTranslationalVelocity() / maxTranslationalVelocity;
+    float maxRatio = std::max(angularRotationalRatio, translationalVelocityRatio);
+    if (maxRatio <= 1) {
+        return {to, false};
+    }
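+    // Scale the step down by maxRatio so that neither velocity limit is exceeded.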
+    return {from * integrate(twist, t / maxRatio), true};
+}
+
+std::ostream& operator<<(std::ostream& os, const Pose3f& pose) {
+    os << "translation: " << pose.translation().transpose()
+       << " quaternion: " << pose.rotation().coeffs().transpose();
+    return os;
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseBias-test.cpp b/media/libheadtracking/PoseBias-test.cpp
new file mode 100644
index 0000000..9f42a2c
--- /dev/null
+++ b/media/libheadtracking/PoseBias-test.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "PoseBias.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+TEST(PoseBias, Initial) {
+    PoseBias bias;
+    EXPECT_EQ(bias.getOutput(), Pose3f());
+}
+
+TEST(PoseBias, Basic) {
+    Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+    Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+
+    PoseBias bias;
+    bias.setInput(pose1);
+    EXPECT_EQ(pose1, bias.getOutput());
+    bias.recenter();
+    EXPECT_EQ(bias.getOutput(), Pose3f());
+    bias.setInput(pose2);
+    EXPECT_EQ(bias.getOutput(), pose1.inverse() * pose2);
+    bias.recenter();
+    EXPECT_EQ(bias.getOutput(), Pose3f());
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseBias.cpp b/media/libheadtracking/PoseBias.cpp
new file mode 100644
index 0000000..33afca6
--- /dev/null
+++ b/media/libheadtracking/PoseBias.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseBias.h"
+
+namespace android {
+namespace media {
+
+void PoseBias::setInput(const Pose3f& input) {
+    mLastWorldToInput = input;
+}
+
+void PoseBias::recenter() {
+    mBiasToWorld = mLastWorldToInput.inverse();
+}
+
+Pose3f PoseBias::getOutput() const {
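+    // biasToWorld * worldToInput = biasToInput, i.e. the last input expressed relative to the
+    // recenter point.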
+    return mBiasToWorld * mLastWorldToInput;
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseBias.h b/media/libheadtracking/PoseBias.h
new file mode 100644
index 0000000..9acb49d
--- /dev/null
+++ b/media/libheadtracking/PoseBias.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Biasing for a stream of poses.
+ *
+ * This filter takes a stream of poses and, at any time during the stream, can change the frame of
+ * reference for the stream to be that of the last pose received, via the recenter() operation.
+ *
+ * Typical usage:
+ * PoseBias bias;
+ *
+ * bias.setInput(...);
+ * output = bias.getOutput();
+ * bias.setInput(...);
+ * output = bias.getOutput();
+ * bias.setInput(...);
+ * output = bias.getOutput();
+ * bias.recenter();  // Reference frame is now equal to the last input.
+ * output = bias.getOutput();  // This is now the identity pose.
+ *
+ * There doesn't need to be a 1:1 correspondence between setInput() and getOutput() calls.
+ * The initial bias point is identity.
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseBias {
+  public:
+    void setInput(const Pose3f& input);
+
+    void recenter();
+
+    Pose3f getOutput() const;
+
+  private:
+    Pose3f mLastWorldToInput;
+    Pose3f mBiasToWorld;
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp
new file mode 100644
index 0000000..df0a05f
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator-test.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <cmath>
+
+#include "PoseDriftCompensator.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = PoseDriftCompensator::Options;
+
+TEST(PoseDriftCompensator, Initial) {
+    PoseDriftCompensator comp(Options{});
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+TEST(PoseDriftCompensator, NoDrift) {
+    Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+    Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+    PoseDriftCompensator comp(Options{});
+
+    // First pose sets the baseline.
+    comp.setInput(1000, pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(2000, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+    // Recentering resets the baseline.
+    comp.recenter();
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(3000, pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(4000, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, NoDriftZeroTime) {
+    Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+    Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+    PoseDriftCompensator comp(Options{});
+
+    comp.setInput(1000, pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(1000, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+    comp.recenter();
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(1000, pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(1000, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, Asymptotic) {
+    Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+
+    PoseDriftCompensator comp(
+            Options{.translationalDriftTimeConstant = 1, .rotationalDriftTimeConstant = 1});
+
+    // Set the same pose for a long time.
+    for (int64_t t = 0; t < 1000; ++t) {
+        comp.setInput(t, pose);
+    }
+
+    // Output should have decayed to approximately identity.
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+TEST(PoseDriftCompensator, Fast) {
+    Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+    Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+    PoseDriftCompensator comp(
+            Options{.translationalDriftTimeConstant = 1e7, .rotationalDriftTimeConstant = 1e7});
+
+    comp.setInput(0, pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(1, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+    comp.recenter();
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(2, pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(3, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, Drift) {
+    Pose3f pose1({1, 2, 3}, rotateZ(-M_PI * 3 / 4));
+    PoseDriftCompensator comp(
+            Options{.translationalDriftTimeConstant = 500, .rotationalDriftTimeConstant = 1000});
+
+    // Establish a baseline.
+    comp.setInput(1000, Pose3f());
+
+    // Initial pose is used as is.
+    comp.setInput(1000, pose1);
+    EXPECT_EQ(comp.getOutput(), pose1);
+
+    // After 1000 ticks, the rotation should be scaled by exp(-1) and the translation by exp(-2).
+    comp.setInput(2000, pose1);
+    EXPECT_EQ(comp.getOutput(),
+              Pose3f(Vector3f{1, 2, 3} * std::expf(-2), rotateZ(-M_PI * 3 / 4 * std::expf(-1))));
+
+    // As long as the input stays the same, we'll continue to advance towards identity.
+    comp.setInput(3000, pose1);
+    EXPECT_EQ(comp.getOutput(),
+              Pose3f(Vector3f{1, 2, 3} * std::expf(-4), rotateZ(-M_PI * 3 / 4 * std::expf(-2))));
+
+    comp.recenter();
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp
new file mode 100644
index 0000000..0e90cad
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseDriftCompensator.h"
+
+#include <cmath>
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+PoseDriftCompensator::PoseDriftCompensator(const Options& options) : mOptions(options) {}
+
+void PoseDriftCompensator::setInput(int64_t timestamp, const Pose3f& input) {
+    if (mTimestamp.has_value()) {
+        // Avoid computation upon first input (only sets the initial state).
+        Pose3f prevInputToInput = mPrevInput.inverse() * input;
+        mOutput = scale(mOutput, timestamp - mTimestamp.value()) * prevInputToInput;
+    }
+    mPrevInput = input;
+    mTimestamp = timestamp;
+}
+
+void PoseDriftCompensator::recenter() {
+    mTimestamp.reset();
+    mOutput = Pose3f();
+}
+
+Pose3f PoseDriftCompensator::getOutput() const {
+    return mOutput;
+}
+
+Pose3f PoseDriftCompensator::scale(const Pose3f& pose, int64_t dt) {
+    // Translation.
+    Vector3f translation = pose.translation();
+    translation *= std::expf(-static_cast<float>(dt) / mOptions.translationalDriftTimeConstant);
+
+    // Rotation.
+    Vector3f rotationVec = quaternionToRotationVector(pose.rotation());
+    rotationVec *= std::expf(-static_cast<float>(dt) / mOptions.rotationalDriftTimeConstant);
+
+    return Pose3f(translation, rotationVectorToQuaternion(rotationVec));
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator.h b/media/libheadtracking/PoseDriftCompensator.h
new file mode 100644
index 0000000..a71483b
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <limits>
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Drift compensator for a stream of poses.
+ *
+ * This is effectively a high-pass filter for a pose stream, removing any DC-offset / bias. The
+ * provided input stream will be "pulled" toward identity with an exponential decay filter with a
+ * configurable time constant. Rotation and translation are handled separately.
+ *
+ * Typical usage:
+ * PoseDriftCompensator comp(...);
+ *
+ * while (...) {
+ *   comp.setInput(...);
+ *   Pose3f output = comp.getOutput();
+ * }
+ *
+ * There doesn't need to be a 1:1 correspondence between setInput() and getOutput() calls. The
+ * output timestamp is always that of the last setInput() call. Calling recenter() will reset the
+ * bias to the current output, causing the output to be identity.
+ *
+ * The initial bias point is identity.
+ *
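+ * As an illustrative example: with translationalDriftTimeConstant = 500, a constant input pose
+ * has its translation scaled by exp(-1000 / 500) ~= 0.135 after 1000 timestamp units (this
+ * matches the Drift test in PoseDriftCompensator-test.cpp).
+ *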
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseDriftCompensator {
+  public:
+    struct Options {
+        float translationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+        float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+    };
+
+    explicit PoseDriftCompensator(const Options& options);
+
+    void setInput(int64_t timestamp, const Pose3f& input);
+
+    void recenter();
+
+    Pose3f getOutput() const;
+
+  private:
+    const Options mOptions;
+
+    Pose3f mPrevInput;
+    Pose3f mOutput;
+    std::optional<int64_t> mTimestamp;
+
+    Pose3f scale(const Pose3f& pose, int64_t dt);
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseProcessingGraph.png b/media/libheadtracking/PoseProcessingGraph.png
new file mode 100644
index 0000000..325b667
--- /dev/null
+++ b/media/libheadtracking/PoseProcessingGraph.png
Binary files differ
diff --git a/media/libheadtracking/PoseRateLimiter-test.cpp b/media/libheadtracking/PoseRateLimiter-test.cpp
new file mode 100644
index 0000000..f306183
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter-test.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "PoseRateLimiter.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = PoseRateLimiter::Options;
+
+TEST(PoseRateLimiter, Initial) {
+    Pose3f target({1, 2, 3}, Quaternionf::UnitRandom());
+    PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 10, .maxRotationalVelocity = 10});
+    limiter.setTarget(target);
+    EXPECT_EQ(limiter.calculatePose(1000), target);
+}
+
+TEST(PoseRateLimiter, UnlimitedZeroTime) {
+    Pose3f target1({1, 2, 3}, Quaternionf::UnitRandom());
+    Pose3f target2({4, 5, 6}, Quaternionf::UnitRandom());
+    PoseRateLimiter limiter(Options{});
+    limiter.setTarget(target1);
+    EXPECT_EQ(limiter.calculatePose(0), target1);
+    limiter.setTarget(target2);
+    EXPECT_EQ(limiter.calculatePose(0), target2);
+    limiter.setTarget(target1);
+    EXPECT_EQ(limiter.calculatePose(0), target1);
+}
+
+TEST(PoseRateLimiter, Limited) {
+    Pose3f pose1({1, 2, 3}, Quaternionf::Identity());
+    Pose3f pose2({1, 2, 8}, rotateZ(M_PI * 5 / 8));
+    PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+    limiter.setTarget(pose2);
+    EXPECT_EQ(limiter.calculatePose(1000), pose2);
+
+    // Rate limiting is inactive. Should track despite the violation.
+    limiter.setTarget(pose1);
+    EXPECT_EQ(limiter.calculatePose(1001), pose1);
+
+    // Enable rate limiting and observe gradual motion from pose1 to pose2.
+    limiter.enable();
+    limiter.setTarget(pose2);
+    EXPECT_EQ(limiter.calculatePose(1002), Pose3f({1, 2, 4}, rotateZ(M_PI * 1 / 8)));
+    limiter.setTarget(pose2);
+    EXPECT_EQ(limiter.calculatePose(1003), Pose3f({1, 2, 5}, rotateZ(M_PI * 2 / 8)));
+    // Skip a tick.
+    limiter.setTarget(pose2);
+    EXPECT_EQ(limiter.calculatePose(1005), Pose3f({1, 2, 7}, rotateZ(M_PI * 4 / 8)));
+    limiter.setTarget(pose2);
+    EXPECT_EQ(limiter.calculatePose(1006), pose2);
+
+    // We reached the target, so rate limiting should now be disabled.
+    limiter.setTarget(pose1);
+    EXPECT_EQ(limiter.calculatePose(1007), pose1);
+}
+
+TEST(PoseRateLimiter, Reset) {
+    Pose3f pose1({1, 2, 3}, Quaternionf::Identity());
+    Pose3f pose2({1, 2, 8}, rotateZ(M_PI * 5 / 8));
+    PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+    limiter.setTarget(pose1);
+    EXPECT_EQ(limiter.calculatePose(1000), pose1);
+
+    // Enable rate limiting and observe gradual motion from pose1 to pose2.
+    limiter.enable();
+    limiter.setTarget(pose2);
+    EXPECT_EQ(limiter.calculatePose(1001), Pose3f({1, 2, 4}, rotateZ(M_PI * 1 / 8)));
+
+    // Reset the pose and disable rate limiting.
+    limiter.reset(pose2);
+    EXPECT_EQ(limiter.calculatePose(1002), pose2);
+
+    // Rate limiting should now be disabled.
+    limiter.setTarget(pose1);
+    EXPECT_EQ(limiter.calculatePose(1003), pose1);
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseRateLimiter.cpp b/media/libheadtracking/PoseRateLimiter.cpp
new file mode 100644
index 0000000..380e22b
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseRateLimiter.h"
+
+namespace android {
+namespace media {
+
+PoseRateLimiter::PoseRateLimiter(const Options& options) : mOptions(options), mLimiting(false) {}
+
+void PoseRateLimiter::enable() {
+    mLimiting = true;
+}
+
+void PoseRateLimiter::reset(const Pose3f& target) {
+    mLimiting = false;
+    mTargetPose = target;
+}
+
+void PoseRateLimiter::setTarget(const Pose3f& target) {
+    mTargetPose = target;
+}
+
+Pose3f PoseRateLimiter::calculatePose(int64_t timestamp) {
+    assert(mTargetPose.has_value());
+    Pose3f pose;
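+    // While limiting is engaged and we have a previous output, move from that output towards the
+    // target at no more than the configured maximum velocities; otherwise pass the target through.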
+    if (mLimiting && mOutput.has_value()) {
+        std::tie(pose, mLimiting) = moveWithRateLimit(
+                mOutput->pose, mTargetPose.value(), timestamp - mOutput->timestamp,
+                mOptions.maxTranslationalVelocity, mOptions.maxRotationalVelocity);
+    } else {
+        pose = mTargetPose.value();
+    }
+    mOutput = Point{pose, timestamp};
+    return pose;
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseRateLimiter.h b/media/libheadtracking/PoseRateLimiter.h
new file mode 100644
index 0000000..aa2fe80
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <limits>
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Limits a stream of poses to given maximum translational and rotational velocities.
+ *
+ * Normal operation:
+ *
+ * Pose3f output;
+ * PoseRateLimiter limiter(...);
+ *
+ * // Limiting is disabled. Output will be the same as last input.
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * // Enable limiting. Output will no longer be necessarily the same as last input.
+ * limiter.enable();
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * // Once the output has caught up with the last input, the limiter will be automatically
+ * // disabled again and the output will match the input.
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * As shown above, the limiter is turned on manually via enable(), but turns off automatically as
+ * soon as the output is able to catch up to the input. The intention is that rate limiting will be
+ * turned on at specific times to smooth out any artificial discontinuities introduced to the pose
+ * stream, but the rest of the time will be a simple passthrough.
+ *
+ * setTarget(...) and calculatePose(...) don't have to be ordered in any particular way. However,
+ * setTarget or reset() must be called at least once prior to the first calculatePose().
+ *
+ * Calling reset() instead of setTarget() forces the output to the given pose and disables rate
+ * limiting.
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseRateLimiter {
+  public:
+    struct Options {
+        float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
+        float maxRotationalVelocity = std::numeric_limits<float>::infinity();
+    };
+
+    explicit PoseRateLimiter(const Options& options);
+
+    void enable();
+
+    void reset(const Pose3f& target);
+    void setTarget(const Pose3f& target);
+
+    Pose3f calculatePose(int64_t timestamp);
+
+  private:
+    struct Point {
+        Pose3f pose;
+        int64_t timestamp;
+    };
+
+    const Options mOptions;
+    bool mLimiting;
+    std::optional<Pose3f> mTargetPose;
+    std::optional<Point> mOutput;
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/QuaternionUtil-test.cpp b/media/libheadtracking/QuaternionUtil-test.cpp
new file mode 100644
index 0000000..e79e54a
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil-test.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(QuaternionUtil, RotationVectorToQuaternion) {
+    // 90 degrees around Z.
+    Vector3f rot = {0, 0, M_PI_2};
+    Quaternionf quat = rotationVectorToQuaternion(rot);
+    ASSERT_EQ(quat * Vector3f(1, 0, 0), Vector3f(0, 1, 0));
+    ASSERT_EQ(quat * Vector3f(0, 1, 0), Vector3f(-1, 0, 0));
+    ASSERT_EQ(quat * Vector3f(0, 0, 1), Vector3f(0, 0, 1));
+}
+
+TEST(QuaternionUtil, QuaternionToRotationVector) {
+    Quaternionf quat = Quaternionf::FromTwoVectors(Vector3f(1, 0, 0), Vector3f(0, 1, 0));
+    Vector3f rot = quaternionToRotationVector(quat);
+    ASSERT_EQ(rot, Vector3f(0, 0, M_PI_2));
+}
+
+TEST(QuaternionUtil, RoundTripFromQuaternion) {
+    Quaternionf quaternion = Quaternionf::UnitRandom();
+    EXPECT_EQ(quaternion, rotationVectorToQuaternion(quaternionToRotationVector(quaternion)));
+}
+
+TEST(QuaternionUtil, RoundTripFromVector) {
+    Vector3f vec{0.1, 0.2, 0.3};
+    EXPECT_EQ(vec, quaternionToRotationVector(rotationVectorToQuaternion(vec)));
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.cpp b/media/libheadtracking/QuaternionUtil.cpp
new file mode 100644
index 0000000..5d090de
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "QuaternionUtil.h"
+
+#include <cassert>
+
+namespace android {
+namespace media {
+
+using Eigen::NumTraits;
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace {
+
+Vector3f LogSU2(const Quaternionf& q) {
+    // Implementation of the logarithmic map of SU(2) using atan.
+    // This follows Hertzberg et al. "Integrating Generic Sensor Fusion Algorithms
+    // with Sound State Representations through Encapsulation of Manifolds", Eq.
+    // (31)
+    // We use asin and acos instead of atan to enable the use of Eigen Autodiff
+    // with SU2.
+    const float sign_of_w = q.w() < 0.f ? -1.f : 1.f;
+    const float abs_w = sign_of_w * q.w();
+    const Vector3f v = sign_of_w * q.vec();
+    const float squared_norm_of_v = v.squaredNorm();
+
+    assert(abs(1.f - abs_w * abs_w - squared_norm_of_v) < NumTraits<float>::dummy_precision());
+
+    if (squared_norm_of_v > NumTraits<float>::dummy_precision()) {
+        const float norm_of_v = sqrt(squared_norm_of_v);
+        if (abs_w > NumTraits<float>::dummy_precision()) {
+            // asin(x) = acos(x) at x = 1/sqrt(2).
+            if (norm_of_v <= float(M_SQRT1_2)) {
+                return (asin(norm_of_v) / norm_of_v) * v;
+            }
+            return (acos(abs_w) / norm_of_v) * v;
+        }
+        return (M_PI_2 / norm_of_v) * v;
+    }
+
+    // Taylor expansion at squared_norm_of_v == 0
+    return (1.f / abs_w - squared_norm_of_v / (3.f * pow(abs_w, 3))) * v;
+}
+
+Quaternionf ExpSU2(const Vector3f& delta) {
+    Quaternionf q_delta;
+    const float theta_squared = delta.squaredNorm();
+    if (theta_squared > NumTraits<float>::dummy_precision()) {
+        const float theta = sqrt(theta_squared);
+        q_delta.w() = cos(theta);
+        q_delta.vec() = (sin(theta) / theta) * delta;
+    } else {
+        // taylor expansions around theta == 0
+        q_delta.w() = 1.f - 0.5f * theta_squared;
+        q_delta.vec() = (1.f - 1.f / 6.f * theta_squared) * delta;
+    }
+    return q_delta;
+}
+
+}  // namespace
+
+Quaternionf rotationVectorToQuaternion(const Vector3f& rotationVector) {
+    //  SU(2) is a double cover of SO(3), thus we have to half the tangent vector
+    //  delta
+    const Vector3f half_delta = 0.5f * rotationVector;
+    return ExpSU2(half_delta);
+}
+
+Vector3f quaternionToRotationVector(const Quaternionf& quaternion) {
+    // SU(2) is a double cover of SO(3), thus we have to multiply the tangent
+    // vector delta by two
+    return 2.f * LogSU2(quaternion);
+}
+
+Quaternionf rotateX(float angle) {
+    return rotationVectorToQuaternion(Vector3f(1, 0, 0) * angle);
+}
+
+Quaternionf rotateY(float angle) {
+    return rotationVectorToQuaternion(Vector3f(0, 1, 0) * angle);
+}
+
+Quaternionf rotateZ(float angle) {
+    return rotationVectorToQuaternion(Vector3f(0, 0, 1) * angle);
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.h b/media/libheadtracking/QuaternionUtil.h
new file mode 100644
index 0000000..f7a2ca9
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <Eigen/Geometry>
+
+namespace android {
+namespace media {
+
+/**
+ * Converts a rotation vector to an equivalent quaternion.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
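+ * For example (illustrative): the rotation vector {0, 0, M_PI_2}, i.e. a 90-degree rotation around
+ * Z, corresponds to the quaternion (w = cos(M_PI_4), x = 0, y = 0, z = sin(M_PI_4)).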
+ */
+Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
+
+/**
+ * Converts a quaternion to an equivalent rotation vector.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
+
+/**
+ * Returns a quaternion representing a rotation around the X-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateX(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateY(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateZ(float angle);
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/README.md b/media/libheadtracking/README.md
new file mode 100644
index 0000000..44f7bb2
--- /dev/null
+++ b/media/libheadtracking/README.md
@@ -0,0 +1,187 @@
+# Head-Tracking Library For Immersive Audio
+
+This library handles the processing of head-tracking information, necessary for
+Immersive Audio functionality. It goes from bare sensor readings to the final
+pose that is fed into a virtualizer.
+
+## Basic Usage
+
+The main entry point into this library is the `HeadTrackingProcessor` class.
+This class is provided with the following inputs:
+
+- Head pose, relative to some arbitrary world frame.
+- Screen pose, relative to some arbitrary world frame.
+- Display orientation, defined as the angle between the "physical" screen and
+  the "logical" screen.
+- Transform between the screen and the sound stage.
+- Desired operational mode:
+    - Static: only the sound stage pose is taken into account. This will result
+      in an experience where the sound stage moves with the listener's head.
+    - World-relative: both the head pose and stage pose are taken into account.
+      This will result in an experience where the sound stage is perceived to be
+      located at a fixed place in the world.
+    - Screen-relative: the head pose, screen pose and stage pose are all taken
+      into account. This will result in an experience where the sound stage is
+      perceived to be located at a fixed place relative to the screen.
+
+Once inputs are provided, the `calculate()` method will make the following
+output available:
+
+- Stage pose, relative to the head. This aggregates all the inputs mentioned
+  above and is ready to be fed into a virtualizer.
+- Actual operational mode. May deviate from the desired one in cases where the
+  desired mode cannot be calculated (for example, as a result of dropped messages
+  from one of the sensors).
+
+A `recenter()` operation is also available, which indicates to the system that
+whatever pose the screen and head are currently at should be considered as the
+"center" pose, or frame of reference.
+
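+A rough sketch of the intended call flow is shown below. Only `calculate()` and `recenter()` are
+named in this document; the setter and getter names are illustrative placeholders, not the actual
+interface:
+
+```
+HeadTrackingProcessor processor(...);
+while (running) {
+    processor.setWorldToHeadPose(...);    // placeholder name for the head pose input
+    processor.setWorldToScreenPose(...);  // placeholder name for the screen pose input
+    processor.setDesiredMode(...);        // placeholder name for the desired mode input
+    processor.calculate(...);
+    Pose3f headToStage = processor.getHeadToStagePose();  // placeholder name for the output
+}
+processor.recenter();
+```
+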
+## Pose-Related Conventions
+
+### Naming and Composition
+
+When referring to poses in code, it is always good practice to follow
+conventional naming, which highlights the reference and target frames clearly:
+
+Bad:
+
+```
+Pose3f headPose;
+```
+
+Good:
+
+```
+Pose3f worldToHead;  // “world” is the reference frame,
+                     // “head” is the target frame.
+```
+
+By following this convention, it is easy to verify correct composition of poses
+by making sure adjacent frames are identical:
+
+```
+Pose3f aToD = aToB * bToC * cToD;
+```
+
+And similarly, inverting the transform simply flips the reference and target:
+
+```
+Pose3f aToB = bToA.inverse();
+```
+
+### Twist
+
+“Twist” is to pose what velocity is to distance: it is the time-derivative of a
+pose, representing the change in pose over a short period of time. Its naming
+convention always states one frame, e.g.:
+
+```
+Twist3f headTwist;
+```
+
+This means that this twist represents the head-at-time-T to head-at-time-T+dt
+transform. Twists are not composable in the same way as poses.
+
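+As a small illustration, using the differentiate() and integrate() helpers that Pose.cpp in this
+change relies on (treat the exact signatures as assumptions based on that usage):
+
+```
+// Relative motion of the head between two samples taken dt apart:
+Pose3f headT0ToHeadT1 = worldToHeadAtT0.inverse() * worldToHeadAtT1;
+Twist3f headTwist = differentiate(headT0ToHeadT1, dt);
+// Integrating the twist over the same dt recovers the relative pose:
+Pose3f recovered = integrate(headTwist, dt);  // expected to equal headT0ToHeadT1
+```
+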
+### Frames of Interest
+
+The frames of interest in this library are defined as follows:
+
+#### Head
+
+This is the listener’s head. The origin is at the center point between the
+ear-drums, the X-axis goes from left ear to right ear, Y-axis goes from the back
+of the head towards the face and Z-axis goes from the bottom of the head to the
+top.
+
+#### Screen
+
+This is the primary screen that the user will be looking at, which is relevant
+for some Immersive Audio use-cases, such as watching a movie. We will follow a
+different convention for this frame than what the Sensor framework uses. The
+origin is at the center of the screen. X-axis goes from left to right, Z-axis
+goes from the screen bottom to the screen top, Y-axis goes “into” the screen
+(from the direction of the viewer). The up/down/left/right of the screen are
+defined as the logical directions used for display. So when flipping the display
+orientation between “landscape” and “portrait”, the frame of reference will
+change with respect to the physical screen.
+
+#### Stage
+
+This is the frame of reference used by the virtualizer for positioning sound
+objects. It is not associated with any physical frame. In a typical
+multi-channel scenario, the listener is at the origin, the X-axis goes from left
+to right, Y-axis from back to front and Z-axis from down to up. For example, a
+front-right speaker is located at positive X, Y and Z=0, a height speaker will
+have a positive Z.
+
+#### World
+
+It is sometimes convenient to use an intermediate frame when dealing with
+head-to-screen transforms. The “world” frame is a frame of reference in the
+physical world, relative to which we can measure the head pose and screen pose.
+It is arbitrary, but expected to be stable (fixed).
+
+## Processing Description
+
+![Pose processing graph](PoseProcessingGraph.png)
+
+The diagram above illustrates the processing that takes place from the inputs to
+the outputs.
+
+### Predictor
+
+The Predictor block gets pose + twist (pose derivative) and extrapolates to
+obtain a predicted head pose (with a given latency).
+
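+A minimal sketch of that extrapolation, assuming the integrate() helper used by Pose.cpp in this
+change (the Predictor implementation itself is not part of this change):
+
+```
+// Advance the head pose along its current twist by the prediction latency:
+Pose3f predictedWorldToHead = worldToHead * integrate(headTwist, predictionLatency);
+```
+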
+### Bias
+
+The Bias blocks establish the reference frame for the poses by having the
+ability to set the current pose as the reference for future poses (recentering).
+
+### Orientation Compensation
+
+The Orientation Compensation block applies the display orientation to the screen
+pose to obtain the pose of the “logical screen” frame, in which the Y-axis is
+pointing in the direction of the logical screen “up” rather than the physical
+one.
+
+### Screen-Relative Pose
+
+The Screen-Relative Pose block is provided with a head pose and a screen pose
+and estimates the pose of the head relative to the screen. Optionally, this
+module may indicate that the user is likely not in front of the screen via the
+“valid” output.
+
+### Stillness Detector
+
+The Stillness Detector blocks detect when their incoming pose stream has been
+stable for a given amount of time (allowing for a configurable amount of error).
+When the head is considered still, a recenter operation is triggered
+(“auto-recentering”); when the screen is considered not still, the mode selector
+uses this information to force static mode.
+
+### Mode Selector
+
+The Mode Selector block aggregates the various sources of pose information into
+a head-to-stage pose that feeds the virtualizer. It is controlled by the
+“desired mode” signal, which indicates whether the preference is static,
+world-relative or screen-relative mode.
+
+The actual mode may diverge from the desired mode. It is determined as follows
+(see the sketch after this list):
+
+- If the desired mode is static, the actual mode is static.
+- If the desired mode is world-relative:
+    - If head and screen poses are fresh and the screen is stable (stillness
+      detector output is true), the actual mode is world-relative.
+    - Otherwise the actual mode is static.
+- If the desired mode is screen-relative:
+    - If head and screen poses are fresh and the ‘valid’ signal is asserted, the
+      actual mode is screen-relative.
+    - Otherwise, apply the same rules as the desired mode being world-relative.
+
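+Below is a sketch of that decision logic in C++-like pseudo-code. The enum and flag names are
+illustrative only and are not taken from the actual implementation:
+
+```
+HeadTrackingMode select(HeadTrackingMode desired, bool posesFresh, bool screenStable, bool valid) {
+    if (desired == HeadTrackingMode::SCREEN_RELATIVE && posesFresh && valid) {
+        return HeadTrackingMode::SCREEN_RELATIVE;
+    }
+    // Screen-relative falls back to the world-relative rules when it cannot be satisfied.
+    if (desired != HeadTrackingMode::STATIC && posesFresh && screenStable) {
+        return HeadTrackingMode::WORLD_RELATIVE;
+    }
+    return HeadTrackingMode::STATIC;
+}
+```
+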
+### Rate Limiter
+
+A Rate Limiter block is applied to the final output to smooth out any abrupt
+transitions caused by any of the following events:
+
+- Mode switch.
+- Display orientation switch.
+- Recenter operation.
diff --git a/media/libheadtracking/ScreenHeadFusion-test.cpp b/media/libheadtracking/ScreenHeadFusion-test.cpp
new file mode 100644
index 0000000..ecf27f5
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion-test.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "ScreenHeadFusion.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(ScreenHeadFusion, Init) {
+    ScreenHeadFusion fusion;
+    EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate_NoHead) {
+    ScreenHeadFusion fusion;
+    fusion.setWorldToScreenPose(0, Pose3f());
+    EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate_NoScreen) {
+    ScreenHeadFusion fusion;
+    fusion.setWorldToHeadPose(0, Pose3f());
+    EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate) {
+    Pose3f worldToScreen1({1, 2, 3}, Quaternionf::UnitRandom());
+    Pose3f worldToHead1({4, 5, 6}, Quaternionf::UnitRandom());
+    Pose3f worldToScreen2({11, 12, 13}, Quaternionf::UnitRandom());
+    Pose3f worldToHead2({14, 15, 16}, Quaternionf::UnitRandom());
+
+    ScreenHeadFusion fusion;
+    fusion.setWorldToHeadPose(123, worldToHead1);
+    fusion.setWorldToScreenPose(456, worldToScreen1);
+    auto result = fusion.calculate();
+    ASSERT_TRUE(result.has_value());
+    EXPECT_EQ(123, result->timestamp);
+    EXPECT_EQ(worldToScreen1.inverse() * worldToHead1, result->pose);
+
+    fusion.setWorldToHeadPose(567, worldToHead2);
+    result = fusion.calculate();
+    ASSERT_TRUE(result.has_value());
+    EXPECT_EQ(456, result->timestamp);
+    EXPECT_EQ(worldToScreen1.inverse() * worldToHead2, result->pose);
+
+    fusion.setWorldToScreenPose(678, worldToScreen2);
+    result = fusion.calculate();
+    ASSERT_TRUE(result.has_value());
+    EXPECT_EQ(567, result->timestamp);
+    EXPECT_EQ(worldToScreen2.inverse() * worldToHead2, result->pose);
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/ScreenHeadFusion.cpp b/media/libheadtracking/ScreenHeadFusion.cpp
new file mode 100644
index 0000000..f023570
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ScreenHeadFusion.h"
+
+namespace android {
+namespace media {
+
+void ScreenHeadFusion::setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead) {
+    mWorldToHead = TimestampedPose{.timestamp = timestamp, .pose = worldToHead};
+}
+
+void ScreenHeadFusion::setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) {
+    mWorldToScreen = TimestampedPose{.timestamp = timestamp, .pose = worldToScreen};
+}
+
+std::optional<ScreenHeadFusion::TimestampedPose> ScreenHeadFusion::calculate() {
+    // TODO: this is temporary, simplistic logic.
+    if (!mWorldToHead.has_value() || !mWorldToScreen.has_value()) {
+        return std::nullopt;
+    }
+    return TimestampedPose{
+            .timestamp = std::min(mWorldToHead->timestamp, mWorldToScreen->timestamp),
+            .pose = mWorldToScreen->pose.inverse() * mWorldToHead->pose};
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/ScreenHeadFusion.h b/media/libheadtracking/ScreenHeadFusion.h
new file mode 100644
index 0000000..ee81100
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Combines world-to-head pose with world-to-screen pose to obtain screen-to-head.
+ *
+ * Input poses may arrive separately. The last pose of each kind is taken into account. The
+ * timestamp of the output is the earlier (older) timestamp of the two inputs.
+ *
+ * Output may be nullopt in the following cases:
+ * - Either one of the inputs has not yet been provided.
+ * - It is estimated that the user is no longer facing the screen.
+ *
+ * Typical usage:
+ *
+ * ScreenHeadFusion fusion(...);
+ * fusion.setWorldToHeadPose(...);
+ * fusion.setWorldToScreenPose(...);
+ * auto output = fusion.calculate();
+ *
+ * This class is not thread-safe, but thread-compatible.
+ */
+class ScreenHeadFusion {
+  public:
+    struct TimestampedPose {
+        int64_t timestamp;
+        Pose3f pose;
+    };
+
+    void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
+
+    void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen);
+
+    /**
+     * Returns the screen-to-head pose, or nullopt if invalid.
+     */
+    std::optional<TimestampedPose> calculate();
+
+  private:
+    std::optional<TimestampedPose> mWorldToHead;
+    std::optional<TimestampedPose> mWorldToScreen;
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/SensorPoseProvider-example.cpp b/media/libheadtracking/SensorPoseProvider-example.cpp
new file mode 100644
index 0000000..88e222e
--- /dev/null
+++ b/media/libheadtracking/SensorPoseProvider-example.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <iostream>
+
+#include <android/sensor.h>
+#include <hardware/sensors.h>
+#include <utils/SystemClock.h>
+
+#include <media/SensorPoseProvider.h>
+#include <sensor/Sensor.h>
+#include <sensor/SensorManager.h>
+
+using android::elapsedRealtimeNano;
+using android::Sensor;
+using android::SensorManager;
+using android::String16;
+using android::media::Pose3f;
+using android::media::SensorPoseProvider;
+using android::media::Twist3f;
+
+using namespace std::chrono_literals;
+
+const char kPackageName[] = "SensorPoseProvider-example";
+
+class Listener : public SensorPoseProvider::Listener {
+  public:
+    void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+                const std::optional<Twist3f>& twist, bool isNewReference) override {
+        int64_t now = elapsedRealtimeNano();
+
+        std::cout << "onPose t=" << timestamp
+                  << " lag=" << ((now - timestamp) / 1e6) << "[ms]"
+                  << " sensor=" << handle
+                  << " pose=" << pose
+                  << " twist=";
+        if (twist.has_value()) {
+            std::cout << twist.value();
+        } else {
+            std::cout << "<none>";
+        }
+        std::cout << " isNewReference=" << isNewReference << std::endl;
+    }
+};
+
+int main() {
+    SensorManager& sensorManager = SensorManager::getInstanceForPackage(String16(kPackageName));
+
+    const Sensor* headSensor = sensorManager.getDefaultSensor(SENSOR_TYPE_GAME_ROTATION_VECTOR);
+    const Sensor* screenSensor = sensorManager.getDefaultSensor(SENSOR_TYPE_ROTATION_VECTOR);
+
+    Listener listener;
+
+    std::unique_ptr<SensorPoseProvider> provider =
+            SensorPoseProvider::create(kPackageName, &listener);
+    if (!provider->startSensor(headSensor->getHandle(), 500ms)) {
+        std::cout << "Failed to start head sensor" << std::endl;
+    }
+    sleep(2);
+    if (!provider->startSensor(screenSensor->getHandle(), 500ms)) {
+        std::cout << "Failed to start screen sensor" << std::endl;
+    }
+    sleep(2);
+    provider->stopSensor(headSensor->getHandle());
+    sleep(2);
+    return 0;
+}
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
new file mode 100644
index 0000000..f3f9b77
--- /dev/null
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/SensorPoseProvider.h>
+
+#define LOG_TAG "SensorPoseProvider"
+
+#include <inttypes.h>
+
+#include <future>
+#include <map>
+#include <thread>
+
+#include <android-base/thread_annotations.h>
+#include <log/log_main.h>
+#include <sensor/Sensor.h>
+#include <sensor/SensorEventQueue.h>
+#include <sensor/SensorManager.h>
+#include <utils/Looper.h>
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+// Identifier to use for our event queue on the loop.
+// The number 19 is arbitrary, only useful if using multiple objects on the same looper.
+constexpr int kIdent = 19;
+
+static inline Looper* ALooper_to_Looper(ALooper* alooper) {
+    return reinterpret_cast<Looper*>(alooper);
+}
+
+static inline ALooper* Looper_to_ALooper(Looper* looper) {
+    return reinterpret_cast<ALooper*>(looper);
+}
+
+/**
+ * RAII-wrapper around SensorEventQueue, which unregisters it on destruction.
+ */
+class EventQueueGuard {
+  public:
+    EventQueueGuard(const sp<SensorEventQueue>& queue, Looper* looper) : mQueue(queue) {
+        mQueue->looper = Looper_to_ALooper(looper);
+        mQueue->requestAdditionalInfo = false;
+        looper->addFd(mQueue->getFd(), kIdent, ALOOPER_EVENT_INPUT, nullptr, nullptr);
+    }
+
+    ~EventQueueGuard() {
+        if (mQueue) {
+            ALooper_to_Looper(mQueue->looper)->removeFd(mQueue->getFd());
+        }
+    }
+
+    EventQueueGuard(const EventQueueGuard&) = delete;
+    EventQueueGuard& operator=(const EventQueueGuard&) = delete;
+
+    [[nodiscard]] SensorEventQueue* get() const { return mQueue.get(); }
+
+  private:
+    sp<SensorEventQueue> mQueue;
+};
+
+/**
+ * RAII-wrapper around an enabled sensor, which disables it upon destruction.
+ */
+class SensorEnableGuard {
+  public:
+    SensorEnableGuard(const sp<SensorEventQueue>& queue, int32_t sensor)
+        : mQueue(queue), mSensor(sensor) {}
+
+    ~SensorEnableGuard() {
+        if (mSensor != SensorPoseProvider::INVALID_HANDLE) {
+            int ret = mQueue->disableSensor(mSensor);
+            if (ret) {
+                ALOGE("Failed to disable sensor: %s", strerror(ret));
+            }
+        }
+    }
+
+    SensorEnableGuard(const SensorEnableGuard&) = delete;
+    SensorEnableGuard& operator=(const SensorEnableGuard&) = delete;
+
+    // Enable moving.
+    SensorEnableGuard(SensorEnableGuard&& other) : mQueue(other.mQueue), mSensor(other.mSensor) {
+        other.mSensor = SensorPoseProvider::INVALID_HANDLE;
+    }
+
+  private:
+    sp<SensorEventQueue> const mQueue;
+    int32_t mSensor;
+};
+
+/**
+ * Streams the required events to a PoseListener, based on events originating from the Sensor stack.
+ */
+class SensorPoseProviderImpl : public SensorPoseProvider {
+  public:
+    static std::unique_ptr<SensorPoseProvider> create(const char* packageName, Listener* listener) {
+        std::unique_ptr<SensorPoseProviderImpl> result(
+                new SensorPoseProviderImpl(packageName, listener));
+        return result->waitInitFinished() ? std::move(result) : nullptr;
+    }
+
+    ~SensorPoseProviderImpl() override {
+        // Disable all active sensors.
+        mEnabledSensors.clear();
+        mLooper->wake();
+        mThread.join();
+    }
+
+    bool startSensor(int32_t sensor, std::chrono::microseconds samplingPeriod) override {
+        // Figure out the sensor's data format.
+        DataFormat format = getSensorFormat(sensor);
+        if (format == DataFormat::kUnknown) {
+            ALOGE("Unknown format for sensor %" PRId32, sensor);
+            return false;
+        }
+
+        {
+            std::lock_guard lock(mMutex);
+            mEnabledSensorsExtra.emplace(sensor, SensorExtra{ .format = format });
+        }
+
+        // Enable the sensor.
+        if (mQueue->enableSensor(sensor, samplingPeriod.count(), 0, 0)) {
+            ALOGE("Failed to enable sensor");
+            std::lock_guard lock(mMutex);
+            mEnabledSensorsExtra.erase(sensor);
+            return false;
+        }
+
+        mEnabledSensors.emplace(sensor, SensorEnableGuard(mQueue.get(), sensor));
+        return true;
+    }
+
+    void stopSensor(int handle) override {
+        mEnabledSensors.erase(handle);
+        std::lock_guard lock(mMutex);
+        mEnabledSensorsExtra.erase(handle);
+    }
+
+  private:
+    enum DataFormat {
+        kUnknown,
+        kQuaternion,
+        kRotationVectorsAndFlags,
+        kRotationVectorsAndDiscontinuityCount,
+    };
+
+    struct PoseEvent {
+        Pose3f pose;
+        std::optional<Twist3f> twist;
+        bool isNewReference;
+    };
+
+    struct SensorExtra {
+        DataFormat format;
+        std::optional<int32_t> discontinuityCount;
+    };
+
+    sp<Looper> mLooper;
+    Listener* const mListener;
+    SensorManager* const mSensorManager;
+    std::thread mThread;
+    std::mutex mMutex;
+    std::map<int32_t, SensorEnableGuard> mEnabledSensors;
+    std::map<int32_t, SensorExtra> mEnabledSensorsExtra GUARDED_BY(mMutex);
+    sp<SensorEventQueue> mQueue;
+
+    // We must do some of the initialization operations on the worker thread, because the API relies
+    // on the thread-local looper. In addition, as a matter of convenience, we store some of the
+    // state on the stack.
+    // For that reason, we use a two-step initialization approach: the ctor mostly just starts the
+    // worker thread, and that thread notifies us, via the promise below, when initialization has
+    // finished and whether it was successful.
+    std::promise<bool> mInitPromise;
+
+    SensorPoseProviderImpl(const char* packageName, Listener* listener)
+        : mListener(listener),
+          mSensorManager(&SensorManager::getInstanceForPackage(String16(packageName))),
+          mThread([this] { threadFunc(); }) {}
+
+    void initFinished(bool success) { mInitPromise.set_value(success); }
+
+    bool waitInitFinished() { return mInitPromise.get_future().get(); }
+
+    void threadFunc() {
+        // Obtain looper.
+        mLooper = Looper::prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+
+        // Create event queue.
+        mQueue = mSensorManager->createEventQueue();
+
+        if (mQueue == nullptr) {
+            ALOGE("Failed to create a sensor event queue");
+            initFinished(false);
+            return;
+        }
+
+        EventQueueGuard eventQueueGuard(mQueue, mLooper.get());
+
+        initFinished(true);
+
+        while (true) {
+            int ret = mLooper->pollOnce(-1 /* no timeout */, nullptr, nullptr, nullptr);
+
+            switch (ret) {
+                case ALOOPER_POLL_WAKE:
+                    // Normal way to exit.
+                    return;
+
+                case kIdent:
+                    // Possible events on our queue.
+                    break;
+
+                default:
+                    ALOGE("Unexpected status out of Looper::pollOnce: %d", ret);
+            }
+
+            // Process an event.
+            ASensorEvent event;
+            ssize_t actual = mQueue->read(&event, 1);
+            if (actual > 0) {
+                mQueue->sendAck(&event, actual);
+            }
+            ssize_t size = mQueue->filterEvents(&event, actual);
+
+            if (size < 0 || size > 1) {
+                ALOGE("Unexpected return value from SensorEventQueue::filterEvents: %zd", size);
+                break;
+            }
+            if (size == 0) {
+                // No events.
+                continue;
+            }
+
+            handleEvent(event);
+        }
+    }
+
+    void handleEvent(const ASensorEvent& event) {
+        PoseEvent value;
+        {
+            std::lock_guard lock(mMutex);
+            auto iter = mEnabledSensorsExtra.find(event.sensor);
+            if (iter == mEnabledSensorsExtra.end()) {
+                // This can happen if we have any pending events shortly after stopping.
+                return;
+            }
+            value = parseEvent(event, iter->second.format, &iter->second.discontinuityCount);
+        }
+        mListener->onPose(event.timestamp, event.sensor, value.pose, value.twist,
+                          value.isNewReference);
+    }
+
+    DataFormat getSensorFormat(int32_t handle) {
+        std::optional<const Sensor> sensor = getSensorByHandle(handle);
+        if (!sensor) {
+            ALOGE("Sensor not found: %d", handle);
+            return DataFormat::kUnknown;
+        }
+        if (sensor->getType() == ASENSOR_TYPE_ROTATION_VECTOR ||
+            sensor->getType() == ASENSOR_TYPE_GAME_ROTATION_VECTOR) {
+            return DataFormat::kQuaternion;
+        }
+
+        if (sensor->getType() == ASENSOR_TYPE_HEAD_TRACKER) {
+            return DataFormat::kRotationVectorsAndDiscontinuityCount;
+        }
+
+        if (sensor->getStringType() == "com.google.hardware.sensor.hid_dynamic.headtracker") {
+            return DataFormat::kRotationVectorsAndFlags;
+        }
+
+        return DataFormat::kUnknown;
+    }
+
+    std::optional<const Sensor> getSensorByHandle(int32_t handle) {
+        const Sensor* const* list;
+        ssize_t size;
+
+        // Search static sensor list.
+        size = mSensorManager->getSensorList(&list);
+        if (size < 0) {
+            ALOGE("getSensorList failed with error code %zd", size);
+            return std::nullopt;
+        }
+        for (size_t i = 0; i < size; ++i) {
+            if (list[i]->getHandle() == handle) {
+                return *list[i];
+            }
+        }
+
+        // Search dynamic sensor list.
+        Vector<Sensor> dynList;
+        size = mSensorManager->getDynamicSensorList(dynList);
+        if (size < 0) {
+            ALOGE("getDynamicSensorList failed with error code %zd", size);
+            return std::nullopt;
+        }
+        for (size_t i = 0; i < size; ++i) {
+            if (dynList[i].getHandle() == handle) {
+                return dynList[i];
+            }
+        }
+
+        return std::nullopt;
+    }
+
+    static PoseEvent parseEvent(const ASensorEvent& event, DataFormat format,
+                                std::optional<int32_t>* discontinuityCount) {
+        switch (format) {
+            case DataFormat::kQuaternion: {
+                Eigen::Quaternionf quat(event.data[3], event.data[0], event.data[1], event.data[2]);
+                // Adapt to different frame convention.
+                quat *= rotateX(-M_PI_2);
+                return PoseEvent{Pose3f(quat), std::optional<Twist3f>(), false};
+            }
+
+            case DataFormat::kRotationVectorsAndFlags: {
+                // Custom sensor, assumed to contain:
+                // 3 floats representing orientation as a rotation vector (in rad).
+                // 3 floats representing angular velocity as a rotation vector (in rad/s).
+                // 1 uint32_t of flags, where:
+                // - LSb is '1' iff the given sample is the first one in a new frame of reference.
+                // - The rest of the bits are reserved for future use.
+                Eigen::Vector3f rotation = {event.data[0], event.data[1], event.data[2]};
+                Eigen::Vector3f twist = {event.data[3], event.data[4], event.data[5]};
+                Eigen::Quaternionf quat = rotationVectorToQuaternion(rotation);
+                uint32_t flags = *reinterpret_cast<const uint32_t*>(&event.data[6]);
+                return PoseEvent{Pose3f(quat), Twist3f(Eigen::Vector3f::Zero(), twist),
+                                 (flags & (1 << 0)) != 0};
+            }
+
+            case DataFormat::kRotationVectorsAndDiscontinuityCount: {
+                Eigen::Vector3f rotation = {event.head_tracker.rx, event.head_tracker.ry,
+                                            event.head_tracker.rz};
+                Eigen::Vector3f twist = {event.head_tracker.vx, event.head_tracker.vy,
+                                         event.head_tracker.vz};
+                Eigen::Quaternionf quat = rotationVectorToQuaternion(rotation);
+                bool isNewReference =
+                        !discontinuityCount->has_value() ||
+                        discontinuityCount->value() != event.head_tracker.discontinuity_count;
+                *discontinuityCount = event.head_tracker.discontinuity_count;
+
+                return PoseEvent{Pose3f(quat), Twist3f(Eigen::Vector3f::Zero(), twist),
+                                 isNewReference};
+            }
+
+            default:
+                LOG_ALWAYS_FATAL("Unexpected sensor type: %d", static_cast<int>(format));
+        }
+    }
+};
+
+}  // namespace
+
+std::unique_ptr<SensorPoseProvider> SensorPoseProvider::create(const char* packageName,
+                                                               Listener* listener) {
+    return SensorPoseProviderImpl::create(packageName, listener);
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/StillnessDetector-test.cpp b/media/libheadtracking/StillnessDetector-test.cpp
new file mode 100644
index 0000000..b6cd479
--- /dev/null
+++ b/media/libheadtracking/StillnessDetector-test.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "StillnessDetector.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = StillnessDetector::Options;
+
+class StillnessDetectorTest : public testing::TestWithParam<bool> {
+  public:
+    void SetUp() override { mDefaultValue = GetParam(); }
+
+  protected:
+    bool mDefaultValue;
+};
+
+TEST_P(StillnessDetectorTest, Still) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f withinThreshold =
+            baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.01) * rotateY(-0.01));
+
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(0, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(300, withinThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(300));
+    detector.setInput(600, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(600));
+    detector.setInput(999, withinThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(999));
+    detector.setInput(1000, baseline);
+    EXPECT_TRUE(detector.calculate(1000));
+}
+
+TEST_P(StillnessDetectorTest, ZeroDuration) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue, .windowDuration = 0});
+    EXPECT_TRUE(detector.calculate(0));
+    EXPECT_TRUE(detector.calculate(1000));
+}
+
+TEST_P(StillnessDetectorTest, NotStillTranslation) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f withinThreshold =
+            baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.01) * rotateY(-0.01));
+    const Pose3f outsideThreshold = baseline * Pose3f(Vector3f(1, 1, 0));
+
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(0, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(300, outsideThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(300));
+    detector.setInput(600, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(600));
+    detector.setInput(1299, withinThreshold);
+    EXPECT_FALSE(detector.calculate(1299));
+    detector.setInput(1300, baseline);
+    EXPECT_TRUE(detector.calculate(1300));
+}
+
+TEST_P(StillnessDetectorTest, NotStillRotation) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f withinThreshold =
+            baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.03) * rotateY(-0.03));
+    const Pose3f outsideThreshold = baseline * Pose3f(rotateZ(0.06));
+
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(0, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(300, outsideThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(300));
+    detector.setInput(600, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(600));
+    detector.setInput(1299, withinThreshold);
+    EXPECT_FALSE(detector.calculate(1299));
+    detector.setInput(1300, baseline);
+    EXPECT_TRUE(detector.calculate(1300));
+}
+
+TEST_P(StillnessDetectorTest, Suppression) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f outsideThreshold = baseline * Pose3f(Vector3f(1.1, 0, 0));
+    const Pose3f middlePoint = baseline * Pose3f(Vector3f(0.55, 0, 0));
+
+    detector.setInput(0, baseline);
+    detector.setInput(1000, baseline);
+    EXPECT_TRUE(detector.calculate(1000));
+    detector.setInput(1100, outsideThreshold);
+    EXPECT_FALSE(detector.calculate(1100));
+    detector.setInput(1500, middlePoint);
+    EXPECT_FALSE(detector.calculate(1500));
+    EXPECT_FALSE(detector.calculate(1999));
+    EXPECT_TRUE(detector.calculate(2000));
+}
+
+TEST_P(StillnessDetectorTest, Reset) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f withinThreshold =
+            baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.01) * rotateY(-0.01));
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(300, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(300));
+    detector.reset();
+    detector.setInput(600, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(600));
+    detector.setInput(900, withinThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(900));
+    detector.setInput(1200, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(1200));
+    detector.setInput(1599, withinThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(1599));
+    detector.setInput(1600, baseline);
+    EXPECT_TRUE(detector.calculate(1600));
+}
+
+INSTANTIATE_TEST_SUITE_P(StillnessDetectorTestParametrized, StillnessDetectorTest,
+                         testing::Values(false, true));
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/StillnessDetector.cpp b/media/libheadtracking/StillnessDetector.cpp
new file mode 100644
index 0000000..be7c893
--- /dev/null
+++ b/media/libheadtracking/StillnessDetector.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StillnessDetector.h"
+
+namespace android {
+namespace media {
+
+StillnessDetector::StillnessDetector(const Options& options)
+    : mOptions(options), mCosHalfRotationalThreshold(cos(mOptions.rotationalThreshold / 2)) {}
+
+void StillnessDetector::reset() {
+    mFifo.clear();
+    mWindowFull = false;
+    mSuppressionDeadline.reset();
+}
+
+void StillnessDetector::setInput(int64_t timestamp, const Pose3f& input) {
+    mFifo.push_back(TimestampedPose{timestamp, input});
+    discardOld(timestamp);
+}
+
+bool StillnessDetector::calculate(int64_t timestamp) {
+    discardOld(timestamp);
+
+    // Check whether all the poses in the queue are in the proximity of the new one. We want to do
+    // this before checking the overriding conditions below, in order to update the suppression
+    // deadline correctly. We always go from end to start, to find the most recent pose that
+    // violated stillness and update the suppression deadline if it has not been set or if the new
+    // one ends after the current one.
+    bool moved = false;
+
+    if (!mFifo.empty()) {
+        for (auto iter = mFifo.rbegin() + 1; iter != mFifo.rend(); ++iter) {
+            const auto& event = *iter;
+            if (!areNear(event.pose, mFifo.back().pose)) {
+                // Enable suppression for the duration of the window.
+                int64_t deadline = event.timestamp + mOptions.windowDuration;
+                if (!mSuppressionDeadline.has_value() || mSuppressionDeadline.value() < deadline) {
+                    mSuppressionDeadline = deadline;
+                }
+                moved = true;
+                break;
+            }
+        }
+    }
+
+    // If the window has not yet been filled, return the default value.
+    if (!mWindowFull) {
+        return mOptions.defaultValue;
+    }
+
+    // Force "in motion" while the suppression deadline is active.
+    if (mSuppressionDeadline.has_value()) {
+        return false;
+    }
+
+    return !moved;
+}
+
+void StillnessDetector::discardOld(int64_t timestamp) {
+    // Handle the special case of the window duration being zero (always considered full).
+    if (mOptions.windowDuration == 0) {
+        mFifo.clear();
+        mWindowFull = true;
+    }
+
+    // Remove any events from the queue that are older than the window. If there were any such
+    // events we consider the window full.
+    const int64_t windowStart = timestamp - mOptions.windowDuration;
+    while (!mFifo.empty() && mFifo.front().timestamp <= windowStart) {
+        mWindowFull = true;
+        mFifo.pop_front();
+    }
+
+    // Expire the suppression deadline.
+    if (mSuppressionDeadline.has_value() && mSuppressionDeadline <= timestamp) {
+        mSuppressionDeadline.reset();
+    }
+}
+
+bool StillnessDetector::areNear(const Pose3f& pose1, const Pose3f& pose2) const {
+    // Check translation. We use the L1 norm to reduce computational load at the expense of
+    // accuracy. The L1 norm is an upper bound for the actual (L2) norm, so this approach will err
+    // on the side of "not near".
+    if ((pose1.translation() - pose2.translation()).lpNorm<1>() > mOptions.translationalThreshold) {
+        return false;
+    }
+
+    // Check orientation.
+    // The angle x between the quaternions is greater than that threshold iff
+    // cos(x/2) < cos(threshold/2).
+    // cos(x/2) can be efficiently calculated as the dot product of both quaternions.
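+    // Derivation sketch (informal, assuming both quaternions are normalized): the relative
+    // rotation between the two poses is q1^-1 * q2, whose scalar (w) component equals cos(x/2),
+    // and for unit quaternions that component equals the 4D dot product dot(q1, q2), up to the
+    // sign ambiguity of the quaternion representation.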
+    if (pose1.rotation().dot(pose2.rotation()) < mCosHalfRotationalThreshold) {
+        return false;
+    }
+
+    return true;
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/StillnessDetector.h b/media/libheadtracking/StillnessDetector.h
new file mode 100644
index 0000000..ee4b2d8
--- /dev/null
+++ b/media/libheadtracking/StillnessDetector.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <deque>
+
+#include <media/Pose.h>
+
+namespace android {
+namespace media {
+
+/**
+ * Given a stream of poses, determines if the pose is stable ("still").
+ * Stillness is defined as all poses in the recent history ("window") being near the most recent
+ * sample.
+ *
+ * Typical usage:
+ *
+ * StillnessDetector detector(StillnessDetector::Options{...});
+ *
+ * while (...) {
+ *    detector.setInput(timestamp, pose);
+ *    bool still = detector.calculate(timestamp);
+ * }
+ *
+ * The detection is not considered reliable until a sufficient number of samples has been provided
+ * for an initial fill-up of the window. During that time, the detector will return whatever default
+ * value has been configured.
+ * The reset() method can be used to empty the window again and get back to this initial state.
+ * In the special case of a window duration of 0, the state will always be considered "still".
+ */
+class StillnessDetector {
+  public:
+    /**
+     * Configuration options for the detector.
+     */
+    struct Options {
+        /**
+         * During the initial fill of the window, should we consider the state still?
+         */
+         bool defaultValue;
+        /**
+         * How long is the window, in ticks. The special value of 0 indicates that the stream is
+         * always considered still.
+         */
+        int64_t windowDuration;
+        /**
+         * How much of a translational deviation from the target (in meters) is considered motion.
+         * This is an approximate quantity - the actual threshold might be a little different as we
+         * trade off accuracy against computational efficiency.
+         */
+        float translationalThreshold;
+        /**
+         * How much of a rotational deviation from the target (in radians) is considered motion.
+         * This is an approximate quantity - the actual threshold might be a little different as we
+         * trade off accuracy against computational efficiency.
+         */
+        float rotationalThreshold;
+    };
+
+    /** Ctor. */
+    explicit StillnessDetector(const Options& options);
+
+    /** Clear the window. */
+    void reset();
+    /** Push a new sample. */
+    void setInput(int64_t timestamp, const Pose3f& input);
+    /** Calculate whether the stream is still at the given timestamp. */
+    bool calculate(int64_t timestamp);
+
+  private:
+    struct TimestampedPose {
+        int64_t timestamp;
+        Pose3f pose;
+    };
+
+    const Options mOptions;
+    // Precalculated cos(mOptions.rotationalThreshold / 2)
+    const float mCosHalfRotationalThreshold;
+    std::deque<TimestampedPose> mFifo;
+    bool mWindowFull = false;
+    // As soon as motion is detected, this will be set to the time of detection + window duration,
+    // and during this time we will always consider ourselves in motion without checking. This is
+    // used for hysteresis purposes: because of the approximate method we use for determining
+    // stillness, we may otherwise toggle back and forth at a rate faster than the window size.
+    std::optional<int64_t> mSuppressionDeadline;
+
+    bool areNear(const Pose3f& pose1, const Pose3f& pose2) const;
+    void discardOld(int64_t timestamp);
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/TestUtil.h b/media/libheadtracking/TestUtil.h
new file mode 100644
index 0000000..4636d86
--- /dev/null
+++ b/media/libheadtracking/TestUtil.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include "media/Pose.h"
+#include "media/Twist.h"
+
+namespace {
+
+constexpr float kPoseComparisonPrecision = 1e-5;
+
+}  // namespace
+
+// These specializations make {EXPECT,ASSERT}_{EQ,NE} work correctly for Pose3f, Twist3f, Vector3f
+// and Quaternionf.
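+// For example (illustrative; poseA and poseB are placeholder values), EXPECT_EQ(poseA, poseB)
+// compares Pose3f values using Pose3f::isApprox() rather than exact equality, and the other
+// specializations behave analogously.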
+namespace testing {
+namespace internal {
+
+template <>
+inline AssertionResult CmpHelperEQ<android::media::Pose3f, android::media::Pose3f>(
+        const char* lhs_expression, const char* rhs_expression, const android::media::Pose3f& lhs,
+        const android::media::Pose3f& rhs) {
+    if (lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+        return AssertionSuccess();
+    }
+
+    return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<android::media::Pose3f, android::media::Pose3f>(
+        const char* lhs_expression, const char* rhs_expression, const android::media::Pose3f& lhs,
+        const android::media::Pose3f& rhs) {
+    if (!lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+        return AssertionSuccess();
+    }
+
+    return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<android::media::Twist3f, android::media::Twist3f>(
+        const char* lhs_expression, const char* rhs_expression, const android::media::Twist3f& lhs,
+        const android::media::Twist3f& rhs) {
+    if (lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+        return AssertionSuccess();
+    }
+
+    return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<android::media::Twist3f, android::media::Twist3f>(
+        const char* lhs_expression, const char* rhs_expression, const android::media::Twist3f& lhs,
+        const android::media::Twist3f& rhs) {
+    if (!lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+        return AssertionSuccess();
+    }
+
+    return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<Eigen::Vector3f, Eigen::Vector3f>(const char* lhs_expression,
+                                                                     const char* rhs_expression,
+                                                                     const Eigen::Vector3f& lhs,
+                                                                     const Eigen::Vector3f& rhs) {
+    if (lhs.isApprox(rhs)) {
+        return AssertionSuccess();
+    }
+
+    return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<Eigen::Vector3f, Eigen::Vector3f>(const char* lhs_expression,
+                                                                     const char* rhs_expression,
+                                                                     const Eigen::Vector3f& lhs,
+                                                                     const Eigen::Vector3f& rhs) {
+    if (!lhs.isApprox(rhs)) {
+        return AssertionSuccess();
+    }
+
+    return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<Eigen::Quaternionf, Eigen::Quaternionf>(
+        const char* lhs_expression, const char* rhs_expression, const Eigen::Quaternionf& lhs,
+        const Eigen::Quaternionf& rhs) {
+    // Negating the coefs results in an equivalent quaternion.
+    if (lhs.isApprox(rhs) || lhs.isApprox(Eigen::Quaternionf(-rhs.coeffs()))) {
+        return AssertionSuccess();
+    }
+
+    return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<Eigen::Quaternionf, Eigen::Quaternionf>(
+        const char* lhs_expression, const char* rhs_expression, const Eigen::Quaternionf& lhs,
+        const Eigen::Quaternionf& rhs) {
+    // Negating the coefs results in an equivalent quaternion.
+    if (!(lhs.isApprox(rhs) || lhs.isApprox(Eigen::Quaternionf(-rhs.coeffs())))) {
+        return AssertionSuccess();
+    }
+
+    return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+}  // namespace internal
+}  // namespace testing
diff --git a/media/libheadtracking/Twist-test.cpp b/media/libheadtracking/Twist-test.cpp
new file mode 100644
index 0000000..7984e1e
--- /dev/null
+++ b/media/libheadtracking/Twist-test.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Twist.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(Twist, DefaultCtor) {
+    Twist3f twist;
+    EXPECT_EQ(twist.translationalVelocity(), Vector3f::Zero());
+    EXPECT_EQ(twist.rotationalVelocity(), Vector3f::Zero());
+    EXPECT_FLOAT_EQ(twist.scalarRotationalVelocity(), 0);
+    EXPECT_FLOAT_EQ(twist.scalarTranslationalVelocity(), 0);
+}
+
+TEST(Twist, FullCtor) {
+    Vector3f rot{1, 2, 3};
+    Vector3f trans{4, 5, 6};
+    Twist3f twist(trans, rot);
+    EXPECT_EQ(twist.translationalVelocity(), trans);
+    EXPECT_EQ(twist.rotationalVelocity(), rot);
+    EXPECT_FLOAT_EQ(twist.scalarRotationalVelocity(), std::sqrt(14.f));
+    EXPECT_FLOAT_EQ(twist.scalarTranslationalVelocity(), std::sqrt(77.f));
+}
+
+TEST(Twist, Integrate) {
+    Vector3f trans{1, 2, 3};
+    // 45 deg/sec around Z.
+    Vector3f rot{0, 0, M_PI_4};
+    Twist3f twist(trans, rot);
+    Pose3f pose = integrate(twist, 2.f);
+
+    EXPECT_EQ(pose, Pose3f(Vector3f{2, 4, 6}, rotateZ(M_PI_2)));
+}
+
+TEST(Twist, Differentiate) {
+    Pose3f pose(Vector3f{2, 4, 6}, rotateZ(M_PI_2));
+    Twist3f twist = differentiate(pose, 2.f);
+    EXPECT_EQ(twist, Twist3f(Vector3f(1, 2, 3), Vector3f(0, 0, M_PI_4)));
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/Twist.cpp b/media/libheadtracking/Twist.cpp
new file mode 100644
index 0000000..664c4d5
--- /dev/null
+++ b/media/libheadtracking/Twist.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Twist.h"
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+Pose3f integrate(const Twist3f& twist, float dt) {
+    Eigen::Vector3f translation = twist.translationalVelocity() * dt;
+    Eigen::Vector3f rotationVector = twist.rotationalVelocity() * dt;
+    return Pose3f(translation, rotationVectorToQuaternion(rotationVector));
+}
+
+Twist3f differentiate(const Pose3f& pose, float dt) {
+    Eigen::Vector3f translationalVelocity = pose.translation() / dt;
+    Eigen::Vector3f rotationalVelocity = quaternionToRotationVector(pose.rotation()) / dt;
+    return Twist3f(translationalVelocity, rotationalVelocity);
+}
+
+std::ostream& operator<<(std::ostream& os, const Twist3f& twist) {
+    os << "translation: " << twist.translationalVelocity().transpose()
+       << " rotation vector: " << twist.rotationalVelocity().transpose();
+    return os;
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingMode.h b/media/libheadtracking/include/media/HeadTrackingMode.h
new file mode 100644
index 0000000..38496e8
--- /dev/null
+++ b/media/libheadtracking/include/media/HeadTrackingMode.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+namespace android {
+namespace media {
+
+/**
+ * Mode of head-tracking.
+ */
+enum class HeadTrackingMode {
+    /** No head-tracking - screen-to-head pose is assumed to be identity. */
+    STATIC,
+    /** Head tracking enabled - world-to-screen pose is assumed to be identity. */
+    WORLD_RELATIVE,
+    /** Full screen-to-head tracking enabled. */
+    SCREEN_RELATIVE,
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
new file mode 100644
index 0000000..1744be3
--- /dev/null
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <limits>
+#include <memory>
+
+#include "HeadTrackingMode.h"
+#include "Pose.h"
+#include "Twist.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Main entry-point for this library.
+ * This interface encompasses all the processing required for determining the head-to-stage pose
+ * used for audio virtualization.
+ * The usage involves periodic setting of the inputs, calling calculate() and obtaining the outputs.
+ * This class is not thread-safe, but thread-compatible.
+ */
+class HeadTrackingProcessor {
+  public:
+    virtual ~HeadTrackingProcessor() = default;
+
+    struct Options {
+        float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
+        float maxRotationalVelocity = std::numeric_limits<float>::infinity();
+        int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
+        float predictionDuration = 0;
+        int64_t autoRecenterWindowDuration = std::numeric_limits<int64_t>::max();
+        float autoRecenterTranslationalThreshold = std::numeric_limits<float>::infinity();
+        float autoRecenterRotationalThreshold = std::numeric_limits<float>::infinity();
+        int64_t screenStillnessWindowDuration = 0;
+        float screenStillnessTranslationalThreshold = std::numeric_limits<float>::infinity();
+        float screenStillnessRotationalThreshold = std::numeric_limits<float>::infinity();
+    };
+
+    /** Sets the desired head-tracking mode. */
+    virtual void setDesiredMode(HeadTrackingMode mode) = 0;
+
+    /**
+     * Sets the world-to-head pose and head twist (velocity).
+     * headTwist is given in the head coordinate frame.
+     */
+    virtual void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
+                                    const Twist3f& headTwist) = 0;
+
+    /**
+     * Sets the world-to-screen pose.
+     */
+    virtual void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) = 0;
+
+    /**
+     * Set the screen-to-stage pose, used in all modes.
+     */
+    virtual void setScreenToStagePose(const Pose3f& screenToStage) = 0;
+
+    /**
+     * Sets the display orientation.
+     * Orientation is expressed as the angle of rotation from the physical "up" side of the screen
+     * to the logical "up" side of the content displayed on the screen. Counterclockwise angles,
+     * as viewed while facing the screen, are positive.
+     */
+    virtual void setDisplayOrientation(float physicalToLogicalAngle) = 0;
+
+    /**
+     * Process all the previous inputs and update the outputs.
+     */
+    virtual void calculate(int64_t timestamp) = 0;
+
+    /**
+     * Get the aggregate head-to-stage pose (primary output of this module).
+     */
+    virtual Pose3f getHeadToStagePose() const = 0;
+
+    /**
+     * Get the actual head-tracking mode (which may deviate from the desired one as mentioned in the
+     * class documentation above).
+     */
+    virtual HeadTrackingMode getActualMode() const = 0;
+
+    /**
+     * This causes the current poses for both the head and/or screen to be considered "center".
+     * This causes the current pose of the head and/or the screen to be considered "center".
+    virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0;
+};
+
+/**
+ * Creates an instance featuring a default implementation of the HeadTrackingProcessor interface.
+ */
+std::unique_ptr<HeadTrackingProcessor> createHeadTrackingProcessor(
+        const HeadTrackingProcessor::Options& options,
+        HeadTrackingMode initialMode = HeadTrackingMode::STATIC);
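+
+/**
+ * Illustrative usage sketch (not part of the API; the pose, twist and timestamp variables are
+ * placeholders for values supplied by the caller):
+ *
+ * auto processor = createHeadTrackingProcessor(HeadTrackingProcessor::Options{});
+ * processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ * while (running) {
+ *     processor->setWorldToHeadPose(timestamp, worldToHead, headTwist);
+ *     processor->setWorldToScreenPose(timestamp, worldToScreen);
+ *     processor->calculate(timestamp);
+ *     Pose3f headToStage = processor->getHeadToStagePose();
+ * }
+ */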
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/include/media/Pose.h b/media/libheadtracking/include/media/Pose.h
new file mode 100644
index 0000000..e660bb9
--- /dev/null
+++ b/media/libheadtracking/include/media/Pose.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <iosfwd>
+#include <optional>
+#include <tuple>
+#include <vector>
+#include <Eigen/Geometry>
+
+namespace android {
+namespace media {
+
+/**
+ * A 6-DoF pose.
+ * This class represents a proper rigid transformation (translation + rotation) between a reference
+ * frame and a target frame,
+ *
+ * See https://en.wikipedia.org/wiki/Six_degrees_of_freedom
+ */
+class Pose3f {
+  public:
+    /** Typical precision for isApprox comparisons. */
+    static constexpr float kDummyPrecision = 1e-5f;
+
+    Pose3f(const Eigen::Vector3f& translation, const Eigen::Quaternionf& rotation)
+        : mTranslation(translation), mRotation(rotation) {}
+
+    explicit Pose3f(const Eigen::Vector3f& translation)
+        : Pose3f(translation, Eigen::Quaternionf::Identity()) {}
+
+    explicit Pose3f(const Eigen::Quaternionf& rotation)
+        : Pose3f(Eigen::Vector3f::Zero(), rotation) {}
+
+    Pose3f() : Pose3f(Eigen::Vector3f::Zero(), Eigen::Quaternionf::Identity()) {}
+
+    Pose3f(const Pose3f& other) { *this = other; }
+
+    /**
+     * Create instance from a vector-of-floats representation.
+     * The vector is expected to have exactly 6 elements, where the first three are a translation
+     * vector and the last three are a rotation vector.
+     *
+     * Returns nullopt if the input vector is illegal.
+     */
+    static std::optional<Pose3f> fromVector(const std::vector<float>& vec);
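+    // For example (illustrative values): fromVector({1, 2, 3, 0, 0, M_PI_2}) yields a pose with
+    // translation (1, 2, 3) and a rotation of pi/2 radians around the Z axis.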
+
+    /**
+     * Convert instance to a vector-of-floats representation.
+     * The vector will have exactly 6 elements, where the first three are a translation vector and
+     * the last three are a rotation vector.
+     */
+    std::vector<float> toVector() const;
+
+    Pose3f& operator=(const Pose3f& other) {
+        mTranslation = other.mTranslation;
+        mRotation = other.mRotation;
+        return *this;
+    }
+
+    Eigen::Vector3f translation() const { return mTranslation; };
+    Eigen::Quaternionf rotation() const { return mRotation; };
+
+    /**
+     * Reverses the reference and target frames.
+     */
+    Pose3f inverse() const {
+        Eigen::Quaternionf invRotation = mRotation.inverse();
+        return Pose3f(-(invRotation * translation()), invRotation);
+    }
+
+    /**
+     * Composes (chains) together two poses. By convention, this only makes sense if the target
+     * frame of the left-hand pose is the same as the reference frame of the right-hand pose.
+     * Note that this operator is not commutative.
+     */
+    Pose3f operator*(const Pose3f& other) const {
+        Pose3f result = *this;
+        result *= other;
+        return result;
+    }
+
+    Pose3f& operator*=(const Pose3f& other) {
+        mTranslation += mRotation * other.mTranslation;
+        mRotation *= other.mRotation;
+        return *this;
+    }
+
+    /**
+     * This is an imprecise "fuzzy" comparison, which is only to be used for validity-testing
+     * purposes.
+     */
+    bool isApprox(const Pose3f& other, float prec = kDummyPrecision) const {
+        return (mTranslation - other.mTranslation).norm() < prec &&
+               // Quaternions are equivalent under sign inversion.
+               ((mRotation.coeffs() - other.mRotation.coeffs()).norm() < prec ||
+                (mRotation.coeffs() + other.mRotation.coeffs()).norm() < prec);
+    }
+
+  private:
+    Eigen::Vector3f mTranslation;
+    Eigen::Quaternionf mRotation;
+};
+
+/**
+ * Pretty-printer for Pose3f.
+ */
+std::ostream& operator<<(std::ostream& os, const Pose3f& pose);
+
+/**
+ * Move between the 'from' pose and the 'to' pose, while making sure velocity limits are enforced.
+ * If velocity limits are not violated, returns the 'to' pose and false.
+ * If velocity limits are violated, returns the pose farthest along the path that can be reached
+ * within the limits, and true.
+ */
+std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
+                                           float maxTranslationalVelocity,
+                                           float maxRotationalVelocity);
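+
+/**
+ * Composition example (illustrative only; the frame names are placeholders). Per the convention
+ * described above, chaining poses matches the target frame of the left-hand side with the
+ * reference frame of the right-hand side:
+ *
+ * Pose3f worldToScreen = ...;
+ * Pose3f screenToHead = ...;
+ * Pose3f worldToHead = worldToScreen * screenToHead;
+ * Pose3f headToWorld = worldToHead.inverse();
+ */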
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/include/media/SensorPoseProvider.h b/media/libheadtracking/include/media/SensorPoseProvider.h
new file mode 100644
index 0000000..d2a6b77
--- /dev/null
+++ b/media/libheadtracking/include/media/SensorPoseProvider.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <memory>
+#include <optional>
+
+#include <android/sensor.h>
+
+#include "Pose.h"
+#include "Twist.h"
+
+namespace android {
+namespace media {
+
+/**
+ * A utility that streams pose data from motion sensors exposed by the Sensor Framework.
+ *
+ * A live instance of this interface keeps around some resources required for accessing sensor
+ * readings (e.g. a thread and a queue). Those would be released when the instance is deleted.
+ *
+ * Once alive, individual sensors can be subscribed to using startSensor() and updates can be
+ * stopped via stopSensor(). Those two methods should not be called concurrently and correct usage
+ * is assumed.
+ */
+class SensorPoseProvider {
+  public:
+    static constexpr int32_t INVALID_HANDLE = ASENSOR_INVALID;
+
+    /**
+     * Interface for consuming pose-related sensor events.
+     *
+     * The listener will be provided with a stream of events, each including:
+     * - A handle of the sensor responsible for the event.
+     * - Timestamp.
+     * - Pose.
+     * - Optional twist (time-derivative of pose).
+     *
+     * Sensors having only orientation data will have the translation part of the pose set to
+     * identity.
+     *
+     * Events are delivered in a serialized manner (i.e. callbacks do not need to be reentrant).
+     * Callbacks should not block.
+     */
+    class Listener {
+      public:
+        virtual ~Listener() = default;
+
+        virtual void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+                            const std::optional<Twist3f>& twist, bool isNewReference) = 0;
+    };
+
+    /**
+     * Creates a new SensorPoseProvider instance.
+     * Events will be delivered to the listener as long as the returned instance is kept alive.
+     * @param packageName Client's package name.
+     * @param listener The listener that will get the events.
+     * @return The new instance, or nullptr in case of failure.
+     */
+    static std::unique_ptr<SensorPoseProvider> create(const char* packageName, Listener* listener);
+
+    virtual ~SensorPoseProvider() = default;
+
+    /**
+     * Start receiving pose updates from a given sensor.
+     * Attempting to start a sensor that has already been started results in undefined behavior.
+     * @param sensor The sensor to subscribe to.
+     * @param samplingPeriod Sampling interval, in microseconds. Actual rate might be slightly
+     * different.
+     * @return true iff succeeded.
+     */
+    virtual bool startSensor(int32_t sensor, std::chrono::microseconds samplingPeriod) = 0;
+
+    /**
+     * Stop a sensor, previously started with startSensor(). It is not required to stop all sensors
+     * before deleting the SensorPoseProvider instance.
+     * @param handle The sensor handle, as provided to startSensor().
+     */
+    virtual void stopSensor(int32_t handle) = 0;
+};
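+
+/**
+ * Illustrative usage sketch (not part of the API; MyListener, sensorHandle and the package name
+ * are placeholders):
+ *
+ * class MyListener : public SensorPoseProvider::Listener {
+ *   public:
+ *     void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+ *                 const std::optional<Twist3f>& twist, bool isNewReference) override {
+ *         // Consume the pose here; callbacks should not block.
+ *     }
+ * };
+ *
+ * MyListener listener;
+ * auto provider = SensorPoseProvider::create("com.example.client", &listener);
+ * if (provider != nullptr &&
+ *         provider->startSensor(sensorHandle, std::chrono::microseconds(10000))) {
+ *     // ... onPose() callbacks are delivered while the provider is alive ...
+ *     provider->stopSensor(sensorHandle);
+ * }
+ */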
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/include/media/Twist.h b/media/libheadtracking/include/media/Twist.h
new file mode 100644
index 0000000..291cea3
--- /dev/null
+++ b/media/libheadtracking/include/media/Twist.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <Eigen/Geometry>
+
+#include "Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * A 6-DoF twist.
+ * This class represents the translational and rotational velocity of a rigid object, typically
+ * relative to its own coordinate-frame.
+ * It is constructed from two 3-vectors: one representing linear motion per time-unit and the other
+ * a rotation vector in radians per time-unit (right-handed).
+ */
+class Twist3f {
+  public:
+    Twist3f(const Eigen::Vector3f& translationalVelocity, const Eigen::Vector3f& rotationalVelocity)
+        : mTranslationalVelocity(translationalVelocity), mRotationalVelocity(rotationalVelocity) {}
+
+    Twist3f() : Twist3f(Eigen::Vector3f::Zero(), Eigen::Vector3f::Zero()) {}
+
+    Twist3f(const Twist3f& other) { *this = other; }
+
+    Twist3f& operator=(const Twist3f& other) {
+        mTranslationalVelocity = other.mTranslationalVelocity;
+        mRotationalVelocity = other.mRotationalVelocity;
+        return *this;
+    }
+
+    Eigen::Vector3f translationalVelocity() const { return mTranslationalVelocity; }
+    Eigen::Vector3f rotationalVelocity() const { return mRotationalVelocity; }
+
+    float scalarTranslationalVelocity() const { return mTranslationalVelocity.norm(); }
+    float scalarRotationalVelocity() const { return mRotationalVelocity.norm(); }
+
+    bool isApprox(const Twist3f& other,
+                  float prec = Eigen::NumTraits<float>::dummy_precision()) const {
+        return mTranslationalVelocity.isApprox(other.mTranslationalVelocity, prec) &&
+               mRotationalVelocity.isApprox(other.mRotationalVelocity, prec);
+    }
+
+    template<typename T>
+    Twist3f operator*(const T& s) const {
+        return Twist3f(mTranslationalVelocity * s, mRotationalVelocity * s);
+    }
+
+    template<typename T>
+    Twist3f operator/(const T& s) const {
+        return Twist3f(mTranslationalVelocity / s, mRotationalVelocity / s);
+    }
+
+  private:
+    Eigen::Vector3f mTranslationalVelocity;
+    Eigen::Vector3f mRotationalVelocity;
+};
+
+/**
+ * Integrate a twist over time to obtain a pose.
+ * dt is the time over which to integrate.
+ * The resulting pose represents the transformation between the starting point and the ending point
+ * of the motion over the time period.
+ */
+Pose3f integrate(const Twist3f& twist, float dt);
+
+/**
+ * Differentiate pose to obtain a twist.
+ * dt is the time of the motion between the reference and the target frames of the pose.
+ */
+Twist3f differentiate(const Pose3f& pose, float dt);
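+
+/**
+ * Worked example (illustrative): a twist of 1 m/s along X and pi/2 rad/s around Z, integrated over
+ * dt = 2 seconds, yields a pose translated by (2, 0, 0) and rotated by pi around Z.
+ * differentiate() performs the inverse mapping for the same dt.
+ */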
+
+/**
+ * Pretty-printer for twist.
+ */
+std::ostream& operator<<(std::ostream& os, const Twist3f& twist);
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheif/Android.bp b/media/libheif/Android.bp
index 6a3427e..55ba61a 100644
--- a/media/libheif/Android.bp
+++ b/media/libheif/Android.bp
@@ -26,7 +26,5 @@
         "-Wall",
     ],
 
-    include_dirs: [],
-
     export_include_dirs: ["include"],
 }
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 273d91c..b28ae70 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -15,6 +15,7 @@
  */
 
 //#define LOG_NDEBUG 0
+#include "include/HeifDecoderAPI.h"
 #define LOG_TAG "HeifDecoderImpl"
 
 #include "HeifDecoderImpl.h"
@@ -464,7 +465,7 @@
 }
 
 bool HeifDecoderImpl::setOutputColor(HeifColorFormat heifColor) {
-    if (heifColor == mOutputColor) {
+    if (heifColor == (HeifColorFormat)mOutputColor) {
         return true;
     }
 
@@ -484,6 +485,11 @@
             mOutputColor = HAL_PIXEL_FORMAT_BGRA_8888;
             break;
         }
+        case kHeifColorFormat_RGBA_1010102:
+        {
+            mOutputColor = HAL_PIXEL_FORMAT_RGBA_1010102;
+            break;
+        }
         default:
             ALOGE("Unsupported output color format %d", heifColor);
             return false;
diff --git a/media/libheif/include/HeifDecoderAPI.h b/media/libheif/include/HeifDecoderAPI.h
index 9073672..fa51aef 100644
--- a/media/libheif/include/HeifDecoderAPI.h
+++ b/media/libheif/include/HeifDecoderAPI.h
@@ -23,9 +23,10 @@
  * The output color pixel format of heif decoder.
  */
 typedef enum {
-    kHeifColorFormat_RGB565     = 0,
-    kHeifColorFormat_RGBA_8888  = 1,
-    kHeifColorFormat_BGRA_8888  = 2,
+    kHeifColorFormat_RGB565       = 0,
+    kHeifColorFormat_RGBA_8888    = 1,
+    kHeifColorFormat_BGRA_8888    = 2,
+    kHeifColorFormat_RGBA_1010102 = 3,
 } HeifColorFormat;
 
 /*
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index e98d7d8..2dd5784 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -19,6 +19,10 @@
     name: "libmedia_headers",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
     export_include_dirs: ["include"],
     header_libs: [
@@ -214,6 +218,11 @@
     name: "libmedia_midiiowrapper",
 
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+    ],
+
 
     srcs: ["MidiIoWrapper.cpp"],
 
@@ -347,6 +356,7 @@
 
     shared_libs: [
         "android.hidl.token@1.0-utils",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "av-types-aidl-cpp",
         "liblog",
@@ -378,12 +388,12 @@
     ],
 
     static_libs: [
-        "resourcemanager_aidl_interface-ndk_platform",
+        "resourcemanager_aidl_interface-ndk",
         "framework-permission-aidl-cpp",
     ],
 
     export_static_lib_headers: [
-        "resourcemanager_aidl_interface-ndk_platform",
+        "resourcemanager_aidl_interface-ndk",
         "framework-permission-aidl-cpp",
     ],
 
@@ -440,6 +450,6 @@
 
     apex_available: [
         "//apex_available:platform",
-        "com.android.media"
+        "com.android.media",
     ],
 }
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index c89c023..c9f361e 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -17,7 +17,6 @@
 
 #include <arpa/inet.h>
 #include <stdint.h>
-#include <sys/types.h>
 
 #include <android/IDataSource.h>
 #include <binder/IPCThreadState.h>
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 67d33fa..85768bd 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -20,12 +20,14 @@
 #define LOG_TAG "MediaProfiles"
 
 #include <stdlib.h>
+#include <utils/misc.h>
 #include <utils/Log.h>
 #include <utils/Vector.h>
 #include <cutils/properties.h>
 #include <expat.h>
 #include <media/MediaProfiles.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaCodecConstants.h>
 #include <OMX_Video.h>
 #include <sys/stat.h>
 
@@ -86,7 +88,24 @@
     {"h263", VIDEO_ENCODER_H263},
     {"h264", VIDEO_ENCODER_H264},
     {"m4v",  VIDEO_ENCODER_MPEG_4_SP},
-    {"hevc", VIDEO_ENCODER_HEVC}
+    {"vp8",  VIDEO_ENCODER_VP8},
+    {"hevc", VIDEO_ENCODER_HEVC},
+    {"vp9",  VIDEO_ENCODER_VP9},
+    {"dolbyvision", VIDEO_ENCODER_DOLBY_VISION},
+};
+
+const MediaProfiles::NameToTagMap MediaProfiles::sChromaSubsamplingNameMap[] = {
+    {"yuv 4:2:0", CHROMA_SUBSAMPLING_YUV_420},
+    {"yuv 4:2:2", CHROMA_SUBSAMPLING_YUV_422},
+    {"yuv 4:4:4", CHROMA_SUBSAMPLING_YUV_444},
+};
+
+const MediaProfiles::NameToTagMap MediaProfiles::sHdrFormatNameMap[] = {
+    {"sdr", HDR_FORMAT_NONE},
+    {"hlg", HDR_FORMAT_HLG},
+    {"hdr10", HDR_FORMAT_HDR10},
+    {"hdr10+", HDR_FORMAT_HDR10PLUS},
+    {"dolbyvision", HDR_FORMAT_DOLBY_VISION},
 };
 
 const MediaProfiles::NameToTagMap MediaProfiles::sAudioEncoderNameMap[] = {
@@ -164,12 +183,18 @@
 MediaProfiles::logVideoCodec(const MediaProfiles::VideoCodec& codec UNUSED)
 {
     ALOGV("video codec:");
-    ALOGV("codec = %d", codec.mCodec);
+    ALOGV("codec = %d (%s)", codec.mCodec,
+            findNameForTag(sVideoEncoderNameMap, NELEM(sVideoEncoderNameMap), codec.mCodec));
     ALOGV("bit rate: %d", codec.mBitRate);
     ALOGV("frame width: %d", codec.mFrameWidth);
     ALOGV("frame height: %d", codec.mFrameHeight);
     ALOGV("frame rate: %d", codec.mFrameRate);
     ALOGV("profile: %d", codec.mProfile);
+    ALOGV("chroma: %s", findNameForTag(sChromaSubsamplingNameMap, NELEM(sChromaSubsamplingNameMap),
+                                       codec.mChromaSubsampling));
+    ALOGV("bit depth: %d", codec.mBitDepth);
+    ALOGV("hdr format: %s", findNameForTag(sHdrFormatNameMap, NELEM(sHdrFormatNameMap),
+                                           codec.mHdrFormat));
 }
 
 /*static*/ void
@@ -232,6 +257,155 @@
     return tag;
 }
 
+/*static*/ const char *
+MediaProfiles::findNameForTag(
+        const MediaProfiles::NameToTagMap *map, size_t nMappings, int tag, const char *def_)
+{
+    for (size_t i = 0; i < nMappings; ++i) {
+        if (map[i].tag == tag) {
+            return map[i].name;
+        }
+    }
+    return def_;
+}
+
+/*static*/ bool
+MediaProfiles::detectAdvancedVideoProfile(
+        video_encoder codec, int profile,
+        chroma_subsampling *chroma, int *bitDepth, hdr_format *hdr)
+{
+    // default values
+    *chroma = CHROMA_SUBSAMPLING_YUV_420;
+    *bitDepth = 8;
+    *hdr = HDR_FORMAT_NONE;
+
+    switch (codec) {
+    case VIDEO_ENCODER_H263:
+    case VIDEO_ENCODER_MPEG_4_SP:
+    case VIDEO_ENCODER_VP8:
+        // these are always 4:2:0 SDR 8-bit
+        return true;
+
+    case VIDEO_ENCODER_H264:
+        switch (profile) {
+        case AVCProfileBaseline:
+        case AVCProfileConstrainedBaseline:
+        case AVCProfileMain:
+        case AVCProfileExtended:
+        case AVCProfileHigh:
+        case AVCProfileConstrainedHigh:
+            return true;
+        case AVCProfileHigh10:
+            // returning false here as this could be an HLG stream
+            *bitDepth = 10;
+            return false;
+        case AVCProfileHigh422:
+            *chroma = CHROMA_SUBSAMPLING_YUV_422;
+            // returning false here as bit-depth could be 8 or 10
+            return false;
+        case AVCProfileHigh444:
+            *chroma = CHROMA_SUBSAMPLING_YUV_444;
+            // returning false here as bit-depth could be 8 or 10
+            return false;
+        default:
+            return false;
+        }
+        // flow does not get here
+
+    case VIDEO_ENCODER_HEVC:
+        switch (profile) {
+        case HEVCProfileMain:
+            return true;
+        case HEVCProfileMain10:
+            *bitDepth = 10;
+            // returning false here as this could be an HLG stream
+            return false;
+        case HEVCProfileMain10HDR10:
+            *bitDepth = 10;
+            *hdr = HDR_FORMAT_HDR10;
+            return true;
+        case HEVCProfileMain10HDR10Plus:
+            *bitDepth = 10;
+            *hdr = HDR_FORMAT_HDR10PLUS;
+            return true;
+        default:
+            return false;
+        }
+        // flow does not get here
+
+    case VIDEO_ENCODER_VP9:
+        switch (profile) {
+        case VP9Profile0:
+            return true;
+        case VP9Profile2:
+            // this is always 10-bit on Android
+            *bitDepth = 10;
+            // returning false here as this could be an HLG stream
+            return false;
+        case VP9Profile2HDR:
+            // this is always 10-bit on Android
+            *bitDepth = 10;
+            *hdr = HDR_FORMAT_HDR10;
+            return true;
+        case VP9Profile2HDR10Plus:
+            *bitDepth = 10;
+            *hdr = HDR_FORMAT_HDR10PLUS;
+            return true;
+        default:
+            return false;
+        }
+        // flow does not get here
+
+    case VIDEO_ENCODER_DOLBY_VISION:
+    {
+        // for Dolby Vision codec we always assume 10-bit DV
+        *bitDepth = 10;
+        *hdr = HDR_FORMAT_DOLBY_VISION;
+
+        switch (profile) {
+        case DolbyVisionProfileDvheDer /* profile 2 deprecated */:
+        case DolbyVisionProfileDvheDen /* profile 3 deprecated */:
+        case DolbyVisionProfileDvavPer /* profile 0 deprecated */:
+        case DolbyVisionProfileDvavPen /* profile 1 deprecated */:
+        case DolbyVisionProfileDvheDtr /* dvhe.04 */:
+        case DolbyVisionProfileDvheStn /* dvhe.05 */:
+        case DolbyVisionProfileDvheDth /* profile 6 deprecated */:
+        case DolbyVisionProfileDvheDtb /* dvhe.07 */:
+        case DolbyVisionProfileDvheSt  /* dvhe.08 */:
+        case DolbyVisionProfileDvavSe  /* dvav.09 */:
+        case DolbyVisionProfileDvav110 /* dvav1.10 */:
+            return true;
+        default:
+            return false;
+        }
+        // flow does not get here
+    }
+
+    case VIDEO_ENCODER_AV1:
+        switch (profile) {
+        case AV1ProfileMain10:
+            *bitDepth = 10;
+            // returning false here as this could be an HLG stream
+            return false;
+        case AV1ProfileMain10HDR10:
+            *bitDepth = 10;
+            *hdr = HDR_FORMAT_HDR10;
+            return true;
+        case AV1ProfileMain10HDR10Plus:
+            *bitDepth = 10;
+            *hdr = HDR_FORMAT_HDR10PLUS;
+            return true;
+        default:
+            return false;
+        }
+        // flow does not get here
+
+    default:
+        return false;
+    }
+    // flow does not get here
+}
+
 /*static*/ void
 MediaProfiles::createVideoCodec(const char **atts, size_t natts, MediaProfiles *profiles)
 {
@@ -250,13 +424,56 @@
     }
 
     int profile = -1;
-    if (natts >= 12 && !strcmp("profile", atts[10])) {
-        profile = atoi(atts[11]);
+    chroma_subsampling chroma = CHROMA_SUBSAMPLING_YUV_420;
+    int bitDepth = 8;
+    hdr_format hdr = HDR_FORMAT_NONE;
+    if (codec == VIDEO_ENCODER_DOLBY_VISION) {
+        bitDepth = 10;
+        hdr = HDR_FORMAT_DOLBY_VISION;
     }
 
-    VideoCodec videoCodec {
+    if (natts >= 12 && !strcmp("profile", atts[10])) {
+        profile = atoi(atts[11]);
+        if (!detectAdvancedVideoProfile(
+                (video_encoder)codec, profile, &chroma, &bitDepth, &hdr)) {
+            // if not detected read values from the attributes
+            for (size_t ix = 12; natts >= ix + 2; ix += 2) {
+                if (!strcmp("chroma", atts[ix])) {
+                    int chromaTag = findTagForName(sChromaSubsamplingNameMap,
+                                         NELEM(sChromaSubsamplingNameMap), atts[ix + 1]);
+                    if (chromaTag == -1) {
+                        ALOGE("MediaProfiles::createVideoCodec invalid chroma %s", atts[ix + 1]);
+                        return;
+                    } else {
+                        chroma = (chroma_subsampling)chromaTag;
+                    }
+                } else if (!strcmp("bitDepth", atts[ix])) {
+                    bitDepth = atoi(atts[ix + 1]);
+                    if (bitDepth < 8 || bitDepth > 16) {
+                        ALOGE("MediaProfiles::createVideoCodec invalid bidDepth %s", atts[ix + 1]);
+                        return;
+                    }
+                } else if (!strcmp("hdr", atts[ix])) {
+                    int hdrTag = findTagForName(sHdrFormatNameMap,
+                                                NELEM(sHdrFormatNameMap), atts[ix + 1]);
+                    if (hdrTag == -1) {
+                        ALOGE("MediaProfiles::createVideoCodec invalid hdr %s", atts[ix + 1]);
+                        return;
+                    } else {
+                        hdr = (hdr_format)hdrTag;
+                    }
+                } else {
+                    // Ignoring unknown attr. TODO: rewrite this whole file to ignore invalid attrs
+                    ALOGD("MediaProfiles::createVideoCodec ignoring invalid attr %s", atts[ix]);
+                }
+            }
+        }
+    }
+
+    VideoCodec videoCodec{
             static_cast<video_encoder>(codec),
-            atoi(atts[3]), atoi(atts[5]), atoi(atts[7]), atoi(atts[9]), profile };
+            atoi(atts[3]) /* bitRate */, atoi(atts[5]) /* width */, atoi(atts[7]) /* height */,
+            atoi(atts[9]) /* frameRate */, profile, chroma, bitDepth, hdr };
     logVideoCodec(videoCodec);
 
     size_t nCamcorderProfiles;
diff --git a/media/libmedia/include/media/MediaProfiles.h b/media/libmedia/include/media/MediaProfiles.h
index 4a898e2..e75b694 100644
--- a/media/libmedia/include/media/MediaProfiles.h
+++ b/media/libmedia/include/media/MediaProfiles.h
@@ -81,6 +81,19 @@
     AUDIO_DECODER_WMA,
 };
 
+enum chroma_subsampling {
+    CHROMA_SUBSAMPLING_YUV_420,
+    CHROMA_SUBSAMPLING_YUV_422,
+    CHROMA_SUBSAMPLING_YUV_444,
+};
+
+enum hdr_format {
+    HDR_FORMAT_NONE,
+    HDR_FORMAT_HLG,
+    HDR_FORMAT_HDR10,
+    HDR_FORMAT_HDR10PLUS,
+    HDR_FORMAT_DOLBY_VISION,
+};
 
 class MediaProfiles
 {
@@ -117,13 +130,19 @@
          * @param profile codec profile (for MediaCodec) or -1 for none
          */
         VideoCodec(video_encoder codec, int bitrate, int frameWidth, int frameHeight, int frameRate,
-                   int profile = -1)
+                   int profile = -1,
+                   chroma_subsampling chroma = CHROMA_SUBSAMPLING_YUV_420,
+                   int bitDepth = 8,
+                   hdr_format hdr = HDR_FORMAT_NONE)
             : mCodec(codec),
               mBitRate(bitrate),
               mFrameWidth(frameWidth),
               mFrameHeight(frameHeight),
               mFrameRate(frameRate),
-              mProfile(profile) {
+              mProfile(profile),
+              mChromaSubsampling(chroma),
+              mBitDepth(bitDepth),
+              mHdrFormat(hdr) {
         }
 
         VideoCodec(const VideoCodec&) = default;
@@ -160,6 +179,21 @@
             return mProfile;
         }
 
+        /** Returns the chroma subsampling. */
+        chroma_subsampling getChromaSubsampling() const {
+            return mChromaSubsampling;
+        }
+
+        /** Returns the bit depth. */
+        int getBitDepth() const {
+            return mBitDepth;
+        }
+
+        /** Returns the HDR format. */
+        hdr_format getHdrFormat() const {
+            return mHdrFormat;
+        }
+
     private:
         video_encoder mCodec;
         int mBitRate;
@@ -167,6 +201,9 @@
         int mFrameHeight;
         int mFrameRate;
         int mProfile;
+        chroma_subsampling mChromaSubsampling;
+        int mBitDepth;
+        hdr_format mHdrFormat;
         friend class MediaProfiles;
     };
 
@@ -533,6 +570,39 @@
     static int findTagForName(const NameToTagMap *map, size_t nMappings, const char *name);
 
     /**
+     * Finds the string representation for an integer enum tag.
+     *
+     * This is the reverse of findTagForName.
+     *
+     * @param map       the name-to-tag map to search
+     * @param nMappings the number of mappings in |map|
+     * @param tag       the enum value to find
+     * @param def_      the return value if the enum is not found
+     *
+     * @return the string name corresponding to |tag| or |def_| if not found.
+     */
+    static const char *findNameForTag(
+            const NameToTagMap *map, size_t nMappings,
+            int tag, const char *def_ = "(unknown)");
+
+    /**
+     * Updates the chroma subsampling, bit-depth and hdr-format for
+     * advanced codec profiles.
+     *
+     * @param codec    the video codec type
+     * @param profile  the MediaCodec profile
+     * @param chroma   pointer to the chroma subsampling output
+     * @param bitDepth pointer to the bit depth output
+     * @param hdr      pointer to the hdr format output
+     *
+     * @return true, if the profile fully determined chroma, bit-depth and hdr-format, false
+     *         otherwise.
+     */
+    static bool detectAdvancedVideoProfile(
+            video_encoder codec, int profile,
+            chroma_subsampling *chroma, int *bitDepth, hdr_format *hdr);
+
+    /**
      * Check on existing profiles with the following criteria:
      * 1. Low quality profile must have the lowest video
      *    resolution product (width x height)
@@ -549,6 +619,8 @@
 
     // Mappings from name (for instance, codec name) to enum value
     static const NameToTagMap sVideoEncoderNameMap[];
+    static const NameToTagMap sChromaSubsamplingNameMap[];
+    static const NameToTagMap sHdrFormatNameMap[];
     static const NameToTagMap sAudioEncoderNameMap[];
     static const NameToTagMap sFileFormatMap[];
     static const NameToTagMap sVideoDecoderNameMap[];
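For context, the sketch below shows how the extended VideoCodec surface reads once the new fields are in place. It is illustrative only: it assumes the enums above are visible in the android namespace via <media/MediaProfiles.h>, and that the header still pulls in the video_encoder constants it references.

```cpp
#include <media/MediaProfiles.h>

void describeHdrCapableCodec() {
    using namespace android;
    // Construct a codec entry with the widened constructor (profile -1 means "none").
    MediaProfiles::VideoCodec codec(
            VIDEO_ENCODER_HEVC,
            20000000 /* bitRate */, 3840 /* width */, 2160 /* height */, 30 /* frameRate */,
            -1 /* profile */,
            CHROMA_SUBSAMPLING_YUV_420,
            10 /* bitDepth */,
            HDR_FORMAT_HDR10);

    // The new getters mirror the constructor arguments.
    chroma_subsampling chroma = codec.getChromaSubsampling();  // CHROMA_SUBSAMPLING_YUV_420
    int bitDepth = codec.getBitDepth();                        // 10
    hdr_format hdr = codec.getHdrFormat();                     // HDR_FORMAT_HDR10
    (void)chroma; (void)bitDepth; (void)hdr;
}
```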
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index d54ff32..dd18144 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -108,7 +108,9 @@
     VIDEO_ENCODER_MPEG_4_SP = 3,
     VIDEO_ENCODER_VP8 = 4,
     VIDEO_ENCODER_HEVC = 5,
-
+    VIDEO_ENCODER_VP9 = 6,
+    VIDEO_ENCODER_DOLBY_VISION = 7,
+    VIDEO_ENCODER_AV1 = 8,
     VIDEO_ENCODER_LIST_END // must be the last - used to validate the video encoder type
 };
 
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 1c9b9e4..5215c1b 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -949,6 +949,9 @@
         mVideoWidth = ext1;
         mVideoHeight = ext2;
         break;
+    case MEDIA_STARTED:
+        ALOGV("Received media started message");
+        break;
     case MEDIA_NOTIFY_TIME:
         ALOGV("Received notify time message");
         break;
diff --git a/media/libmedia/tests/codeclist/Android.bp b/media/libmedia/tests/codeclist/Android.bp
index 7dd0caa..2ed3126 100644
--- a/media/libmedia/tests/codeclist/Android.bp
+++ b/media/libmedia/tests/codeclist/Android.bp
@@ -25,9 +25,25 @@
 
 cc_test {
     name: "CodecListTest",
-    test_suites: ["device-tests"],
+    test_suites: ["device-tests", "mts"],
     gtest: true,
 
+    // Support multilib variants (using different suffix per sub-architecture), which is needed on
+    // build targets with secondary architectures, as the MTS test suite packaging logic flattens
+    // all test artifacts into a single `testcases` directory.
+    compile_multilib: "both",
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+
+    // Used within mainline MTS; must run back to R, but not to Q.
+    min_sdk_version: "30",
+
     srcs: [
         "CodecListTest.cpp",
     ],
@@ -35,7 +51,7 @@
     shared_libs: [
         "libbinder",
         "liblog",
-        "libmedia_codeclist",
+        "libmedia_codeclist", // available >= R
         "libstagefright",
         "libstagefright_foundation",
         "libstagefright_xmlparser",
diff --git a/media/libmedia/tests/codeclist/AndroidTest.xml b/media/libmedia/tests/codeclist/AndroidTest.xml
new file mode 100644
index 0000000..eeaab8e
--- /dev/null
+++ b/media/libmedia/tests/codeclist/AndroidTest.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Test module config for CodecList unit tests">
+    <option name="test-suite-tag" value="CodecListTest" />
+    <object type="module_controller" class="com.android.tradefed.testtype.suite.module.Sdk30ModuleController" />
+
+    <target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
+        <option name="cleanup" value="true" />
+        <option name="append-bitness" value="true" />
+        <option name="push" value="CodecListTest->/data/local/tmp/CodecListTest" />
+    </target_preparer>
+
+    <test class="com.android.tradefed.testtype.GTest" >
+        <option name="native-test-device-path" value="/data/local/tmp" />
+        <option name="module-name" value="CodecListTest" />
+    </test>
+
+
+</configuration>
diff --git a/media/libmedia/xsd/vts/OWNERS b/media/libmedia/xsd/vts/OWNERS
new file mode 100644
index 0000000..9af2eba
--- /dev/null
+++ b/media/libmedia/xsd/vts/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 151862
+sundongahn@google.com
diff --git a/media/libmediaformatshaper/Android.bp b/media/libmediaformatshaper/Android.bp
index bdd1465..7e8f351 100644
--- a/media/libmediaformatshaper/Android.bp
+++ b/media/libmediaformatshaper/Android.bp
@@ -95,10 +95,10 @@
 
     min_sdk_version: "29",
 
-    apex_available: [
-        "//apex_available:platform",
-        "com.android.media",
-    ],
+    // The library lives only in the module. The framework accesses it with
+    // dlopen() and uses "libmediaformatshaper_headers", so both sides track
+    // the same interface.
+    apex_available: ["com.android.media"],
 
     version_script: "exports.lds",
 
diff --git a/media/libmediahelper/Android.bp b/media/libmediahelper/Android.bp
index 9b54199..a433fc6 100644
--- a/media/libmediahelper/Android.bp
+++ b/media/libmediahelper/Android.bp
@@ -20,7 +20,7 @@
     },
     apex_available: [
         "//apex_available:platform",
-        "com.android.bluetooth.updatable",
+        "com.android.bluetooth",
         "com.android.media",
         "com.android.media.swcodec",
     ],
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index d3a517f..e29364c 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -30,6 +30,8 @@
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_CALL_SCREEN),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_CALL_REDIRECT),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_COMMUNICATION_REDIRECT),
     TERMINATOR
 };
 
@@ -50,6 +52,8 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_MUTE_HAPTIC),
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_SYSTEM_CAPTURE),
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CAPTURE_PRIVATE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CONTENT_SPATIALIZED),
+    MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NEVER_SPATIALIZE),
     TERMINATOR
 };
 
diff --git a/media/libmediametrics/IMediaMetricsService.cpp b/media/libmediametrics/IMediaMetricsService.cpp
deleted file mode 100644
index b5675e6..0000000
--- a/media/libmediametrics/IMediaMetricsService.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaMetrics"
-
-#include <stdint.h>
-#include <inttypes.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <binder/IPCThreadState.h>
-
-#include <utils/Errors.h>  // for status_t
-#include <utils/List.h>
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <media/MediaMetricsItem.h>
-#include <media/IMediaMetricsService.h>
-
-namespace android {
-
-// TODO: Currently ONE_WAY transactions, make both ONE_WAY and synchronous options.
-
-enum {
-    SUBMIT_ITEM = IBinder::FIRST_CALL_TRANSACTION,
-    SUBMIT_BUFFER,
-};
-
-class BpMediaMetricsService: public BpInterface<IMediaMetricsService>
-{
-public:
-    explicit BpMediaMetricsService(const sp<IBinder>& impl)
-        : BpInterface<IMediaMetricsService>(impl)
-    {
-    }
-
-    status_t submit(mediametrics::Item *item) override
-    {
-        if (item == nullptr) {
-            return BAD_VALUE;
-        }
-        ALOGV("%s: (ONEWAY) item=%s", __func__, item->toString().c_str());
-
-        Parcel data;
-        data.writeInterfaceToken(IMediaMetricsService::getInterfaceDescriptor());
-
-        status_t status = item->writeToParcel(&data);
-        if (status != NO_ERROR) { // assume failure logged in item
-            return status;
-        }
-
-        status = remote()->transact(
-                SUBMIT_ITEM, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
-        ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
-                __func__, status);
-        return status;
-    }
-
-    status_t submitBuffer(const char *buffer, size_t length) override
-    {
-        if (buffer == nullptr || length > INT32_MAX) {
-            return BAD_VALUE;
-        }
-        ALOGV("%s: (ONEWAY) length:%zu", __func__, length);
-
-        Parcel data;
-        data.writeInterfaceToken(IMediaMetricsService::getInterfaceDescriptor());
-
-        status_t status = data.writeInt32(length)
-                ?: data.write((uint8_t*)buffer, length);
-        if (status != NO_ERROR) {
-            return status;
-        }
-
-        status = remote()->transact(
-                SUBMIT_BUFFER, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
-        ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
-                __func__, status);
-        return status;
-    }
-};
-
-IMPLEMENT_META_INTERFACE(MediaMetricsService, "android.media.IMediaMetricsService");
-
-// ----------------------------------------------------------------------
-
-status_t BnMediaMetricsService::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    switch (code) {
-    case SUBMIT_ITEM: {
-        CHECK_INTERFACE(IMediaMetricsService, data, reply);
-
-        mediametrics::Item * const item = mediametrics::Item::create();
-        status_t status = item->readFromParcel(data);
-        if (status != NO_ERROR) { // assume failure logged in item
-            return status;
-        }
-        status = submitInternal(item, true /* release */);
-        // assume failure logged by submitInternal
-        return NO_ERROR;
-    }
-    case SUBMIT_BUFFER: {
-        CHECK_INTERFACE(IMediaMetricsService, data, reply);
-        int32_t length;
-        status_t status = data.readInt32(&length);
-        if (status != NO_ERROR || length <= 0) {
-            return BAD_VALUE;
-        }
-        const void *ptr = data.readInplace(length);
-        if (ptr == nullptr) {
-            return BAD_VALUE;
-        }
-        status = submitBuffer(static_cast<const char *>(ptr), length);
-        // assume failure logged by submitBuffer
-        return NO_ERROR;
-    }
-
-    default:
-        return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-} // namespace android
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index d597a4d..57fc49d 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -23,6 +23,7 @@
 
 #include <mutex>
 #include <set>
+#include <unordered_map>
 
 #include <binder/Parcel.h>
 #include <cutils/properties.h>
@@ -51,6 +52,33 @@
 // the service is off.
 #define SVC_TRIES               2
 
+static const std::unordered_map<std::string, int32_t>& getErrorStringMap() {
+    // DO NOT MODIFY VALUES (OK to add new ones).
+    // This may be found in frameworks/av/media/libmediametrics/include/MediaMetricsConstants.h
+    static std::unordered_map<std::string, int32_t> map{
+        {"",                                      NO_ERROR},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_OK,       NO_ERROR},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT, BAD_VALUE},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_IO,       DEAD_OBJECT},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY,   NO_MEMORY},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY, PERMISSION_DENIED},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_STATE,    INVALID_OPERATION},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT,  WOULD_BLOCK},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN,  UNKNOWN_ERROR},
+    };
+    return map;
+}
+
+status_t statusStringToStatus(const char *error) {
+    const auto& map = getErrorStringMap();
+    if (error == nullptr || error[0] == '\0') return NO_ERROR;
+    auto it = map.find(error);
+    if (it != map.end()) {
+        return it->second;
+    }
+    return UNKNOWN_ERROR;
+}
+
 mediametrics::Item* mediametrics::Item::convert(mediametrics_handle_t handle) {
     mediametrics::Item *item = (android::mediametrics::Item *) handle;
     return item;
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index a09a673..4247375 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -25,6 +25,9 @@
  * 2) Consistent behavior and documentation.
  */
 
+#define AMEDIAMETRICS_INITIAL_MAX_VOLUME (0.f)
+#define AMEDIAMETRICS_INITIAL_MIN_VOLUME (1.f)
+
 /*
  * Taxonomy of audio keys
  *
@@ -61,6 +64,10 @@
 #define AMEDIAMETRICS_KEY_AUDIO_FLINGER       AMEDIAMETRICS_KEY_PREFIX_AUDIO "flinger"
 #define AMEDIAMETRICS_KEY_AUDIO_POLICY        AMEDIAMETRICS_KEY_PREFIX_AUDIO "policy"
 
+// Error keys
+#define AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR   AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "error"
+#define AMEDIAMETRICS_KEY_AUDIO_RECORD_ERROR  AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD "error"
+
 /*
  * MediaMetrics Properties are unified space for consistency and readability.
  */
@@ -112,9 +119,15 @@
 #define AMEDIAMETRICS_PROP_DEVICETIMENS   "deviceTimeNs"   // int64_t playback/record time
 #define AMEDIAMETRICS_PROP_DEVICEVOLUME   "deviceVolume"   // double - average device volume
 
+#define AMEDIAMETRICS_PROP_DEVICEMAXVOLUMEDURATIONNS "deviceMaxVolumeDurationNs" // int64_t
+#define AMEDIAMETRICS_PROP_DEVICEMAXVOLUME "deviceMaxVolume" // double - maximum device volume
+#define AMEDIAMETRICS_PROP_DEVICEMINVOLUMEDURATIONNS "deviceMinVolumeDurationNs" // int64_t
+#define AMEDIAMETRICS_PROP_DEVICEMINVOLUME "deviceMinVolume" // double - minimum device volume
+
 #define AMEDIAMETRICS_PROP_DIRECTION      "direction"      // string AAudio input or output
 #define AMEDIAMETRICS_PROP_DURATIONNS     "durationNs"     // int64 duration time span
 #define AMEDIAMETRICS_PROP_ENCODING       "encoding"       // string value of format
+
 #define AMEDIAMETRICS_PROP_EVENT          "event#"         // string value (often func name)
 #define AMEDIAMETRICS_PROP_EXECUTIONTIMENS "executionTimeNs"  // time to execute the event
 
@@ -146,7 +159,17 @@
 #define AMEDIAMETRICS_PROP_STARTUPMS      "startupMs"      // double value
 // State is "ACTIVE" or "STOPPED" for AudioRecord
 #define AMEDIAMETRICS_PROP_STATE          "state"          // string
-#define AMEDIAMETRICS_PROP_STATUS         "status"         // int32 status_t
+#define AMEDIAMETRICS_PROP_STATUS         "status#"        // int32 status_t
+                                                           // AAudio uses its own status codes
+// Supplemental information to the status code.
+#define AMEDIAMETRICS_PROP_STATUSSUBCODE  "statusSubCode"  // int32, specific code
+                                                           // used in conjunction with status.
+#define AMEDIAMETRICS_PROP_STATUSMESSAGE  "statusMessage"  // string, supplemental info.
+                                                           // Arbitrary information treated as
+                                                           // informational, may be logcat msg,
+                                                           // or an exception with stack trace.
+                                                           // Treated as "debug" information.
+
 #define AMEDIAMETRICS_PROP_STREAMTYPE     "streamType"     // string (AudioTrack)
 #define AMEDIAMETRICS_PROP_THREADID       "threadId"       // int32 value io handle
 #define AMEDIAMETRICS_PROP_THROTTLEMS     "throttleMs"     // double
@@ -215,4 +238,78 @@
 #define AMEDIAMETRICS_PROP_CALLERNAME_VALUE_TONEGENERATOR "tonegenerator"  // dial tones
 #define AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN       "unknown"        // callerName not set
 
+// MediaMetrics errors are expected to cover the following sources:
+// https://docs.oracle.com/javase/7/docs/api/java/lang/RuntimeException.html
+// https://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/native/libs/binder/include/binder/Status.h;drc=88e25c0861499ee3ab885814dddc097ab234cb7b;l=57
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/base/media/java/android/media/AudioSystem.java;drc=3ac246c43294d7f7012bdcb0ccb7bae1aa695bd4;l=785
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/av/media/libaaudio/include/aaudio/AAudio.h;drc=cfd3a6fa3aaaf712a890dc02452b38ef401083b8;l=120
+// https://abseil.io/docs/cpp/guides/status-codes
+
+// Status errors:
+// An empty status string or "ok" is interpreted as no error.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_OK                "ok"
+
+// Error category: argument
+//   IllegalArgumentException
+//   NullPointerException
+//   BAD_VALUE
+//   absl::INVALID_ARGUMENT
+//   absl::OUT_OF_RANGE
+//   Out of range, out of bounds.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT          "argument"
+
+// Error category: io
+//   IOException
+//   android.os.DeadObjectException, android.os.RemoteException
+//   DEAD_OBJECT
+//   FAILED_TRANSACTION
+//   IO_ERROR
+//   file or ioctl failure
+//   Service, rpc, binder, or socket failure.
+//   Hardware or device failure.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_IO                "io"
+
+// Error category: outOfMemory
+//   OutOfMemoryException
+//   NO_MEMORY
+//   absl::RESOURCE_EXHAUSTED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY            "memory"
+
+// Error category: security
+//   SecurityException
+//   PERMISSION_DENIED
+//   absl::PERMISSION_DENIED
+//   absl::UNAUTHENTICATED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY          "security"
+
+// Error category: state
+//   IllegalStateException
+//   UnsupportedOperationException
+//   INVALID_OPERATION
+//   NO_INIT
+//   absl::NOT_FOUND
+//   absl::ALREADY_EXISTS
+//   absl::FAILED_PRECONDITION
+//   absl::UNAVAILABLE
+//   absl::UNIMPLEMENTED
+//   Functionality not implemented (argument may or may not be correct).
+//   Call unexpected or out of order.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_STATE             "state"
+
+// Error category: timeout
+//   TimeoutException
+//   WOULD_BLOCK
+//   absl::DEADLINE_EXCEEDED
+//   absl::ABORTED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT           "timeout"
+
+// Error category: unknown
+//   Exception (Java specified not listed above, or custom app/service)
+//   UNKNOWN_ERROR
+//   absl::INTERNAL
+//   absl::DATA_LOSS
+//   Catch-all bucket for errors not listed above.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN           "unknown"
+
 #endif // ANDROID_MEDIA_MEDIAMETRICSCONSTANTS_H
diff --git a/media/libmediametrics/include/media/MediaMetricsItem.h b/media/libmediametrics/include/media/MediaMetricsItem.h
index cbf89c6..de56665 100644
--- a/media/libmediametrics/include/media/MediaMetricsItem.h
+++ b/media/libmediametrics/include/media/MediaMetricsItem.h
@@ -106,6 +106,36 @@
 };
 
 /*
+ * Helper for status conversions
+ */
+
+inline constexpr const char* statusToStatusString(status_t status) {
+    switch (status) {
+    case BAD_VALUE:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT;
+    case DEAD_OBJECT:
+    case FAILED_TRANSACTION:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_IO;
+    case NO_MEMORY:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY;
+    case PERMISSION_DENIED:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY;
+    case NO_INIT:
+    case INVALID_OPERATION:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_STATE;
+    case WOULD_BLOCK:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT;
+    default:
+        if (status >= 0) return AMEDIAMETRICS_PROP_STATUS_VALUE_OK; // non-negative values "OK"
+        [[fallthrough]];            // negative values are error.
+    case UNKNOWN_ERROR:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN;
+    }
+}
+
+status_t statusStringToStatus(const char *error);
+
+/*
  * Time printing
  *
  * kPrintFormatLong time string is 19 characters (including null termination).
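The two helpers added here form a round trip between status_t codes and the metric status categories defined in MediaMetricsConstants.h. A minimal usage sketch, assuming both helpers sit in the android::mediametrics namespace alongside Item, as the surrounding declarations suggest:

```cpp
#include <media/MediaMetricsItem.h>

void statusRoundTrip() {
    using namespace android;
    const char* category = mediametrics::statusToStatusString(BAD_VALUE);  // "argument"
    status_t back = mediametrics::statusStringToStatus(category);          // BAD_VALUE
    // Unrecognized strings map to UNKNOWN_ERROR; nullptr, "" and "ok" map to NO_ERROR.
    status_t ok = mediametrics::statusStringToStatus("");                  // NO_ERROR
    (void)back; (void)ok;
}
```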
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index f55678d..a23d1d9 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -65,10 +65,13 @@
         "libstagefright_foundation",
         "libstagefright_httplive",
         "libutils",
+        "packagemanager_aidl-cpp",
     ],
 
     header_libs: [
         "media_plugin_headers",
+        "libmediautils_headers",
+        "libstagefright_rtsp_headers",
     ],
 
     static_libs: [
@@ -76,6 +79,9 @@
         "libstagefright_nuplayer",
         "libstagefright_rtsp",
         "libstagefright_timedtext",
+        // libmediaplayerservice needs this directly; it could also come in transitively
+        // through libstagefright, but keep the explicit dependency here.
+        "libstagefright_webm",
         "framework-permission-aidl-cpp",
     ],
 
@@ -84,13 +90,16 @@
         "framework-permission-aidl-cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/rtsp",
-        "frameworks/av/media/libstagefright/webm",
+    export_header_lib_headers: [
+        "libmediautils_headers",
     ],
 
     local_include_dirs: ["include"],
 
+    export_include_dirs: [
+        ".",
+    ],
+
     cflags: [
         "-Werror",
         "-Wno-error=deprecated-declarations",
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index 05f7365..cd411ea 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -30,7 +30,7 @@
 #include "MediaPlayerFactory.h"
 
 #include "TestPlayerStub.h"
-#include "nuplayer/NuPlayerDriver.h"
+#include <nuplayer/NuPlayerDriver.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index d278a01..3b5e1e2 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -21,6 +21,7 @@
 #define LOG_TAG "MediaPlayerService"
 #include <utils/Log.h>
 
+#include <chrono>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/time.h>
@@ -81,7 +82,7 @@
 #include "MediaPlayerFactory.h"
 
 #include "TestPlayerStub.h"
-#include "nuplayer/NuPlayerDriver.h"
+#include <nuplayer/NuPlayerDriver.h>
 
 
 static const int kDumpLockRetries = 50;
@@ -1830,7 +1831,6 @@
 {
     close();
     free(mAttributes);
-    delete mCallbackData;
 }
 
 //static
@@ -2051,8 +2051,7 @@
 
         mRecycledTrack.clear();
         close_l();
-        delete mCallbackData;
-        mCallbackData = NULL;
+        mCallbackData.clear();
     }
 }
 
@@ -2173,7 +2172,7 @@
     }
 
     sp<AudioTrack> t;
-    CallbackData *newcbd = NULL;
+    sp<CallbackData> newcbd;
 
     // We don't attempt to create a new track if we are recycling an
     // offloaded track. But, if we are recycling a non-offloaded or we
@@ -2183,8 +2182,8 @@
     if (!(reuse && bothOffloaded)) {
         ALOGV("creating new AudioTrack");
 
-        if (mCallback != NULL) {
-            newcbd = new CallbackData(this);
+        if (mCallback != nullptr) {
+            newcbd = sp<CallbackData>::make(wp<AudioOutput>::fromExisting(this));
             t = new AudioTrack(
                     mStreamType,
                     sampleRate,
@@ -2192,7 +2191,6 @@
                     channelMask,
                     frameCount,
                     flags,
-                    CallbackWrapper,
                     newcbd,
                     0,  // notification frames
                     mSessionId,
@@ -2219,8 +2217,7 @@
                     channelMask,
                     frameCount,
                     flags,
-                    NULL, // callback
-                    NULL, // user data
+                    nullptr, // callback
                     0, // notification frames
                     mSessionId,
                     AudioTrack::TRANSFER_DEFAULT,
@@ -2236,8 +2233,7 @@
         t->setCallerName("media");
         if ((t == 0) || (t->initCheck() != NO_ERROR)) {
             ALOGE("Unable to create audio track");
-            delete newcbd;
-            // t goes out of scope, so reference count drops to zero
+            // t, newcbd goes out of scope, so reference count drops to zero
             return NO_INIT;
         } else {
             // successful AudioTrack initialization implies a legacy stream type was generated
@@ -2271,7 +2267,6 @@
             if (mCallbackData != NULL) {
                 mCallbackData->setOutput(this);
             }
-            delete newcbd;
             return updateTrack();
         }
     }
@@ -2377,7 +2372,7 @@
             if (mCallbackData != NULL) {
                 // two alternative approaches
 #if 1
-                CallbackData *callbackData = mCallbackData;
+                sp<CallbackData> callbackData = mCallbackData;
                 mLock.unlock();
                 // proper acquisition sequence
                 callbackData->lock();
@@ -2414,9 +2409,8 @@
             // for example, the next player could be prepared and seeked.
             //
             // Presuming it isn't advisable to force the track over.
-             if (mNextOutput->mTrack == NULL) {
+             if (mNextOutput->mTrack == nullptr) {
                 ALOGD("Recycling track for gapless playback");
-                delete mNextOutput->mCallbackData;
                 mNextOutput->mCallbackData = mCallbackData;
                 mNextOutput->mRecycledTrack = mTrack;
                 mNextOutput->mSampleRateHz = mSampleRateHz;
@@ -2424,11 +2418,11 @@
                 mNextOutput->mFlags = mFlags;
                 mNextOutput->mFrameSize = mFrameSize;
                 close_l();
-                mCallbackData = NULL;  // destruction handled by mNextOutput
+                mCallbackData.clear();
             } else {
                 ALOGW("Ignoring gapless playback because next player has already started");
                 // remove track in case resource needed for future players.
-                if (mCallbackData != NULL) {
+                if (mCallbackData != nullptr) {
                     mCallbackData->endTrackSwitch();  // release lock for callbacks before close.
                 }
                 close_l();
@@ -2467,8 +2461,13 @@
 void MediaPlayerService::AudioOutput::pause()
 {
     ALOGV("pause");
+    // We use pauseAndWait() instead of pause() to ensure tracks ramp to silence before
+    // any flush. We choose a 40 ms timeout to allow one deep-buffer mixer period
+    // to occur. The actual wait is typically 0 - 20 ms.
+    using namespace std::chrono_literals;
+    constexpr auto TIMEOUT_MS = 40ms;
     Mutex::Autolock lock(mLock);
-    if (mTrack != 0) mTrack->pause();
+    if (mTrack != 0) mTrack->pauseAndWait(TIMEOUT_MS);
 }
 
 void MediaPlayerService::AudioOutput::close()
@@ -2650,76 +2649,71 @@
     }
 }
 
-// static
-void MediaPlayerService::AudioOutput::CallbackWrapper(
-        int event, void *cookie, void *info) {
-    //ALOGV("callbackwrapper");
-    CallbackData *data = (CallbackData*)cookie;
-    // lock to ensure we aren't caught in the middle of a track switch.
-    data->lock();
-    AudioOutput *me = data->getOutput();
-    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-    if (me == NULL) {
-        // no output set, likely because the track was scheduled to be reused
-        // by another player, but the format turned out to be incompatible.
-        data->unlock();
-        if (buffer != NULL) {
-            buffer->size = 0;
-        }
+size_t MediaPlayerService::AudioOutput::CallbackData::onMoreData(const AudioTrack::Buffer& buffer) {
+    ALOGD("data callback");
+    lock();
+    sp<AudioOutput> me = getOutput();
+    if (me == nullptr) {
+        unlock();
+        return 0;
+    }
+    size_t actualSize = (*me->mCallback)(
+            me.get(), buffer.raw, buffer.size, me->mCallbackCookie,
+            CB_EVENT_FILL_BUFFER);
+
+    // Log when no data is returned from the callback.
+    // (1) We may have no data (especially with network streaming sources).
+    // (2) We may have reached the EOS and the audio track is not stopped yet.
+    // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
+    // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
+    //
+    // This is a benign busy-wait, with the next data request generated 10 ms or more later;
+    // nevertheless for power reasons, we don't want to see too many of these.
+
+    ALOGV_IF(actualSize == 0 && buffer.size > 0, "callbackwrapper: empty buffer returned");
+    unlock();
+    return actualSize;
+}
+
+void MediaPlayerService::AudioOutput::CallbackData::onStreamEnd() {
+    lock();
+    sp<AudioOutput> me = getOutput();
+    if (me == nullptr) {
+        unlock();
         return;
     }
+    ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
+    (*me->mCallback)(me.get(), NULL /* buffer */, 0 /* size */,
+            me->mCallbackCookie, CB_EVENT_STREAM_END);
+    unlock();
+}
 
-    switch(event) {
-    case AudioTrack::EVENT_MORE_DATA: {
-        size_t actualSize = (*me->mCallback)(
-                me, buffer->raw, buffer->size, me->mCallbackCookie,
-                CB_EVENT_FILL_BUFFER);
 
-        // Log when no data is returned from the callback.
-        // (1) We may have no data (especially with network streaming sources).
-        // (2) We may have reached the EOS and the audio track is not stopped yet.
-        // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
-        // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
-        //
-        // This is a benign busy-wait, with the next data request generated 10 ms or more later;
-        // nevertheless for power reasons, we don't want to see too many of these.
-
-        ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
-
-        buffer->size = actualSize;
-        } break;
-
-    case AudioTrack::EVENT_STREAM_END:
-        // currently only occurs for offloaded callbacks
-        ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
-        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
-                me->mCallbackCookie, CB_EVENT_STREAM_END);
-        break;
-
-    case AudioTrack::EVENT_NEW_IAUDIOTRACK :
-        ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
-        (*me->mCallback)(me,  NULL /* buffer */, 0 /* size */,
-                me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
-        break;
-
-    case AudioTrack::EVENT_UNDERRUN:
-        // This occurs when there is no data available, typically
-        // when there is a failure to supply data to the AudioTrack.  It can also
-        // occur in non-offloaded mode when the audio device comes out of standby.
-        //
-        // If an AudioTrack underruns it outputs silence. Since this happens suddenly
-        // it may sound like an audible pop or glitch.
-        //
-        // The underrun event is sent once per track underrun; the condition is reset
-        // when more data is sent to the AudioTrack.
-        ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
-        break;
-
-    default:
-        ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
+void MediaPlayerService::AudioOutput::CallbackData::onNewIAudioTrack() {
+    lock();
+    sp<AudioOutput> me = getOutput();
+    if (me == nullptr) {
+        unlock();
+        return;
     }
+    ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
+    (*me->mCallback)(me.get(),  NULL /* buffer */, 0 /* size */,
+            me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
+    unlock();
+}
 
-    data->unlock();
+void MediaPlayerService::AudioOutput::CallbackData::onUnderrun() {
+    // This occurs when there is no data available, typically
+    // when there is a failure to supply data to the AudioTrack.  It can also
+    // occur in non-offloaded mode when the audio device comes out of standby.
+    //
+    // If an AudioTrack underruns it outputs silence. Since this happens suddenly
+    // it may sound like an audible pop or glitch.
+    //
+    // The underrun event is sent once per track underrun; the condition is reset
+    // when more data is sent to the AudioTrack.
+    ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
+
 }
 
 audio_session_t MediaPlayerService::AudioOutput::getSessionId() const
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 98091be..86be3fe 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -30,9 +30,11 @@
 #include <media/AidlConversion.h>
 #include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/Metadata.h>
 #include <media/stagefright/foundation/ABase.h>
+#include <mediautils/Synchronization.h>
 #include <android/content/AttributionSourceState.h>
 
 #include <system/audio.h>
@@ -41,7 +43,6 @@
 
 using content::AttributionSourceState;
 
-class AudioTrack;
 struct AVSyncSettings;
 class DeathNotifier;
 class IDataSource;
@@ -161,7 +162,7 @@
         sp<AudioOutput>         mNextOutput;
         AudioCallback           mCallback;
         void *                  mCallbackCookie;
-        CallbackData *          mCallbackData;
+        sp<CallbackData>        mCallbackData;
         audio_stream_type_t     mStreamType;
         audio_attributes_t *    mAttributes;
         float                   mLeftVolume;
@@ -189,15 +190,15 @@
         // CallbackData is what is passed to the AudioTrack as the "user" data.
         // We need to be able to target this to a different Output on the fly,
         // so we can't use the Output itself for this.
-        class CallbackData {
+        class CallbackData : public AudioTrack::IAudioTrackCallback {
             friend AudioOutput;
         public:
-            explicit CallbackData(AudioOutput *cookie) {
+            explicit CallbackData(const wp<AudioOutput>& cookie) {
                 mData = cookie;
                 mSwitching = false;
             }
-            AudioOutput *   getOutput() const { return mData; }
-            void            setOutput(AudioOutput* newcookie) { mData = newcookie; }
+            sp<AudioOutput> getOutput() const { return mData.load().promote(); }
+            void            setOutput(const wp<AudioOutput>& newcookie) { mData.store(newcookie); }
             // lock/unlock are used by the callback before accessing the payload of this object
             void            lock() const { mLock.lock(); }
             void            unlock() const { mLock.unlock(); }
@@ -220,8 +221,13 @@
                 }
                 mSwitching = false;
             }
+        protected:
+            size_t onMoreData(const AudioTrack::Buffer& buffer) override;
+            void onUnderrun() override;
+            void onStreamEnd() override;
+            void onNewIAudioTrack() override;
         private:
-            AudioOutput *   mData;
+            mediautils::atomic_wp<AudioOutput> mData;
             mutable Mutex   mLock; // a recursive mutex might make this unnecessary.
             bool            mSwitching;
             DISALLOW_EVIL_CONSTRUCTORS(CallbackData);
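The CallbackData rework replaces the raw AudioOutput pointer and the C-style CallbackWrapper with an IAudioTrackCallback subclass that holds an atomic weak reference and promotes it under the lock. The same pattern in plain C++, with std::shared_ptr/std::weak_ptr standing in for sp<>/wp<> (a sketch, not the AOSP types):

```cpp
#include <memory>
#include <mutex>

struct Output {
    size_t fill(void* /*buf*/, size_t size) { return size; }  // placeholder data source
};

class TrackCallback {
public:
    explicit TrackCallback(std::weak_ptr<Output> out) : mOut(std::move(out)) {}

    // Retarget the callback to another output (or to nothing) at any time.
    void setOutput(std::weak_ptr<Output> out) {
        std::lock_guard<std::mutex> guard(mLock);
        mOut = std::move(out);
    }

    size_t onMoreData(void* buf, size_t size) {
        std::lock_guard<std::mutex> guard(mLock);   // don't race an in-flight track switch
        if (auto out = mOut.lock()) {               // weak -> strong promotion
            return out->fill(buf, size);
        }
        return 0;                                   // output already gone; request no data
    }

private:
    std::mutex mLock;
    std::weak_ptr<Output> mOut;
};
```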
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index a914006..4aa80be 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -126,8 +126,13 @@
     }
 
     if ((as == AUDIO_SOURCE_FM_TUNER
-            && !(captureAudioOutputAllowed(mAttributionSource)
+                && !(captureAudioOutputAllowed(mAttributionSource)
                     || captureTunerAudioInputAllowed(mAttributionSource)))
+            || (as == AUDIO_SOURCE_REMOTE_SUBMIX
+                && !(captureAudioOutputAllowed(mAttributionSource)
+                    || modifyAudioRoutingAllowed(mAttributionSource)))
+            || (as == AUDIO_SOURCE_ECHO_REFERENCE
+                && !captureAudioOutputAllowed(mAttributionSource))
             || !recordingAllowed(mAttributionSource, (audio_source_t)as)) {
         return PERMISSION_DENIED;
     }
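The expanded gate above is easier to read as a predicate. The sketch below restates the boolean structure only; the real code evaluates captureAudioOutputAllowed(), captureTunerAudioInputAllowed(), modifyAudioRoutingAllowed() and recordingAllowed() against the client's AttributionSource.

```cpp
enum class Source { FmTuner, RemoteSubmix, EchoReference, Other };

// Returns true when recording from the given source should be permitted.
bool recordingPermitted(Source as, bool captureOutput, bool tunerInput,
                        bool modifyRouting, bool recordAllowed) {
    if (as == Source::FmTuner && !(captureOutput || tunerInput)) return false;
    if (as == Source::RemoteSubmix && !(captureOutput || modifyRouting)) return false;
    if (as == Source::EchoReference && !captureOutput) return false;
    return recordAllowed;   // every source additionally requires recordingAllowed()
}
```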
diff --git a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
index 2aabd53..8c86e16 100644
--- a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
@@ -225,10 +225,26 @@
             "media.stagefright.thumbnail.prefer_hw_codecs", false);
     uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
     Vector<AString> matchingCodecs;
+    sp<AMessage> format = new AMessage;
+    status_t err = convertMetaDataToMessage(trackMeta, &format);
+    if (err != OK) {
+        format = NULL;
+    }
+
+    // If decoding thumbnail check decoder supports thumbnail dimensions instead
+    int32_t thumbHeight, thumbWidth;
+    if (thumbnail && format != NULL
+            && trackMeta->findInt32(kKeyThumbnailHeight, &thumbHeight)
+            && trackMeta->findInt32(kKeyThumbnailWidth, &thumbWidth)) {
+        format->setInt32("height", thumbHeight);
+        format->setInt32("width", thumbWidth);
+    }
+
     MediaCodecList::findMatchingCodecs(
             mime,
             false, /* encoder */
             flags,
+            format,
             &matchingCodecs);
 
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
@@ -348,11 +364,18 @@
     bool preferhw = property_get_bool(
             "media.stagefright.thumbnail.prefer_hw_codecs", false);
     uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
+    sp<AMessage> format = new AMessage;
+    status_t err = convertMetaDataToMessage(trackMeta, &format);
+    if (err != OK) {
+        format = NULL;
+    }
+
     Vector<AString> matchingCodecs;
     MediaCodecList::findMatchingCodecs(
             mime,
             false, /* encoder */
             flags,
+            format,
             &matchingCodecs);
 
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index bffd7b3..ea1fdf4 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -16,13 +16,16 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "StagefrightRecorder"
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Trace.h>
 #include <inttypes.h>
 // TODO/workaround: including base logging now as it conflicts with ADebug.h
 // and it must be included first.
 #include <android-base/logging.h>
 #include <utils/Log.h>
 
-#include "WebmWriter.h"
+#include <webm/WebmWriter.h>
+
 #include "StagefrightRecorder.h"
 
 #include <algorithm>
@@ -64,7 +67,7 @@
 
 #include <system/audio.h>
 
-#include "ARTPWriter.h"
+#include <media/stagefright/rtsp/ARTPWriter.h>
 
 namespace android {
 
@@ -1856,6 +1859,7 @@
 // Set up the appropriate MediaSource depending on the chosen option
 status_t StagefrightRecorder::setupMediaSource(
                       sp<MediaSource> *mediaSource) {
+    ATRACE_CALL();
     if (mVideoSource == VIDEO_SOURCE_DEFAULT
             || mVideoSource == VIDEO_SOURCE_CAMERA) {
         sp<CameraSource> cameraSource;
@@ -1936,6 +1940,7 @@
 status_t StagefrightRecorder::setupVideoEncoder(
         const sp<MediaSource> &cameraSource,
         sp<MediaCodecSource> *source) {
+    ATRACE_CALL();
     source->clear();
 
     sp<AMessage> format = new AMessage();
@@ -2114,6 +2119,7 @@
 }
 
 status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
+    ATRACE_CALL();
     status_t status = BAD_VALUE;
     if (OK != (status = checkAudioEncoderCapabilities())) {
         return status;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index d6de47f..d7785da 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -26,6 +26,7 @@
 #include <system/audio.h>
 
 #include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/AString.h>
 #include <android/content/AttributionSourceState.h>
 
 namespace android {
diff --git a/media/libmediaplayerservice/fuzzer/Android.bp b/media/libmediaplayerservice/fuzzer/Android.bp
new file mode 100644
index 0000000..a36f1d6
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/Android.bp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_media_libmediaplayerservice_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: [
+        "frameworks_av_media_libmediaplayerservice_license",
+    ],
+}
+
+cc_defaults {
+    name: "libmediaplayerserviceFuzzer_defaults",
+    static_libs: [
+        "libmediaplayerservice",
+        "liblog",
+    ],
+    shared_libs: [
+        "framework-permission-aidl-cpp",
+        "libbinder",
+        "libcutils",
+        "libmedia",
+        "libstagefright",
+        "libutils",
+        "libstagefright_foundation",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
+
+cc_fuzz {
+    name: "mediarecorder_fuzzer",
+    srcs: [
+        "mediarecorder_fuzzer.cpp",
+    ],
+    defaults: [
+        "libmediaplayerserviceFuzzer_defaults",
+    ],
+    static_libs: [
+        "libstagefright_rtsp",
+        "libbase",
+    ],
+    shared_libs: [
+        "av-types-aidl-cpp",
+        "media_permission-aidl-cpp",
+        "libaudioclient_aidl_conversion",
+        "libandroid_net",
+        "libcamera_client",
+        "libgui",
+        "libmediametrics",
+    ],
+}
+
+cc_fuzz {
+    name: "metadataretriever_fuzzer",
+    srcs: [
+        "metadataretriever_fuzzer.cpp",
+    ],
+    defaults: [
+        "libmediaplayerserviceFuzzer_defaults",
+    ],
+    static_libs: [
+        "libplayerservice_datasource",
+    ],
+    shared_libs: [
+        "libdatasource",
+        "libdrmframework",
+    ],
+}
+
+cc_fuzz {
+    name: "mediaplayer_fuzzer",
+    srcs: [
+        "mediaplayer_fuzzer.cpp",
+    ],
+    defaults: [
+        "libmediaplayerserviceFuzzer_defaults",
+    ],
+    static_libs: [
+        "libplayerservice_datasource",
+        "libstagefright_nuplayer",
+        "libstagefright_rtsp",
+        "libstagefright_timedtext",
+    ],
+    shared_libs: [
+        "android.hardware.media.c2@1.0",
+        "android.hardware.media.omx@1.0",
+        "av-types-aidl-cpp",
+        "libaudioclient_aidl_conversion",
+        "libbase",
+        "libactivitymanager_aidl",
+        "libandroid_net",
+        "libaudioclient",
+        "libcamera_client",
+        "libcodec2_client",
+        "libcrypto",
+        "libdatasource",
+        "libdrmframework",
+        "libgui",
+        "libhidlbase",
+        "liblog",
+        "libmedia_codeclist",
+        "libmedia_omx",
+        "libmediadrm",
+        "libmediametrics",
+        "libmediautils",
+        "libmemunreachable",
+        "libnetd_client",
+        "libpowermanager",
+        "libstagefright_httplive",
+    ],
+}
diff --git a/media/libmediaplayerservice/fuzzer/README.md b/media/libmediaplayerservice/fuzzer/README.md
new file mode 100644
index 0000000..a93c809
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/README.md
@@ -0,0 +1,83 @@
+# Fuzzer for libmediaplayerservice
+## Table of contents
++ [StagefrightMediaRecorder](#StagefrightMediaRecorder)
++ [StagefrightMetadataRetriever](#StagefrightMetadataRetriever)
++ [MediaPlayer](#MediaPlayer)
+
+# <a name="StagefrightMediaRecorder"></a> Fuzzer for StagefrightMediaRecorder
+
+StagefrightMediaRecorder supports the following parameters:
+1. Output Formats (parameter name: `setOutputFormat`)
+2. Audio Encoders (parameter name: `setAudioEncoder`)
+3. Video Encoders (parameter name: `setVideoEncoder`)
+4. Audio Sources (parameter name: `setAudioSource`)
+5. Video Sources (parameter name: `setVideoSource`)
+6. Microphone Direction (parameter name: `setMicrophoneDirection`)
+
+You can find the possible values in the fuzzer's source code.
+
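+Each of these parameters is driven by a value picked from a fixed array with
+`FuzzedDataProvider`. A minimal sketch of that selection pattern (standalone, using
+placeholder integer constants rather than the recorder's real `output_format` and
+`audio_encoder` enums):
+```
+#include <cstddef>
+#include <cstdint>
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+// Placeholder stand-ins for the recorder's real enum values.
+constexpr int kOutputFormats[] = {0 /* e.g. THREE_GPP */, 1 /* e.g. MPEG_4 */};
+constexpr int kAudioEncoders[] = {0 /* e.g. AMR_NB */, 1 /* e.g. AAC */};
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp(data, size);
+    // One value per parameter per run, chosen by the fuzz input; the real fuzzer
+    // forwards the chosen values to setOutputFormat(), setAudioEncoder(), and so on.
+    int outputFormat = fdp.PickValueInArray(kOutputFormats);
+    int audioEncoder = fdp.PickValueInArray(kAudioEncoders);
+    (void)outputFormat;
+    (void)audioEncoder;
+    return 0;
+}
+```
+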
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) mediarecorder_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/mediarecorder_fuzzer/mediarecorder_fuzzer
+```
+
+# <a name="StagefrightMetadataRetriever"></a> Fuzzer for StagefrightMetadataRetriever
+
+StagefrightMetadataRetriever supports the following data sources:
+1. URL (parameter name: `url`)
+2. File descriptor (parameter name: `fd`)
+3. DataSource (parameter name: `source`)
+
+| Parameter | Valid Values | Configured Value |
+|-----------|--------------|------------------|
+| `url` | URL of the data source | Value obtained from FuzzedDataProvider |
+| `fd` | File descriptor of the input file | Value obtained from FuzzedDataProvider |
+| `source` | DataSource object | Data obtained from FuzzedDataProvider |
+
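+For the `fd` and `source` cases the fuzzer first copies the raw input into an anonymous
+in-memory file and hands over the descriptor. A minimal sketch of that helper (it relies
+on the Linux-specific `memfd_create`, independent of the retriever API itself):
+```
+#include <cstddef>
+#include <cstdint>
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+// Writes the fuzz input into a memfd so the descriptor can be passed to
+// setDataSource(fd, 0, size) or wrapped in a FileSource.
+static int MakeInputFd(const uint8_t *data, size_t size) {
+    int fd = memfd_create("InputFile", MFD_ALLOW_SEALING);
+    if (fd < 0) {
+        return -1;
+    }
+    if (write(fd, data, size) < 0) {
+        close(fd);
+        return -1;
+    }
+    return fd;
+}
+```
+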
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) metadataretriever_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/metadataretriever_fuzzer/metadataretriever_fuzzer
+```
+
+# <a name="MediaPlayer"></a> Fuzzer for MediaPlayer
+
+MediaPlayerService supports the following data sources:
+1. URL (parameter name: `url`)
+2. File descriptor (parameter name: `fd`)
+3. IStreamSource (parameter name: `source`)
+4. IDataSource (parameter name: `source`)
+5. RTP parameters (parameter name: `rtpParams`)
+
+MediaPlayerService supports the following parameters:
+1. Audio sessions (parameter name: `audioSessionId`)
+2. Audio stretch modes (parameter name: `mStretchMode`)
+3. Audio fallback modes (parameter name: `mFallbackMode`)
+4. Media parameter keys (parameter name: `key`)
+5. Audio stream types (parameter name: `streamType`)
+6. Media event types (parameter name: `msg`)
+7. Media info types (parameter name: `ext1`)
+
+You can find the possible values in the fuzzer's source code.
+
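+For the `url` case the fuzzer builds a URI whose payload is the base64-encoded fuzz input.
+A minimal sketch of that construction, assuming the libstagefright_foundation base64
+helpers already used by the fuzzer source:
+```
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
+
+// Builds the kind of URI handed to setDataSource(httpService, url, headers):
+// a scheme prefix followed by the base64-encoded fuzz payload.
+static std::string MakeDataUri(const std::vector<uint8_t>& payload) {
+    android::AString encoded;
+    android::encodeBase64(payload.data(), payload.size(), &encoded);
+    std::string uri("data:");
+    uri += ";base64,";
+    uri += encoded.c_str();
+    return uri;
+}
+```
+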
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) mediaplayer_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/mediaplayer_fuzzer/mediaplayer_fuzzer
+```
diff --git a/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
new file mode 100644
index 0000000..7799f44
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <MediaPlayerService.h>
+#include <camera/Camera.h>
+#include <datasource/FileSource.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/IMediaCodecList.h>
+#include <media/IMediaHTTPService.h>
+#include <media/IMediaPlayer.h>
+#include <media/IMediaRecorder.h>
+#include <media/IRemoteDisplay.h>
+#include <media/IRemoteDisplayClient.h>
+#include <media/stagefright/RemoteDataSource.h>
+#include <media/stagefright/foundation/base64.h>
+#include <thread>
+#include "fuzzer/FuzzedDataProvider.h"
+
+constexpr int32_t kUuidSize = 16;
+constexpr int32_t kMaxSleepTimeInMs = 100;
+constexpr int32_t kMinSleepTimeInMs = 0;
+constexpr int32_t kPlayCountMin = 1;
+constexpr int32_t kPlayCountMax = 10;
+constexpr int32_t kMaxDimension = 8192;
+constexpr int32_t kMinDimension = 0;
+
+using namespace std;
+using namespace android;
+
+constexpr audio_session_t kSupportedAudioSessions[] = {
+    AUDIO_SESSION_DEVICE, AUDIO_SESSION_OUTPUT_STAGE, AUDIO_SESSION_OUTPUT_MIX};
+
+constexpr audio_timestretch_stretch_mode_t kAudioStretchModes[] = {
+    AUDIO_TIMESTRETCH_STRETCH_DEFAULT, AUDIO_TIMESTRETCH_STRETCH_VOICE};
+
+constexpr audio_timestretch_fallback_mode_t kAudioFallbackModes[] = {
+    AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT, AUDIO_TIMESTRETCH_FALLBACK_DEFAULT,
+    AUDIO_TIMESTRETCH_FALLBACK_MUTE, AUDIO_TIMESTRETCH_FALLBACK_FAIL};
+
+constexpr media_parameter_keys kMediaParamKeys[] = {
+    KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS, KEY_PARAMETER_AUDIO_CHANNEL_COUNT,
+    KEY_PARAMETER_PLAYBACK_RATE_PERMILLE, KEY_PARAMETER_AUDIO_ATTRIBUTES,
+    KEY_PARAMETER_RTP_ATTRIBUTES};
+
+constexpr audio_stream_type_t kAudioStreamTypes[] = {
+    AUDIO_STREAM_DEFAULT,      AUDIO_STREAM_VOICE_CALL,    AUDIO_STREAM_SYSTEM,
+    AUDIO_STREAM_RING,         AUDIO_STREAM_MUSIC,         AUDIO_STREAM_ALARM,
+    AUDIO_STREAM_NOTIFICATION, AUDIO_STREAM_BLUETOOTH_SCO, AUDIO_STREAM_ENFORCED_AUDIBLE,
+    AUDIO_STREAM_DTMF,         AUDIO_STREAM_TTS,           AUDIO_STREAM_ASSISTANT};
+
+constexpr media_event_type kMediaEventTypes[] = {MEDIA_NOP,
+                                                 MEDIA_PREPARED,
+                                                 MEDIA_PLAYBACK_COMPLETE,
+                                                 MEDIA_BUFFERING_UPDATE,
+                                                 MEDIA_SEEK_COMPLETE,
+                                                 MEDIA_SET_VIDEO_SIZE,
+                                                 MEDIA_STARTED,
+                                                 MEDIA_PAUSED,
+                                                 MEDIA_STOPPED,
+                                                 MEDIA_SKIPPED,
+                                                 MEDIA_NOTIFY_TIME,
+                                                 MEDIA_TIMED_TEXT,
+                                                 MEDIA_ERROR,
+                                                 MEDIA_INFO,
+                                                 MEDIA_SUBTITLE_DATA,
+                                                 MEDIA_META_DATA,
+                                                 MEDIA_DRM_INFO,
+                                                 MEDIA_TIME_DISCONTINUITY,
+                                                 MEDIA_IMS_RX_NOTICE,
+                                                 MEDIA_AUDIO_ROUTING_CHANGED};
+
+constexpr media_info_type kMediaInfoTypes[] = {
+    MEDIA_INFO_UNKNOWN,           MEDIA_INFO_STARTED_AS_NEXT,
+    MEDIA_INFO_RENDERING_START,   MEDIA_INFO_VIDEO_TRACK_LAGGING,
+    MEDIA_INFO_BUFFERING_START,   MEDIA_INFO_BUFFERING_END,
+    MEDIA_INFO_NETWORK_BANDWIDTH, MEDIA_INFO_BAD_INTERLEAVING,
+    MEDIA_INFO_NOT_SEEKABLE,      MEDIA_INFO_METADATA_UPDATE,
+    MEDIA_INFO_PLAY_AUDIO_ERROR,  MEDIA_INFO_PLAY_VIDEO_ERROR,
+    MEDIA_INFO_TIMED_TEXT_ERROR};
+
+const char *kUrlPrefix[] = {"data:", "http://", "https://", "rtsp://", "content://", "test://"};
+
+struct TestStreamSource : public IStreamSource {
+    void setListener(const sp<IStreamListener> & /*listener*/) override{};
+    void setBuffers(const Vector<sp<IMemory>> & /*buffers*/) override{};
+    void onBufferAvailable(size_t /*index*/) override{};
+    IBinder *onAsBinder() { return nullptr; };
+};
+
+class BinderDeathNotifier : public IBinder::DeathRecipient {
+   public:
+    void binderDied(const wp<IBinder> &) { abort(); }
+};
+
+class MediaPlayerServiceFuzzer {
+   public:
+    MediaPlayerServiceFuzzer(const uint8_t *data, size_t size)
+        : mFdp(data, size), mDataSourceFd(memfd_create("InputFile", MFD_ALLOW_SEALING)){};
+    ~MediaPlayerServiceFuzzer() { close(mDataSourceFd); };
+    void process(const uint8_t *data, size_t size);
+
+   private:
+    bool setDataSource(const uint8_t *data, size_t size);
+    void invokeMediaPlayer();
+    FuzzedDataProvider mFdp;
+    sp<IMediaPlayer> mMediaPlayer = nullptr;
+    sp<IMediaPlayerClient> mMediaPlayerClient = nullptr;
+    const int32_t mDataSourceFd;
+};
+
+bool MediaPlayerServiceFuzzer::setDataSource(const uint8_t *data, size_t size) {
+    status_t status = -1;
+    enum DataSourceType {http, fd, stream, file, socket, kMaxValue = socket};
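+    // Exercise one of the supported setDataSource() paths, chosen from the fuzz input.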
+    switch (mFdp.ConsumeEnum<DataSourceType>()) {
+        case http: {
+            KeyedVector<String8, String8> headers;
+            headers.add(String8(mFdp.ConsumeRandomLengthString().c_str()),
+                        String8(mFdp.ConsumeRandomLengthString().c_str()));
+
+            uint32_t dataBlobSize = mFdp.ConsumeIntegralInRange<uint16_t>(0, size);
+            vector<uint8_t> uriSuffix = mFdp.ConsumeBytes<uint8_t>(dataBlobSize);
+
+            string uri(mFdp.PickValueInArray(kUrlPrefix));
+            uri += ";base64,";
+            AString out;
+            encodeBase64(uriSuffix.data(), uriSuffix.size(), &out);
+            uri += out.c_str();
+            status = mMediaPlayer->setDataSource(nullptr /*httpService*/, uri.c_str(), &headers);
+            break;
+        }
+        case fd: {
+            write(mDataSourceFd, data, size);
+
+            status = mMediaPlayer->setDataSource(mDataSourceFd, 0, size);
+            break;
+        }
+        case stream: {
+            sp<IStreamSource> streamSource = sp<TestStreamSource>::make();
+            status = mMediaPlayer->setDataSource(streamSource);
+            break;
+        }
+        case file: {
+            write(mDataSourceFd, data, size);
+
+            sp<DataSource> dataSource = new FileSource(dup(mDataSourceFd), 0, size);
+            sp<IDataSource> iDataSource = RemoteDataSource::wrap(dataSource);
+            if (!iDataSource) {
+                return false;
+            }
+            status = mMediaPlayer->setDataSource(iDataSource);
+            break;
+        }
+        case socket: {
+            String8 rtpParams = String8(mFdp.ConsumeRandomLengthString().c_str());
+            struct sockaddr_in endpoint;
+            endpoint.sin_family = mFdp.ConsumeIntegral<unsigned short>();
+            endpoint.sin_port = mFdp.ConsumeIntegral<uint16_t>();
+            mMediaPlayer->setRetransmitEndpoint(&endpoint);
+            status = mMediaPlayer->setDataSource(rtpParams);
+            break;
+        }
+    }
+
+    if (status != 0) {
+        return false;
+    }
+    return true;
+}
+
+void MediaPlayerServiceFuzzer::invokeMediaPlayer() {
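+    // Drive the IMediaPlayer API surface with fuzzed arguments: surface setup, a
+    // start/pause/stop loop, playback/sync settings, parcel-based calls, DRM and routing.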
+    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+    String8 name = String8(mFdp.ConsumeRandomLengthString().c_str());
+    uint32_t width = mFdp.ConsumeIntegralInRange<uint32_t>(kMinDimension, kMaxDimension);
+    uint32_t height = mFdp.ConsumeIntegralInRange<uint32_t>(kMinDimension, kMaxDimension);
+    uint32_t pixelFormat = mFdp.ConsumeIntegral<int32_t>();
+    uint32_t flags = mFdp.ConsumeIntegral<int32_t>();
+    sp<SurfaceControl> surfaceControl =
+        composerClient->createSurface(name, width, height, pixelFormat, flags);
+    if (surfaceControl) {
+        sp<Surface> surface = surfaceControl->getSurface();
+        mMediaPlayer->setVideoSurfaceTexture(surface->getIGraphicBufferProducer());
+    }
+
+    BufferingSettings buffering;
+    buffering.mInitialMarkMs = mFdp.ConsumeIntegral<int32_t>();
+    buffering.mResumePlaybackMarkMs = mFdp.ConsumeIntegral<int32_t>();
+    mMediaPlayer->setBufferingSettings(buffering);
+    mMediaPlayer->getBufferingSettings(&buffering);
+
+    mMediaPlayer->prepareAsync();
+    size_t playCount = mFdp.ConsumeIntegralInRange<size_t>(kPlayCountMin, kPlayCountMax);
+    for (size_t Idx = 0; Idx < playCount; ++Idx) {
+        mMediaPlayer->start();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mMediaPlayer->pause();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mMediaPlayer->stop();
+    }
+    bool state;
+    mMediaPlayer->isPlaying(&state);
+
+    AudioPlaybackRate rate;
+    rate.mSpeed = mFdp.ConsumeFloatingPoint<float>();
+    rate.mPitch = mFdp.ConsumeFloatingPoint<float>();
+    rate.mStretchMode = mFdp.PickValueInArray(kAudioStretchModes);
+    rate.mFallbackMode = mFdp.PickValueInArray(kAudioFallbackModes);
+    mMediaPlayer->setPlaybackSettings(rate);
+    mMediaPlayer->getPlaybackSettings(&rate);
+
+    AVSyncSettings *avSyncSettings = new AVSyncSettings();
+    float videoFpsHint = mFdp.ConsumeFloatingPoint<float>();
+    mMediaPlayer->setSyncSettings(*avSyncSettings, videoFpsHint);
+    mMediaPlayer->getSyncSettings(avSyncSettings, &videoFpsHint);
+    delete avSyncSettings;
+
+    mMediaPlayer->seekTo(mFdp.ConsumeIntegral<int32_t>());
+
+    int32_t msec;
+    mMediaPlayer->getCurrentPosition(&msec);
+    mMediaPlayer->getDuration(&msec);
+    mMediaPlayer->reset();
+
+    mMediaPlayer->notifyAt(mFdp.ConsumeIntegral<int64_t>());
+
+    mMediaPlayer->setAudioStreamType(mFdp.PickValueInArray(kAudioStreamTypes));
+    mMediaPlayer->setLooping(mFdp.ConsumeIntegral<int32_t>());
+    float left = mFdp.ConsumeFloatingPoint<float>();
+    float right = mFdp.ConsumeFloatingPoint<float>();
+    mMediaPlayer->setVolume(left, right);
+
+    Parcel request, reply;
+    request.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    request.setDataPosition(0);
+    mMediaPlayer->invoke(request, &reply);
+
+    Parcel filter;
+    filter.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    filter.setDataPosition(0);
+    mMediaPlayer->setMetadataFilter(filter);
+
+    bool updateOnly = mFdp.ConsumeBool();
+    bool applyFilter = mFdp.ConsumeBool();
+    mMediaPlayer->getMetadata(updateOnly, applyFilter, &reply);
+    mMediaPlayer->setAuxEffectSendLevel(mFdp.ConsumeFloatingPoint<float>());
+    mMediaPlayer->attachAuxEffect(mFdp.ConsumeIntegral<int32_t>());
+
+    int32_t key = mFdp.PickValueInArray(kMediaParamKeys);
+    request.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    request.setDataPosition(0);
+    mMediaPlayer->setParameter(key, request);
+    key = mFdp.PickValueInArray(kMediaParamKeys);
+    mMediaPlayer->getParameter(key, &reply);
+
+    struct sockaddr_in endpoint;
+    mMediaPlayer->getRetransmitEndpoint(&endpoint);
+
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    const sp<IMediaPlayerService> mpService(IMediaDeathNotifier::getMediaPlayerService());
+    sp<IMediaPlayer> mNextMediaPlayer = mpService->create(
+        mMediaPlayerClient, mFdp.PickValueInArray(kSupportedAudioSessions), attributionSource);
+    mMediaPlayer->setNextPlayer(mNextMediaPlayer);
+
+    const sp<media::VolumeShaper::Configuration> configuration =
+        sp<media::VolumeShaper::Configuration>::make();
+    const sp<media::VolumeShaper::Operation> operation = sp<media::VolumeShaper::Operation>::make();
+    mMediaPlayer->applyVolumeShaper(configuration, operation);
+
+    mMediaPlayer->getVolumeShaperState(mFdp.ConsumeIntegral<int32_t>());
+    uint8_t uuid[kUuidSize];
+    for (int32_t index = 0; index < kUuidSize; ++index) {
+        uuid[index] = mFdp.ConsumeIntegral<uint8_t>();
+    }
+    Vector<uint8_t> drmSessionId;
+    drmSessionId.push_back(mFdp.ConsumeIntegral<uint8_t>());
+    mMediaPlayer->prepareDrm(uuid, drmSessionId);
+    mMediaPlayer->releaseDrm();
+
+    audio_port_handle_t deviceId = mFdp.ConsumeIntegral<int32_t>();
+    mMediaPlayer->setOutputDevice(deviceId);
+    mMediaPlayer->getRoutedDeviceId(&deviceId);
+
+    mMediaPlayer->enableAudioDeviceCallback(mFdp.ConsumeBool());
+
+    sp<MediaPlayer> mediaPlayer = (MediaPlayer *)mMediaPlayer.get();
+
+    int32_t msg = mFdp.PickValueInArray(kMediaEventTypes);
+    int32_t ext1 = mFdp.PickValueInArray(kMediaInfoTypes);
+    int32_t ext2 = mFdp.ConsumeIntegral<int32_t>();
+    Parcel obj;
+    obj.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    obj.setDataPosition(0);
+    mediaPlayer->notify(msg, ext1, ext2, &obj);
+
+    int32_t mediaPlayerDumpFd = memfd_create("OutputDumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mediaPlayer->dump(mediaPlayerDumpFd, args);
+    close(mediaPlayerDumpFd);
+
+    mMediaPlayer->disconnect();
+}
+
+void MediaPlayerServiceFuzzer::process(const uint8_t *data, size_t size) {
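+    // Bring up MediaPlayerService, poke the service-level APIs, then create the player
+    // that setDataSource() and invokeMediaPlayer() exercise.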
+    MediaPlayerService::instantiate();
+
+    const sp<IMediaPlayerService> mpService(IMediaDeathNotifier::getMediaPlayerService());
+    if (!mpService) {
+        return;
+    }
+
+    sp<IMediaCodecList> mediaCodecList = mpService->getCodecList();
+
+    sp<IRemoteDisplayClient> remoteDisplayClient;
+    sp<IRemoteDisplay> remoteDisplay = mpService->listenForRemoteDisplay(
+        String16(mFdp.ConsumeRandomLengthString().c_str()) /*opPackageName*/, remoteDisplayClient,
+        String8(mFdp.ConsumeRandomLengthString().c_str()) /*iface*/);
+
+    mpService->addBatteryData(mFdp.ConsumeIntegral<uint32_t>());
+    Parcel reply;
+    mpService->pullBatteryData(&reply);
+
+    sp<MediaPlayerService> mediaPlayerService = (MediaPlayerService *)mpService.get();
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    mMediaPlayer = mediaPlayerService->create(
+        mMediaPlayerClient, mFdp.PickValueInArray(kSupportedAudioSessions), attributionSource);
+
+    int32_t mediaPlayerServiceDumpFd = memfd_create("OutputDumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mediaPlayerService->dump(mediaPlayerServiceDumpFd, args);
+    close(mediaPlayerServiceDumpFd);
+
+    if (!mMediaPlayer) {
+        return;
+    }
+
+    if (setDataSource(data, size)) {
+        invokeMediaPlayer();
+    }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    MediaPlayerServiceFuzzer mpsFuzzer(data, size);
+    ProcessState::self()->startThreadPool();
+    mpsFuzzer.process(data, size);
+    return 0;
+};
diff --git a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
new file mode 100644
index 0000000..b0040fe
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <media/stagefright/foundation/AString.h>
+#include "fuzzer/FuzzedDataProvider.h"
+
+#include <StagefrightRecorder.h>
+#include <camera/Camera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/stagefright/PersistentSurface.h>
+#include <thread>
+
+using namespace std;
+using namespace android;
+using namespace android::hardware;
+
+constexpr video_source kSupportedVideoSources[] = {VIDEO_SOURCE_DEFAULT, VIDEO_SOURCE_CAMERA,
+                                                   VIDEO_SOURCE_SURFACE};
+
+constexpr audio_source_t kSupportedAudioSources[] = {
+    AUDIO_SOURCE_DEFAULT,           AUDIO_SOURCE_MIC,
+    AUDIO_SOURCE_VOICE_UPLINK,      AUDIO_SOURCE_VOICE_DOWNLINK,
+    AUDIO_SOURCE_VOICE_CALL,        AUDIO_SOURCE_CAMCORDER,
+    AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_SOURCE_VOICE_COMMUNICATION,
+    AUDIO_SOURCE_REMOTE_SUBMIX,     AUDIO_SOURCE_UNPROCESSED,
+    AUDIO_SOURCE_VOICE_PERFORMANCE, AUDIO_SOURCE_ECHO_REFERENCE,
+    AUDIO_SOURCE_FM_TUNER,          AUDIO_SOURCE_HOTWORD};
+
+constexpr audio_microphone_direction_t kSupportedMicrophoneDirections[] = {
+    MIC_DIRECTION_UNSPECIFIED, MIC_DIRECTION_FRONT, MIC_DIRECTION_BACK, MIC_DIRECTION_EXTERNAL};
+
+struct RecordingConfig {
+    output_format outputFormat;
+    audio_encoder audioEncoder;
+    video_encoder videoEncoder;
+};
+
+const struct RecordingConfig kRecordingConfigList[] = {
+    {OUTPUT_FORMAT_AMR_NB, AUDIO_ENCODER_AMR_NB, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AMR_WB, AUDIO_ENCODER_AMR_WB, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_HE_AAC, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC_ELD, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_OGG, AUDIO_ENCODER_OPUS, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_RTP_AVP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_MPEG2TS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
+    {OUTPUT_FORMAT_WEBM, AUDIO_ENCODER_VORBIS, VIDEO_ENCODER_VP8},
+    {OUTPUT_FORMAT_THREE_GPP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_HEVC}};
+
+const string kParametersList[] = {"max-duration",
+                                  "max-filesize",
+                                  "interleave-duration-us",
+                                  "param-movie-time-scale",
+                                  "param-geotag-longitude",
+                                  "param-geotag-latitude",
+                                  "param-track-time-status",
+                                  "audio-param-sampling-rate",
+                                  "audio-param-encoding-bitrate",
+                                  "audio-param-number-of-channels",
+                                  "audio-param-time-scale",
+                                  "video-param-rotation-angle-degrees",
+                                  "video-param-encoding-bitrate",
+                                  "video-param-bitrate-mode",
+                                  "video-param-i-frames-interval",
+                                  "video-param-encoder-profile",
+                                  "video-param-encoder-level",
+                                  "video-param-camera-id",
+                                  "video-param-time-scale",
+                                  "param-use-64bit-offset",
+                                  "time-lapse-enable",
+                                  "time-lapse-fps",
+                                  "rtp-param-local-ip",
+                                  "rtp-param-local-port",
+                                  "rtp-param-remote-port",
+                                  "rtp-param-self-id",
+                                  "rtp-param-opponent-id",
+                                  "rtp-param-payload-type",
+                                  "rtp-param-ext-cvo-extmap",
+                                  "rtp-param-ext-cvo-degrees",
+                                  "video-param-request-i-frame",
+                                  "rtp-param-set-socket-dscp",
+                                  "rtp-param-set-socket-network"};
+
+constexpr int32_t kMaxSleepTimeInMs = 100;
+constexpr int32_t kMinSleepTimeInMs = 0;
+constexpr int32_t kMinVideoSize = 2;
+constexpr int32_t kMaxVideoSize = 8192;
+constexpr int32_t kNumRecordMin = 1;
+constexpr int32_t kNumRecordMax = 10;
+
+class TestAudioDeviceCallback : public AudioSystem::AudioDeviceCallback {
+   public:
+    virtual ~TestAudioDeviceCallback() = default;
+
+    void onAudioDeviceUpdate(audio_io_handle_t /*audioIo*/,
+                             audio_port_handle_t /*deviceId*/) override{};
+};
+
+class TestCamera : public ICamera {
+   public:
+    virtual ~TestCamera() = default;
+
+    binder::Status disconnect() override { return binder::Status::ok(); };
+    status_t connect(const sp<ICameraClient> & /*client*/) override { return 0; };
+    status_t lock() override { return 0; };
+    status_t unlock() override { return 0; };
+    status_t setPreviewTarget(const sp<IGraphicBufferProducer> & /*bufferProducer*/) override {
+        return 0;
+    };
+    void setPreviewCallbackFlag(int /*flag*/) override{};
+    status_t setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer> & /*callbackProducer*/) override {
+        return 0;
+    };
+    status_t startPreview() override { return 0; };
+    void stopPreview() override{};
+    bool previewEnabled() override { return true; };
+    status_t startRecording() override { return 0; };
+    void stopRecording() override{};
+    bool recordingEnabled() override { return true; };
+    void releaseRecordingFrame(const sp<IMemory> & /*mem*/) override{};
+    void releaseRecordingFrameHandle(native_handle_t * /*handle*/) override{};
+    void releaseRecordingFrameHandleBatch(const vector<native_handle_t *> & /*handles*/) override{};
+    status_t autoFocus() override { return 0; };
+    status_t cancelAutoFocus() override { return 0; };
+    status_t takePicture(int /*msgType*/) override { return 0; };
+    status_t setParameters(const String8 & /*params*/) override { return 0; };
+    String8 getParameters() const override { return String8(); };
+    status_t sendCommand(int32_t /*cmd*/, int32_t /*arg1*/, int32_t /*arg2*/) override {
+        return 0;
+    };
+    status_t setVideoBufferMode(int32_t /*videoBufferMode*/) override { return 0; };
+    status_t setVideoTarget(const sp<IGraphicBufferProducer> & /*bufferProducer*/) override {
+        return 0;
+    };
+    status_t setAudioRestriction(int32_t /*mode*/) override { return 0; };
+    int32_t getGlobalAudioRestriction() override { return 0; };
+    IBinder *onAsBinder() override { return reinterpret_cast<IBinder *>(this); };
+};
+
+class TestMediaRecorderClient : public IMediaRecorderClient {
+   public:
+    virtual ~TestMediaRecorderClient() = default;
+
+    void notify(int /*msg*/, int /*ext1*/, int /*ext2*/) override{};
+    IBinder *onAsBinder() override { return reinterpret_cast<IBinder *>(this); };
+};
+
+class MediaRecorderClientFuzzer {
+   public:
+    MediaRecorderClientFuzzer(const uint8_t *data, size_t size);
+    ~MediaRecorderClientFuzzer() { close(mMediaRecorderOutputFd); }
+    void process();
+
+   private:
+    void setConfig();
+    void getConfig();
+    void dumpInfo();
+
+    FuzzedDataProvider mFdp;
+    unique_ptr<MediaRecorderBase> mStfRecorder = nullptr;
+    SurfaceComposerClient mComposerClient;
+    sp<SurfaceControl> mSurfaceControl = nullptr;
+    sp<Surface> mSurface = nullptr;
+    const int32_t mMediaRecorderOutputFd;
+};
+
+void MediaRecorderClientFuzzer::getConfig() {
+    int32_t max;
+    mStfRecorder->getMaxAmplitude(&max);
+
+    int32_t deviceId = mFdp.ConsumeIntegral<int32_t>();
+    mStfRecorder->setInputDevice(deviceId);
+    mStfRecorder->getRoutedDeviceId(&deviceId);
+
+    vector<android::media::MicrophoneInfo> activeMicrophones{};
+    mStfRecorder->getActiveMicrophones(&activeMicrophones);
+
+    int32_t portId;
+    mStfRecorder->getPortId(&portId);
+
+    uint64_t bytes;
+    mStfRecorder->getRtpDataUsage(&bytes);
+
+    Parcel parcel;
+    mStfRecorder->getMetrics(&parcel);
+
+    sp<IGraphicBufferProducer> buffer = mStfRecorder->querySurfaceMediaSource();
+}
+
+void MediaRecorderClientFuzzer::dumpInfo() {
+    int32_t dumpFd = memfd_create("DumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mStfRecorder->dump(dumpFd, args);
+    close(dumpFd);
+}
+
+void MediaRecorderClientFuzzer::setConfig() {
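+    // Configure the recorder with fuzz-chosen sources, encoders, video size and
+    // "key=value" parameters before init()/prepare() run in process().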
+    mStfRecorder->setOutputFile(mMediaRecorderOutputFd);
+    mStfRecorder->setAudioSource(mFdp.PickValueInArray(kSupportedAudioSources));
+    mStfRecorder->setVideoSource(mFdp.PickValueInArray(kSupportedVideoSources));
+    mStfRecorder->setPreferredMicrophoneDirection(
+        mFdp.PickValueInArray(kSupportedMicrophoneDirections));
+    mStfRecorder->setPrivacySensitive(mFdp.ConsumeBool());
+    bool isPrivacySensitive;
+    mStfRecorder->isPrivacySensitive(&isPrivacySensitive);
+    mStfRecorder->setVideoSize(mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize),
+                               mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize));
+    mStfRecorder->setVideoFrameRate(mFdp.ConsumeIntegral<int32_t>());
+    mStfRecorder->enableAudioDeviceCallback(mFdp.ConsumeBool());
+    mStfRecorder->setPreferredMicrophoneFieldDimension(mFdp.ConsumeFloatingPoint<float>());
+    mStfRecorder->setClientName(String16(mFdp.ConsumeRandomLengthString().c_str()));
+
+    int32_t Idx = mFdp.ConsumeIntegralInRange<int32_t>(0, size(kRecordingConfigList) - 1);
+    mStfRecorder->setOutputFormat(kRecordingConfigList[Idx].outputFormat);
+    mStfRecorder->setAudioEncoder(kRecordingConfigList[Idx].audioEncoder);
+    mStfRecorder->setVideoEncoder(kRecordingConfigList[Idx].videoEncoder);
+
+    int32_t nextOutputFd = memfd_create("NextOutputFile", MFD_ALLOW_SEALING);
+    mStfRecorder->setNextOutputFile(nextOutputFd);
+    close(nextOutputFd);
+
+    for (Idx = 0; Idx < size(kParametersList); ++Idx) {
+        if (mFdp.ConsumeBool()) {
+            int32_t value = mFdp.ConsumeIntegral<int32_t>();
+            mStfRecorder->setParameters(
+                String8((kParametersList[Idx] + "=" + to_string(value)).c_str()));
+        }
+    }
+}
+
+MediaRecorderClientFuzzer::MediaRecorderClientFuzzer(const uint8_t *data, size_t size)
+    : mFdp(data, size), mMediaRecorderOutputFd(memfd_create("OutputFile", MFD_ALLOW_SEALING)) {
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    mStfRecorder = make_unique<StagefrightRecorder>(attributionSource);
+
+    mSurfaceControl = mComposerClient.createSurface(
+        String8(mFdp.ConsumeRandomLengthString().c_str()), mFdp.ConsumeIntegral<uint32_t>(),
+        mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<int32_t>(),
+        mFdp.ConsumeIntegral<int32_t>());
+    if (mSurfaceControl) {
+        mSurface = mSurfaceControl->getSurface();
+        mStfRecorder->setPreviewSurface(mSurface->getIGraphicBufferProducer());
+    }
+
+    sp<TestMediaRecorderClient> listener = sp<TestMediaRecorderClient>::make();
+    mStfRecorder->setListener(listener);
+
+    sp<TestCamera> testCamera = sp<TestCamera>::make();
+    sp<Camera> camera = Camera::create(testCamera);
+    mStfRecorder->setCamera(camera->remote(), camera->getRecordingProxy());
+
+    sp<PersistentSurface> persistentSurface = sp<PersistentSurface>::make();
+    mStfRecorder->setInputSurface(persistentSurface);
+
+    sp<TestAudioDeviceCallback> callback = sp<TestAudioDeviceCallback>::make();
+    mStfRecorder->setAudioDeviceCallback(callback);
+}
+
+void MediaRecorderClientFuzzer::process() {
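+    // Run a fuzz-chosen number of start/pause/resume/stop cycles with random sleeps in between.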
+    setConfig();
+
+    mStfRecorder->init();
+    mStfRecorder->prepare();
+    size_t numRecord = mFdp.ConsumeIntegralInRange<size_t>(kNumRecordMin, kNumRecordMax);
+    for (size_t Idx = 0; Idx < numRecord; ++Idx) {
+        mStfRecorder->start();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->pause();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->resume();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->stop();
+    }
+    dumpInfo();
+    getConfig();
+
+    mStfRecorder->close();
+    mStfRecorder->reset();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    MediaRecorderClientFuzzer mrcFuzzer(data, size);
+    mrcFuzzer.process();
+    return 0;
+}
diff --git a/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp
new file mode 100644
index 0000000..a7cb689
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <StagefrightMetadataRetriever.h>
+#include <binder/ProcessState.h>
+#include <datasource/FileSource.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/foundation/base64.h>
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+using namespace std;
+using namespace android;
+
+const char *kMimeTypes[] = {MEDIA_MIMETYPE_IMAGE_JPEG,         MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC,
+                            MEDIA_MIMETYPE_VIDEO_VP8,          MEDIA_MIMETYPE_VIDEO_VP9,
+                            MEDIA_MIMETYPE_VIDEO_AV1,          MEDIA_MIMETYPE_VIDEO_AVC,
+                            MEDIA_MIMETYPE_VIDEO_HEVC,         MEDIA_MIMETYPE_VIDEO_MPEG4,
+                            MEDIA_MIMETYPE_VIDEO_H263,         MEDIA_MIMETYPE_VIDEO_MPEG2,
+                            MEDIA_MIMETYPE_VIDEO_RAW,          MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+                            MEDIA_MIMETYPE_VIDEO_SCRAMBLED,    MEDIA_MIMETYPE_VIDEO_DIVX,
+                            MEDIA_MIMETYPE_VIDEO_DIVX3,        MEDIA_MIMETYPE_VIDEO_XVID,
+                            MEDIA_MIMETYPE_VIDEO_MJPEG,        MEDIA_MIMETYPE_AUDIO_AMR_NB,
+                            MEDIA_MIMETYPE_AUDIO_AMR_WB,       MEDIA_MIMETYPE_AUDIO_MPEG,
+                            MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
+                            MEDIA_MIMETYPE_AUDIO_MIDI,         MEDIA_MIMETYPE_AUDIO_AAC,
+                            MEDIA_MIMETYPE_AUDIO_QCELP,        MEDIA_MIMETYPE_AUDIO_VORBIS,
+                            MEDIA_MIMETYPE_AUDIO_OPUS,         MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+                            MEDIA_MIMETYPE_AUDIO_G711_MLAW,    MEDIA_MIMETYPE_AUDIO_RAW,
+                            MEDIA_MIMETYPE_AUDIO_FLAC,         MEDIA_MIMETYPE_AUDIO_AAC_ADTS,
+                            MEDIA_MIMETYPE_AUDIO_MSGSM,        MEDIA_MIMETYPE_AUDIO_AC3,
+                            MEDIA_MIMETYPE_AUDIO_EAC3,         MEDIA_MIMETYPE_AUDIO_EAC3_JOC,
+                            MEDIA_MIMETYPE_AUDIO_AC4,          MEDIA_MIMETYPE_AUDIO_SCRAMBLED,
+                            MEDIA_MIMETYPE_AUDIO_ALAC,         MEDIA_MIMETYPE_AUDIO_WMA,
+                            MEDIA_MIMETYPE_AUDIO_MS_ADPCM,     MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG4,    MEDIA_MIMETYPE_CONTAINER_WAV,
+                            MEDIA_MIMETYPE_CONTAINER_OGG,      MEDIA_MIMETYPE_CONTAINER_MATROSKA,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG2TS,  MEDIA_MIMETYPE_CONTAINER_AVI,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG2PS,  MEDIA_MIMETYPE_CONTAINER_HEIF,
+                            MEDIA_MIMETYPE_TEXT_3GPP,          MEDIA_MIMETYPE_TEXT_SUBRIP,
+                            MEDIA_MIMETYPE_TEXT_VTT,           MEDIA_MIMETYPE_TEXT_CEA_608,
+                            MEDIA_MIMETYPE_TEXT_CEA_708,       MEDIA_MIMETYPE_DATA_TIMED_ID3};
+
+class MetadataRetrieverFuzzer {
+   public:
+    MetadataRetrieverFuzzer(const uint8_t *data, size_t size)
+        : mFdp(data, size),
+          mMdRetriever(new StagefrightMetadataRetriever()),
+          mDataSourceFd(memfd_create("InputFile", MFD_ALLOW_SEALING)) {}
+    ~MetadataRetrieverFuzzer() { close(mDataSourceFd); }
+    bool setDataSource(const uint8_t *data, size_t size);
+    void getData();
+
+   private:
+    FuzzedDataProvider mFdp;
+    sp<StagefrightMetadataRetriever> mMdRetriever = nullptr;
+    const int32_t mDataSourceFd;
+};
+
+void MetadataRetrieverFuzzer::getData() {
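+    // Query frames, images, album art and metadata with fuzzed timestamps, indices and color formats.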
+    int64_t timeUs = mFdp.ConsumeIntegral<int64_t>();
+    int32_t option = mFdp.ConsumeIntegral<int32_t>();
+    int32_t colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    bool metaOnly = mFdp.ConsumeBool();
+    mMdRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
+
+    int32_t index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    metaOnly = mFdp.ConsumeBool();
+    bool thumbnail = mFdp.ConsumeBool();
+    mMdRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
+
+    index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    int32_t left = mFdp.ConsumeIntegral<int32_t>();
+    int32_t top = mFdp.ConsumeIntegral<int32_t>();
+    int32_t right = mFdp.ConsumeIntegral<int32_t>();
+    int32_t bottom = mFdp.ConsumeIntegral<int32_t>();
+    mMdRetriever->getImageRectAtIndex(index, colorFormat, left, top, right, bottom);
+
+    index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    metaOnly = mFdp.ConsumeBool();
+    mMdRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
+
+    mMdRetriever->extractAlbumArt();
+
+    int32_t keyCode = mFdp.ConsumeIntegral<int32_t>();
+    mMdRetriever->extractMetadata(keyCode);
+}
+
+bool MetadataRetrieverFuzzer::setDataSource(const uint8_t *data, size_t size) {
+    status_t status = -1;
+
+    enum DataSourceChoice {FromHttp, FromFd, FromFileSource, kMaxValue = FromFileSource};
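+    // Feed the input through one of the three setDataSource() overloads, chosen from the fuzz input.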
+    switch (mFdp.ConsumeEnum<DataSourceChoice>()) {
+        case FromHttp: {
+            KeyedVector<String8, String8> mHeaders;
+            mHeaders.add(String8(mFdp.ConsumeRandomLengthString().c_str()),
+                         String8(mFdp.ConsumeRandomLengthString().c_str()));
+
+            uint32_t dataBlobSize = mFdp.ConsumeIntegralInRange<uint16_t>(0, size);
+            vector<uint8_t> uriSuffix = mFdp.ConsumeBytes<uint8_t>(dataBlobSize);
+
+            string uri("data:");
+            uri += ";base64,";
+            AString out;
+            encodeBase64(uriSuffix.data(), uriSuffix.size(), &out);
+            uri += out.c_str();
+            status = mMdRetriever->setDataSource(nullptr /*httpService*/, uri.c_str(), &mHeaders);
+            break;
+        }
+        case FromFd: {
+            write(mDataSourceFd, data, size);
+
+            status = mMdRetriever->setDataSource(mDataSourceFd, 0, size);
+            break;
+        }
+        case FromFileSource: {
+            write(mDataSourceFd, data, size);
+
+            sp<DataSource> dataSource = new FileSource(dup(mDataSourceFd), 0, size);
+            status = mMdRetriever->setDataSource(dataSource, mFdp.PickValueInArray(kMimeTypes));
+            break;
+        }
+    }
+
+    if (status != 0) {
+        return false;
+    }
+    return true;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    MetadataRetrieverFuzzer mrtFuzzer(data, size);
+    ProcessState::self()->startThreadPool();
+    if (mrtFuzzer.setDataSource(data, size)) {
+        mrtFuzzer.getData();
+    }
+    return 0;
+}
diff --git a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
index af9cf45..c3bd207 100644
--- a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
+++ b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
@@ -62,7 +62,8 @@
             binder::Status status = mPowerManager->acquireWakeLock(
                     binder, POWERMANAGER_PARTIAL_WAKE_LOCK,
                     String16("AWakeLock"), String16("media"),
-                    {} /* workSource */, {} /* historyTag */, -1 /* displayId */);
+                    {} /* workSource */, {} /* historyTag */, -1 /* displayId */,
+                    nullptr /* callback */);
             IPCThreadState::self()->restoreCallingIdentity(token);
             if (status.isOk()) {
                 mWakeLockToken = binder;
diff --git a/media/libmediaplayerservice/nuplayer/Android.bp b/media/libmediaplayerservice/nuplayer/Android.bp
index 6d338db..71a3168 100644
--- a/media/libmediaplayerservice/nuplayer/Android.bp
+++ b/media/libmediaplayerservice/nuplayer/Android.bp
@@ -37,20 +37,22 @@
         "StreamingSource.cpp",
     ],
 
+    local_include_dirs: [
+        "include/nuplayer",
+    ],
+
+    export_include_dirs: [
+        "include",
+    ],
+
     header_libs: [
         "libmediadrm_headers",
         "libmediametrics_headers",
         "media_plugin_headers",
-    ],
-
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/av/media/libstagefright/httplive",
-        "frameworks/av/media/libstagefright/include",
-        "frameworks/av/media/libstagefright/mpeg2ts",
-        "frameworks/av/media/libstagefright/rtsp",
-        "frameworks/av/media/libstagefright/timedtext",
-        "frameworks/native/include/android",
+        "libstagefright_headers",
+        "libstagefright_httplive_headers",
+        "libstagefright_mpeg2support_headers",
+        "libstagefright_rtsp_headers",
     ],
 
     cflags: [
@@ -74,10 +76,12 @@
         "libmedia",
         "libmediadrm",
         "libpowermanager",
+        "android.hardware.drm-V1-ndk",
     ],
 
     static_libs: [
         "libplayerservice_datasource",
+        "libstagefright_timedtext",
     ],
 
     name: "libstagefright_nuplayer",
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 439dbe8..36e4d4a 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -20,7 +20,6 @@
 #include "GenericSource.h"
 #include "NuPlayerDrm.h"
 
-#include "AnotherPacketSource.h"
 #include <binder/IServiceManager.h>
 #include <cutils/properties.h>
 #include <datasource/PlayerServiceDataSourceFactory.h>
@@ -44,6 +43,7 @@
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 77e7885..4e71e89 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -19,8 +19,6 @@
 #include <utils/Log.h>
 
 #include "HTTPLiveSource.h"
-
-#include "AnotherPacketSource.h"
 #include "LiveDataSource.h"
 
 #include <media/IMediaHTTPService.h>
@@ -31,6 +29,7 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 // default buffer prepare/ready/underflow marks
 static const int kReadyMarkMs     = 5000;  // 5 seconds
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 9ae7ddb..c6b22a6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -35,9 +35,7 @@
 #include "RTSPSource.h"
 #include "StreamingSource.h"
 #include "GenericSource.h"
-#include "TextDescriptions.h"
-
-#include "ATSParser.h"
+#include <timedtext/TextDescriptions.h>
 
 #include <cutils/properties.h>
 
@@ -56,6 +54,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 
+#include <mpeg2ts/ATSParser.h>
+
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 2c1f158..52b2041 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -40,10 +40,9 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/SurfaceUtils.h>
+#include <mpeg2ts/ATSParser.h>
 #include <gui/Surface.h>
 
-#include "ATSParser.h"
-
 namespace android {
 
 static float kDisplayRefreshingRate = 60.f; // TODO: get this from the display
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 793014e..cb91fd9 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -30,8 +30,7 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaErrors.h>
-
-#include "ATSParser.h"
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 4a65f71..2828d44 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1673,24 +1673,18 @@
 
         mDrainAudioQueuePending = false;
 
-        if (offloadingAudio()) {
-            mAudioSink->pause();
-            mAudioSink->flush();
-            if (!mPaused) {
-                mAudioSink->start();
-            }
-        } else {
-            mAudioSink->pause();
-            mAudioSink->flush();
+        mAudioSink->pause();
+        mAudioSink->flush();
+        if (!offloadingAudio()) {
             // Call stop() to signal to the AudioSink to completely fill the
             // internal buffer before resuming playback.
             // FIXME: this is ignored after flush().
             mAudioSink->stop();
-            if (!mPaused) {
-                mAudioSink->start();
-            }
             mNumFramesWritten = 0;
         }
+        if (!mPaused) {
+            mAudioSink->start();
+        }
         mNextAudioClockUpdateTimeUs = -1;
     } else {
         flushQueue(&mVideoQueue);
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.cpp b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
index 4d6a483..6a17972 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
@@ -336,7 +336,7 @@
     *durationUs = 0ll;
 
     int64_t audioDurationUs;
-    if (mAudioTrack != NULL
+    if (mAudioTrack != NULL && mAudioTrack->getFormat() != NULL
             && mAudioTrack->getFormat()->findInt64(
                 kKeyDuration, &audioDurationUs)
             && audioDurationUs > *durationUs) {
@@ -344,7 +344,7 @@
     }
 
     int64_t videoDurationUs;
-    if (mVideoTrack != NULL
+    if (mVideoTrack != NULL && mVideoTrack->getFormat() != NULL
             && mVideoTrack->getFormat()->findInt64(
                 kKeyDuration, &videoDurationUs)
             && videoDurationUs > *durationUs) {
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 8e05de8..75cedcc 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -20,13 +20,12 @@
 
 #include "RTSPSource.h"
 
-#include "AnotherPacketSource.h"
-#include "MyHandler.h"
-#include "SDPLoader.h"
-
 #include <media/IMediaHTTPService.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/rtsp/MyHandler.h>
+#include <media/stagefright/rtsp/SDPLoader.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index bec27d3..9d67ca4 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -20,8 +20,6 @@
 
 #include "StreamingSource.h"
 
-#include "ATSParser.h"
-#include "AnotherPacketSource.h"
 #include "NuPlayerStreamListener.h"
 
 #include <media/stagefright/MediaSource.h>
@@ -31,6 +29,8 @@
 #include <media/stagefright/foundation/MediaKeys.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/AWakeLock.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/AWakeLock.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/AWakeLock.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/AWakeLock.h
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/GenericSource.h
similarity index 99%
rename from media/libmediaplayerservice/nuplayer/GenericSource.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/GenericSource.h
index 7a2ab8f..80e06f1 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/include/nuplayer/GenericSource.h
@@ -21,11 +21,10 @@
 #include "NuPlayer.h"
 #include "NuPlayerSource.h"
 
-#include "ATSParser.h"
-
 #include <android-base/unique_fd.h>
 #include <media/mediaplayer.h>
 #include <media/stagefright/MediaBuffer.h>
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/HTTPLiveSource.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/HTTPLiveSource.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayer.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayer.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayer.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerCCDecoder.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerCCDecoder.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDecoder.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDecoder.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDecoderBase.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDecoderBase.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDecoderPassThrough.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDecoderPassThrough.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDriver.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDriver.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerRenderer.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerRenderer.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerSource.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerSource.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerSource.h
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerStreamListener.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerStreamListener.h
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/RTPSource.h
similarity index 95%
rename from media/libmediaplayerservice/nuplayer/RTPSource.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/RTPSource.h
index 3b4f9e9..7d9bb8f 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.h
+++ b/media/libmediaplayerservice/nuplayer/include/nuplayer/RTPSource.h
@@ -23,25 +23,20 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaSource.h>
+#include <media/stagefright/rtsp/APacketSource.h>
+#include <media/stagefright/rtsp/ARTPConnection.h>
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 #include <media/stagefright/Utils.h>
 #include <media/BufferingSettings.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
 #include <utils/RefBase.h>
 
-#include "AnotherPacketSource.h"
-#include "APacketSource.h"
-#include "ARTPConnection.h"
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
 #include "NuPlayerSource.h"
 
-
-
-
-
-
 namespace android {
 
 struct ALooper;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/RTSPSource.h
similarity index 99%
rename from media/libmediaplayerservice/nuplayer/RTSPSource.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/RTSPSource.h
index 03fce08..7497e41 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/include/nuplayer/RTSPSource.h
@@ -20,7 +20,7 @@
 
 #include "NuPlayerSource.h"
 
-#include "ATSParser.h"
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/StreamingSource.h
similarity index 100%
rename from media/libmediaplayerservice/nuplayer/StreamingSource.h
rename to media/libmediaplayerservice/nuplayer/include/nuplayer/StreamingSource.h
diff --git a/media/libmediaplayerservice/tests/Android.bp b/media/libmediaplayerservice/tests/Android.bp
index 98626fd..99202b8 100644
--- a/media/libmediaplayerservice/tests/Android.bp
+++ b/media/libmediaplayerservice/tests/Android.bp
@@ -30,7 +30,7 @@
     ],
 
     static_libs: [
-        "resourcemanager_aidl_interface-ndk_platform",
+        "resourcemanager_aidl_interface-ndk",
     ],
 
     include_dirs: [
diff --git a/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp b/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
index 92236ea..6eb8c6f 100644
--- a/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
+++ b/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
@@ -33,10 +33,6 @@
         "StagefrightRecorderTest.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libmediaplayerservice",
-    ],
-
     static_libs: [
         "libmediaplayerservice",
         "libstagefright_httplive",
diff --git a/media/libmediatranscoding/Android.bp b/media/libmediatranscoding/Android.bp
index 042850c..937650f 100644
--- a/media/libmediatranscoding/Android.bp
+++ b/media/libmediatranscoding/Android.bp
@@ -106,8 +106,8 @@
     export_include_dirs: ["include"],
 
     static_libs: [
-        "mediatranscoding_aidl_interface-ndk_platform",
-        "resourceobserver_aidl_interface-V1-ndk_platform",
+        "mediatranscoding_aidl_interface-ndk",
+        "resourceobserver_aidl_interface-V1-ndk",
         "libstatslog_media",
     ],
 
diff --git a/media/libmediatranscoding/include/media/ControllerClientInterface.h b/media/libmediatranscoding/include/media/ControllerClientInterface.h
index 9311e2e..ea63da8 100644
--- a/media/libmediatranscoding/include/media/ControllerClientInterface.h
+++ b/media/libmediatranscoding/include/media/ControllerClientInterface.h
@@ -66,7 +66,7 @@
      * Returns false if the session doesn't exist, or the client is already requesting the
      * session. Returns true otherwise.
      */
-    virtual bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid);
+    virtual bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid) = 0;
 
     /**
      * Retrieves the (unsorted) list of all clients requesting the session identified by
@@ -81,7 +81,7 @@
      * Returns false if the session doesn't exist. Returns true otherwise.
      */
     virtual bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
-                               std::vector<int32_t>* out_clientUids);
+                               std::vector<int32_t>* out_clientUids) = 0;
 
 protected:
     virtual ~ControllerClientInterface() = default;
diff --git a/media/libmediatranscoding/tests/Android.bp b/media/libmediatranscoding/tests/Android.bp
index 603611a..7a6980f 100644
--- a/media/libmediatranscoding/tests/Android.bp
+++ b/media/libmediatranscoding/tests/Android.bp
@@ -31,7 +31,7 @@
     ],
 
     static_libs: [
-        "mediatranscoding_aidl_interface-ndk_platform",
+        "mediatranscoding_aidl_interface-ndk",
         "libmediatranscoding",
     ],
 
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index e20f7ab..411b6ef 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -55,8 +55,8 @@
     AMediaFormat_getString(srcFormat, AMEDIAFORMAT_KEY_MIME, &srcMime);
     if (!AMediaFormat_getString(options, AMEDIAFORMAT_KEY_MIME, &dstMime) ||
         strcmp(srcMime, dstMime) == 0) {
-        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, String));
-        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, String));
+        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, Int32));
+        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, Int32));
     }
 
     // ------- Define parameters to copy from the caller's options -------
diff --git a/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h b/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
index 348b4f8..635f67f 100644
--- a/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
+++ b/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
@@ -42,10 +42,6 @@
     //virtual size_t framesUnderrun() const;
     //virtual size_t underruns() const;
 
-    // This is an over-estimate, and could dupe the caller into making a blocking write()
-    // FIXME Use an audio HAL API to query the buffer emptying status when it's available.
-    virtual ssize_t availableToWrite() { return mStreamBufferSizeBytes / mFrameSize; }
-
     virtual ssize_t write(const void *buffer, size_t count);
 
     virtual status_t getTimestamp(ExtendedTimestamp &timestamp);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 1aa1848..7917395 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2195,7 +2195,10 @@
             }
 
             if (!msg->findInt32("aac-max-output-channel_count", &maxOutputChannelCount)) {
-                maxOutputChannelCount = -1;
+                // check the non-AAC-specific key
+                if (!msg->findInt32("max-output-channel-count", &maxOutputChannelCount)) {
+                    maxOutputChannelCount = -1;
+                }
             }
             if (!msg->findInt32("aac-pcm-limiter-enable", &pcmLimiterEnable)) {
                 // value is unknown
@@ -3304,10 +3307,12 @@
     if (err != OK) {
         ALOGE("native_window_set_sideband_stream(%p) failed! (err %d).",
                 sidebandHandle, err);
-        return err;
     }
 
-    return OK;
+    native_handle_close(sidebandHandle);
+    native_handle_delete(sidebandHandle);
+
+    return err;
 }
 
 status_t ACodec::setVideoPortFormatType(
@@ -5395,21 +5400,21 @@
                             err = mOMXNode->getParameter(
                                     (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
                                     &presentation, sizeof(presentation));
-                            if (err != OK) {
-                                return err;
+                            if (err == OK) {
+                                notify->setInt32("aac-encoded-target-level",
+                                                 presentation.nEncodedTargetLevel);
+                                notify->setInt32("aac-drc-cut-level", presentation.nDrcCut);
+                                notify->setInt32("aac-drc-boost-level", presentation.nDrcBoost);
+                                notify->setInt32("aac-drc-heavy-compression",
+                                                 presentation.nHeavyCompression);
+                                notify->setInt32("aac-target-ref-level",
+                                                 presentation.nTargetReferenceLevel);
+                                notify->setInt32("aac-drc-effect-type",
+                                                 presentation.nDrcEffectType);
+                                notify->setInt32("aac-drc-album-mode", presentation.nDrcAlbumMode);
+                                notify->setInt32("aac-drc-output-loudness",
+                                                 presentation.nDrcOutputLoudness);
                             }
-                            notify->setInt32("aac-encoded-target-level",
-                                             presentation.nEncodedTargetLevel);
-                            notify->setInt32("aac-drc-cut-level", presentation.nDrcCut);
-                            notify->setInt32("aac-drc-boost-level", presentation.nDrcBoost);
-                            notify->setInt32("aac-drc-heavy-compression",
-                                             presentation.nHeavyCompression);
-                            notify->setInt32("aac-target-ref-level",
-                                             presentation.nTargetReferenceLevel);
-                            notify->setInt32("aac-drc-effect-type", presentation.nDrcEffectType);
-                            notify->setInt32("aac-drc-album-mode", presentation.nDrcAlbumMode);
-                            notify->setInt32("aac-drc-output-loudness",
-                                             presentation.nDrcOutputLoudness);
                         }
                     }
                     break;
@@ -5431,6 +5436,7 @@
                     notify->setInt32("channel-count", params.nChannels);
                     notify->setInt32("sample-rate", params.nSampleRate);
                     notify->setInt32("bitrate", params.nBitRate);
+                    notify->setInt32("aac-profile", params.eAACProfile);
                     break;
                 }
 
@@ -9205,4 +9211,19 @@
     return OK;
 }
 
+status_t ACodec::querySupportedParameters(std::vector<std::string> *names) {
+    if (!names) {
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+status_t ACodec::subscribeToParameters([[maybe_unused]] const std::vector<std::string> &names) {
+    return OK;
+}
+
+status_t ACodec::unsubscribeFromParameters([[maybe_unused]] const std::vector<std::string> &names) {
+    return OK;
+}
+
 }  // namespace android
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index a052a70..e47e7ff 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -109,6 +109,7 @@
 
     srcs: [
         "CodecBase.cpp",
+        "DataConverter.cpp",
         "FrameRenderTracker.cpp",
         "MediaCodecListWriter.cpp",
         "SkipCutBuffer.cpp",
@@ -125,6 +126,7 @@
     ],
 
     shared_libs: [
+        "libaudioutils",
         "libgui",
         "libhidlallocatorutils",
         "liblog",
@@ -266,7 +268,6 @@
         "CallbackMediaSource.cpp",
         "CameraSource.cpp",
         "CameraSourceTimeLapse.cpp",
-        "DataConverter.cpp",
         "FrameDecoder.cpp",
         "HevcUtils.cpp",
         "InterfaceUtils.cpp",
@@ -340,6 +341,7 @@
         "android.hardware.media.omx@1.0",
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
+        "packagemanager_aidl-cpp",
     ],
 
     static_libs: [
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index efd4070..5da32c9 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -35,6 +35,7 @@
 #include <media/stagefright/FrameCaptureProcessor.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
@@ -192,6 +193,13 @@
             *dstBpp = 4;
             return true;
         }
+        case HAL_PIXEL_FORMAT_RGBA_1010102:
+        {
+            *dstFormat = (OMX_COLOR_FORMATTYPE)COLOR_Format32bitABGR2101010;
+            *captureFormat = ui::PixelFormat::RGBA_1010102;
+            *dstBpp = 4;
+            return true;
+        }
         default:
         {
             ALOGE("Unsupported color format: %d", colorFormat);
@@ -262,13 +270,10 @@
 }
 
 bool isHDR(const sp<AMessage> &format) {
-    uint32_t standard, range, transfer;
+    uint32_t standard, transfer;
     if (!format->findInt32("color-standard", (int32_t*)&standard)) {
         standard = 0;
     }
-    if (!format->findInt32("color-range", (int32_t*)&range)) {
-        range = 0;
-    }
     if (!format->findInt32("color-transfer", (int32_t*)&transfer)) {
         transfer = 0;
     }
@@ -526,8 +531,12 @@
         return NULL;
     }
 
-    // TODO: Use Flexible color instead
-    videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+    if (dstFormat() == COLOR_Format32bitABGR2101010) {
+        videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
+    } else {
+        // TODO: Use Flexible color instead
+        videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+    }
 
     // For the thumbnail extraction case, try to allocate single buffer in both
     // input and output ports, if seeking to a sync frame. NOTE: This request may
@@ -635,6 +644,11 @@
         crop_bottom = height - 1;
     }
 
+    int32_t slice_height;
+    if (outputFormat->findInt32("slice-height", &slice_height) && slice_height > 0) {
+        height = slice_height;
+    }
+
     if (mFrame == NULL) {
         sp<IMemory> frameMem = allocVideoFrame(
                 trackMeta(),
@@ -796,8 +810,16 @@
     if (overrideMeta == NULL) {
         // check if we're dealing with a tiled heif
         int32_t tileWidth, tileHeight, gridRows, gridCols;
+        int32_t widthColsProduct = 0;
+        int32_t heightRowsProduct = 0;
         if (findGridInfo(trackMeta(), &tileWidth, &tileHeight, &gridRows, &gridCols)) {
-            if (mWidth <= tileWidth * gridCols && mHeight <= tileHeight * gridRows) {
+            if (__builtin_mul_overflow(tileWidth, gridCols, &widthColsProduct) ||
+                    __builtin_mul_overflow(tileHeight, gridRows, &heightRowsProduct)) {
+                ALOGE("Multiplication overflowed: grid %dx%d, tile size %dx%d",
+                        gridCols, gridRows, tileWidth, tileHeight);
+                return nullptr;
+            }
+            if (mWidth <= widthColsProduct && mHeight <= heightRowsProduct) {
                 ALOGV("grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
                         gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
 
@@ -826,8 +848,12 @@
         return NULL;
     }
 
-    // TODO: Use Flexible color instead
-    videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+    if (dstFormat() == COLOR_Format32bitABGR2101010) {
+        videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
+    } else {
+        // TODO: Use Flexible color instead
+        videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+    }
 
     if ((mGridRows == 1) && (mGridCols == 1)) {
         videoFormat->setInt32("android._num-input-buffers", 1);
@@ -883,9 +909,18 @@
     }
 
     int32_t width, height, stride;
-    CHECK(outputFormat->findInt32("width", &width));
-    CHECK(outputFormat->findInt32("height", &height));
-    CHECK(outputFormat->findInt32("stride", &stride));
+    if (outputFormat->findInt32("width", &width) == false) {
+        ALOGE("MediaImageDecoder::onOutputReceived:width is missing in outputFormat");
+        return ERROR_MALFORMED;
+    }
+    if (outputFormat->findInt32("height", &height) == false) {
+        ALOGE("MediaImageDecoder::onOutputReceived:height is missing in outputFormat");
+        return ERROR_MALFORMED;
+    }
+    if (outputFormat->findInt32("stride", &stride) == false) {
+        ALOGE("MediaImageDecoder::onOutputReceived:stride is missing in outputFormat");
+        return ERROR_MALFORMED;
+    }
 
     if (mFrame == NULL) {
         sp<IMemory> frameMem = allocVideoFrame(
@@ -924,6 +959,11 @@
         crop_bottom = height - 1;
     }
 
+    int32_t slice_height;
+    if (outputFormat->findInt32("slice-height", &slice_height) && slice_height > 0) {
+        height = slice_height;
+    }
+
     int32_t crop_width, crop_height;
     crop_width = crop_right - crop_left + 1;
     crop_height = crop_bottom - crop_top + 1;
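The FrameDecoder hunk above replaces the raw tile-grid multiplications with __builtin_mul_overflow checks before comparing against the picture size. Below is a minimal standalone sketch of that pattern, assuming a GCC/Clang toolchain; the function name and the sample values are illustrative, not FrameDecoder's own.

    #include <cstdint>
    #include <cstdio>

    // Returns true only when tileWidth*gridCols and tileHeight*gridRows fit in
    // int32_t and cover the requested picture dimensions.
    static bool gridCoversPicture(int32_t tileWidth, int32_t tileHeight,
                                  int32_t gridCols, int32_t gridRows,
                                  int32_t picWidth, int32_t picHeight) {
        int32_t widthProduct = 0;
        int32_t heightProduct = 0;
        // __builtin_mul_overflow reports whether the multiplication wrapped.
        if (__builtin_mul_overflow(tileWidth, gridCols, &widthProduct) ||
                __builtin_mul_overflow(tileHeight, gridRows, &heightProduct)) {
            return false;  // treat overflow as unusable grid info
        }
        return picWidth <= widthProduct && picHeight <= heightProduct;
    }

    int main() {
        printf("%d\n", gridCoversPicture(512, 512, 8, 6, 4096, 3072));   // 1
        printf("%d\n", gridCoversPicture(0x40000000, 1, 4, 1, 100, 1));  // 0, product overflows
        return 0;
    }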
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 7c7fcac..a0c8f8a 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -156,7 +156,7 @@
     bool isHeic() const { return mIsHeic; }
     bool isAudio() const { return mIsAudio; }
     bool isMPEG4() const { return mIsMPEG4; }
-    bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic; }
+    bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic || mIsDovi; }
     bool isExifData(MediaBufferBase *buffer, uint32_t *tiffHdrOffset) const;
     void addChunkOffset(off64_t offset);
     void addItemOffsetAndSize(off64_t offset, size_t size, bool isExif);
@@ -164,6 +164,7 @@
     TrackId& getTrackId() { return mTrackId; }
     status_t dump(int fd, const Vector<String16>& args) const;
     static const char *getFourCCForMime(const char *mime);
+    const char *getDoviFourCC() const;
     const char *getTrackType() const;
     void resetInternal();
     int64_t trackMetaDataSize();
@@ -316,6 +317,7 @@
     volatile bool mStarted;
     bool mIsAvc;
     bool mIsHevc;
+    bool mIsDovi;
     bool mIsAudio;
     bool mIsVideo;
     bool mIsHeic;
@@ -370,6 +372,10 @@
     uint8_t mProfileCompatible;
     uint8_t mLevelIdc;
 
+    uint8_t mDoviProfile;
+    void *mDoviConfigData;
+    size_t mDoviConfigDataSize;
+
     void *mCodecSpecificData;
     size_t mCodecSpecificDataSize;
     bool mGotAllCodecSpecificData;
@@ -422,6 +428,8 @@
     status_t parseHEVCCodecSpecificData(
             const uint8_t *data, size_t size, HevcParameterSets &paramSets);
 
+    status_t makeDoviCodecSpecificData();
+
     // Track authoring progress status
     void trackProgressStatus(int64_t timeUs, status_t err = OK);
     void initTrackingProgressStatus(MetaData *params);
@@ -459,6 +467,7 @@
     void writePaspBox();
     void writeAvccBox();
     void writeHvccBox();
+    void writeDoviConfigBox();
     void writeUrlBox();
     void writeDrefBox();
     void writeDinfBox();
@@ -470,6 +479,7 @@
     void writeHdlrBox();
     void writeTkhdBox(uint32_t now);
     void writeColrBox();
+    void writeMdcvAndClliBoxes();
     void writeMp4aEsdsBox();
     void writeMp4vEsdsBox();
     void writeAudioFourCCBox();
@@ -617,6 +627,17 @@
     return OK;
 }
 
+const char *MPEG4Writer::Track::getDoviFourCC() const {
+    if (mDoviProfile == 5) {
+        return "dvh1";
+    } else if (mDoviProfile == 8) {
+        return "hvc1";
+    } else if (mDoviProfile == 9 || mDoviProfile == 32) {
+        return "avc1";
+    }
+    return (const char*)NULL;
+}
+
 // static
 const char *MPEG4Writer::Track::getFourCCForMime(const char *mime) {
     if (mime == NULL) {
@@ -671,7 +692,9 @@
         mIsBackgroundMode |= isBackgroundMode;
     }
 
-    if (Track::getFourCCForMime(mime) == NULL) {
+    if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+        ALOGV("Add source mime '%s'", mime);
+    } else if (Track::getFourCCForMime(mime) == NULL) {
         ALOGE("Unsupported mime '%s'", mime);
         return ERROR_UNSUPPORTED;
     }
@@ -2150,6 +2173,8 @@
       mMinCttsOffsetTimeUs(0),
       mMinCttsOffsetTicks(0),
       mMaxCttsOffsetTicks(0),
+      mDoviConfigData(NULL),
+      mDoviConfigDataSize(0),
       mCodecSpecificData(NULL),
       mCodecSpecificDataSize(0),
       mGotAllCodecSpecificData(false),
@@ -2176,6 +2201,7 @@
     mMeta->findCString(kKeyMIMEType, &mime);
     mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
     mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+    mIsDovi = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
     mIsAudio = !strncasecmp(mime, "audio/", 6);
     mIsVideo = !strncasecmp(mime, "video/", 6);
     mIsHeic = !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
@@ -2610,7 +2636,12 @@
                !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
         mMeta->findData(kKeyHVCC, &type, &data, &size);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
-        mMeta->findData(kKeyDVCC, &type, &data, &size);
+        makeDoviCodecSpecificData();
+        if (!mMeta->findData(kKeyAVCC, &type, &data, &size) &&
+                !mMeta->findData(kKeyHVCC, &type, &data, &size)) {
+            ALOGE("Failed: no HVCC/AVCC codec specific data for Dolby Vision");
+            return;
+        }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
                !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
         if (mMeta->findData(kKeyESDS, &type, &data, &size)) {
@@ -2651,6 +2682,11 @@
         free(mCodecSpecificData);
         mCodecSpecificData = NULL;
     }
+
+    if (mDoviConfigData != NULL) {
+        free(mDoviConfigData);
+        mDoviConfigData = NULL;
+    }
 }
 
 void MPEG4Writer::Track::initTrackingProgressStatus(MetaData *params) {
@@ -3329,6 +3365,37 @@
     return OK;
 }
 
+status_t MPEG4Writer::Track::makeDoviCodecSpecificData() {
+    uint32_t type;
+    const void *data = NULL;
+    size_t size = 0;
+
+    if (mDoviConfigData != NULL) {
+        ALOGE("Already have Dolby Vision codec specific data");
+        return OK;
+    }
+
+    if (!mMeta->findData(kKeyDVCC, &type, &data, &size)
+             && !mMeta->findData(kKeyDVVC, &type, &data, &size)
+             && !mMeta->findData(kKeyDVWC, &type, &data, &size)) {
+        ALOGE("Failed getting Dovi config for Dolby Vision %d", (int)size);
+        return ERROR_MALFORMED;
+    }
+
+    mDoviConfigData = malloc(size);
+    if (mDoviConfigData == NULL) {
+        ALOGE("Failed allocating Dolby Vision config data");
+        return ERROR_MALFORMED;
+    }
+
+    mDoviConfigDataSize = size;
+    memcpy(mDoviConfigData, data, size);
+
+    mDoviProfile = (((char *)data)[2] >> 1) & 0x7f;  // profile is in bits 1..7 of byte 2
+
+    return OK;
+}
+
 /*
  * Updates the drift time from the audio track so that
  * the video track can get the updated drift time information
@@ -3474,6 +3541,23 @@
                     err = copyCodecSpecificData((const uint8_t *)buffer->data() + buffer->range_offset(),
                             buffer->range_length());
                 }
+                if (mIsDovi) {
+                    err = makeDoviCodecSpecificData();
+
+                    const void *data = NULL;
+                    size_t size = 0;
+
+                    uint32_t type = 0;
+                    if (mDoviProfile == 9) {
+                        mMeta->findData(kKeyAVCC, &type, &data, &size);
+                    } else if (mDoviProfile < 9) {
+                        mMeta->findData(kKeyHVCC, &type, &data, &size);
+                    }
+
+                    if (data != NULL && copyCodecSpecificData((uint8_t *)data, size) == OK) {
+                        mGotAllCodecSpecificData = true;
+                    }
+                }
             }
 
             buffer->release();
@@ -4173,6 +4257,7 @@
         !strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime) ||
         !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime) ||
         !strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime) ||
+        !strcasecmp(MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, mime) ||
         !strcasecmp(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, mime)) {
         if (!mCodecSpecificData ||
             mCodecSpecificDataSize <= 0) {
@@ -4297,7 +4382,13 @@
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
     CHECK(success);
-    const char *fourcc = getFourCCForMime(mime);
+    const char *fourcc;
+    if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+        fourcc = getDoviFourCC();
+    } else {
+        fourcc = getFourCCForMime(mime);
+    }
+
     if (fourcc == NULL) {
         ALOGE("Unknown mime type '%s'.", mime);
         TRESPASS();
@@ -4337,10 +4428,18 @@
         writeAvccBox();
     } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
         writeHvccBox();
+    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, mime)) {
+        if (mDoviProfile <= 8) {
+            writeHvccBox();
+        } else if (mDoviProfile == 9 || mDoviProfile == 32) {
+            writeAvccBox();
+        }
+        writeDoviConfigBox();
     }
 
     writePaspBox();
     writeColrBox();
+    writeMdcvAndClliBoxes();
     mOwner->endBox();  // mp4v, s263 or avc1
 }
 
@@ -4375,6 +4474,54 @@
     }
 }
 
+void MPEG4Writer::Track::writeMdcvAndClliBoxes() {
+    sp<MetaData> meta = mSource->getFormat();
+    uint32_t type;
+    const uint8_t* data;
+    size_t size;
+    bool found =
+            meta->findData(kKeyHdrStaticInfo, &type, reinterpret_cast<const void**>(&data), &size);
+    if (found && size == 25) {
+        uint16_t displayPrimariesRX = U16LE_AT(&data[1]);
+        uint16_t displayPrimariesRY = U16LE_AT(&data[3]);
+
+        uint16_t displayPrimariesGX = U16LE_AT(&data[5]);
+        uint16_t displayPrimariesGY = U16LE_AT(&data[7]);
+
+        uint16_t displayPrimariesBX = U16LE_AT(&data[9]);
+        uint16_t displayPrimariesBY = U16LE_AT(&data[11]);
+
+        uint16_t whitePointX = U16LE_AT(&data[13]);
+        uint16_t whitePointY = U16LE_AT(&data[15]);
+
+        uint16_t maxDisplayMasteringLuminance = U16LE_AT(&data[17]);
+        uint16_t minDisplayMasteringLuminance = U16LE_AT(&data[19]);
+
+        uint16_t maxContentLightLevel = U16LE_AT(&data[21]);
+        uint16_t maxPicAverageLightLevel = U16LE_AT(&data[23]);
+
+        mOwner->beginBox("mdcv");
+        mOwner->writeInt16(displayPrimariesGX);
+        mOwner->writeInt16(displayPrimariesGY);
+        mOwner->writeInt16(displayPrimariesBX);
+        mOwner->writeInt16(displayPrimariesBY);
+        mOwner->writeInt16(displayPrimariesRX);
+        mOwner->writeInt16(displayPrimariesRY);
+        mOwner->writeInt16(whitePointX);
+        mOwner->writeInt16(whitePointY);
+        mOwner->writeInt32(maxDisplayMasteringLuminance * 10000);
+        mOwner->writeInt32(minDisplayMasteringLuminance * 10000);
+        mOwner->endBox();  // mdcv.
+
+        mOwner->beginBox("clli");
+        mOwner->writeInt16(maxContentLightLevel);
+        mOwner->writeInt16(maxPicAverageLightLevel);
+        mOwner->endBox();  // clli.
+    } else {
+        ALOGW("Ignoring HDR static info with unexpected size %d", (int)size);
+    }
+}
+
 void MPEG4Writer::Track::writeAudioFourCCBox() {
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
@@ -4829,12 +4976,11 @@
     mOwner->endBox();  // avcC
 }
 
-
 void MPEG4Writer::Track::writeHvccBox() {
     CHECK(mCodecSpecificData);
     CHECK_GE(mCodecSpecificDataSize, 5u);
 
-    // Patch avcc's lengthSize field to match the number
+    // Patch hvcc's lengthSize field to match the number
     // of bytes we use to indicate the size of a nal unit.
     uint8_t *ptr = (uint8_t *)mCodecSpecificData;
     ptr[21] = (ptr[21] & 0xfc) | (mOwner->useNalLengthFour() ? 3 : 1);
@@ -4843,6 +4989,24 @@
     mOwner->endBox();  // hvcC
 }
 
+void MPEG4Writer::Track::writeDoviConfigBox() {
+    CHECK(mDoviConfigData);
+    CHECK_EQ(mDoviConfigDataSize, 24u);
+
+    uint8_t *ptr = (uint8_t *)mDoviConfigData;
+    uint8_t profile = (ptr[2] >> 1) & 0x7f;
+
+    if (profile > 10) {
+        mOwner->beginBox("dvwC");
+    } else if (profile > 7) {
+        mOwner->beginBox("dvvC");
+    } else {
+        mOwner->beginBox("dvcC");
+    }
+    mOwner->write(mDoviConfigData, mDoviConfigDataSize);
+    mOwner->endBox();  // dvwC/dvvC/dvcC
+}
+
 void MPEG4Writer::Track::writeD263Box() {
     mOwner->beginBox("d263");
     mOwner->writeInt32(0);  // vendor
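The MPEG4Writer changes above key everything off the Dolby Vision profile carried in byte 2 of the 24-byte configuration payload: the sample-entry FourCC and whether a dvcC, dvvC or dvwC box is written. The following self-contained sketch reproduces that selection under the same thresholds as getDoviFourCC() and writeDoviConfigBox(); it is an illustration, not the writer code itself.

    #include <cstdint>
    #include <cstdio>

    // The profile occupies bits 1..7 of byte 2 of the configuration payload.
    static uint8_t doviProfile(const uint8_t* config) {
        return (config[2] >> 1) & 0x7f;
    }

    static const char* doviConfigBox(uint8_t profile) {
        if (profile > 10) return "dvwC";
        if (profile > 7)  return "dvvC";
        return "dvcC";
    }

    static const char* doviSampleEntry(uint8_t profile) {
        if (profile == 5)                  return "dvh1";
        if (profile == 8)                  return "hvc1";
        if (profile == 9 || profile == 32) return "avc1";
        return nullptr;  // profiles the writer does not map
    }

    int main() {
        uint8_t config[24] = {};
        config[2] = 8 << 1;  // encode profile 8 into the payload
        uint8_t p = doviProfile(config);
        printf("profile=%u box=%s entry=%s\n", (unsigned)p, doviConfigBox(p), doviSampleEntry(p));
        return 0;
    }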
diff --git a/media/libstagefright/MediaAppender.cpp b/media/libstagefright/MediaAppender.cpp
index 5d80b30..21dcfa1 100644
--- a/media/libstagefright/MediaAppender.cpp
+++ b/media/libstagefright/MediaAppender.cpp
@@ -75,10 +75,21 @@
         return status;
     }
 
-    if (strcmp("MPEG4Extractor", mExtractor->getName()) == 0) {
+    sp<AMessage> fileFormat;
+    status = mExtractor->getFileFormat(&fileFormat);
+    if (status != OK) {
+        ALOGE("extractor_getFileFormat failed, status :%d", status);
+        return status;
+    }
+
+    AString fileMime;
+    fileFormat->findString("mime", &fileMime);
+    // only compare the end of the file MIME type to allow for vendor-customized MIME types
+    if (fileMime.endsWith("mp4")) {
         mFormat = MediaMuxer::OUTPUT_FORMAT_MPEG_4;
     } else {
-        ALOGE("Unsupported format, extractor name:%s", mExtractor->getName());
+        ALOGE("Unsupported file format, extractor name:%s, fileformat %s",
+              mExtractor->getName(), fileMime.c_str());
         return ERROR_UNSUPPORTED;
     }
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c03236a..e9dcb26 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -112,6 +112,13 @@
 static const char *kCodecCaptureRate = "android.media.mediacodec.capture-rate";
 static const char *kCodecOperatingRate = "android.media.mediacodec.operating-rate";
 static const char *kCodecPriority = "android.media.mediacodec.priority";
+static const char *kCodecConfigColorStandard = "android.media.mediacodec.config-color-standard";
+static const char *kCodecConfigColorRange = "android.media.mediacodec.config-color-range";
+static const char *kCodecConfigColorTransfer = "android.media.mediacodec.config-color-transfer";
+static const char *kCodecParsedColorStandard = "android.media.mediacodec.parsed-color-standard";
+static const char *kCodecParsedColorRange = "android.media.mediacodec.parsed-color-range";
+static const char *kCodecParsedColorTransfer = "android.media.mediacodec.parsed-color-transfer";
+static const char *kCodecHDRMetadataFlags = "android.media.mediacodec.hdr-metadata-flags";
 
 // Min/Max QP before shaping
 static const char *kCodecOriginalVideoQPIMin = "android.media.mediacodec.original-video-qp-i-min";
@@ -748,6 +755,7 @@
       mVideoWidth(0),
       mVideoHeight(0),
       mRotationDegrees(0),
+      mHDRMetadataFlags(0),
       mDequeueInputTimeoutGeneration(0),
       mDequeueInputReplyID(0),
       mDequeueOutputTimeoutGeneration(0),
@@ -898,6 +906,8 @@
         mediametrics_setInt64(mMetricsHandle, kCodecFirstFrameIndexLowLatencyModeOn,
                               mIndexOfFirstFrameWhenLowLatencyOn);
     }
+
+    mediametrics_setInt32(mMetricsHandle, kCodecHDRMetadataFlags, mHDRMetadataFlags);
 #if 0
     // enable for short term, only while debugging
     updateEphemeralMediametrics(mMetricsHandle);
@@ -1511,6 +1521,9 @@
         uint32_t flags) {
     sp<AMessage> msg = new AMessage(kWhatConfigure, this);
 
+    // TODO: validity check log-session-id: it should be a 32-hex-digit.
+    format->findString("log-session-id", &mLogSessionId);
+
     if (mMetricsHandle != 0) {
         int32_t profile = 0;
         if (format->findInt32("profile", &profile)) {
@@ -1522,11 +1535,11 @@
         }
         mediametrics_setInt32(mMetricsHandle, kCodecEncoder,
                               (flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
+
+        mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
     }
 
     if (mIsVideo) {
-        // TODO: validity check log-session-id: it should be a 32-hex-digit.
-        format->findString("log-session-id", &mLogSessionId);
         format->findInt32("width", &mVideoWidth);
         format->findInt32("height", &mVideoHeight);
         if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
@@ -1534,7 +1547,6 @@
         }
 
         if (mMetricsHandle != 0) {
-            mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
             mediametrics_setInt32(mMetricsHandle, kCodecWidth, mVideoWidth);
             mediametrics_setInt32(mMetricsHandle, kCodecHeight, mVideoHeight);
             mediametrics_setInt32(mMetricsHandle, kCodecRotation, mRotationDegrees);
@@ -1566,6 +1578,23 @@
             if (format->findInt32("priority", &priority)) {
                 mediametrics_setInt32(mMetricsHandle, kCodecPriority, priority);
             }
+            int32_t colorStandard = -1;
+            if (format->findInt32(KEY_COLOR_STANDARD, &colorStandard)) {
+                mediametrics_setInt32(mMetricsHandle, kCodecConfigColorStandard, colorStandard);
+            }
+            int32_t colorRange = -1;
+            if (format->findInt32(KEY_COLOR_RANGE, &colorRange)) {
+                mediametrics_setInt32(mMetricsHandle, kCodecConfigColorRange, colorRange);
+            }
+            int32_t colorTransfer = -1;
+            if (format->findInt32(KEY_COLOR_TRANSFER, &colorTransfer)) {
+                mediametrics_setInt32(mMetricsHandle, kCodecConfigColorTransfer, colorTransfer);
+            }
+            HDRStaticInfo info;
+            if (ColorUtils::getHDRStaticInfoFromFormat(format, &info)
+                    && ColorUtils::isHDRStaticInfoValid(&info)) {
+                mHDRMetadataFlags |= kFlagHDRStaticInfo;
+            }
         }
 
         // Prevent possible integer overflow in downstream code.
@@ -3185,8 +3214,11 @@
                         mediametrics_setInt32(mMetricsHandle, kCodecSecure, 0);
                     }
 
-                    if (mIsVideo) {
-                        // audio codec is currently ignored.
+                    MediaCodecInfo::Attributes attr = mCodecInfo
+                            ? mCodecInfo->getAttributes()
+                            : MediaCodecInfo::Attributes(0);
+                    if (!(attr & MediaCodecInfo::kFlagIsSoftwareOnly)) {
+                        // software-only codecs are currently ignored.
                         mResourceManagerProxy->addResource(
                                 MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
                     }
@@ -3502,6 +3534,20 @@
 
                 case kWhatDrainThisBuffer:
                 {
+                    if ((mFlags & kFlagUseBlockModel) == 0 && mTunneled) {
+                        sp<RefBase> obj;
+                        CHECK(msg->findObject("buffer", &obj));
+                        sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
+                        if (mFlags & kFlagIsAsync) {
+                            // In asynchronous mode, output format change is processed immediately.
+                            handleOutputFormatChangeIfNeeded(buffer);
+                        } else {
+                            postActivityNotificationIfPossible();
+                        }
+                        mBufferChannel->discardBuffer(buffer);
+                        break;
+                    }
+
                     /* size_t index = */updateBuffers(kPortIndexOutput, msg);
 
                     if (mState == FLUSHING
@@ -4499,6 +4545,9 @@
             HDRStaticInfo info;
             if (ColorUtils::getHDRStaticInfoFromFormat(mOutputFormat, &info)) {
                 setNativeWindowHdrMetadata(mSurface.get(), &info);
+                if (ColorUtils::isHDRStaticInfoValid(&info)) {
+                    mHDRMetadataFlags |= kFlagHDRStaticInfo;
+                }
             }
         }
 
@@ -4507,6 +4556,7 @@
                 && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
             native_window_set_buffers_hdr10_plus_metadata(mSurface.get(),
                     hdr10PlusInfo->size(), hdr10PlusInfo->data());
+            mHDRMetadataFlags |= kFlagHDR10PlusInfo;
         }
 
         if (mime.startsWithIgnoreCase("video/")) {
@@ -4551,6 +4601,21 @@
             mCrypto->notifyResolution(width, height);
         }
     }
+
+    if (mMetricsHandle != 0) {
+        int32_t colorStandard = -1;
+        if (format->findInt32(KEY_COLOR_STANDARD, &colorStandard)) {
+            mediametrics_setInt32(mMetricsHandle, kCodecParsedColorStandard, colorStandard);
+        }
+        int32_t colorRange = -1;
+        if (format->findInt32( KEY_COLOR_RANGE, &colorRange)) {
+            mediametrics_setInt32(mMetricsHandle, kCodecParsedColorRange, colorRange);
+        }
+        int32_t colorTransfer = -1;
+        if (format->findInt32(KEY_COLOR_TRANSFER, &colorTransfer)) {
+            mediametrics_setInt32(mMetricsHandle, kCodecParsedColorTransfer, colorTransfer);
+        }
+    }
 }
 
 void MediaCodec::extractCSD(const sp<AMessage> &format) {
@@ -4775,8 +4840,8 @@
     }
     const CryptoPlugin::SubSample *subSamples;
     size_t numSubSamples;
-    const uint8_t *key;
-    const uint8_t *iv;
+    const uint8_t *key = NULL;
+    const uint8_t *iv = NULL;
     CryptoPlugin::Mode mode = CryptoPlugin::kMode_Unencrypted;
 
     // We allow the simpler queueInputBuffer API to be used even in
@@ -4791,8 +4856,6 @@
 
             subSamples = &ss;
             numSubSamples = 1;
-            key = NULL;
-            iv = NULL;
             pattern.mEncryptBlocks = 0;
             pattern.mSkipBlocks = 0;
         }
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 6243828..a3040f4 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -18,8 +18,6 @@
 #define LOG_TAG "MediaCodecList"
 #include <utils/Log.h>
 
-#include "MediaCodecListOverrides.h"
-
 #include <binder/IServiceManager.h>
 
 #include <media/IMediaCodecList.h>
@@ -34,6 +32,7 @@
 #include <media/stagefright/CCodec.h>
 #include <media/stagefright/Codec2InfoBuilder.h>
 #include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaCodecListOverrides.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/OmxInfoBuilder.h>
 #include <media/stagefright/PersistentSurface.h>
@@ -509,6 +508,29 @@
                 }
             }
         }
+
+        int32_t profile = -1;
+        if (format->findInt32("profile", &profile)) {
+            int32_t level = -1;
+            format->findInt32("level", &level);
+            Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+            capabilities->getSupportedProfileLevels(&profileLevels);
+            auto it = profileLevels.begin();
+            for (; it != profileLevels.end(); ++it) {
+                if (profile != it->mProfile) {
+                    continue;
+                }
+                if (level > -1 && level > it->mLevel) {
+                    continue;
+                }
+                break;
+            }
+
+            if (it == profileLevels.end()) {
+                ALOGV("Codec does not support profile %d with level %d", profile, level);
+                return false;
+            }
+        }
     }
 
     // haven't found a reason to discard this one
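The MediaCodecList hunk above drops a codec when the caller's format names a profile (and optionally a level) that the codec's advertised profile/level pairs cannot satisfy. A small sketch of the same matching rule over a plain vector, where ProfileLevel and the sample values are illustrative stand-ins rather than the real MediaCodecInfo types:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct ProfileLevel {
        int32_t profile;
        int32_t level;
    };

    // Mirrors the loop above: requestedLevel < 0 means "any level is acceptable".
    static bool supportsProfileLevel(const std::vector<ProfileLevel>& supported,
                                     int32_t requestedProfile, int32_t requestedLevel) {
        for (const ProfileLevel& pl : supported) {
            if (pl.profile != requestedProfile) continue;
            if (requestedLevel > -1 && requestedLevel > pl.level) continue;
            return true;
        }
        return false;
    }

    int main() {
        std::vector<ProfileLevel> caps = {{1, 2048}, {2, 65536}};
        printf("%d\n", supportsProfileLevel(caps, 2, 512));     // 1: profile 2 supports level 512
        printf("%d\n", supportsProfileLevel(caps, 2, 131072));  // 0: requested level too high
        printf("%d\n", supportsProfileLevel(caps, 8, -1));      // 0: profile not advertised
        return 0;
    }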
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index 4a167d1..9304e45 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -18,8 +18,6 @@
 #define LOG_TAG "MediaCodecListOverrides"
 #include <utils/Log.h>
 
-#include "MediaCodecListOverrides.h"
-
 #include <cutils/properties.h>
 #include <gui/Surface.h>
 #include <mediadrm/ICrypto.h>
@@ -30,6 +28,7 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaCodecListOverrides.h>
 
 namespace android {
 
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 0107c32..b07f8f7 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -943,10 +943,17 @@
 
             sp<MediaCodecBuffer> outbuf;
             status_t err = mEncoder->getOutputBuffer(index, &outbuf);
-            if (err != OK || outbuf == NULL || outbuf->data() == NULL
-                || outbuf->size() == 0) {
+            if (err != OK || outbuf == NULL || outbuf->data() == NULL) {
                 signalEOS();
                 break;
+            } else if (outbuf->size() == 0) {
+                // Zero-length CSD buffers are not treated as an error.
+                if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
+                    mEncoder->releaseOutputBuffer(index);
+                } else {
+                    signalEOS();
+                }
+                break;
             }
 
             MediaBufferBase *mbuf = new MediaBuffer(outbuf->size());
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index 0bc5976..0f5e95e 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -67,7 +67,11 @@
         mFd = -1;
     }
 
-    free(mOs);
+    if (mOs != nullptr) {
+        ogg_stream_clear(mOs);
+        free(mOs);
+        mOs = nullptr;
+    }
 }
 
 status_t OggWriter::initCheck() const {
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index ee9016d..de91533 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -145,7 +145,19 @@
     if (available < num) {
         int32_t newcapacity = mCapacity + (num - available);
         char * newbuffer = new char[newcapacity];
-        memcpy(newbuffer, mCutBuffer, mCapacity);
+        if (mWriteHead < mReadHead) {
+            // data isn't continuous, need to memcpy twice
+            // data isn't contiguous; memcpy twice to move the
+            // previous data into the new buffer.
+            memcpy(newbuffer, mCutBuffer + mReadHead, copyLeft);
+            memcpy(newbuffer + copyLeft, mCutBuffer, mWriteHead);
+            mReadHead = 0;
+            mWriteHead += copyLeft;
+        } else {
+            memcpy(newbuffer, mCutBuffer + mReadHead, mWriteHead - mReadHead);
+            mWriteHead -= mReadHead;
+            mReadHead = 0;
+        }
         delete [] mCutBuffer;
         mCapacity = newcapacity;
         mCutBuffer = newbuffer;
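The SkipCutBuffer fix above addresses the case where the buffered data wraps around the end of the old storage: the tail and the head must be copied separately so the data lands contiguously at the start of the enlarged buffer. A minimal sketch of that compaction, using hypothetical names rather than SkipCutBuffer's members:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct RingBuffer {
        char*  buf;
        size_t capacity;
        size_t readHead;
        size_t writeHead;

        void grow(size_t newCapacity) {
            char* newbuf = new char[newCapacity];
            if (writeHead < readHead) {
                // Wrapped: copy [readHead, capacity) first, then [0, writeHead).
                size_t tail = capacity - readHead;
                memcpy(newbuf, buf + readHead, tail);
                memcpy(newbuf + tail, buf, writeHead);
                writeHead += tail;
            } else {
                memcpy(newbuf, buf + readHead, writeHead - readHead);
                writeHead -= readHead;
            }
            readHead = 0;
            delete[] buf;
            buf = newbuf;
            capacity = newCapacity;
        }
    };

    int main() {
        RingBuffer rb{new char[8], 8, 6, 3};  // 5 bytes stored, wrapped across the end
        memcpy(rb.buf + 6, "AB", 2);          // tail: indices 6..7
        memcpy(rb.buf, "CDE", 3);             // head: indices 0..2
        rb.grow(16);
        printf("%.*s\n", (int)(rb.writeHead - rb.readHead), rb.buf + rb.readHead);  // ABCDE
        delete[] rb.buf;
        return 0;
    }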
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 7ce2968..91a44d1 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -20,13 +20,68 @@
         },
         {
           "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaAudioTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
         },
         // TODO: b/149314419
         {
-          "exclude-filter": "android.media.cts.AudioPlaybackCaptureTest"
+          "exclude-filter": "android.media.audio.cts.AudioPlaybackCaptureTest"
         },
         {
-          "exclude-filter": "android.media.cts.AudioRecordTest"
+          "exclude-filter": "android.media.audio.cts.AudioRecordTest"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaDecoderTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaEncoderTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaCodecTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
+    },
+    {
+      "name": "CtsMediaPlayerTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
         }
       ]
     }
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4c18f87..1854588 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -28,9 +28,6 @@
 #include "include/HevcUtils.h"
 
 #include <cutils/properties.h>
-#include <media/openmax/OMX_Audio.h>
-#include <media/openmax/OMX_Video.h>
-#include <media/openmax/OMX_VideoExt.h>
 #include <media/stagefright/CodecBase.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -57,6 +54,14 @@
 #define AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS \
   "mpegh-compatible-sets"
 
+namespace {
+    // TODO: this should possibly be handled in an else
+    constexpr static int32_t AACObjectNull = 0;
+
+    // TODO: decide if we should just not transmit the level in this case
+    constexpr static int32_t DolbyVisionLevelUnknown = 0;
+}
+
 namespace android {
 
 static status_t copyNALUToABuffer(sp<ABuffer> *buffer, const uint8_t *ptr, size_t length) {
@@ -156,21 +161,22 @@
         audioObjectType >>= 11;
     }
 
-    const static ALookup<uint16_t, OMX_AUDIO_AACPROFILETYPE> profiles {
-        { 1,  OMX_AUDIO_AACObjectMain     },
-        { 2,  OMX_AUDIO_AACObjectLC       },
-        { 3,  OMX_AUDIO_AACObjectSSR      },
-        { 4,  OMX_AUDIO_AACObjectLTP      },
-        { 5,  OMX_AUDIO_AACObjectHE       },
-        { 6,  OMX_AUDIO_AACObjectScalable },
-        { 17, OMX_AUDIO_AACObjectERLC     },
-        { 23, OMX_AUDIO_AACObjectLD       },
-        { 29, OMX_AUDIO_AACObjectHE_PS    },
-        { 39, OMX_AUDIO_AACObjectELD      },
-        { 42, OMX_AUDIO_AACObjectXHE      },
+
+    const static ALookup<uint16_t, int32_t> profiles {
+        { 1,  AACObjectMain     },
+        { 2,  AACObjectLC       },
+        { 3,  AACObjectSSR      },
+        { 4,  AACObjectLTP      },
+        { 5,  AACObjectHE       },
+        { 6,  AACObjectScalable },
+        { 17, AACObjectERLC     },
+        { 23, AACObjectLD       },
+        { 29, AACObjectHE_PS    },
+        { 39, AACObjectELD      },
+        { 42, AACObjectXHE      },
     };
 
-    OMX_AUDIO_AACPROFILETYPE profile;
+    int32_t profile;
     if (profiles.map(audioObjectType, &profile)) {
         format->setInt32("profile", profile);
     }
@@ -184,53 +190,53 @@
     const uint8_t constraints = ptr[2];
     const uint8_t level = ptr[3];
 
-    const static ALookup<uint8_t, OMX_VIDEO_AVCLEVELTYPE> levels {
-        {  9, OMX_VIDEO_AVCLevel1b }, // technically, 9 is only used for High+ profiles
-        { 10, OMX_VIDEO_AVCLevel1  },
-        { 11, OMX_VIDEO_AVCLevel11 }, // prefer level 1.1 for the value 11
-        { 11, OMX_VIDEO_AVCLevel1b },
-        { 12, OMX_VIDEO_AVCLevel12 },
-        { 13, OMX_VIDEO_AVCLevel13 },
-        { 20, OMX_VIDEO_AVCLevel2  },
-        { 21, OMX_VIDEO_AVCLevel21 },
-        { 22, OMX_VIDEO_AVCLevel22 },
-        { 30, OMX_VIDEO_AVCLevel3  },
-        { 31, OMX_VIDEO_AVCLevel31 },
-        { 32, OMX_VIDEO_AVCLevel32 },
-        { 40, OMX_VIDEO_AVCLevel4  },
-        { 41, OMX_VIDEO_AVCLevel41 },
-        { 42, OMX_VIDEO_AVCLevel42 },
-        { 50, OMX_VIDEO_AVCLevel5  },
-        { 51, OMX_VIDEO_AVCLevel51 },
-        { 52, OMX_VIDEO_AVCLevel52 },
-        { 60, OMX_VIDEO_AVCLevel6  },
-        { 61, OMX_VIDEO_AVCLevel61 },
-        { 62, OMX_VIDEO_AVCLevel62 },
+    const static ALookup<uint8_t, int32_t> levels {
+        {  9, AVCLevel1b }, // technically, 9 is only used for High+ profiles
+        { 10, AVCLevel1  },
+        { 11, AVCLevel11 }, // prefer level 1.1 for the value 11
+        { 11, AVCLevel1b },
+        { 12, AVCLevel12 },
+        { 13, AVCLevel13 },
+        { 20, AVCLevel2  },
+        { 21, AVCLevel21 },
+        { 22, AVCLevel22 },
+        { 30, AVCLevel3  },
+        { 31, AVCLevel31 },
+        { 32, AVCLevel32 },
+        { 40, AVCLevel4  },
+        { 41, AVCLevel41 },
+        { 42, AVCLevel42 },
+        { 50, AVCLevel5  },
+        { 51, AVCLevel51 },
+        { 52, AVCLevel52 },
+        { 60, AVCLevel6  },
+        { 61, AVCLevel61 },
+        { 62, AVCLevel62 },
     };
-    const static ALookup<uint8_t, OMX_VIDEO_AVCPROFILETYPE> profiles {
-        { 66, OMX_VIDEO_AVCProfileBaseline },
-        { 77, OMX_VIDEO_AVCProfileMain     },
-        { 88, OMX_VIDEO_AVCProfileExtended },
-        { 100, OMX_VIDEO_AVCProfileHigh    },
-        { 110, OMX_VIDEO_AVCProfileHigh10  },
-        { 122, OMX_VIDEO_AVCProfileHigh422 },
-        { 244, OMX_VIDEO_AVCProfileHigh444 },
+    const static ALookup<uint8_t, int32_t> profiles {
+        { 66, AVCProfileBaseline },
+        { 77, AVCProfileMain     },
+        { 88, AVCProfileExtended },
+        { 100, AVCProfileHigh    },
+        { 110, AVCProfileHigh10  },
+        { 122, AVCProfileHigh422 },
+        { 244, AVCProfileHigh444 },
     };
 
     // set profile & level if they are recognized
-    OMX_VIDEO_AVCPROFILETYPE codecProfile;
-    OMX_VIDEO_AVCLEVELTYPE codecLevel;
+    int32_t codecProfile;
+    int32_t codecLevel;
     if (profiles.map(profile, &codecProfile)) {
         if (profile == 66 && (constraints & 0x40)) {
-            codecProfile = (OMX_VIDEO_AVCPROFILETYPE)OMX_VIDEO_AVCProfileConstrainedBaseline;
+            codecProfile = AVCProfileConstrainedBaseline;
         } else if (profile == 100 && (constraints & 0x0C) == 0x0C) {
-            codecProfile = (OMX_VIDEO_AVCPROFILETYPE)OMX_VIDEO_AVCProfileConstrainedHigh;
+            codecProfile = AVCProfileConstrainedHigh;
         }
         format->setInt32("profile", codecProfile);
         if (levels.map(level, &codecLevel)) {
             // for 9 && 11 decide level based on profile and constraint_set3 flag
             if (level == 11 && (profile == 66 || profile == 77 || profile == 88)) {
-                codecLevel = (constraints & 0x10) ? OMX_VIDEO_AVCLevel1b : OMX_VIDEO_AVCLevel11;
+                codecLevel = (constraints & 0x10) ? AVCLevel1b : AVCLevel11;
             }
             format->setInt32("level", codecLevel);
         }
@@ -256,41 +262,44 @@
 
     // All Dolby Profiles will have profile and level info in MediaFormat
     // Profile 8 and 9 will have bl_compatibility_id too.
-    const static ALookup<uint8_t, OMX_VIDEO_DOLBYVISIONPROFILETYPE> profiles{
-        {1, OMX_VIDEO_DolbyVisionProfileDvavPen},
-        {3, OMX_VIDEO_DolbyVisionProfileDvheDen},
-        {4, OMX_VIDEO_DolbyVisionProfileDvheDtr},
-        {5, OMX_VIDEO_DolbyVisionProfileDvheStn},
-        {6, OMX_VIDEO_DolbyVisionProfileDvheDth},
-        {7, OMX_VIDEO_DolbyVisionProfileDvheDtb},
-        {8, OMX_VIDEO_DolbyVisionProfileDvheSt},
-        {9, OMX_VIDEO_DolbyVisionProfileDvavSe},
-        {10, OMX_VIDEO_DolbyVisionProfileDvav110},
+    const static ALookup<uint8_t, int32_t> profiles{
+        {1, DolbyVisionProfileDvavPen},
+        {3, DolbyVisionProfileDvheDen},
+        {4, DolbyVisionProfileDvheDtr},
+        {5, DolbyVisionProfileDvheStn},
+        {6, DolbyVisionProfileDvheDth},
+        {7, DolbyVisionProfileDvheDtb},
+        {8, DolbyVisionProfileDvheSt},
+        {9, DolbyVisionProfileDvavSe},
+        {10, DolbyVisionProfileDvav110},
     };
 
-    const static ALookup<uint8_t, OMX_VIDEO_DOLBYVISIONLEVELTYPE> levels{
-        {0, OMX_VIDEO_DolbyVisionLevelUnknown},
-        {1, OMX_VIDEO_DolbyVisionLevelHd24},
-        {2, OMX_VIDEO_DolbyVisionLevelHd30},
-        {3, OMX_VIDEO_DolbyVisionLevelFhd24},
-        {4, OMX_VIDEO_DolbyVisionLevelFhd30},
-        {5, OMX_VIDEO_DolbyVisionLevelFhd60},
-        {6, OMX_VIDEO_DolbyVisionLevelUhd24},
-        {7, OMX_VIDEO_DolbyVisionLevelUhd30},
-        {8, OMX_VIDEO_DolbyVisionLevelUhd48},
-        {9, OMX_VIDEO_DolbyVisionLevelUhd60},
+    const static ALookup<uint8_t, int32_t> levels{
+        {0, DolbyVisionLevelUnknown},
+        {1, DolbyVisionLevelHd24},
+        {2, DolbyVisionLevelHd30},
+        {3, DolbyVisionLevelFhd24},
+        {4, DolbyVisionLevelFhd30},
+        {5, DolbyVisionLevelFhd60},
+        {6, DolbyVisionLevelUhd24},
+        {7, DolbyVisionLevelUhd30},
+        {8, DolbyVisionLevelUhd48},
+        {9, DolbyVisionLevelUhd60},
+        {10, DolbyVisionLevelUhd120},
+        {11, DolbyVisionLevel8k30},
+        {12, DolbyVisionLevel8k60},
     };
     // set rpuAssoc
     if (rpu_present_flag && el_present_flag && !bl_present_flag) {
         format->setInt32("rpuAssoc", 1);
     }
     // set profile & level if they are recognized
-    OMX_VIDEO_DOLBYVISIONPROFILETYPE codecProfile;
-    OMX_VIDEO_DOLBYVISIONLEVELTYPE codecLevel;
+    int32_t codecProfile;
+    int32_t codecLevel;
     if (profiles.map(profile, &codecProfile)) {
         format->setInt32("profile", codecProfile);
-        if (codecProfile == OMX_VIDEO_DolbyVisionProfileDvheSt ||
-            codecProfile == OMX_VIDEO_DolbyVisionProfileDvavSe) {
+        if (codecProfile == DolbyVisionProfileDvheSt ||
+            codecProfile == DolbyVisionProfileDvavSe) {
             format->setInt32("bl_compatibility_id", bl_compatibility_id);
         }
         if (levels.map(level, &codecLevel)) {
@@ -307,32 +316,32 @@
     const uint8_t profile = ptr[6];
     const uint8_t level = ptr[5];
 
-    const static ALookup<uint8_t, OMX_VIDEO_H263PROFILETYPE> profiles {
-        { 0, OMX_VIDEO_H263ProfileBaseline },
-        { 1, OMX_VIDEO_H263ProfileH320Coding },
-        { 2, OMX_VIDEO_H263ProfileBackwardCompatible },
-        { 3, OMX_VIDEO_H263ProfileISWV2 },
-        { 4, OMX_VIDEO_H263ProfileISWV3 },
-        { 5, OMX_VIDEO_H263ProfileHighCompression },
-        { 6, OMX_VIDEO_H263ProfileInternet },
-        { 7, OMX_VIDEO_H263ProfileInterlace },
-        { 8, OMX_VIDEO_H263ProfileHighLatency },
+    const static ALookup<uint8_t, int32_t> profiles {
+        { 0, H263ProfileBaseline },
+        { 1, H263ProfileH320Coding },
+        { 2, H263ProfileBackwardCompatible },
+        { 3, H263ProfileISWV2 },
+        { 4, H263ProfileISWV3 },
+        { 5, H263ProfileHighCompression },
+        { 6, H263ProfileInternet },
+        { 7, H263ProfileInterlace },
+        { 8, H263ProfileHighLatency },
     };
 
-    const static ALookup<uint8_t, OMX_VIDEO_H263LEVELTYPE> levels {
-        { 10, OMX_VIDEO_H263Level10 },
-        { 20, OMX_VIDEO_H263Level20 },
-        { 30, OMX_VIDEO_H263Level30 },
-        { 40, OMX_VIDEO_H263Level40 },
-        { 45, OMX_VIDEO_H263Level45 },
-        { 50, OMX_VIDEO_H263Level50 },
-        { 60, OMX_VIDEO_H263Level60 },
-        { 70, OMX_VIDEO_H263Level70 },
+    const static ALookup<uint8_t, int32_t> levels {
+        { 10, H263Level10 },
+        { 20, H263Level20 },
+        { 30, H263Level30 },
+        { 40, H263Level40 },
+        { 45, H263Level45 },
+        { 50, H263Level50 },
+        { 60, H263Level60 },
+        { 70, H263Level70 },
     };
 
     // set profile & level if they are recognized
-    OMX_VIDEO_H263PROFILETYPE codecProfile;
-    OMX_VIDEO_H263LEVELTYPE codecLevel;
+    int32_t codecProfile;
+    int32_t codecLevel;
     if (profiles.map(profile, &codecProfile)) {
         format->setInt32("profile", codecProfile);
         if (levels.map(level, &codecLevel)) {
@@ -350,59 +359,59 @@
     const uint8_t tier = (ptr[1] & 0x20) >> 5;
     const uint8_t level = ptr[12];
 
-    const static ALookup<std::pair<uint8_t, uint8_t>, OMX_VIDEO_HEVCLEVELTYPE> levels {
-        { { 0, 30  }, OMX_VIDEO_HEVCMainTierLevel1  },
-        { { 0, 60  }, OMX_VIDEO_HEVCMainTierLevel2  },
-        { { 0, 63  }, OMX_VIDEO_HEVCMainTierLevel21 },
-        { { 0, 90  }, OMX_VIDEO_HEVCMainTierLevel3  },
-        { { 0, 93  }, OMX_VIDEO_HEVCMainTierLevel31 },
-        { { 0, 120 }, OMX_VIDEO_HEVCMainTierLevel4  },
-        { { 0, 123 }, OMX_VIDEO_HEVCMainTierLevel41 },
-        { { 0, 150 }, OMX_VIDEO_HEVCMainTierLevel5  },
-        { { 0, 153 }, OMX_VIDEO_HEVCMainTierLevel51 },
-        { { 0, 156 }, OMX_VIDEO_HEVCMainTierLevel52 },
-        { { 0, 180 }, OMX_VIDEO_HEVCMainTierLevel6  },
-        { { 0, 183 }, OMX_VIDEO_HEVCMainTierLevel61 },
-        { { 0, 186 }, OMX_VIDEO_HEVCMainTierLevel62 },
-        { { 1, 30  }, OMX_VIDEO_HEVCHighTierLevel1  },
-        { { 1, 60  }, OMX_VIDEO_HEVCHighTierLevel2  },
-        { { 1, 63  }, OMX_VIDEO_HEVCHighTierLevel21 },
-        { { 1, 90  }, OMX_VIDEO_HEVCHighTierLevel3  },
-        { { 1, 93  }, OMX_VIDEO_HEVCHighTierLevel31 },
-        { { 1, 120 }, OMX_VIDEO_HEVCHighTierLevel4  },
-        { { 1, 123 }, OMX_VIDEO_HEVCHighTierLevel41 },
-        { { 1, 150 }, OMX_VIDEO_HEVCHighTierLevel5  },
-        { { 1, 153 }, OMX_VIDEO_HEVCHighTierLevel51 },
-        { { 1, 156 }, OMX_VIDEO_HEVCHighTierLevel52 },
-        { { 1, 180 }, OMX_VIDEO_HEVCHighTierLevel6  },
-        { { 1, 183 }, OMX_VIDEO_HEVCHighTierLevel61 },
-        { { 1, 186 }, OMX_VIDEO_HEVCHighTierLevel62 },
+    const static ALookup<std::pair<uint8_t, uint8_t>, int32_t> levels {
+        { { 0, 30  }, HEVCMainTierLevel1  },
+        { { 0, 60  }, HEVCMainTierLevel2  },
+        { { 0, 63  }, HEVCMainTierLevel21 },
+        { { 0, 90  }, HEVCMainTierLevel3  },
+        { { 0, 93  }, HEVCMainTierLevel31 },
+        { { 0, 120 }, HEVCMainTierLevel4  },
+        { { 0, 123 }, HEVCMainTierLevel41 },
+        { { 0, 150 }, HEVCMainTierLevel5  },
+        { { 0, 153 }, HEVCMainTierLevel51 },
+        { { 0, 156 }, HEVCMainTierLevel52 },
+        { { 0, 180 }, HEVCMainTierLevel6  },
+        { { 0, 183 }, HEVCMainTierLevel61 },
+        { { 0, 186 }, HEVCMainTierLevel62 },
+        { { 1, 30  }, HEVCHighTierLevel1  },
+        { { 1, 60  }, HEVCHighTierLevel2  },
+        { { 1, 63  }, HEVCHighTierLevel21 },
+        { { 1, 90  }, HEVCHighTierLevel3  },
+        { { 1, 93  }, HEVCHighTierLevel31 },
+        { { 1, 120 }, HEVCHighTierLevel4  },
+        { { 1, 123 }, HEVCHighTierLevel41 },
+        { { 1, 150 }, HEVCHighTierLevel5  },
+        { { 1, 153 }, HEVCHighTierLevel51 },
+        { { 1, 156 }, HEVCHighTierLevel52 },
+        { { 1, 180 }, HEVCHighTierLevel6  },
+        { { 1, 183 }, HEVCHighTierLevel61 },
+        { { 1, 186 }, HEVCHighTierLevel62 },
     };
 
-    const static ALookup<uint8_t, OMX_VIDEO_HEVCPROFILETYPE> profiles {
-        { 1, OMX_VIDEO_HEVCProfileMain   },
-        { 2, OMX_VIDEO_HEVCProfileMain10 },
+    const static ALookup<uint8_t, int32_t> profiles {
+        { 1, HEVCProfileMain   },
+        { 2, HEVCProfileMain10 },
         // use Main for Main Still Picture decoding
-        { 3, OMX_VIDEO_HEVCProfileMain },
+        { 3, HEVCProfileMain },
     };
 
     // set profile & level if they are recognized
-    OMX_VIDEO_HEVCPROFILETYPE codecProfile;
-    OMX_VIDEO_HEVCLEVELTYPE codecLevel;
+    int32_t codecProfile;
+    int32_t codecLevel;
     if (!profiles.map(profile, &codecProfile)) {
         if (ptr[2] & 0x40 /* general compatibility flag 1 */) {
             // Note that this case covers Main Still Picture too
-            codecProfile = OMX_VIDEO_HEVCProfileMain;
+            codecProfile = HEVCProfileMain;
         } else if (ptr[2] & 0x20 /* general compatibility flag 2 */) {
-            codecProfile = OMX_VIDEO_HEVCProfileMain10;
+            codecProfile = HEVCProfileMain10;
         } else {
             return;
         }
     }
 
     // bump to HDR profile
-    if (isHdr(format) && codecProfile == OMX_VIDEO_HEVCProfileMain10) {
-        codecProfile = OMX_VIDEO_HEVCProfileMain10HDR10;
+    if (isHdr(format) && codecProfile == HEVCProfileMain10) {
+        codecProfile = HEVCProfileMain10HDR10;
     }
 
     format->setInt32("profile", codecProfile);
@@ -422,36 +431,36 @@
         }
         const uint8_t indication = ((seq[4] & 0xF) << 4) | ((seq[5] & 0xF0) >> 4);
 
-        const static ALookup<uint8_t, OMX_VIDEO_MPEG2PROFILETYPE> profiles {
-            { 0x50, OMX_VIDEO_MPEG2ProfileSimple  },
-            { 0x40, OMX_VIDEO_MPEG2ProfileMain    },
-            { 0x30, OMX_VIDEO_MPEG2ProfileSNR     },
-            { 0x20, OMX_VIDEO_MPEG2ProfileSpatial },
-            { 0x10, OMX_VIDEO_MPEG2ProfileHigh    },
+        const static ALookup<uint8_t, int32_t> profiles {
+            { 0x50, MPEG2ProfileSimple  },
+            { 0x40, MPEG2ProfileMain    },
+            { 0x30, MPEG2ProfileSNR     },
+            { 0x20, MPEG2ProfileSpatial },
+            { 0x10, MPEG2ProfileHigh    },
         };
 
-        const static ALookup<uint8_t, OMX_VIDEO_MPEG2LEVELTYPE> levels {
-            { 0x0A, OMX_VIDEO_MPEG2LevelLL  },
-            { 0x08, OMX_VIDEO_MPEG2LevelML  },
-            { 0x06, OMX_VIDEO_MPEG2LevelH14 },
-            { 0x04, OMX_VIDEO_MPEG2LevelHL  },
-            { 0x02, OMX_VIDEO_MPEG2LevelHP  },
+        const static ALookup<uint8_t, int32_t> levels {
+            { 0x0A, MPEG2LevelLL  },
+            { 0x08, MPEG2LevelML  },
+            { 0x06, MPEG2LevelH14 },
+            { 0x04, MPEG2LevelHL  },
+            { 0x02, MPEG2LevelHP  },
         };
 
         const static ALookup<uint8_t,
-                std::pair<OMX_VIDEO_MPEG2PROFILETYPE, OMX_VIDEO_MPEG2LEVELTYPE>> escapes {
+                std::pair<int32_t, int32_t>> escapes {
             /* unsupported
-            { 0x8E, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelLL  } },
-            { 0x8D, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelML  } },
-            { 0x8B, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelH14 } },
-            { 0x8A, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelHL  } }, */
-            { 0x85, { OMX_VIDEO_MPEG2Profile422, OMX_VIDEO_MPEG2LevelML  } },
-            { 0x82, { OMX_VIDEO_MPEG2Profile422, OMX_VIDEO_MPEG2LevelHL  } },
+            { 0x8E, { XXX_MPEG2ProfileMultiView, MPEG2LevelLL  } },
+            { 0x8D, { XXX_MPEG2ProfileMultiView, MPEG2LevelML  } },
+            { 0x8B, { XXX_MPEG2ProfileMultiView, MPEG2LevelH14 } },
+            { 0x8A, { XXX_MPEG2ProfileMultiView, MPEG2LevelHL  } }, */
+            { 0x85, { MPEG2Profile422, MPEG2LevelML  } },
+            { 0x82, { MPEG2Profile422, MPEG2LevelHL  } },
         };
 
-        OMX_VIDEO_MPEG2PROFILETYPE profile;
-        OMX_VIDEO_MPEG2LEVELTYPE level;
-        std::pair<OMX_VIDEO_MPEG2PROFILETYPE, OMX_VIDEO_MPEG2LEVELTYPE> profileLevel;
+        int32_t profile;
+        int32_t level;
+        std::pair<int32_t, int32_t> profileLevel;
         if (escapes.map(indication, &profileLevel)) {
             format->setInt32("profile", profileLevel.first);
             format->setInt32("level", profileLevel.second);
@@ -468,16 +477,16 @@
     // esds seems to only contain the profile for MPEG-2
     uint8_t objType;
     if (esds.getObjectTypeIndication(&objType) == OK) {
-        const static ALookup<uint8_t, OMX_VIDEO_MPEG2PROFILETYPE> profiles{
-            { 0x60, OMX_VIDEO_MPEG2ProfileSimple  },
-            { 0x61, OMX_VIDEO_MPEG2ProfileMain    },
-            { 0x62, OMX_VIDEO_MPEG2ProfileSNR     },
-            { 0x63, OMX_VIDEO_MPEG2ProfileSpatial },
-            { 0x64, OMX_VIDEO_MPEG2ProfileHigh    },
-            { 0x65, OMX_VIDEO_MPEG2Profile422     },
+        const static ALookup<uint8_t, int32_t> profiles{
+            { 0x60, MPEG2ProfileSimple  },
+            { 0x61, MPEG2ProfileMain    },
+            { 0x62, MPEG2ProfileSNR     },
+            { 0x63, MPEG2ProfileSpatial },
+            { 0x64, MPEG2ProfileHigh    },
+            { 0x65, MPEG2Profile422     },
         };
 
-        OMX_VIDEO_MPEG2PROFILETYPE profile;
+        int32_t profile;
         if (profiles.map(objType, &profile)) {
             format->setInt32("profile", profile);
         }
@@ -492,82 +501,82 @@
         const uint8_t indication = seq[4];
 
         const static ALookup<uint8_t,
-                std::pair<OMX_VIDEO_MPEG4PROFILETYPE, OMX_VIDEO_MPEG4LEVELTYPE>> table {
-            { 0b00000001, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level1  } },
-            { 0b00000010, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level2  } },
-            { 0b00000011, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level3  } },
-            { 0b00000100, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level4a } },
-            { 0b00000101, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level5  } },
-            { 0b00000110, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level6  } },
-            { 0b00001000, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level0  } },
-            { 0b00001001, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level0b } },
-            { 0b00010000, { OMX_VIDEO_MPEG4ProfileSimpleScalable,    OMX_VIDEO_MPEG4Level0  } },
-            { 0b00010001, { OMX_VIDEO_MPEG4ProfileSimpleScalable,    OMX_VIDEO_MPEG4Level1  } },
-            { 0b00010010, { OMX_VIDEO_MPEG4ProfileSimpleScalable,    OMX_VIDEO_MPEG4Level2  } },
+                std::pair<int32_t, int32_t>> table {
+            { 0b00000001, { MPEG4ProfileSimple,            MPEG4Level1  } },
+            { 0b00000010, { MPEG4ProfileSimple,            MPEG4Level2  } },
+            { 0b00000011, { MPEG4ProfileSimple,            MPEG4Level3  } },
+            { 0b00000100, { MPEG4ProfileSimple,            MPEG4Level4a } },
+            { 0b00000101, { MPEG4ProfileSimple,            MPEG4Level5  } },
+            { 0b00000110, { MPEG4ProfileSimple,            MPEG4Level6  } },
+            { 0b00001000, { MPEG4ProfileSimple,            MPEG4Level0  } },
+            { 0b00001001, { MPEG4ProfileSimple,            MPEG4Level0b } },
+            { 0b00010000, { MPEG4ProfileSimpleScalable,    MPEG4Level0  } },
+            { 0b00010001, { MPEG4ProfileSimpleScalable,    MPEG4Level1  } },
+            { 0b00010010, { MPEG4ProfileSimpleScalable,    MPEG4Level2  } },
             /* unsupported
-            { 0b00011101, { XXX_MPEG4ProfileSimpleScalableER,        OMX_VIDEO_MPEG4Level0  } },
-            { 0b00011110, { XXX_MPEG4ProfileSimpleScalableER,        OMX_VIDEO_MPEG4Level1  } },
-            { 0b00011111, { XXX_MPEG4ProfileSimpleScalableER,        OMX_VIDEO_MPEG4Level2  } }, */
-            { 0b00100001, { OMX_VIDEO_MPEG4ProfileCore,              OMX_VIDEO_MPEG4Level1  } },
-            { 0b00100010, { OMX_VIDEO_MPEG4ProfileCore,              OMX_VIDEO_MPEG4Level2  } },
-            { 0b00110010, { OMX_VIDEO_MPEG4ProfileMain,              OMX_VIDEO_MPEG4Level2  } },
-            { 0b00110011, { OMX_VIDEO_MPEG4ProfileMain,              OMX_VIDEO_MPEG4Level3  } },
-            { 0b00110100, { OMX_VIDEO_MPEG4ProfileMain,              OMX_VIDEO_MPEG4Level4  } },
+            { 0b00011101, { XXX_MPEG4ProfileSimpleScalableER,        MPEG4Level0  } },
+            { 0b00011110, { XXX_MPEG4ProfileSimpleScalableER,        MPEG4Level1  } },
+            { 0b00011111, { XXX_MPEG4ProfileSimpleScalableER,        MPEG4Level2  } }, */
+            { 0b00100001, { MPEG4ProfileCore,              MPEG4Level1  } },
+            { 0b00100010, { MPEG4ProfileCore,              MPEG4Level2  } },
+            { 0b00110010, { MPEG4ProfileMain,              MPEG4Level2  } },
+            { 0b00110011, { MPEG4ProfileMain,              MPEG4Level3  } },
+            { 0b00110100, { MPEG4ProfileMain,              MPEG4Level4  } },
             /* deprecated
-            { 0b01000010, { OMX_VIDEO_MPEG4ProfileNbit,              OMX_VIDEO_MPEG4Level2  } }, */
-            { 0b01010001, { OMX_VIDEO_MPEG4ProfileScalableTexture,   OMX_VIDEO_MPEG4Level1  } },
-            { 0b01100001, { OMX_VIDEO_MPEG4ProfileSimpleFace,        OMX_VIDEO_MPEG4Level1  } },
-            { 0b01100010, { OMX_VIDEO_MPEG4ProfileSimpleFace,        OMX_VIDEO_MPEG4Level2  } },
-            { 0b01100011, { OMX_VIDEO_MPEG4ProfileSimpleFBA,         OMX_VIDEO_MPEG4Level1  } },
-            { 0b01100100, { OMX_VIDEO_MPEG4ProfileSimpleFBA,         OMX_VIDEO_MPEG4Level2  } },
-            { 0b01110001, { OMX_VIDEO_MPEG4ProfileBasicAnimated,     OMX_VIDEO_MPEG4Level1  } },
-            { 0b01110010, { OMX_VIDEO_MPEG4ProfileBasicAnimated,     OMX_VIDEO_MPEG4Level2  } },
-            { 0b10000001, { OMX_VIDEO_MPEG4ProfileHybrid,            OMX_VIDEO_MPEG4Level1  } },
-            { 0b10000010, { OMX_VIDEO_MPEG4ProfileHybrid,            OMX_VIDEO_MPEG4Level2  } },
-            { 0b10010001, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime,  OMX_VIDEO_MPEG4Level1  } },
-            { 0b10010010, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime,  OMX_VIDEO_MPEG4Level2  } },
-            { 0b10010011, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime,  OMX_VIDEO_MPEG4Level3  } },
-            { 0b10010100, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime,  OMX_VIDEO_MPEG4Level4  } },
-            { 0b10100001, { OMX_VIDEO_MPEG4ProfileCoreScalable,      OMX_VIDEO_MPEG4Level1  } },
-            { 0b10100010, { OMX_VIDEO_MPEG4ProfileCoreScalable,      OMX_VIDEO_MPEG4Level2  } },
-            { 0b10100011, { OMX_VIDEO_MPEG4ProfileCoreScalable,      OMX_VIDEO_MPEG4Level3  } },
-            { 0b10110001, { OMX_VIDEO_MPEG4ProfileAdvancedCoding,    OMX_VIDEO_MPEG4Level1  } },
-            { 0b10110010, { OMX_VIDEO_MPEG4ProfileAdvancedCoding,    OMX_VIDEO_MPEG4Level2  } },
-            { 0b10110011, { OMX_VIDEO_MPEG4ProfileAdvancedCoding,    OMX_VIDEO_MPEG4Level3  } },
-            { 0b10110100, { OMX_VIDEO_MPEG4ProfileAdvancedCoding,    OMX_VIDEO_MPEG4Level4  } },
-            { 0b11000001, { OMX_VIDEO_MPEG4ProfileAdvancedCore,      OMX_VIDEO_MPEG4Level1  } },
-            { 0b11000010, { OMX_VIDEO_MPEG4ProfileAdvancedCore,      OMX_VIDEO_MPEG4Level2  } },
-            { 0b11010001, { OMX_VIDEO_MPEG4ProfileAdvancedScalable,  OMX_VIDEO_MPEG4Level1  } },
-            { 0b11010010, { OMX_VIDEO_MPEG4ProfileAdvancedScalable,  OMX_VIDEO_MPEG4Level2  } },
-            { 0b11010011, { OMX_VIDEO_MPEG4ProfileAdvancedScalable,  OMX_VIDEO_MPEG4Level3  } },
+            { 0b01000010, { MPEG4ProfileNbit,              MPEG4Level2  } }, */
+            { 0b01010001, { MPEG4ProfileScalableTexture,   MPEG4Level1  } },
+            { 0b01100001, { MPEG4ProfileSimpleFace,        MPEG4Level1  } },
+            { 0b01100010, { MPEG4ProfileSimpleFace,        MPEG4Level2  } },
+            { 0b01100011, { MPEG4ProfileSimpleFBA,         MPEG4Level1  } },
+            { 0b01100100, { MPEG4ProfileSimpleFBA,         MPEG4Level2  } },
+            { 0b01110001, { MPEG4ProfileBasicAnimated,     MPEG4Level1  } },
+            { 0b01110010, { MPEG4ProfileBasicAnimated,     MPEG4Level2  } },
+            { 0b10000001, { MPEG4ProfileHybrid,            MPEG4Level1  } },
+            { 0b10000010, { MPEG4ProfileHybrid,            MPEG4Level2  } },
+            { 0b10010001, { MPEG4ProfileAdvancedRealTime,  MPEG4Level1  } },
+            { 0b10010010, { MPEG4ProfileAdvancedRealTime,  MPEG4Level2  } },
+            { 0b10010011, { MPEG4ProfileAdvancedRealTime,  MPEG4Level3  } },
+            { 0b10010100, { MPEG4ProfileAdvancedRealTime,  MPEG4Level4  } },
+            { 0b10100001, { MPEG4ProfileCoreScalable,      MPEG4Level1  } },
+            { 0b10100010, { MPEG4ProfileCoreScalable,      MPEG4Level2  } },
+            { 0b10100011, { MPEG4ProfileCoreScalable,      MPEG4Level3  } },
+            { 0b10110001, { MPEG4ProfileAdvancedCoding,    MPEG4Level1  } },
+            { 0b10110010, { MPEG4ProfileAdvancedCoding,    MPEG4Level2  } },
+            { 0b10110011, { MPEG4ProfileAdvancedCoding,    MPEG4Level3  } },
+            { 0b10110100, { MPEG4ProfileAdvancedCoding,    MPEG4Level4  } },
+            { 0b11000001, { MPEG4ProfileAdvancedCore,      MPEG4Level1  } },
+            { 0b11000010, { MPEG4ProfileAdvancedCore,      MPEG4Level2  } },
+            { 0b11010001, { MPEG4ProfileAdvancedScalable,  MPEG4Level1  } },
+            { 0b11010010, { MPEG4ProfileAdvancedScalable,  MPEG4Level2  } },
+            { 0b11010011, { MPEG4ProfileAdvancedScalable,  MPEG4Level3  } },
             /* unsupported
-            { 0b11100001, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level1  } },
-            { 0b11100010, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level2  } },
-            { 0b11100011, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level3  } },
-            { 0b11100100, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level4  } },
-            { 0b11100101, { XXX_MPEG4ProfileCoreStudio,              OMX_VIDEO_MPEG4Level1  } },
-            { 0b11100110, { XXX_MPEG4ProfileCoreStudio,              OMX_VIDEO_MPEG4Level2  } },
-            { 0b11100111, { XXX_MPEG4ProfileCoreStudio,              OMX_VIDEO_MPEG4Level3  } },
-            { 0b11101000, { XXX_MPEG4ProfileCoreStudio,              OMX_VIDEO_MPEG4Level4  } },
-            { 0b11101011, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level5  } },
-            { 0b11101100, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level6  } }, */
-            { 0b11110000, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level0  } },
-            { 0b11110001, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level1  } },
-            { 0b11110010, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level2  } },
-            { 0b11110011, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level3  } },
-            { 0b11110100, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level4  } },
-            { 0b11110101, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level5  } },
-            { 0b11110111, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level3b } },
+            { 0b11100001, { XXX_MPEG4ProfileSimpleStudio,            MPEG4Level1  } },
+            { 0b11100010, { XXX_MPEG4ProfileSimpleStudio,            MPEG4Level2  } },
+            { 0b11100011, { XXX_MPEG4ProfileSimpleStudio,            MPEG4Level3  } },
+            { 0b11100100, { XXX_MPEG4ProfileSimpleStudio,            MPEG4Level4  } },
+            { 0b11100101, { XXX_MPEG4ProfileCoreStudio,              MPEG4Level1  } },
+            { 0b11100110, { XXX_MPEG4ProfileCoreStudio,              MPEG4Level2  } },
+            { 0b11100111, { XXX_MPEG4ProfileCoreStudio,              MPEG4Level3  } },
+            { 0b11101000, { XXX_MPEG4ProfileCoreStudio,              MPEG4Level4  } },
+            { 0b11101011, { XXX_MPEG4ProfileSimpleStudio,            MPEG4Level5  } },
+            { 0b11101100, { XXX_MPEG4ProfileSimpleStudio,            MPEG4Level6  } }, */
+            { 0b11110000, { MPEG4ProfileAdvancedSimple,    MPEG4Level0  } },
+            { 0b11110001, { MPEG4ProfileAdvancedSimple,    MPEG4Level1  } },
+            { 0b11110010, { MPEG4ProfileAdvancedSimple,    MPEG4Level2  } },
+            { 0b11110011, { MPEG4ProfileAdvancedSimple,    MPEG4Level3  } },
+            { 0b11110100, { MPEG4ProfileAdvancedSimple,    MPEG4Level4  } },
+            { 0b11110101, { MPEG4ProfileAdvancedSimple,    MPEG4Level5  } },
+            { 0b11110111, { MPEG4ProfileAdvancedSimple,    MPEG4Level3b } },
             /* deprecated
-            { 0b11111000, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level0  } },
-            { 0b11111001, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level1  } },
-            { 0b11111010, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level2  } },
-            { 0b11111011, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level3  } },
-            { 0b11111100, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level4  } },
-            { 0b11111101, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level5  } }, */
+            { 0b11111000, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level0  } },
+            { 0b11111001, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level1  } },
+            { 0b11111010, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level2  } },
+            { 0b11111011, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level3  } },
+            { 0b11111100, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level4  } },
+            { 0b11111101, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level5  } }, */
         };
 
-        std::pair<OMX_VIDEO_MPEG4PROFILETYPE, OMX_VIDEO_MPEG4LEVELTYPE> profileLevel;
+        std::pair<int32_t, int32_t> profileLevel;
         if (table.map(indication, &profileLevel)) {
             format->setInt32("profile", profileLevel.first);
             format->setInt32("level", profileLevel.second);
@@ -590,19 +599,19 @@
         switch (id) {
             case 1 /* profileId */:
                 if (length >= 1) {
-                    const static ALookup<uint8_t, OMX_VIDEO_VP9PROFILETYPE> profiles {
-                        { 0, OMX_VIDEO_VP9Profile0 },
-                        { 1, OMX_VIDEO_VP9Profile1 },
-                        { 2, OMX_VIDEO_VP9Profile2 },
-                        { 3, OMX_VIDEO_VP9Profile3 },
+                    const static ALookup<uint8_t, int32_t> profiles {
+                        { 0, VP9Profile0 },
+                        { 1, VP9Profile1 },
+                        { 2, VP9Profile2 },
+                        { 3, VP9Profile3 },
                     };
 
-                    const static ALookup<OMX_VIDEO_VP9PROFILETYPE, OMX_VIDEO_VP9PROFILETYPE> toHdr {
-                        { OMX_VIDEO_VP9Profile2, OMX_VIDEO_VP9Profile2HDR },
-                        { OMX_VIDEO_VP9Profile3, OMX_VIDEO_VP9Profile3HDR },
+                    const static ALookup<int32_t, int32_t> toHdr {
+                        { VP9Profile2, VP9Profile2HDR },
+                        { VP9Profile3, VP9Profile3HDR },
                     };
 
-                    OMX_VIDEO_VP9PROFILETYPE profile;
+                    int32_t profile;
                     if (profiles.map(data[0], &profile)) {
                         // convert to HDR profile
                         if (isHdr(format)) {
@@ -615,24 +624,24 @@
                 break;
             case 2 /* levelId */:
                 if (length >= 1) {
-                    const static ALookup<uint8_t, OMX_VIDEO_VP9LEVELTYPE> levels {
-                        { 10, OMX_VIDEO_VP9Level1  },
-                        { 11, OMX_VIDEO_VP9Level11 },
-                        { 20, OMX_VIDEO_VP9Level2  },
-                        { 21, OMX_VIDEO_VP9Level21 },
-                        { 30, OMX_VIDEO_VP9Level3  },
-                        { 31, OMX_VIDEO_VP9Level31 },
-                        { 40, OMX_VIDEO_VP9Level4  },
-                        { 41, OMX_VIDEO_VP9Level41 },
-                        { 50, OMX_VIDEO_VP9Level5  },
-                        { 51, OMX_VIDEO_VP9Level51 },
-                        { 52, OMX_VIDEO_VP9Level52 },
-                        { 60, OMX_VIDEO_VP9Level6  },
-                        { 61, OMX_VIDEO_VP9Level61 },
-                        { 62, OMX_VIDEO_VP9Level62 },
+                    const static ALookup<uint8_t, int32_t> levels {
+                        { 10, VP9Level1  },
+                        { 11, VP9Level11 },
+                        { 20, VP9Level2  },
+                        { 21, VP9Level21 },
+                        { 30, VP9Level3  },
+                        { 31, VP9Level31 },
+                        { 40, VP9Level4  },
+                        { 41, VP9Level41 },
+                        { 50, VP9Level5  },
+                        { 51, VP9Level51 },
+                        { 52, VP9Level52 },
+                        { 60, VP9Level6  },
+                        { 61, VP9Level61 },
+                        { 62, VP9Level62 },
                     };
 
-                    OMX_VIDEO_VP9LEVELTYPE level;
+                    int32_t level;
                     if (levels.map(data[0], &level)) {
                         format->setInt32("level", level);
                     }
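
The profileId and levelId cases above sit inside a loop that walks the VP9 CodecPrivate blob as a
sequence of records, each a one-byte id, a one-byte length and that many data bytes; id 1 carries
the profile and id 2 the level. A standalone sketch of that walk, assuming the same framing implied
by the switch above (the helper name and sample bytes are illustrative only):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Sketch of the id/length/data walk implied by the profileId/levelId cases.
    static void parseVp9CodecPrivate(const uint8_t *data, size_t size) {
        size_t off = 0;
        while (off + 2 <= size) {
            uint8_t id = data[off];
            uint8_t length = data[off + 1];
            off += 2;
            if (off + length > size) break;          // malformed record, stop
            if (length >= 1 && id == 1) printf("profileId byte: %u\n", data[off]);
            if (length >= 1 && id == 2) printf("levelId byte: %u\n", data[off]);
            off += length;
        }
    }

    int main() {
        const uint8_t priv[] = { 1, 1, 2,   2, 1, 51 };  // profile 2, level 5.1
        parseVp9CodecPrivate(priv, sizeof(priv));
        return 0;
    }
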
@@ -1504,7 +1513,30 @@
         msg->setBuffer("csd-0", buffer);
     }
 
-    if (meta->findData(kKeyDVCC, &type, &data, &size)) {
+    if (meta->findData(kKeyDVCC, &type, &data, &size)
+            || meta->findData(kKeyDVVC, &type, &data, &size)
+            || meta->findData(kKeyDVWC, &type, &data, &size)) {
+        sp<ABuffer> buffer, csdOrg;
+        if (msg->findBuffer("csd-0", &csdOrg)) {
+            buffer = new (std::nothrow) ABuffer(size + csdOrg->size());
+            if (buffer.get() == NULL || buffer->base() == NULL) {
+                return NO_MEMORY;
+            }
+
+            memcpy(buffer->data(), csdOrg->data(), csdOrg->size());
+            memcpy(buffer->data() + csdOrg->size(), data, size);
+        } else {
+            buffer = new (std::nothrow) ABuffer(size);
+            if (buffer.get() == NULL || buffer->base() == NULL) {
+                return NO_MEMORY;
+            }
+            memcpy(buffer->data(), data, size);
+        }
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-0", buffer);
+
         const uint8_t *ptr = (const uint8_t *)data;
         ALOGV("DV: calling parseDolbyVisionProfileLevelFromDvcc with data size %zu", size);
         parseDolbyVisionProfileLevelFromDvcc(ptr, size, msg);
@@ -1759,24 +1791,39 @@
     if (mime.startsWith("video/") || mime.startsWith("image/")) {
         int32_t width;
         int32_t height;
-        if (msg->findInt32("width", &width) && msg->findInt32("height", &height)) {
-            meta->setInt32(kKeyWidth, width);
-            meta->setInt32(kKeyHeight, height);
-        } else {
+        if (!msg->findInt32("width", &width) || !msg->findInt32("height", &height)) {
             ALOGV("did not find width and/or height");
             return BAD_VALUE;
         }
+        if (width <= 0 || height <= 0) {
+            ALOGE("Invalid value of width: %d and/or height: %d", width, height);
+            return BAD_VALUE;
+        }
+        meta->setInt32(kKeyWidth, width);
+        meta->setInt32(kKeyHeight, height);
 
-        int32_t sarWidth, sarHeight;
-        if (msg->findInt32("sar-width", &sarWidth)
-                && msg->findInt32("sar-height", &sarHeight)) {
+        int32_t sarWidth = -1, sarHeight = -1;
+        bool foundWidth, foundHeight;
+        foundWidth = msg->findInt32("sar-width", &sarWidth);
+        foundHeight = msg->findInt32("sar-height", &sarHeight);
+        if (foundWidth || foundHeight) {
+            if (sarWidth <= 0 || sarHeight <= 0) {
+                ALOGE("Invalid value of sarWidth: %d and/or sarHeight: %d", sarWidth, sarHeight);
+                return BAD_VALUE;
+            }
             meta->setInt32(kKeySARWidth, sarWidth);
             meta->setInt32(kKeySARHeight, sarHeight);
         }
 
-        int32_t displayWidth, displayHeight;
-        if (msg->findInt32("display-width", &displayWidth)
-                && msg->findInt32("display-height", &displayHeight)) {
+        int32_t displayWidth = -1, displayHeight = -1;
+        foundWidth = msg->findInt32("display-width", &displayWidth);
+        foundHeight = msg->findInt32("display-height", &displayHeight);
+        if (foundWidth || foundHeight) {
+            if (displayWidth <= 0 || displayHeight <= 0) {
+                ALOGE("Invalid value of displayWidth: %d and/or displayHeight: %d",
+                        displayWidth, displayHeight);
+                return BAD_VALUE;
+            }
             meta->setInt32(kKeyDisplayWidth, displayWidth);
             meta->setInt32(kKeyDisplayHeight, displayHeight);
         }
@@ -1786,17 +1833,29 @@
             if (msg->findInt32("is-default", &isPrimary) && isPrimary) {
                 meta->setInt32(kKeyTrackIsDefault, 1);
             }
-            int32_t tileWidth, tileHeight, gridRows, gridCols;
-            if (msg->findInt32("tile-width", &tileWidth)) {
+            int32_t tileWidth = -1, tileHeight = -1;
+            foundWidth = msg->findInt32("tile-width", &tileWidth);
+            foundHeight = msg->findInt32("tile-height", &tileHeight);
+            if (foundWidth || foundHeight) {
+                if (tileWidth <= 0 || tileHeight <= 0) {
+                    ALOGE("Invalid value of tileWidth: %d and/or tileHeight: %d",
+                            tileWidth, tileHeight);
+                    return BAD_VALUE;
+                }
                 meta->setInt32(kKeyTileWidth, tileWidth);
-            }
-            if (msg->findInt32("tile-height", &tileHeight)) {
                 meta->setInt32(kKeyTileHeight, tileHeight);
             }
-            if (msg->findInt32("grid-rows", &gridRows)) {
+            int32_t gridRows = -1, gridCols = -1;
+            bool foundRows, foundCols;
+            foundRows = msg->findInt32("grid-rows", &gridRows);
+            foundCols = msg->findInt32("grid-cols", &gridCols);
+            if (foundRows || foundCols) {
+                if (gridRows <= 0 || gridCols <= 0) {
+                    ALOGE("Invalid value of gridRows: %d and/or gridCols: %d",
+                            gridRows, gridCols);
+                    return BAD_VALUE;
+                }
                 meta->setInt32(kKeyGridRows, gridRows);
-            }
-            if (msg->findInt32("grid-cols", &gridCols)) {
                 meta->setInt32(kKeyGridCols, gridCols);
             }
         }
@@ -1812,6 +1871,14 @@
                           &cropTop,
                           &cropRight,
                           &cropBottom)) {
+            if (cropLeft < 0 || cropLeft > cropRight || cropRight >= width) {
+                ALOGE("Invalid value of cropLeft: %d and/or cropRight: %d", cropLeft, cropRight);
+                return BAD_VALUE;
+            }
+            if (cropTop < 0 || cropTop > cropBottom || cropBottom >= height) {
+                ALOGE("Invalid value of cropTop: %d and/or cropBottom: %d", cropTop, cropBottom);
+                return BAD_VALUE;
+            }
             meta->setRect(kKeyCropRect, cropLeft, cropTop, cropRight, cropBottom);
         }
 
@@ -1855,9 +1922,16 @@
             ALOGV("did not find channel-count and/or sample-rate");
             return BAD_VALUE;
         }
+        // channel count can be zero in some cases, e.g. MPEG-H
+        if (sampleRate <= 0 || numChannels < 0) {
+            ALOGE("Invalid value of channel-count: %d and/or sample-rate: %d",
+                   numChannels, sampleRate);
+            return BAD_VALUE;
+        }
         meta->setInt32(kKeyChannelCount, numChannels);
         meta->setInt32(kKeySampleRate, sampleRate);
         int32_t bitsPerSample;
+        // TODO:(b/204430952) add appropriate bound check for bitsPerSample
         if (msg->findInt32("bits-per-sample", &bitsPerSample)) {
             meta->setInt32(kKeyBitsPerSample, bitsPerSample);
         }
@@ -1967,30 +2041,134 @@
                    mime == MEDIA_MIMETYPE_IMAGE_AVIF) {
             meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
         } else if (mime == MEDIA_MIMETYPE_VIDEO_DOLBY_VISION) {
-            if (msg->findBuffer("csd-2", &csd2)) {
-                //dvcc should be 24
-                if (csd2->size() == 24) {
-                    meta->setData(kKeyDVCC, kTypeDVCC, csd2->data(), csd2->size());
-                    uint8_t *dvcc = csd2->data();
-                    const uint8_t profile = dvcc[2] >> 1;
-                    if (profile > 1 && profile < 9) {
+            int32_t needCreateDoviCSD = 0;
+            int32_t profile = 0;
+            uint8_t bl_compatibility = 0;
+            if (msg->findInt32("profile", &profile)) {
+                if (profile == DolbyVisionProfileDvheSt) {
+                    profile = 8;
+                    bl_compatibility = 4;
+                } else if (profile == DolbyVisionProfileDvavSe) {
+                    profile = 9;
+                    bl_compatibility = 2;
+                }
+                if (profile == 8 || profile == 9) {
+                    needCreateDoviCSD = 1;
+                }
+            } else {
+                ALOGW("did not find dolby vision profile");
+            }
+            // No Dolby Vision csd was provided; construct the 24-byte config payload
+            if (needCreateDoviCSD) {
+                uint8_t dvcc[24] = {0};
+                int32_t level = 0;
+                uint8_t level_val = 0;
+
+                if (msg->findInt32("level", &level)) {
+                    const static ALookup<int32_t, uint8_t> levels {
+                        {DolbyVisionLevelUnknown, 0},
+                        {DolbyVisionLevelHd24, 1},
+                        {DolbyVisionLevelHd30, 2},
+                        {DolbyVisionLevelFhd24, 3},
+                        {DolbyVisionLevelFhd30, 4},
+                        {DolbyVisionLevelFhd60, 5},
+                        {DolbyVisionLevelUhd24, 6},
+                        {DolbyVisionLevelUhd30, 7},
+                        {DolbyVisionLevelUhd48, 8},
+                        {DolbyVisionLevelUhd60, 9},
+                        {DolbyVisionLevelUhd120, 10},
+                        {DolbyVisionLevel8k30, 11},
+                        {DolbyVisionLevel8k60, 12},
+                    };
+                    levels.map(level, &level_val);
+                    ALOGV("found dolby vision level: %d, value: %d", level, level_val);
+                }
+
+                dvcc[0] = 1; // major version
+                dvcc[1] = 0; // minor version
+                dvcc[2] = (uint8_t)((profile & 0x7f) << 1);// dolby vision profile
+                dvcc[2] = (uint8_t)((dvcc[2] | (uint8_t)((level_val >> 5) & 0x1)) & 0xff);
+                dvcc[3] = (uint8_t)((level_val & 0x1f) << 3); // dolby vision level
+                dvcc[3] = (uint8_t)(dvcc[3] | (1 << 2)); // rpu_present_flag
+                dvcc[3] = (uint8_t)(dvcc[3] | (1)); // bl_present_flag
+                dvcc[4] = (uint8_t)(bl_compatibility << 4);// bl_compatibility id
+
+                std::vector<uint8_t> dvcc_data(24);
+                memcpy(dvcc_data.data(), dvcc, 24);
+                if (profile > 10) {
+                    meta->setData(kKeyDVWC, kTypeDVWC, dvcc_data.data(), 24);
+                } else if (profile > 7) {
+                    meta->setData(kKeyDVVC, kTypeDVVC, dvcc_data.data(), 24);
+                } else {
+                    meta->setData(kKeyDVCC, kTypeDVCC, dvcc_data.data(), 24);
+                }
+            } else if (csd0size >= 24) { // have dovi csd, just send it out...
+                uint8_t *dvconfig = csd0->data() + (csd0size - 24);
+                profile = dvconfig[2] >> 1;
+                if (profile > 10) {
+                    meta->setData(kKeyDVWC, kTypeDVWC, dvconfig, 24);
+                } else if (profile > 7) {
+                    meta->setData(kKeyDVVC, kTypeDVVC, dvconfig, 24);
+                } else {
+                    meta->setData(kKeyDVCC, kTypeDVCC, dvconfig, 24);
+                }
+            } else {
+                return BAD_VALUE;
+            }
+
+            // Send the avc/hevc/av1 csd data...
+            if (csd0size >= 24) {
+                sp<ABuffer> csd;
+                if (profile > 1 && profile < 9) {
+                    if (msg->findBuffer("csd-hevc", &csd)) {
+                        meta->setData(kKeyHVCC, kTypeHVCC, csd->data(), csd->size());
+                    } else if (csd0size > 24) {
                         std::vector<uint8_t> hvcc(csd0size + 1024);
                         size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
                         meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
-                    } else if (DolbyVisionProfileDvav110 == profile) {
-                        meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
-                    } else {
-                        sp<ABuffer> csd1;
-                        if (msg->findBuffer("csd-1", &csd1)) {
-                            std::vector<char> avcc(csd0size + csd1->size() + 1024);
-                            size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
-                            meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
-                        }
                     }
+                } else if (profile == 9) {
+                    sp<ABuffer> csd1;
+                    if (msg->findBuffer("csd-avc", &csd)) {
+                        meta->setData(kKeyAVCC, kTypeAVCC, csd->data(), csd->size());
+                    } else if (msg->findBuffer("csd-1", &csd1)) {
+                        std::vector<char> avcc(csd0size + csd1->size() + 1024);
+                        size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
+                        meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
+                    } else { // for dolby vision avc, csd0 also holds csd1
+                        size_t i = 0;
+                        int csd0realsize = 0;
+                        do {
+                            i = findNextNalStartCode(csd0->data() + i,
+                                            csd0->size() - i) - csd0->data();
+                            if (i > 0) {
+                                csd0realsize = i;
+                                break;
+                            }
+                            i += 4;
+                        } while (i < csd0->size());
+                        // buffer0 -> csd0
+                        sp<ABuffer> buffer0 = new (std::nothrow) ABuffer(csd0realsize);
+                        if (buffer0.get() == NULL || buffer0->base() == NULL) {
+                            return NO_MEMORY;
+                        }
+                        memcpy(buffer0->data(), csd0->data(), csd0realsize);
+                        // buffer1 -> csd1
+                        sp<ABuffer> buffer1 = new (std::nothrow)
+                                ABuffer(csd0->size() - csd0realsize);
+                        if (buffer1.get() == NULL || buffer1->base() == NULL) {
+                            return NO_MEMORY;
+                        }
+                        memcpy(buffer1->data(), csd0->data()+csd0realsize,
+                                    csd0->size() - csd0realsize);
+
+                        std::vector<char> avcc(csd0->size() + 1024);
+                        size_t outsize = reassembleAVCC(buffer0, buffer1, avcc.data());
+                        meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
+                    }
+                } else if (profile == 10) {
+                    meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size() - 24);
                 }
-            } else {
-                ALOGE("We need csd-2!!. %s", msg->debugString().c_str());
-                return BAD_VALUE;
             }
         } else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
             meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
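
For reference, the 24-byte Dolby Vision configuration payload built above packs its fields as:
byte 0 major version, byte 1 minor version, byte 2 the 7-bit profile plus the level MSB, byte 3 the
low 5 level bits plus the rpu/el/bl present flags, byte 4 the bl compatibility id in the upper
nibble; the remaining bytes are left zero. A standalone sketch of that packing (the field layout is
taken from the code above, not from the Dolby specification text, and the helper name is
illustrative):

    #include <cstdint>
    #include <cstring>

    // Sketch of the dvcc/dvvc packing performed above. Only the fields the patch
    // writes are populated; bytes 5..23 stay zero.
    static void packDolbyVisionConfig(uint8_t out[24], uint8_t profile,
                                      uint8_t level, uint8_t blCompatibilityId) {
        memset(out, 0, 24);
        out[0] = 1;                                   // major version
        out[1] = 0;                                   // minor version
        out[2] = (uint8_t)((profile & 0x7f) << 1);    // profile
        out[2] |= (uint8_t)((level >> 5) & 0x1);      // level MSB
        out[3] = (uint8_t)((level & 0x1f) << 3);      // level low bits
        out[3] |= (1 << 2);                           // rpu_present_flag
        out[3] |= 1;                                  // bl_present_flag (el left 0)
        out[4] = (uint8_t)(blCompatibilityId << 4);   // bl compatibility id
    }

    int main() {
        uint8_t dvvc[24];
        packDolbyVisionConfig(dvvc, 8 /* dvhe.st */, 9 /* Uhd60 per the table above */, 4);
        return dvvc[2] == 0x10 ? 0 : 1;               // profile 8 lands in bits 7..1
    }
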
@@ -2038,17 +2216,6 @@
         meta->setData(kKeyStreamHeader, 'mdat', csd0->data(), csd0->size());
     } else if (msg->findBuffer("d263", &csd0)) {
         meta->setData(kKeyD263, kTypeD263, csd0->data(), csd0->size());
-    } else if (mime == MEDIA_MIMETYPE_VIDEO_DOLBY_VISION && msg->findBuffer("csd-2", &csd2)) {
-        meta->setData(kKeyDVCC, kTypeDVCC, csd2->data(), csd2->size());
-
-        // Remove CSD-2 from the data here to avoid duplicate data in meta
-        meta->remove(kKeyOpaqueCSD2);
-
-        if (msg->findBuffer("csd-avc", &csd0)) {
-            meta->setData(kKeyAVCC, kTypeAVCC, csd0->data(), csd0->size());
-        } else if (msg->findBuffer("csd-hevc", &csd0)) {
-            meta->setData(kKeyHVCC, kTypeHVCC, csd0->data(), csd0->size());
-        }
     }
     // XXX TODO add whatever other keys there are
 
@@ -2131,29 +2298,29 @@
 }
 
 struct aac_format_conv_t {
-    OMX_AUDIO_AACPROFILETYPE eAacProfileType;
+    int32_t eAacProfileType;
     audio_format_t format;
 };
 
 static const struct aac_format_conv_t profileLookup[] = {
-    { OMX_AUDIO_AACObjectMain,        AUDIO_FORMAT_AAC_MAIN},
-    { OMX_AUDIO_AACObjectLC,          AUDIO_FORMAT_AAC_LC},
-    { OMX_AUDIO_AACObjectSSR,         AUDIO_FORMAT_AAC_SSR},
-    { OMX_AUDIO_AACObjectLTP,         AUDIO_FORMAT_AAC_LTP},
-    { OMX_AUDIO_AACObjectHE,          AUDIO_FORMAT_AAC_HE_V1},
-    { OMX_AUDIO_AACObjectScalable,    AUDIO_FORMAT_AAC_SCALABLE},
-    { OMX_AUDIO_AACObjectERLC,        AUDIO_FORMAT_AAC_ERLC},
-    { OMX_AUDIO_AACObjectLD,          AUDIO_FORMAT_AAC_LD},
-    { OMX_AUDIO_AACObjectHE_PS,       AUDIO_FORMAT_AAC_HE_V2},
-    { OMX_AUDIO_AACObjectELD,         AUDIO_FORMAT_AAC_ELD},
-    { OMX_AUDIO_AACObjectXHE,         AUDIO_FORMAT_AAC_XHE},
-    { OMX_AUDIO_AACObjectNull,        AUDIO_FORMAT_AAC},
+    { AACObjectMain,        AUDIO_FORMAT_AAC_MAIN},
+    { AACObjectLC,          AUDIO_FORMAT_AAC_LC},
+    { AACObjectSSR,         AUDIO_FORMAT_AAC_SSR},
+    { AACObjectLTP,         AUDIO_FORMAT_AAC_LTP},
+    { AACObjectHE,          AUDIO_FORMAT_AAC_HE_V1},
+    { AACObjectScalable,    AUDIO_FORMAT_AAC_SCALABLE},
+    { AACObjectERLC,        AUDIO_FORMAT_AAC_ERLC},
+    { AACObjectLD,          AUDIO_FORMAT_AAC_LD},
+    { AACObjectHE_PS,       AUDIO_FORMAT_AAC_HE_V2},
+    { AACObjectELD,         AUDIO_FORMAT_AAC_ELD},
+    { AACObjectXHE,         AUDIO_FORMAT_AAC_XHE},
+    { AACObjectNull,        AUDIO_FORMAT_AAC},
 };
 
 void mapAACProfileToAudioFormat( audio_format_t& format, uint64_t eAacProfile)
 {
-const struct aac_format_conv_t* p = &profileLookup[0];
-    while (p->eAacProfileType != OMX_AUDIO_AACObjectNull) {
+    const struct aac_format_conv_t* p = &profileLookup[0];
+    while (p->eAacProfileType != AACObjectNull) {
         if (eAacProfile == p->eAacProfileType) {
             format = p->format;
             return;
@@ -2193,7 +2360,7 @@
     // Offloading depends on audio DSP capabilities.
     int32_t aacaot = -1;
     if (meta->findInt32(kKeyAACAOT, &aacaot)) {
-        mapAACProfileToAudioFormat(info->format,(OMX_AUDIO_AACPROFILETYPE) aacaot);
+        mapAACProfileToAudioFormat(info->format, aacaot);
     }
 
     int32_t srate = -1;
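
The mapAACProfileToAudioFormat() change above keeps the sentinel-terminated scan: walk the table
until the AACObjectNull entry and fall back to the generic AAC format when nothing matches. A small
self-contained sketch of the same pattern (the AOT values and format names here are illustrative,
not the framework constants):

    #include <cstdint>
    #include <cstdio>

    struct Entry { int32_t aot; const char *format; };

    static const Entry kTable[] = {
        {  2, "AAC_LC"    },
        {  5, "AAC_HE_V1" },
        { 29, "AAC_HE_V2" },
        {  0, "AAC"       },   // sentinel doubles as the default entry
    };

    static const char *mapAot(int32_t aot) {
        const Entry *p = kTable;
        while (p->aot != 0) {
            if (p->aot == aot) return p->format;
            ++p;
        }
        return p->format;      // default
    }

    int main() {
        printf("%s\n", mapAot(5));    // AAC_HE_V1
        printf("%s\n", mapAot(42));   // AAC (default)
        return 0;
    }
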
diff --git a/media/libstagefright/codecs/common/Android.bp b/media/libstagefright/codecs/common/Android.bp
index 08691e7..affc837 100644
--- a/media/libstagefright/codecs/common/Android.bp
+++ b/media/libstagefright/codecs/common/Android.bp
@@ -21,6 +21,10 @@
     name: "libstagefright_enc_common",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
     srcs: ["cmnMemory.c"],
 
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index fb6c4e2..bb1cb0b 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -354,7 +354,7 @@
             }
 
             if (mpeg4type->eProfile != OMX_VIDEO_MPEG4ProfileCore ||
-                mpeg4type->eLevel != OMX_VIDEO_MPEG4Level2 ||
+                mpeg4type->eLevel > OMX_VIDEO_MPEG4Level2 ||
                 (mpeg4type->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) ||
                 mpeg4type->nBFrames != 0 ||
                 mpeg4type->nIDCVLCThreshold != 0 ||
diff --git a/media/libstagefright/colorconversion/Android.bp b/media/libstagefright/colorconversion/Android.bp
index 06cebd3..7ff9b10 100644
--- a/media/libstagefright/colorconversion/Android.bp
+++ b/media/libstagefright/colorconversion/Android.bp
@@ -25,10 +25,6 @@
         "SoftwareRenderer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/native/include/media/openmax",
-    ],
-
     shared_libs: [
         "libui",
         "libnativewindow",
@@ -37,6 +33,7 @@
     header_libs: [
         "libstagefright_headers",
         "libstagefright_foundation_headers",
+        "media_plugin_headers",
     ],
 
     static_libs: ["libyuv_static"],
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index c7dc415..6004cf8 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -23,6 +23,7 @@
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/ColorConverter.h>
+#include <media/stagefright/MediaCodecConstants.h>
 #include <media/stagefright/MediaErrors.h>
 
 #include "libyuv/convert_from.h"
@@ -51,13 +52,17 @@
 static bool isRGB(OMX_COLOR_FORMATTYPE colorFormat) {
     return colorFormat == OMX_COLOR_Format16bitRGB565
             || colorFormat == OMX_COLOR_Format32BitRGBA8888
-            || colorFormat == OMX_COLOR_Format32bitBGRA8888;
+            || colorFormat == OMX_COLOR_Format32bitBGRA8888
+            || colorFormat == COLOR_Format32bitABGR2101010;
 }
 
 bool ColorConverter::ColorSpace::isBt709() {
     return (mStandard == ColorUtils::kColorStandardBT709);
 }
 
+bool ColorConverter::ColorSpace::isBt2020() {
+    return (mStandard == ColorUtils::kColorStandardBT2020);
+}
 
 bool ColorConverter::ColorSpace::isJpeg() {
     return ((mStandard == ColorUtils::kColorStandardBT601_625)
@@ -70,16 +75,19 @@
     : mSrcFormat(from),
       mDstFormat(to),
       mSrcColorSpace({0, 0, 0}),
-      mClip(NULL) {
+      mClip(NULL),
+      mClip10Bit(NULL) {
 }
 
 ColorConverter::~ColorConverter() {
     delete[] mClip;
     mClip = NULL;
+    delete[] mClip10Bit;
+    mClip10Bit = NULL;
 }
 
 bool ColorConverter::isValid() const {
-    switch (mSrcFormat) {
+    switch ((int32_t)mSrcFormat) {
         case OMX_COLOR_FormatYUV420Planar16:
             if (mDstFormat == OMX_COLOR_FormatYUV444Y410) {
                 return true;
@@ -102,6 +110,8 @@
 #else
             return mDstFormat == OMX_COLOR_Format16bitRGB565;
 #endif
+        case COLOR_FormatYUVP010:
+            return mDstFormat == COLOR_Format32bitABGR2101010;
 
         default:
             return false;
@@ -143,9 +153,10 @@
       mCropTop(cropTop),
       mCropRight(cropRight),
       mCropBottom(cropBottom) {
-    switch(mColorFormat) {
+    switch((int32_t)mColorFormat) {
     case OMX_COLOR_Format16bitRGB565:
     case OMX_COLOR_FormatYUV420Planar16:
+    case COLOR_FormatYUVP010:
     case OMX_COLOR_FormatCbYCrY:
         mBpp = 2;
         mStride = 2 * mWidth;
@@ -153,6 +164,7 @@
 
     case OMX_COLOR_Format32bitBGRA8888:
     case OMX_COLOR_Format32BitRGBA8888:
+    case COLOR_Format32bitABGR2101010:
     case OMX_COLOR_FormatYUV444Y410:
         mBpp = 4;
         mStride = 4 * mWidth;
@@ -213,7 +225,7 @@
 
     status_t err;
 
-    switch (mSrcFormat) {
+    switch ((int32_t)mSrcFormat) {
         case OMX_COLOR_FormatYUV420Planar:
 #ifdef USE_LIBYUV
             err = convertYUV420PlanarUseLibYUV(src, dst);
@@ -235,6 +247,19 @@
             break;
         }
 
+        case COLOR_FormatYUVP010:
+        {
+#if PERF_PROFILING
+            int64_t startTimeUs = ALooper::GetNowUs();
+#endif
+            err = convertYUVP010(src, dst);
+#if PERF_PROFILING
+            int64_t endTimeUs = ALooper::GetNowUs();
+            ALOGD("convertYUVP010 took %lld us", (long long) (endTimeUs - startTimeUs));
+#endif
+            break;
+        }
+
         case OMX_COLOR_FormatCbYCrY:
             err = convertCbYCrY(src, dst);
             break;
@@ -439,23 +464,23 @@
 }
 
 std::function<void (void *, bool, signed, signed, signed, signed, signed, signed)>
-getWriteToDst(OMX_COLOR_FORMATTYPE dstFormat, uint8_t *kAdjustedClip) {
-    switch (dstFormat) {
+getWriteToDst(OMX_COLOR_FORMATTYPE dstFormat, void *kAdjustedClip) {
+    switch ((int)dstFormat) {
     case OMX_COLOR_Format16bitRGB565:
     {
         return [kAdjustedClip](void *dst_ptr, bool uncropped,
                                signed r1, signed g1, signed b1,
                                signed r2, signed g2, signed b2) {
             uint32_t rgb1 =
-                ((kAdjustedClip[r1] >> 3) << 11)
-                | ((kAdjustedClip[g1] >> 2) << 5)
-                | (kAdjustedClip[b1] >> 3);
+                ((((uint8_t *)kAdjustedClip)[r1] >> 3) << 11)
+                | ((((uint8_t *)kAdjustedClip)[g1] >> 2) << 5)
+                | (((uint8_t *)kAdjustedClip)[b1] >> 3);
 
             if (uncropped) {
                 uint32_t rgb2 =
-                    ((kAdjustedClip[r2] >> 3) << 11)
-                    | ((kAdjustedClip[g2] >> 2) << 5)
-                    | (kAdjustedClip[b2] >> 3);
+                    ((((uint8_t *)kAdjustedClip)[r2] >> 3) << 11)
+                    | ((((uint8_t *)kAdjustedClip)[g2] >> 2) << 5)
+                    | (((uint8_t *)kAdjustedClip)[b2] >> 3);
 
                 *(uint32_t *)dst_ptr = (rgb2 << 16) | rgb1;
             } else {
@@ -469,16 +494,16 @@
                                signed r1, signed g1, signed b1,
                                signed r2, signed g2, signed b2) {
             ((uint32_t *)dst_ptr)[0] =
-                    (kAdjustedClip[r1])
-                    | (kAdjustedClip[g1] << 8)
-                    | (kAdjustedClip[b1] << 16)
+                    (((uint8_t *)kAdjustedClip)[r1])
+                    | (((uint8_t *)kAdjustedClip)[g1] << 8)
+                    | (((uint8_t *)kAdjustedClip)[b1] << 16)
                     | (0xFF << 24);
 
             if (uncropped) {
                 ((uint32_t *)dst_ptr)[1] =
-                        (kAdjustedClip[r2])
-                        | (kAdjustedClip[g2] << 8)
-                        | (kAdjustedClip[b2] << 16)
+                        (((uint8_t *)kAdjustedClip)[r2])
+                        | (((uint8_t *)kAdjustedClip)[g2] << 8)
+                        | (((uint8_t *)kAdjustedClip)[b2] << 16)
                         | (0xFF << 24);
             }
         };
@@ -489,20 +514,41 @@
                                signed r1, signed g1, signed b1,
                                signed r2, signed g2, signed b2) {
             ((uint32_t *)dst_ptr)[0] =
-                    (kAdjustedClip[b1])
-                    | (kAdjustedClip[g1] << 8)
-                    | (kAdjustedClip[r1] << 16)
+                    (((uint8_t *)kAdjustedClip)[b1])
+                    | (((uint8_t *)kAdjustedClip)[g1] << 8)
+                    | (((uint8_t *)kAdjustedClip)[r1] << 16)
                     | (0xFF << 24);
 
             if (uncropped) {
                 ((uint32_t *)dst_ptr)[1] =
-                        (kAdjustedClip[b2])
-                        | (kAdjustedClip[g2] << 8)
-                        | (kAdjustedClip[r2] << 16)
+                        (((uint8_t *)kAdjustedClip)[b2])
+                        | (((uint8_t *)kAdjustedClip)[g2] << 8)
+                        | (((uint8_t *)kAdjustedClip)[r2] << 16)
                         | (0xFF << 24);
             }
         };
     }
+    case COLOR_Format32bitABGR2101010:
+    {
+        return [kAdjustedClip](void *dst_ptr, bool uncropped,
+                               signed r1, signed g1, signed b1,
+                               signed r2, signed g2, signed b2) {
+            ((uint32_t *)dst_ptr)[0] =
+                    (((uint16_t *)kAdjustedClip)[r1])
+                    | (((uint16_t *)kAdjustedClip)[g1] << 10)
+                    | (((uint16_t *)kAdjustedClip)[b1] << 20)
+                    | (3 << 30);
+
+            if (uncropped) {
+                ((uint32_t *)dst_ptr)[1] =
+                        (((uint16_t *)kAdjustedClip)[r2])
+                        | (((uint16_t *)kAdjustedClip)[g2] << 10)
+                        | (((uint16_t *)kAdjustedClip)[b2] << 20)
+                        | (3 << 30);
+            }
+        };
+    }
+
     default:
         TRESPASS();
     }
@@ -514,7 +560,7 @@
     uint8_t *kAdjustedClip = initClip();
 
     auto readFromSrc = getReadFromSrc(mSrcFormat);
-    auto writeToDst = getWriteToDst(mDstFormat, kAdjustedClip);
+    auto writeToDst = getWriteToDst(mDstFormat, (void *)kAdjustedClip);
 
     uint8_t *dst_ptr = (uint8_t *)dst.mBits
             + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
@@ -591,34 +637,116 @@
     return convertYUV420Planar(src, dst);
 }
 
-/*
- * Pack 10-bit YUV into RGBA_1010102.
- *
- * Media sends 10-bit YUV in a RGBA_1010102 format buffer. SF will handle
- * the conversion to RGB using RenderEngine fallback.
- *
- * We do not perform a YUV->RGB conversion here, however the conversion with
- * BT2020 to Full range is below for reference:
- *
- *   B = 1.168  *(Y - 64) + 2.148  *(U - 512)
- *   G = 1.168  *(Y - 64) - 0.652  *(V - 512) - 0.188  *(U - 512)
- *   R = 1.168  *(Y - 64) + 1.683  *(V - 512)
- *
- *   B = 1196/1024  *(Y - 64) + 2200/1024  *(U - 512)
- *   G = .................... -  668/1024  *(V - 512) - 192/1024  *(U - 512)
- *   R = .................... + 1723/1024  *(V - 512)
- *
- *   min_B = (1196  *(- 64) + 2200  *(- 512)) / 1024 = -1175
- *   min_G = (1196  *(- 64) - 668  *(1023 - 512) - 192  *(1023 - 512)) / 1024 = -504
- *   min_R = (1196  *(- 64) + 1723  *(- 512)) / 1024 = -937
- *
- *   max_B = (1196  *(1023 - 64) + 2200  *(1023 - 512)) / 1024 = 2218
- *   max_G = (1196  *(1023 - 64) - 668  *(- 512) - 192  *(- 512)) / 1024 = 1551
- *   max_R = (1196  *(1023 - 64) + 1723  *(1023 - 512)) / 1024 = 1980
- *
- *   clip range -1175 .. 2218
- *
- */
+status_t ColorConverter::convertYUVP010(
+        const BitmapParams &src, const BitmapParams &dst) {
+    if (mDstFormat == COLOR_Format32bitABGR2101010) {
+        return convertYUVP010ToRGBA1010102(src, dst);
+    }
+
+    return ERROR_UNSUPPORTED;
+}
+
+status_t ColorConverter::convertYUVP010ToRGBA1010102(
+        const BitmapParams &src, const BitmapParams &dst) {
+    uint16_t *kAdjustedClip10bit = initClip10Bit();
+
+//    auto readFromSrc = getReadFromSrc(mSrcFormat);
+    auto writeToDst = getWriteToDst(mDstFormat, (void *)kAdjustedClip10bit);
+
+    uint8_t *dst_ptr = (uint8_t *)dst.mBits
+            + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
+
+    uint16_t *src_y = (uint16_t *)((uint8_t *)src.mBits
+            + src.mCropTop * src.mStride + src.mCropLeft * src.mBpp);
+
+    uint16_t *src_uv = (uint16_t *)((uint8_t *)src.mBits
+            + src.mStride * src.mHeight
+            + (src.mCropTop / 2) * src.mStride + src.mCropLeft * src.mBpp);
+
+    // BT.2020 Limited Range conversion
+
+    // B = 1.168  *(Y - 64) + 2.148  *(U - 512)
+    // G = 1.168  *(Y - 64) - 0.652  *(V - 512) - 0.188  *(U - 512)
+    // R = 1.168  *(Y - 64) + 1.683  *(V - 512)
+
+    // B = 1196/1024  *(Y - 64) + 2200/1024  *(U - 512)
+    // G = .................... -  668/1024  *(V - 512) - 192/1024  *(U - 512)
+    // R = .................... + 1723/1024  *(V - 512)
+
+    // min_B = (1196  *(- 64) + 2200  *(- 512)) / 1024 = -1175
+    // min_G = (1196  *(- 64) - 668  *(1023 - 512) - 192  *(1023 - 512)) / 1024 = -504
+    // min_R = (1196  *(- 64) + 1723  *(- 512)) / 1024 = -937
+
+    // max_B = (1196  *(1023 - 64) + 2200  *(1023 - 512)) / 1024 = 2218
+    // max_G = (1196  *(1023 - 64) - 668  *(- 512) - 192  *(- 512)) / 1024 = 1551
+    // max_R = (1196  *(1023 - 64) + 1723  *(1023 - 512)) / 1024 = 1980
+
+    // clip range -1175 .. 2218
+
+    // BT.709 Limited Range conversion
+
+    // B = 1.164 * (Y - 64) + 2.018 * (U - 512)
+    // G = 1.164 * (Y - 64) - 0.813 * (V - 512) - 0.391 * (U - 512)
+    // R = 1.164 * (Y - 64) + 1.596 * (V - 512)
+
+    // B = 1192/1024 * (Y - 64) + 2068/1024 * (U - 512)
+    // G = .................... -  832/1024 * (V - 512) - 400/1024 * (U - 512)
+    // R = .................... + 1636/1024 * (V - 512)
+
+    // min_B = (1192 * (- 64) + 2068 * (- 512)) / 1024 = -1108
+
+    // max_B = (1192 * (1023 - 64) + 517 * (1023 - 512)) / 1024 = 2148
+
+    // clip range -1108 .. 2148
+
+    signed mY = 1196, mU_B = 2200, mV_G = -668, mV_R = 1723, mU_G = -192;
+    if (!mSrcColorSpace.isBt2020()) {
+        mY = 1192;
+        mU_B = 2068;
+        mV_G = -832;
+        mV_R = 1636;
+        mU_G = -400;
+    }
+    for (size_t y = 0; y < src.cropHeight(); ++y) {
+        for (size_t x = 0; x < src.cropWidth(); x += 2) {
+            signed y1, y2, u, v;
+            y1 = (src_y[x] >> 6) - 64;
+            y2 = (src_y[x + 1] >> 6) - 64;
+            u = int(src_uv[x] >> 6) - 512;
+            v = int(src_uv[x + 1] >> 6) - 512;
+
+            signed u_b = u * mU_B;
+            signed u_g = u * mU_G;
+            signed v_g = v * mV_G;
+            signed v_r = v * mV_R;
+
+            signed tmp1 = y1 * mY;
+            signed b1 = (tmp1 + u_b) / 1024;
+            signed g1 = (tmp1 + v_g + u_g) / 1024;
+            signed r1 = (tmp1 + v_r) / 1024;
+
+            signed tmp2 = y2 * mY;
+            signed b2 = (tmp2 + u_b) / 1024;
+            signed g2 = (tmp2 + v_g + u_g) / 1024;
+            signed r2 = (tmp2 + v_r) / 1024;
+
+            bool uncropped = x + 1 < src.cropWidth();
+
+            writeToDst(dst_ptr + x * dst.mBpp, uncropped, r1, g1, b1, r2, g2, b2);
+        }
+
+        src_y += src.mStride / 2;
+
+        if (y & 1) {
+            src_uv += src.mStride / 2;
+        }
+
+        dst_ptr += dst.mStride;
+    }
+
+    return OK;
+}
+
 
 #if !USE_NEON_Y410
 
@@ -1033,4 +1161,19 @@
     return &mClip[-kClipMin];
 }
 
+uint16_t *ColorConverter::initClip10Bit() {
+    static const signed kClipMin = -1176;
+    static const signed kClipMax = 2219;
+
+    if (mClip10Bit == NULL) {
+        mClip10Bit = new uint16_t[kClipMax - kClipMin + 1];
+
+        for (signed i = kClipMin; i <= kClipMax; ++i) {
+            mClip10Bit[i - kClipMin] = (i < 0) ? 0 : (i > 1023) ? 1023 : (uint16_t)i;
+        }
+    }
+
+    return &mClip10Bit[-kClipMin];
+}
+
 }  // namespace android
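
The convertYUVP010ToRGBA1010102() routine added above extracts the 10-bit samples from the upper
bits of each 16-bit P010 word, applies the limited-range matrix with fixed-point /1024 coefficients
(BT.2020 coefficients when the source color space is BT.2020, BT.709 otherwise), clamps to 0..1023
via the precomputed table and packs red in the low 10 bits, green and blue above it and a 2-bit
alpha in the top bits (ABGR2101010). A single-pixel, standalone sketch of the same math
(coefficients taken from the code above; the real code converts two pixels per iteration and uses
initClip10Bit() instead of std::clamp):

    #include <algorithm>
    #include <cstdint>

    // BT.2020 limited-range P010 sample -> RGBA1010102 pixel, mirroring the math above.
    static uint32_t p010ToRgba1010102(uint16_t y16, uint16_t u16, uint16_t v16) {
        // P010 stores each 10-bit sample in the upper bits of a 16-bit word.
        int y = (y16 >> 6) - 64;
        int u = (u16 >> 6) - 512;
        int v = (v16 >> 6) - 512;

        int r = (1196 * y + 1723 * v) / 1024;
        int g = (1196 * y - 668 * v - 192 * u) / 1024;
        int b = (1196 * y + 2200 * u) / 1024;

        auto clip10 = [](int c) { return (uint32_t)std::clamp(c, 0, 1023); };
        return clip10(r) | (clip10(g) << 10) | (clip10(b) << 20) | (3u << 30);
    }

    int main() {
        // Mid grey, neutral chroma: Y = 512, U = V = 512 in 10-bit terms.
        return p010ToRgba1010102(512 << 6, 512 << 6, 512 << 6) ? 0 : 1;
    }
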
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index acc9e87..e6d59ad 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -22,8 +22,12 @@
         "ZeroFilter.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/native/include/media/openmax",
+    export_include_dirs: [
+        "include",
+    ],
+
+    local_include_dirs: [
+        "include/filters",
     ],
 
     cflags: [
diff --git a/media/libstagefright/filters/ColorConvert.h b/media/libstagefright/filters/include/filters/ColorConvert.h
similarity index 100%
rename from media/libstagefright/filters/ColorConvert.h
rename to media/libstagefright/filters/include/filters/ColorConvert.h
diff --git a/media/libstagefright/filters/GraphicBufferListener.h b/media/libstagefright/filters/include/filters/GraphicBufferListener.h
similarity index 100%
rename from media/libstagefright/filters/GraphicBufferListener.h
rename to media/libstagefright/filters/include/filters/GraphicBufferListener.h
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.h b/media/libstagefright/filters/include/filters/IntrinsicBlurFilter.h
similarity index 100%
rename from media/libstagefright/filters/IntrinsicBlurFilter.h
rename to media/libstagefright/filters/include/filters/IntrinsicBlurFilter.h
diff --git a/media/libstagefright/filters/RSFilter.h b/media/libstagefright/filters/include/filters/RSFilter.h
similarity index 100%
rename from media/libstagefright/filters/RSFilter.h
rename to media/libstagefright/filters/include/filters/RSFilter.h
diff --git a/media/libstagefright/filters/SaturationFilter.h b/media/libstagefright/filters/include/filters/SaturationFilter.h
similarity index 100%
rename from media/libstagefright/filters/SaturationFilter.h
rename to media/libstagefright/filters/include/filters/SaturationFilter.h
diff --git a/media/libstagefright/filters/SimpleFilter.h b/media/libstagefright/filters/include/filters/SimpleFilter.h
similarity index 100%
rename from media/libstagefright/filters/SimpleFilter.h
rename to media/libstagefright/filters/include/filters/SimpleFilter.h
diff --git a/media/libstagefright/filters/ZeroFilter.h b/media/libstagefright/filters/include/filters/ZeroFilter.h
similarity index 100%
rename from media/libstagefright/filters/ZeroFilter.h
rename to media/libstagefright/filters/include/filters/ZeroFilter.h
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index 665aae1..83fcc01 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -21,6 +21,12 @@
     name: "libstagefright_flacdec",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+        "com.android.media.swcodec",
+    ],
+
     host_supported: true,
 
     srcs: [
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index c2114b3..5c99cc9 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -33,7 +33,7 @@
 
 #include <media/stagefright/foundation/hexdump.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 #include <binder/Parcel.h>
 #endif
 
@@ -659,7 +659,7 @@
     return s;
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 // static
 sp<AMessage> AMessage::FromParcel(const Parcel &parcel, size_t maxNestingLevel) {
     int32_t what = parcel.readInt32();
@@ -825,7 +825,7 @@
         }
     }
 }
-#endif  // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif  // defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 
 sp<AMessage> AMessage::changesFrom(const sp<const AMessage> &other, bool deep) const {
     if (other == NULL) {
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index b1ed077..a5e0ff8 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -27,7 +27,7 @@
 #include "ADebug.h"
 #include "AString.h"
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 #include <binder/Parcel.h>
 #endif
 
@@ -365,7 +365,7 @@
     return !strcasecmp(mData + mSize - suffixLen, suffix);
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 // static
 AString AString::FromParcel(const Parcel &parcel) {
     size_t size = static_cast<size_t>(parcel.readInt32());
@@ -380,7 +380,7 @@
     }
     return err;
 }
-#endif // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif // defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 
 AString AStringPrintf(const char *format, ...) {
     va_list ap;
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index dd2c66f..1b31392 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -23,6 +23,11 @@
     vendor_available: true,
     host_supported: true,
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+        "com.android.media.swcodec",
+    ],
 }
 
 cc_defaults {
@@ -33,18 +38,13 @@
     },
     host_supported: true,
     double_loadable: true,
-    include_dirs: [
-        "frameworks/av/include",
-        "frameworks/native/include",
-        "frameworks/native/libs/arect/include",
-        "frameworks/native/libs/nativebase/include",
-    ],
 
     local_include_dirs: [
         "include/media/stagefright/foundation",
     ],
 
     header_libs: [
+        "av-headers",
         // this is only needed for the vendor variant that removes libbinder, but vendor
         // target below does not allow adding header_libs.
         "libbinder_headers",
@@ -135,12 +135,19 @@
     name: "libstagefright_foundation",
     defaults: ["libstagefright_foundation_defaults"],
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+        "com.android.media.swcodec",
+    ],
 }
 
 cc_library_static {
     name: "libstagefright_foundation_without_imemory",
     defaults: ["libstagefright_foundation_defaults"],
     min_sdk_version: "29",
+    apex_available: ["com.android.media"],
+
 
     cflags: [
         "-Wno-multichar",
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index fa722b5..a5affb9 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -781,5 +781,14 @@
     return true;
 }
 
+// static
+bool ColorUtils::isHDRStaticInfoValid(HDRStaticInfo *info) {
+    if (info->sType1.mMaxDisplayLuminance > 0.0f
+        && info->sType1.mMinDisplayLuminance > 0.0f)  return true;
+    if (info->sType1.mMaxContentLightLevel > 0.0f
+        && info->sType1.mMaxFrameAverageLightLevel > 0.0f)  return true;
+    return false;
+}
+
 }  // namespace android
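
As a rough usage sketch for the new ColorUtils::isHDRStaticInfoValid() helper (hedged; not code from this change, and it assumes the usual AOSP headers are on the include path):

```cpp
// Hedged usage sketch; header paths follow the standard AOSP layout.
#include <media/hardware/VideoAPI.h>                  // android::HDRStaticInfo
#include <media/stagefright/foundation/ColorUtils.h>  // android::ColorUtils

bool hasUsableHdrMetadata() {
    android::HDRStaticInfo info = {};        // all-zero metadata is treated as invalid
    info.sType1.mMaxDisplayLuminance = 1000; // cd/m^2
    info.sType1.mMinDisplayLuminance = 50;   // in 0.0001 cd/m^2 units
    // Valid because the display-luminance pair is set; the content-light-level
    // pair may remain zero.
    return android::ColorUtils::isHDRStaticInfoValid(&info);
}
```

The helper accepts the metadata when either the display-luminance pair or the content-light-level pair is non-zero, matching the check added above.
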
 
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index ada5d81..5c4ec17 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -60,12 +60,66 @@
 const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
 const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1 = "audio/mha1";
 const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1 = "audio/mhm1";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3 = "audio/mhm1.03";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4 = "audio/mhm1.04";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3 = "audio/mhm1.0d";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4 = "audio/mhm1.0e";
 const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
 const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
 const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
 const char *MEDIA_MIMETYPE_AUDIO_MS_ADPCM = "audio/x-adpcm-ms";
 const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM = "audio/x-adpcm-dvi-ima";
-
+const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/vnd.dts";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_HD = "audio/vnd.dts.hd";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD = "audio/vnd.dts.uhd";
+const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCB = "audio/evrcb";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCWB = "audio/evrcwb";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCNW = "audio/evrcnw";
+const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb+";
+const char *MEDIA_MIMETYPE_AUDIO_APTX = "audio/aptx";
+const char *MEDIA_MIMETYPE_AUDIO_DRA = "audio/vnd.dra";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT = "audio/vnd.dolby.mat";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0 = "audio/vnd.dolby.mat.1.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0 = "audio/vnd.dolby.mat.2.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1 = "audio/vnd.dolby.mat.2.1";
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD = "audio/vnd.dolby.mlp";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4 = "audio/mp4a.40";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN = "audio/mp4a.40.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LC = "audio/mp4a.40.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR = "audio/mp4a.40.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP = "audio/mp4a.40.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1 = "audio/mp4a.40.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE = "audio/mp4a.40.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC = "audio/mp4a.40.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LD = "audio/mp4a.40.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2 = "audio/mp4a.40.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD = "audio/mp4a.40.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE = "audio/mp4a.40.42";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF = "audio/aac-adif";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN = "audio/aac-adts.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC = "audio/aac-adts.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR = "audio/aac-adts.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP = "audio/aac-adts.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1 = "audio/aac-adts.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE = "audio/aac-adts.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC = "audio/aac-adts.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD = "audio/aac-adts.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2 = "audio/aac-adts.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD = "audio/aac-adts.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE = "audio/aac-adts.42";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC = "audio/mp4a-latm.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1 = "audio/mp4a-latm.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2 = "audio/mp4a-latm.29";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC61937 = "audio/x-iec61937";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC60958 = "audio/x-iec60958";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
 const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/foundation/MetaData.cpp b/media/libstagefright/foundation/MetaData.cpp
index 7f48cfd..77913d5 100644
--- a/media/libstagefright/foundation/MetaData.cpp
+++ b/media/libstagefright/foundation/MetaData.cpp
@@ -28,7 +28,7 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaData.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 #include <binder/Parcel.h>
 #endif
 
@@ -48,7 +48,7 @@
 MetaData::~MetaData() {
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 /* static */
 sp<MetaData> MetaData::createFromParcel(const Parcel &parcel) {
 
diff --git a/media/libstagefright/foundation/MetaDataBase.cpp b/media/libstagefright/foundation/MetaDataBase.cpp
index 3f050ea..980eb22 100644
--- a/media/libstagefright/foundation/MetaDataBase.cpp
+++ b/media/libstagefright/foundation/MetaDataBase.cpp
@@ -28,7 +28,7 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaDataBase.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 #include <binder/Parcel.h>
 #endif
 
@@ -452,7 +452,7 @@
     }
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 status_t MetaDataBase::writeToParcel(Parcel &parcel) {
     status_t ret;
     size_t numItems = mInternalData->mItems.size();
@@ -532,7 +532,7 @@
     ALOGW("no metadata in parcel");
     return UNKNOWN_ERROR;
 }
-#endif // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif // defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 
 }  // namespace android
 
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
index a2b6c4f..72c8074 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
@@ -193,6 +193,9 @@
     static void setHDRStaticInfoIntoAMediaFormat(const HDRStaticInfo &info, AMediaFormat *format);
     // (internal) used by the setHDRStaticInfoInfo* routines
     static void fillHdrStaticInfoBuffer( const HDRStaticInfo &info, uint8_t *data);
+
+    // determine whether HDR static info is valid
+    static bool isHDRStaticInfoValid(HDRStaticInfo *info);
 };
 
 inline static const char *asString(android::ColorUtils::ColorStandard i, const char *def = "??") {
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index f5cecef..fb8c299 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -62,12 +62,59 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
 extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
 extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4;
 extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
 extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
 extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
 extern const char *MEDIA_MIMETYPE_AUDIO_MS_ADPCM;
 extern const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM;
-
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_HD;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRC;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCB;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCWB;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCNW;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_APTX;
+extern const char *MEDIA_MIMETYPE_AUDIO_DRA;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC61937;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC60958;
 
 extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
 extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/media/libstagefright/foundation/tests/AVCUtils/Android.bp b/media/libstagefright/foundation/tests/AVCUtils/Android.bp
index 594da56..ee7db21 100644
--- a/media/libstagefright/foundation/tests/AVCUtils/Android.bp
+++ b/media/libstagefright/foundation/tests/AVCUtils/Android.bp
@@ -43,10 +43,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/foundation",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/foundation/tests/Android.bp b/media/libstagefright/foundation/tests/Android.bp
index e50742e..e72ce43 100644
--- a/media/libstagefright/foundation/tests/Android.bp
+++ b/media/libstagefright/foundation/tests/Android.bp
@@ -18,10 +18,6 @@
         "-Wall",
     ],
 
-    include_dirs: [
-        "frameworks/av/include",
-    ],
-
     shared_libs: [
         "liblog",
         "libstagefright_foundation",
diff --git a/media/libstagefright/http/Android.bp b/media/libstagefright/http/Android.bp
index f4d6d99..f25318d 100644
--- a/media/libstagefright/http/Android.bp
+++ b/media/libstagefright/http/Android.bp
@@ -12,10 +12,8 @@
 
     srcs: ["HTTPHelper.cpp"],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/native/include/media/openmax",
-        "frameworks/base/core/jni",
+    header_libs: [
+        "libstagefright_headers",
     ],
 
     shared_libs: [
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 0b0acbf..7e26bd6 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -28,11 +28,6 @@
         "PlaylistFetcher.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/native/include/media/openmax",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
@@ -65,6 +60,8 @@
 
     header_libs: [
         "libbase_headers",
+        "libstagefright_headers",
+        "libstagefright_httplive_headers",
     ],
 
     static_libs: [
@@ -74,3 +71,8 @@
     ],
 
 }
+
+cc_library_headers {
+    name: "libstagefright_httplive_headers",
+    export_include_dirs: ["."],
+}
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 3bad015..09ca1c9 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -23,7 +23,7 @@
 #include "M3UParser.h"
 #include "PlaylistFetcher.h"
 
-#include "mpeg2ts/AnotherPacketSource.h"
+#include <mpeg2ts/AnotherPacketSource.h>
 
 #include <cutils/properties.h>
 #include <media/MediaHTTPService.h>
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 7a6d487..ed38a2e 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -24,7 +24,7 @@
 
 #include <utils/String8.h>
 
-#include "mpeg2ts/ATSParser.h"
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index b23aa8a..b339fd2 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -24,9 +24,9 @@
 #include "HTTPDownloader.h"
 #include "LiveSession.h"
 #include "M3UParser.h"
-#include "include/ID3.h"
-#include "mpeg2ts/AnotherPacketSource.h"
-#include "mpeg2ts/HlsSampleDecryptor.h"
+#include <ID3.h>
+#include <mpeg2ts/AnotherPacketSource.h>
+#include <mpeg2ts/HlsSampleDecryptor.h>
 
 #include <datasource/DataURISource.h>
 #include <media/stagefright/foundation/ABitReader.h>
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 5d3f9c1..716df63 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -21,7 +21,7 @@
 #include <media/stagefright/foundation/AHandler.h>
 #include <openssl/aes.h>
 
-#include "mpeg2ts/ATSParser.h"
+#include <mpeg2ts/ATSParser.h>
 #include "LiveSession.h"
 
 namespace android {
diff --git a/media/libstagefright/httplive/fuzzer/Android.bp b/media/libstagefright/httplive/fuzzer/Android.bp
new file mode 100644
index 0000000..14097b0
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/Android.bp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_media_libstagefright_httplive_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: [
+        "frameworks_av_media_libstagefright_httplive_license",
+    ],
+}
+
+cc_fuzz {
+    name: "httplive_fuzzer",
+    srcs: [
+        "httplive_fuzzer.cpp",
+    ],
+    static_libs: [
+        "libstagefright_httplive",
+        "libstagefright_id3",
+        "libstagefright_metadatautils",
+        "libstagefright_mpeg2support",
+        "liblog",
+        "libcutils",
+        "libdatasource",
+        "libmedia",
+        "libstagefright",
+        "libutils",
+    ],
+    header_libs: [
+        "libbase_headers",
+        "libstagefright_foundation_headers",
+        "libstagefright_headers",
+        "libstagefright_httplive_headers",
+    ],
+    shared_libs: [
+        "libcrypto",
+        "libstagefright_foundation",
+        "libhidlbase",
+        "libhidlmemory",
+        "android.hidl.allocator@1.0",
+    ],
+    corpus: ["corpus/*"],
+    dictionary: "httplive_fuzzer.dict",
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/media/libstagefright/httplive/fuzzer/README.md b/media/libstagefright/httplive/fuzzer/README.md
new file mode 100644
index 0000000..3a64ea4
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/README.md
@@ -0,0 +1,56 @@
+# Fuzzer for libstagefright_httplive
+
+## Plugin Design Considerations
+The fuzzer plugin for libstagefright_httplive is designed based on an understanding of the library and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. Also, several .m3u8 files are hand-crafted and added to the corpus directory to increase code coverage. This ensures more code paths are reached by the fuzzer.
+
+libstagefright_httplive supports the following parameters:
+1. Final Result (parameter name: `finalResult`)
+2. Flags (parameter name: `flags`)
+3. Time Us (parameter name: `timeUs`)
+4. Track Index (parameter name: `trackIndex`)
+5. Index (parameter name: `index`)
+6. Select (parameter name: `select`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `finalResult` | `-34` to `-1` | Value obtained from FuzzedDataProvider|
+| `flags` | `0` to `1` | Value obtained from FuzzedDataProvider|
+| `timeUs` | `0` to `10000000` | Value obtained from FuzzedDataProvider|
+| `trackIndex` | `0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `index` | `0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `select` | `True` or `False` | Value obtained from FuzzedDataProvider|
+
+This also ensures that the plugin is always deterministic for any given input.
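+
+A minimal sketch of how such values could be derived with `FuzzedDataProvider` (illustrative names and ranges mirroring the table above, not the fuzzer's actual variables):
+
+```cpp
+#include <fuzzer/FuzzedDataProvider.h>
+
+// Illustrative only: derive the parameters listed above from the fuzz input.
+void configureFromInput(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp(data, size);
+  int32_t finalResult = fdp.ConsumeIntegralInRange<int32_t>(-34, -1);
+  uint32_t flags      = fdp.ConsumeIntegralInRange<uint32_t>(0, 1);
+  int64_t timeUs      = fdp.ConsumeIntegralInRange<int64_t>(0, 10000000);
+  uint32_t trackIndex = fdp.ConsumeIntegral<uint32_t>();
+  bool select         = fdp.ConsumeBool();
+  (void)finalResult; (void)flags; (void)timeUs; (void)trackIndex; (void)select;
+}
+```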
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the httplive module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and does not `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the httplive_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) httplive_fuzzer
+```
+#### Steps to run
+To run on device
+```
+  $ adb push $ANDROID_PRODUCT_OUT/data/fuzz/${TARGET_ARCH}/lib /data/fuzz/${TARGET_ARCH}/lib
+  $ adb push $ANDROID_PRODUCT_OUT/data/fuzz/${TARGET_ARCH}/httplive_fuzzer /data/fuzz/${TARGET_ARCH}/httplive_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/httplive_fuzzer/httplive_fuzzer /data/fuzz/${TARGET_ARCH}/httplive_fuzzer/corpus
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/media/libstagefright/httplive/fuzzer/corpus/crypt.key b/media/libstagefright/httplive/fuzzer/corpus/crypt.key
new file mode 100644
index 0000000..f9d5d7f
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/crypt.key
@@ -0,0 +1,2 @@

+ÏŒüÐ5Љ_xïHÎ3
diff --git a/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8
new file mode 100644
index 0000000..32b0eac
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8
@@ -0,0 +1,12 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-ALLOW-CACHE:YES
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXT-X-VERSION:3
+#EXT-X-MEDIA-SEQUENCE:1
+#EXT-X-KEY:METHOD=AES-128,URI="../../fuzz/arm64/httplive_fuzzer/corpus/crypt.key"
+#EXTINF:10.000,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5.092,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8
new file mode 100644
index 0000000..9338e04
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8
@@ -0,0 +1,8 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-MEDIA-SEQUENCE:0
+#EXTINF:10, no desc
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10, no desc
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8
new file mode 100644
index 0000000..e1eff58
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8
@@ -0,0 +1,14 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence0.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8
new file mode 100644
index 0000000..37a0189
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8
@@ -0,0 +1,6 @@
+#EXTM3U
+#EXT-X-INDEPENDENT-SEGMENTS
+#EXT-X-STREAM-INF:CLOSED-CAPTIONS=NONE,BANDWIDTH=165340,RESOLUTION=256x144,CODECS="mp4a.40.5,avc1.42c00b"
+https://non.existentsite.com/test-doesnt-dereference-these-paths/prog_index.m3u8
+#EXT-X-STREAM-INF:CLOSED-CAPTIONS=NONE,BANDWIDTH=344388,RESOLUTION=426x240,CODECS="mp4a.40.5,avc1.4d4015"
+https://non.existentsite.com/test-doesnt-dereference-these-paths/prog_index1.m3u8
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8
new file mode 100644
index 0000000..1b7f489
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8
@@ -0,0 +1,13 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=AES-128,URI="https://demo.unified-streaming.com/video/tears-of-steel/aes.key",IV=0X99b74007b6254e4bd1c6e03631cad15b
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence3.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8
new file mode 100644
index 0000000..89ba37c
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8
@@ -0,0 +1,15 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;charset=utf-8,a4cd9995a1aa91e1",IV=0X99b74007b6254e4bd1c6e03631cad15b
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence0.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8
new file mode 100644
index 0000000..2120de4
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8
@@ -0,0 +1,14 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:11
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-MEDIA-SEQUENCE:0
+#EXT-X-VERSION:4
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:10@0
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:20@10
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:80
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8
new file mode 100644
index 0000000..588368a
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8
@@ -0,0 +1,12 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=AES-128,URI="data:text/plain;charset=utf-8,a4cd9995a1aa91e1",IV=0x30303030303030303030303030303030
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence3.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8
new file mode 100644
index 0000000..b09948e
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8
@@ -0,0 +1,46 @@
+#EXTM3U
+#EXT-X-VERSION:4
+## Created with Unified Streaming Platform  (version=1.11.3-24438)
+#EXT-X-SESSION-KEY:METHOD=AES-128,URI="https://demo.unified-streaming.com/video/tears-of-steel/aes.key"
+
+# AUDIO groups
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-aacl-64",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,CHANNELS="2"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-aacl-128",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,CHANNELS="2"
+
+# SUBTITLES groups
+#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="textstream",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,URI="tears-of-steel-aes-textstream_eng=1000.m3u8"
+#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="textstream",LANGUAGE="ru",NAME="Russian",AUTOSELECT=YES,URI="tears-of-steel-aes-textstream_rus=1000.m3u8"
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=494000,CODECS="mp4a.40.2,avc1.42C00D",RESOLUTION=224x100,FRAME-RATE=24,AUDIO="audio-aacl-64",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=64008-video_eng=401000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=933000,CODECS="mp4a.40.2,avc1.42C016",RESOLUTION=448x200,FRAME-RATE=24,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=751000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1198000,CODECS="mp4a.40.2,avc1.4D401F",RESOLUTION=784x350,FRAME-RATE=24,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=1001000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1728000,CODECS="mp4a.40.2,avc1.640028",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=1501000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=2469000,CODECS="mp4a.40.2,avc1.640028",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=2200000.m3u8
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=1025000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-64",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=64008-video_eng_1=902000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1368000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=2576x1150,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng_1=1161000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1815000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=3360x1500,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng_1=1583000.m3u8
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=69000,CODECS="mp4a.40.2",AUDIO="audio-aacl-64",SUBTITLES="textstream"
+tears-of-steel-aes-audio_eng=64008.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=137000,CODECS="mp4a.40.2",AUDIO="audio-aacl-128",SUBTITLES="textstream"
+tears-of-steel-aes-audio_eng=128002.m3u8
+
+# keyframes
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=54000,CODECS="avc1.42C00D",RESOLUTION=224x100,URI="keyframes/tears-of-steel-aes-video_eng=401000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=100000,CODECS="avc1.42C016",RESOLUTION=448x200,URI="keyframes/tears-of-steel-aes-video_eng=751000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=133000,CODECS="avc1.4D401F",RESOLUTION=784x350,URI="keyframes/tears-of-steel-aes-video_eng=1001000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=120000,CODECS="hvc1.1.6.L150.90",RESOLUTION=1680x750,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=902000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=154000,CODECS="hvc1.1.6.L150.90",RESOLUTION=2576x1150,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=1161000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=210000,CODECS="hvc1.1.6.L150.90",RESOLUTION=3360x1500,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=1583000.m3u8"
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8
new file mode 100644
index 0000000..353d589
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8
@@ -0,0 +1,13 @@
+#EXTM3U
+#EXT-X-VERSION:5
+
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="English stereo",LANGUAGE="en",AUTOSELECT=YES,URI="../../fuzz/arm64/httplive_fuzzer/index1.m3u8"
+
+#EXT-X-STREAM-INF:BANDWIDTH=628000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=320x180,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index1.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=928000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=480x270,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index2.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1728000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=640x360,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index3.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=2528000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=960x540,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index1.m3u8
diff --git a/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8
new file mode 100644
index 0000000..eb88422
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8
@@ -0,0 +1,17 @@
+#EXTM3U
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="eng",NAME="English",AUTOSELECT=YES,DEFAULT=YES,URI="corpus/index1.m3u8"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="fre",NAME="Français",AUTOSELECT=YES,DEFAULT=NO,URI="corpus/index1.m3u8"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="sp",NAME="Espanol",AUTOSELECT=YES,DEFAULT=NO,URI="corpus/index1.m3u8"
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp
new file mode 100644
index 0000000..aa777b3
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <LiveDataSource.h>
+#include <LiveSession.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
+#include <media/mediaplayer_common.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/ALooperRoster.h>
+#include <string>
+#include <utils/Log.h>
+
+using namespace std;
+using namespace android;
+
+constexpr char kFileNamePrefix[] = "/data/local/tmp/httplive-";
+constexpr char kFileNameSuffix[] = ".m3u8";
+constexpr char kFileUrlPrefix[] = "file://";
+constexpr int64_t kOffSet = 0;
+constexpr int32_t kReadyMarkMs = 5000;
+constexpr int32_t kPrepareMarkMs = 1500;
+constexpr int32_t kErrorNoMax = -1;
+constexpr int32_t kErrorNoMin = -34;
+constexpr int32_t kMaxTimeUs = 1000;
+constexpr int32_t kRandomStringLength = 64;
+constexpr int32_t kRangeMin = 0;
+constexpr int32_t kRangeMax = 1000;
+
+constexpr LiveSession::StreamType kValidStreamType[] = {
+    LiveSession::STREAMTYPE_AUDIO, LiveSession::STREAMTYPE_VIDEO,
+    LiveSession::STREAMTYPE_SUBTITLES, LiveSession::STREAMTYPE_METADATA};
+
+constexpr MediaSource::ReadOptions::SeekMode kValidSeekMode[] = {
+    MediaSource::ReadOptions::SeekMode::SEEK_PREVIOUS_SYNC,
+    MediaSource::ReadOptions::SeekMode::SEEK_NEXT_SYNC,
+    MediaSource::ReadOptions::SeekMode::SEEK_CLOSEST_SYNC,
+    MediaSource::ReadOptions::SeekMode::SEEK_CLOSEST,
+    MediaSource::ReadOptions::SeekMode::SEEK_FRAME_INDEX};
+
+constexpr media_track_type kValidMediaTrackType[] = {
+    MEDIA_TRACK_TYPE_UNKNOWN,  MEDIA_TRACK_TYPE_VIDEO,
+    MEDIA_TRACK_TYPE_AUDIO,    MEDIA_TRACK_TYPE_TIMEDTEXT,
+    MEDIA_TRACK_TYPE_SUBTITLE, MEDIA_TRACK_TYPE_METADATA};
+
+struct TestAHandler : public AHandler {
+public:
+  TestAHandler(std::function<void()> signalEosFunction)
+      : mSignalEosFunction(signalEosFunction) {}
+  virtual ~TestAHandler() {}
+
+protected:
+  void onMessageReceived(const sp<AMessage> &msg) override {
+    int32_t what = -1;
+    msg->findInt32("what", &what);
+    switch (what) {
+    case LiveSession::kWhatError:
+    case LiveSession::kWhatPrepared:
+    case LiveSession::kWhatPreparationFailed: {
+      mSignalEosFunction();
+      break;
+    }
+    }
+    return;
+  }
+
+private:
+  std::function<void()> mSignalEosFunction;
+};
+
+struct TestMediaHTTPConnection : public MediaHTTPConnection {
+public:
+  TestMediaHTTPConnection() {}
+  virtual ~TestMediaHTTPConnection() {}
+
+  virtual bool connect(const char * /*uri*/,
+                       const KeyedVector<String8, String8> * /*headers*/) {
+    return true;
+  }
+
+  virtual void disconnect() { return; }
+
+  virtual ssize_t readAt(off64_t /*offset*/, void * /*data*/, size_t size) {
+    return size;
+  }
+
+  virtual off64_t getSize() { return 0; }
+  virtual status_t getMIMEType(String8 * /*mimeType*/) { return NO_ERROR; }
+  virtual status_t getUri(String8 * /*uri*/) { return NO_ERROR; }
+
+private:
+  DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPConnection);
+};
+
+struct TestMediaHTTPService : public MediaHTTPService {
+public:
+  TestMediaHTTPService() {}
+  ~TestMediaHTTPService(){};
+
+  virtual sp<MediaHTTPConnection> makeHTTPConnection() {
+    mediaHTTPConnection = sp<TestMediaHTTPConnection>::make();
+    return mediaHTTPConnection;
+  }
+
+private:
+  sp<TestMediaHTTPConnection> mediaHTTPConnection = nullptr;
+  DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPService);
+};
+
+class HttpLiveFuzzer {
+public:
+  void process(const uint8_t *data, size_t size);
+  void deInitLiveSession();
+  ~HttpLiveFuzzer() { deInitLiveSession(); }
+
+private:
+  void invokeLiveDataSource();
+  void createM3U8File(const uint8_t *data, size_t size);
+  void initLiveDataSource();
+  void invokeLiveSession();
+  void initLiveSession();
+  void invokeDequeueAccessUnit();
+  void invokeConnectAsync();
+  void invokeSeekTo();
+  void invokeGetConfig();
+  void signalEos();
+  string generateFileName();
+  sp<LiveDataSource> mLiveDataSource = nullptr;
+  sp<LiveSession> mLiveSession = nullptr;
+  sp<ALooper> mLiveLooper = nullptr;
+  sp<TestMediaHTTPService> httpService = nullptr;
+  sp<TestAHandler> mHandler = nullptr;
+  FuzzedDataProvider *mFDP = nullptr;
+  bool mEosReached = false;
+  std::mutex mDownloadCompleteMutex;
+  std::condition_variable mConditionalVariable;
+};
+
+string HttpLiveFuzzer::generateFileName() {
+  return kFileNamePrefix + to_string(getpid()) + kFileNameSuffix;
+}
+
+void HttpLiveFuzzer::createM3U8File(const uint8_t *data, size_t size) {
+  ofstream m3u8File;
+  string currentFileName = generateFileName();
+  m3u8File.open(currentFileName, ios::out | ios::binary);
+  m3u8File.write((char *)data, size);
+  m3u8File.close();
+}
+
+void HttpLiveFuzzer::initLiveDataSource() {
+  mLiveDataSource = sp<LiveDataSource>::make();
+}
+
+void HttpLiveFuzzer::invokeLiveDataSource() {
+  initLiveDataSource();
+  size_t size = mFDP->ConsumeIntegralInRange<size_t>(kRangeMin, kRangeMax);
+  sp<ABuffer> buffer = new ABuffer(size);
+  mLiveDataSource->queueBuffer(buffer);
+  uint8_t *data = new uint8_t[size];
+  mLiveDataSource->readAtNonBlocking(kOffSet, data, size);
+  int32_t finalResult = mFDP->ConsumeIntegralInRange(kErrorNoMin, kErrorNoMax);
+  mLiveDataSource->queueEOS(finalResult);
+  mLiveDataSource->reset();
+  mLiveDataSource->countQueuedBuffers();
+  mLiveDataSource->initCheck();
+  delete[] data;
+}
+
+void HttpLiveFuzzer::initLiveSession() {
+  ALooperRoster looperRoster;
+  mHandler =
+      sp<TestAHandler>::make(std::bind(&HttpLiveFuzzer::signalEos, this));
+  mLiveLooper = sp<ALooper>::make();
+  mLiveLooper->setName("http live");
+  mLiveLooper->start();
+  sp<AMessage> notify = sp<AMessage>::make(0, mHandler);
+  httpService = new TestMediaHTTPService();
+  uint32_t flags = mFDP->ConsumeIntegral<uint32_t>();
+  mLiveSession = sp<LiveSession>::make(notify, flags, httpService);
+  mLiveLooper->registerHandler(mLiveSession);
+  looperRoster.registerHandler(mLiveLooper, mHandler);
+}
+
+void HttpLiveFuzzer::invokeDequeueAccessUnit() {
+  LiveSession::StreamType stream = mFDP->PickValueInArray(kValidStreamType);
+  sp<ABuffer> buffer;
+  mLiveSession->dequeueAccessUnit(stream, &buffer);
+}
+
+void HttpLiveFuzzer::invokeSeekTo() {
+  int64_t timeUs = mFDP->ConsumeIntegralInRange<int64_t>(0, kMaxTimeUs);
+  MediaSource::ReadOptions::SeekMode mode =
+      mFDP->PickValueInArray(kValidSeekMode);
+  mLiveSession->seekTo(timeUs, mode);
+}
+
+void HttpLiveFuzzer::invokeGetConfig() {
+  mLiveSession->getTrackCount();
+  size_t trackIndex = mFDP->ConsumeIntegral<size_t>();
+  mLiveSession->getTrackInfo(trackIndex);
+  media_track_type type = mFDP->PickValueInArray(kValidMediaTrackType);
+  mLiveSession->getSelectedTrack(type);
+  sp<MetaData> meta;
+  LiveSession::StreamType stream = mFDP->PickValueInArray(kValidStreamType);
+  mLiveSession->getStreamFormatMeta(stream, &meta);
+  mLiveSession->getKeyForStream(stream);
+  if (stream != LiveSession::STREAMTYPE_SUBTITLES) {
+    mLiveSession->getSourceTypeForStream(stream);
+  }
+}
+
+void HttpLiveFuzzer::invokeConnectAsync() {
+  string currentFileName = generateFileName();
+  string url = kFileUrlPrefix + currentFileName;
+  string str_1 = mFDP->ConsumeRandomLengthString(kRandomStringLength);
+  string str_2 = mFDP->ConsumeRandomLengthString(kRandomStringLength);
+
+  KeyedVector<String8, String8> headers;
+  headers.add(String8(str_1.c_str()), String8(str_2.c_str()));
+  mLiveSession->connectAsync(url.c_str(), &headers);
+}
+
+void HttpLiveFuzzer::invokeLiveSession() {
+  initLiveSession();
+  BufferingSettings bufferingSettings;
+  bufferingSettings.mInitialMarkMs = kPrepareMarkMs;
+  bufferingSettings.mResumePlaybackMarkMs = kReadyMarkMs;
+  mLiveSession->setBufferingSettings(bufferingSettings);
+  invokeConnectAsync();
+  std::unique_lock waitForDownloadComplete(mDownloadCompleteMutex);
+  mConditionalVariable.wait(waitForDownloadComplete,
+                            [this] { return mEosReached; });
+  if (mLiveSession->isSeekable()) {
+    invokeSeekTo();
+  }
+  invokeDequeueAccessUnit();
+  size_t index = mFDP->ConsumeIntegral<size_t>();
+  bool select = mFDP->ConsumeBool();
+  mLiveSession->selectTrack(index, select);
+  mLiveSession->hasDynamicDuration();
+  int64_t firstTimeUs =
+      mFDP->ConsumeIntegralInRange<int64_t>(kRangeMin, kRangeMax);
+  int64_t timeUs = mFDP->ConsumeIntegralInRange<int64_t>(kRangeMin, kRangeMax);
+  int32_t discontinuitySeq = mFDP->ConsumeIntegral<int32_t>();
+  mLiveSession->calculateMediaTimeUs(firstTimeUs, timeUs, discontinuitySeq);
+  invokeGetConfig();
+}
+
+void HttpLiveFuzzer::process(const uint8_t *data, size_t size) {
+  mFDP = new FuzzedDataProvider(data, size);
+  createM3U8File(data, size);
+  invokeLiveDataSource();
+  invokeLiveSession();
+  delete mFDP;
+}
+
+void HttpLiveFuzzer::deInitLiveSession() {
+  if (mLiveSession != nullptr) {
+    mLiveSession->disconnect();
+    mLiveLooper->unregisterHandler(mLiveSession->id());
+    mLiveLooper->stop();
+  }
+  mLiveSession.clear();
+  mLiveLooper.clear();
+}
+
+void HttpLiveFuzzer::signalEos() {
+  mEosReached = true;
+  {
+    std::lock_guard<std::mutex> waitForDownloadComplete(mDownloadCompleteMutex);
+  }
+  mConditionalVariable.notify_one();
+  return;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  HttpLiveFuzzer httpliveFuzzer;
+  httpliveFuzzer.process(data, size);
+  return 0;
+}
diff --git a/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict
new file mode 100644
index 0000000..703cc7e
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict
@@ -0,0 +1,15 @@
+#m3u8-Tags
+kw1="#EXTM3U"
+kw2="#EXT-X-VERSION:"
+kw3="#EXT-X-TARGETDURATION:"
+kw4="#EXT-X-PLAYLIST-TYPE:"
+kw5="#EXTINF:"
+kw6="#EXT-X-ENDLIST"
+kw7="#EXT-X-MEDIA-SEQUENCE:"
+kw8="#EXT-X-KEY:METHOD=NONE"
+kw9="#EXT-X-DISCONTINUITY:"
+kw10="#EXT-X-DISCONTINUITY-SEQUENCE:0"
+kw11="#EXT-X-STREAM-INF:BANDWIDTH="
+kw12="#EXT-X-STREAM-INF:CODECS="
+kw13="#EXT-X-BYTERANGE:"
+kw14="#EXT-X-MEDIA"
diff --git a/media/libstagefright/id3/Android.bp b/media/libstagefright/id3/Android.bp
index 3f5ba47..bea3e34 100644
--- a/media/libstagefright/id3/Android.bp
+++ b/media/libstagefright/id3/Android.bp
@@ -20,6 +20,11 @@
 cc_library_static {
     name: "libstagefright_id3",
     min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+    ],
+
 
     srcs: ["ID3.cpp"],
 
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index c84cc10..632b32c 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -71,6 +71,9 @@
     virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface);
     virtual void initiateStart();
     virtual void initiateShutdown(bool keepComponentAllocated = false);
+    virtual status_t querySupportedParameters(std::vector<std::string> *names) override;
+    virtual status_t subscribeToParameters(const std::vector<std::string> &names) override;
+    virtual status_t unsubscribeFromParameters(const std::vector<std::string> &names) override;
 
     status_t queryCapabilities(
             const char* owner, const char* name,
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 75b0d8e..1d86a22 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -54,6 +54,7 @@
         uint32_t mTransfer;
 
         bool isBt709();
+        bool isBt2020();
         bool isJpeg();
     };
 
@@ -78,8 +79,10 @@
     OMX_COLOR_FORMATTYPE mSrcFormat, mDstFormat;
     ColorSpace mSrcColorSpace;
     uint8_t *mClip;
+    uint16_t *mClip10Bit;
 
     uint8_t *initClip();
+    uint16_t *initClip10Bit();
 
     status_t convertCbYCrY(
             const BitmapParams &src, const BitmapParams &dst);
@@ -111,6 +114,12 @@
     status_t convertTIYUV420PackedSemiPlanar(
             const BitmapParams &src, const BitmapParams &dst);
 
+    status_t convertYUVP010(
+                const BitmapParams &src, const BitmapParams &dst);
+
+    status_t convertYUVP010ToRGBA1010102(
+                const BitmapParams &src, const BitmapParams &dst);
+
     ColorConverter(const ColorConverter &);
     ColorConverter &operator=(const ColorConverter &);
 };
diff --git a/media/libstagefright/include/media/stagefright/MediaBuffer.h b/media/libstagefright/include/media/stagefright/MediaBuffer.h
index 2c03f27..f070aac 100644
--- a/media/libstagefright/include/media/stagefright/MediaBuffer.h
+++ b/media/libstagefright/include/media/stagefright/MediaBuffer.h
@@ -105,7 +105,6 @@
         if (mMemory.get() == nullptr || mMemory->unsecurePointer() == nullptr) return 0;
         int32_t remoteRefcount =
                 reinterpret_cast<SharedControl *>(mMemory->unsecurePointer())->getRemoteRefcount();
-        // Sanity check so that remoteRefCount() is non-negative.
         return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
 #else
         return 0;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index d372140..ce3b0d0 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -445,6 +445,12 @@
     int32_t mRotationDegrees;
     int32_t mAllowFrameDroppingBySurface;
 
+    uint32_t mHDRMetadataFlags; /* bitmask of kFlagHDR* */
+    enum {
+        kFlagHDRStaticInfo = 1 << 0,
+        kFlagHDR10PlusInfo = 1 << 1,
+    };
+
     // initial create parameters
     AString mInitName;
 
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 4237e8c..84653eb 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -364,7 +364,7 @@
 inline static const char *asString_AV1Profile(int32_t i, const char *def = "??") {
     switch (i) {
         case AV1ProfileMain8:           return "Main8";
-        case AV1ProfileMain10:          return "Main10HDR";
+        case AV1ProfileMain10:          return "Main10";
         case AV1ProfileMain10HDR10:     return "Main10HDR10";
         case AV1ProfileMain10HDR10Plus: return "Main10HDRPlus";
         default:                        return def;
@@ -540,6 +540,9 @@
 constexpr int32_t DolbyVisionLevelUhd30   = 0x40;
 constexpr int32_t DolbyVisionLevelUhd48   = 0x80;
 constexpr int32_t DolbyVisionLevelUhd60   = 0x100;
+constexpr int32_t DolbyVisionLevelUhd120  = 0x200;
+constexpr int32_t DolbyVisionLevel8k30    = 0x400;
+constexpr int32_t DolbyVisionLevel8k60    = 0x800;
 
 inline static const char *asString_DolbyVisionLevel(int32_t i, const char *def = "??") {
     switch (i) {
@@ -552,6 +555,9 @@
         case DolbyVisionLevelUhd30: return "Uhd30";
         case DolbyVisionLevelUhd48: return "Uhd48";
         case DolbyVisionLevelUhd60: return "Uhd60";
+        case DolbyVisionLevelUhd120: return "Uhd120";
+        case DolbyVisionLevel8k30:  return "8k30";
+        case DolbyVisionLevel8k60:  return "8k60";
         default:                    return def;
     }
 }
@@ -586,9 +592,11 @@
 constexpr int32_t COLOR_Format24bitBGR888             = 12;
 constexpr int32_t COLOR_Format24bitRGB888             = 11;
 constexpr int32_t COLOR_Format25bitARGB1888           = 14;
+constexpr int32_t COLOR_Format32bitABGR2101010        = 0x7F00AAA2;
 constexpr int32_t COLOR_Format32bitABGR8888           = 0x7F00A000;
 constexpr int32_t COLOR_Format32bitARGB8888           = 16;
 constexpr int32_t COLOR_Format32bitBGRA8888           = 15;
+constexpr int32_t COLOR_Format64bitABGRFloat          = 0x7F000F16;
 constexpr int32_t COLOR_Format8bitRGB332              = 2;
 constexpr int32_t COLOR_FormatCbYCrY                  = 27;
 constexpr int32_t COLOR_FormatCrYCbY                  = 28;
@@ -642,9 +650,11 @@
         case COLOR_Format24bitBGR888:               return "24bitBGR888";
         case COLOR_Format24bitRGB888:               return "24bitRGB888";
         case COLOR_Format25bitARGB1888:             return "25bitARGB1888";
+        case COLOR_Format32bitABGR2101010:          return "32bitABGR2101010";
         case COLOR_Format32bitABGR8888:             return "32bitABGR8888";
         case COLOR_Format32bitARGB8888:             return "32bitARGB8888";
         case COLOR_Format32bitBGRA8888:             return "32bitBGRA8888";
+        case COLOR_Format64bitABGRFloat:            return "64bitABGRFloat";
         case COLOR_Format8bitRGB332:                return "8bitRGB332";
         case COLOR_FormatCbYCrY:                    return "CbYCrY";
         case COLOR_FormatCrYCbY:                    return "CrYCbY";
@@ -677,6 +687,7 @@
         case COLOR_FormatYUV422SemiPlanar:          return "YUV422SemiPlanar";
         case COLOR_FormatYUV444Flexible:            return "YUV444Flexible";
         case COLOR_FormatYUV444Interleaved:         return "YUV444Interleaved";
+        case COLOR_FormatYUVP010:                   return "YUVP010";
         case COLOR_QCOM_FormatYUV420SemiPlanar:     return "QCOM_YUV420SemiPlanar";
         case COLOR_TI_FormatYUV420PackedSemiPlanar: return "TI_YUV420PackedSemiPlanar";
         default:                                    return def;
@@ -684,6 +695,7 @@
 }
 
 constexpr char FEATURE_AdaptivePlayback[]       = "adaptive-playback";
+constexpr char FEATURE_EncodingStatistics[]     = "encoding-statistics";
 constexpr char FEATURE_IntraRefresh[] = "intra-refresh";
 constexpr char FEATURE_PartialFrame[] = "partial-frame";
 constexpr char FEATURE_QpBounds[] = "qp-bounds";
@@ -737,6 +749,14 @@
 constexpr int32_t COLOR_TRANSFER_SDR_VIDEO = 3;
 constexpr int32_t COLOR_TRANSFER_ST2084 = 6;
 
+constexpr int32_t PICTURE_TYPE_I = 1;
+constexpr int32_t PICTURE_TYPE_P = 2;
+constexpr int32_t PICTURE_TYPE_B = 3;
+constexpr int32_t PICTURE_TYPE_UNKNOWN = 0;
+
+constexpr int32_t VIDEO_ENCODING_STATISTICS_LEVEL_1 = 1;
+constexpr int32_t VIDEO_ENCODING_STATISTICS_LEVEL_NONE = 0;
+
 constexpr char KEY_AAC_DRC_ALBUM_MODE[] = "aac-drc-album-mode";
 constexpr char KEY_AAC_DRC_ATTENUATION_FACTOR[] = "aac-drc-cut-level";
 constexpr char KEY_AAC_DRC_BOOST_FACTOR[] = "aac-drc-boost-level";
@@ -789,12 +809,14 @@
 constexpr char KEY_MAX_FPS_TO_ENCODER[] = "max-fps-to-encoder";
 constexpr char KEY_MAX_HEIGHT[] = "max-height";
 constexpr char KEY_MAX_INPUT_SIZE[] = "max-input-size";
+constexpr char KEY_MAX_OUTPUT_CHANNEL_COUNT[] = "max-output-channel-count";
 constexpr char KEY_MAX_PTS_GAP_TO_ENCODER[] = "max-pts-gap-to-encoder";
 constexpr char KEY_MAX_WIDTH[] = "max-width";
 constexpr char KEY_MIME[] = "mime";
 constexpr char KEY_OPERATING_RATE[] = "operating-rate";
 constexpr char KEY_OUTPUT_REORDER_DEPTH[] = "output-reorder-depth";
 constexpr char KEY_PCM_ENCODING[] = "pcm-encoding";
+constexpr char KEY_PICTURE_TYPE[] = "picture_type";
 constexpr char KEY_PIXEL_ASPECT_RATIO_HEIGHT[] = "sar-height";
 constexpr char KEY_PIXEL_ASPECT_RATIO_WIDTH[] = "sar-width";
 constexpr char KEY_PREPEND_HEADER_TO_SYNC_FRAMES[] = "prepend-sps-pps-to-idr-frames";
@@ -811,6 +833,8 @@
 constexpr char KEY_TILE_HEIGHT[] = "tile-height";
 constexpr char KEY_TILE_WIDTH[] = "tile-width";
 constexpr char KEY_TRACK_ID[] = "track-id";
+constexpr char KEY_VIDEO_ENCODING_STATISTICS_LEVEL[] = "video-encoding-statistics-level";
+constexpr char KEY_VIDEO_QP_AVERAGE[] = "video-qp-average";
 constexpr char KEY_VIDEO_QP_B_MAX[] = "video-qp-b-max";
 constexpr char KEY_VIDEO_QP_B_MIN[] = "video-qp-b-min";
 constexpr char KEY_VIDEO_QP_I_MAX[] = "video-qp-i-max";
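Note on the hunks above: the asString_* helpers are simple switch-based name tables. The sketch below replicates that pattern for just the newly added Dolby Vision levels; the constant values are copied from the diff, while the helper name and the standalone main are illustrative only.

    // Switch-based name lookup in the style of asString_DolbyVisionLevel,
    // limited to the newly added levels. Illustrative only.
    #include <cstdint>
    #include <cstdio>

    constexpr int32_t DolbyVisionLevelUhd120 = 0x200;
    constexpr int32_t DolbyVisionLevel8k30   = 0x400;
    constexpr int32_t DolbyVisionLevel8k60   = 0x800;

    static const char *levelName(int32_t level, const char *def = "??") {
        switch (level) {
            case DolbyVisionLevelUhd120: return "Uhd120";
            case DolbyVisionLevel8k30:   return "8k30";
            case DolbyVisionLevel8k60:   return "8k60";
            default:                     return def;
        }
    }

    int main() {
        std::printf("0x400 -> %s\n", levelName(DolbyVisionLevel8k30));  // prints "8k30"
        std::printf("0x123 -> %s\n", levelName(0x123));                 // prints "??"
        return 0;
    }
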
diff --git a/media/libstagefright/MediaCodecListOverrides.h b/media/libstagefright/include/media/stagefright/MediaCodecListOverrides.h
similarity index 100%
rename from media/libstagefright/MediaCodecListOverrides.h
rename to media/libstagefright/include/media/stagefright/MediaCodecListOverrides.h
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index c80012e..88c1f3f 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -60,6 +60,8 @@
     kKeyAVCC              = 'avcc',  // raw data
     kKeyHVCC              = 'hvcc',  // raw data
     kKeyDVCC              = 'dvcc',  // raw data
+    kKeyDVVC              = 'dvvc',  // raw data
+    kKeyDVWC              = 'dvwc',  // raw data
     kKeyAV1C              = 'av1c',  // raw data
     kKeyThumbnailHVCC     = 'thvc',  // raw data
     kKeyThumbnailAV1C     = 'tav1',  // raw data
@@ -283,6 +285,8 @@
     kTypeHVCC        = 'hvcc',
     kTypeAV1C        = 'av1c',
     kTypeDVCC        = 'dvcc',
+    kTypeDVVC        = 'dvvc',
+    kTypeDVWC        = 'dvwc',
     kTypeD263        = 'd263',
     kTypeHCOS        = 'hcos',
 };
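Note on the hunks above: kKeyDVVC/kKeyDVWC (and the matching kTypeDVVC/kTypeDVWC) are ordinary fourcc tags written as multi-character literals. Their numeric value is implementation-defined, but with GCC/Clang the characters pack most-significant-byte first, equivalent to the helper below (illustrative only, not part of the patch).

    // Fourcc packing equivalent to 'dvvc' / 'dvwc' on GCC/Clang. Illustrative only.
    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t fourcc(char a, char b, char c, char d) {
        return (uint32_t(uint8_t(a)) << 24) | (uint32_t(uint8_t(b)) << 16) |
               (uint32_t(uint8_t(c)) << 8)  |  uint32_t(uint8_t(d));
    }

    int main() {
        std::printf("dvvc = 0x%08x\n", fourcc('d', 'v', 'v', 'c'));  // 0x64767663
        std::printf("dvwc = 0x%08x\n", fourcc('d', 'v', 'w', 'c'));  // 0x64767763
        return 0;
    }
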
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index fbfa8cc..283df1e 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -54,16 +54,21 @@
         "libstagefright_foundation_headers",
     ],
 
-    export_include_dirs: ["."],
+    export_include_dirs: ["include"],
+
+    local_include_dirs: ["include/mpeg2ts"],
 
     whole_static_libs: [
         "libstagefright_metadatautils",
     ],
 
+}
+
+cc_defaults {
+    name: "libstagefright_mpeg2support_sdk_defaults",
+
     min_sdk_version: "29",
-
     host_supported: true,
-
     target: {
         darwin: {
             enabled: false,
@@ -71,10 +76,19 @@
     },
 }
 
+cc_library_headers {
+    name: "libstagefright_mpeg2support_headers",
+    defaults: [
+        "libstagefright_mpeg2support_sdk_defaults",
+    ],
+    export_include_dirs: ["include"],
+}
+
 cc_library_static {
     name: "libstagefright_mpeg2support",
     defaults: [
         "libstagefright_mpeg2support_defaults",
+        "libstagefright_mpeg2support_sdk_defaults",
     ],
     cflags: [
         "-DENABLE_CRYPTO",
@@ -91,6 +105,7 @@
     name: "libstagefright_mpeg2support_nocrypto",
     defaults: [
         "libstagefright_mpeg2support_defaults",
+        "libstagefright_mpeg2support_sdk_defaults",
     ],
     apex_available: [
         "com.android.media",
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/ATSParser.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/ATSParser.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/ATSParser.h
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/AnotherPacketSource.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/AnotherPacketSource.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/AnotherPacketSource.h
diff --git a/media/libstagefright/mpeg2ts/CasManager.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/CasManager.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/CasManager.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/CasManager.h
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/ESQueue.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/ESQueue.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/ESQueue.h
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/HlsSampleDecryptor.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/HlsSampleDecryptor.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/HlsSampleDecryptor.h
diff --git a/media/libstagefright/mpeg2ts/SampleDecryptor.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/SampleDecryptor.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/SampleDecryptor.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/SampleDecryptor.h
diff --git a/media/libstagefright/mpeg2ts/test/Android.bp b/media/libstagefright/mpeg2ts/test/Android.bp
index 464b039..34a8d3e 100644
--- a/media/libstagefright/mpeg2ts/test/Android.bp
+++ b/media/libstagefright/mpeg2ts/test/Android.bp
@@ -57,11 +57,6 @@
         "libstagefright_mpeg2support",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/",
-        "frameworks/av/media/libstagefright/",
-    ],
-
     header_libs: [
         "libmedia_headers",
         "libaudioclient_headers",
diff --git a/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp b/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
index 79c233b..9e24a99 100644
--- a/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
+++ b/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
@@ -26,9 +26,8 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <media/stagefright/foundation/AUtils.h>
-
-#include "mpeg2ts/ATSParser.h"
-#include "mpeg2ts/AnotherPacketSource.h"
+#include <mpeg2ts/AnotherPacketSource.h>
+#include <mpeg2ts/ATSParser.h>
 
 #include "Mpeg2tsUnitTestEnvironment.h"
 
diff --git a/media/libstagefright/rtsp/AAMRAssembler.cpp b/media/libstagefright/rtsp/AAMRAssembler.cpp
index bb2a238..e773031 100644
--- a/media/libstagefright/rtsp/AAMRAssembler.cpp
+++ b/media/libstagefright/rtsp/AAMRAssembler.cpp
@@ -18,9 +18,8 @@
 #define LOG_TAG "AAMRAssembler"
 #include <utils/Log.h>
 
-#include "AAMRAssembler.h"
-
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/AAMRAssembler.h>
+#include <media/stagefright/rtsp/ARTPSource.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 30cdbc9..2f516d5 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -18,9 +18,9 @@
 #define LOG_TAG "AAVCAssembler"
 #include <utils/Log.h>
 
-#include "AAVCAssembler.h"
+#include <media/stagefright/rtsp/AAVCAssembler.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AH263Assembler.cpp b/media/libstagefright/rtsp/AH263Assembler.cpp
index 3436e95..584b4de 100644
--- a/media/libstagefright/rtsp/AH263Assembler.cpp
+++ b/media/libstagefright/rtsp/AH263Assembler.cpp
@@ -17,9 +17,9 @@
 #define LOG_TAG "AH263Assembler"
 #include <utils/Log.h>
 
-#include "AH263Assembler.h"
+#include <media/stagefright/rtsp/AH263Assembler.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.cpp b/media/libstagefright/rtsp/AHEVCAssembler.cpp
index b240339..bb42d1f 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AHEVCAssembler.cpp
@@ -18,14 +18,14 @@
 #define LOG_TAG "AHEVCAssembler"
 #include <utils/Log.h>
 
-#include "AHEVCAssembler.h"
+#include <media/stagefright/rtsp/AHEVCAssembler.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
+#include <HevcUtils.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <include/HevcUtils.h>
 #include <media/stagefright/foundation/hexdump.h>
 
 #include <stdint.h>
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
index 0988774..2101de1 100644
--- a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
@@ -18,10 +18,10 @@
 #define LOG_TAG "AMPEG2TSAssembler"
 #include <utils/Log.h>
 
-#include "AMPEG2TSAssembler.h"
+#include <media/stagefright/rtsp/AMPEG2TSAssembler.h>
 
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
index 4302aee..0fc03ae 100644
--- a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
@@ -17,9 +17,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AMPEG4AudioAssembler"
 
-#include "AMPEG4AudioAssembler.h"
+#include <media/stagefright/rtsp/AMPEG4AudioAssembler.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABitReader.h>
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index 7bd33c1..6b1d2a1 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -18,10 +18,10 @@
 #define LOG_TAG "AMPEG4ElementaryAssembler"
 #include <utils/Log.h>
 
-#include "AMPEG4ElementaryAssembler.h"
+#include <media/stagefright/rtsp/AMPEG4ElementaryAssembler.h>
 
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index 169df46..db63183 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -18,10 +18,9 @@
 #define LOG_TAG "APacketSource"
 #include <utils/Log.h>
 
-#include "APacketSource.h"
-
-#include "ARawAudioAssembler.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/APacketSource.h>
+#include <media/stagefright/rtsp/ARawAudioAssembler.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <ctype.h>
 
diff --git a/media/libstagefright/rtsp/ARTPAssembler.cpp b/media/libstagefright/rtsp/ARTPAssembler.cpp
index 52aa3a0..b9869de 100644
--- a/media/libstagefright/rtsp/ARTPAssembler.cpp
+++ b/media/libstagefright/rtsp/ARTPAssembler.cpp
@@ -15,7 +15,7 @@
  */
 
 #define LOG_TAG "ARTPAssembler"
-#include "ARTPAssembler.h"
+#include <media/stagefright/rtsp/ARTPAssembler.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 0bd342a..5a8f471 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -18,9 +18,9 @@
 #define LOG_TAG "ARTPConnection"
 #include <utils/Log.h>
 
-#include "ARTPConnection.h"
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ARTPConnection.h>
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/ARTPSession.cpp b/media/libstagefright/rtsp/ARTPSession.cpp
index e5acb06..dae46f9 100644
--- a/media/libstagefright/rtsp/ARTPSession.cpp
+++ b/media/libstagefright/rtsp/ARTPSession.cpp
@@ -18,7 +18,10 @@
 #define LOG_TAG "ARTPSession"
 #include <utils/Log.h>
 
-#include "ARTPSession.h"
+#include <media/stagefright/rtsp/APacketSource.h>
+#include <media/stagefright/rtsp/ARTPConnection.h>
+#include <media/stagefright/rtsp/ARTPSession.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -29,9 +32,6 @@
 #include <arpa/inet.h>
 #include <sys/socket.h>
 
-#include "APacketSource.h"
-#include "ARTPConnection.h"
-#include "ASessionDescription.h"
 
 namespace android {
 
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index 38a370b..5f62b9d 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -18,17 +18,17 @@
 #define LOG_TAG "ARTPSource"
 #include <utils/Log.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
-#include "AAMRAssembler.h"
-#include "AAVCAssembler.h"
-#include "AHEVCAssembler.h"
-#include "AH263Assembler.h"
-#include "AMPEG2TSAssembler.h"
-#include "AMPEG4AudioAssembler.h"
-#include "AMPEG4ElementaryAssembler.h"
-#include "ARawAudioAssembler.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/AAMRAssembler.h>
+#include <media/stagefright/rtsp/AAVCAssembler.h>
+#include <media/stagefright/rtsp/AHEVCAssembler.h>
+#include <media/stagefright/rtsp/AH263Assembler.h>
+#include <media/stagefright/rtsp/AMPEG2TSAssembler.h>
+#include <media/stagefright/rtsp/AMPEG4AudioAssembler.h>
+#include <media/stagefright/rtsp/AMPEG4ElementaryAssembler.h>
+#include <media/stagefright/rtsp/ARawAudioAssembler.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index 11c7aeb..8990f0c 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "ARTPWriter"
 #include <utils/Log.h>
 
-#include "ARTPWriter.h"
+#include <media/stagefright/rtsp/ARTPWriter.h>
 
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/foundation/ABuffer.h>
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index c33bf3f..aab63a8 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -18,8 +18,8 @@
 #define LOG_TAG "ARTSPConnection"
 #include <utils/Log.h>
 
-#include "ARTSPConnection.h"
-#include "NetworkUtils.h"
+#include <media/stagefright/rtsp/ARTSPConnection.h>
+#include <media/stagefright/rtsp/NetworkUtils.h>
 
 #include <datasource/HTTPBase.h>
 #include <media/stagefright/foundation/ABuffer.h>
diff --git a/media/libstagefright/rtsp/ARawAudioAssembler.cpp b/media/libstagefright/rtsp/ARawAudioAssembler.cpp
index 167f7a4..9210af3 100644
--- a/media/libstagefright/rtsp/ARawAudioAssembler.cpp
+++ b/media/libstagefright/rtsp/ARawAudioAssembler.cpp
@@ -18,10 +18,10 @@
 #define LOG_TAG "ARawAudioAssembler"
 #include <utils/Log.h>
 
-#include "ARawAudioAssembler.h"
+#include <media/stagefright/rtsp/ARawAudioAssembler.h>
 
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 5b5b4b1..217eca7 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "ASessionDescription"
 #include <utils/Log.h>
 
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AString.h>
diff --git a/media/libstagefright/rtsp/Android.bp b/media/libstagefright/rtsp/Android.bp
index 34d1788..97d4abe 100644
--- a/media/libstagefright/rtsp/Android.bp
+++ b/media/libstagefright/rtsp/Android.bp
@@ -47,10 +47,9 @@
         "libmedia",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/native/include/media/openmax",
-        "frameworks/native/include/android",
+    header_libs: [
+        "libstagefright_headers",
+        "libstagefright_rtsp_headers",
     ],
 
     arch: {
@@ -73,6 +72,18 @@
     },
 }
 
+cc_library_headers {
+    name: "libstagefright_rtsp_headers",
+    export_include_dirs: ["include"],
+    vendor_available: true,
+    host_supported: true,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+}
+
 cc_library_static {
     name: "libstagefright_rtsp",
 
diff --git a/media/libstagefright/rtsp/JitterCalculator.cpp b/media/libstagefright/rtsp/JitterCalculator.cpp
index 93b5a83..93afe9c 100644
--- a/media/libstagefright/rtsp/JitterCalculator.cpp
+++ b/media/libstagefright/rtsp/JitterCalculator.cpp
@@ -17,7 +17,7 @@
 #define LOG_TAG "JitterCalc"
 #include <utils/Log.h>
 
-#include "JitterCalculator.h"
+#include <media/stagefright/rtsp/JitterCalculator.h>
 
 #include <stdlib.h>
 
@@ -38,14 +38,13 @@
     mInterArrivalJitterUs = inter;
 }
 
-void JitterCalc::putBaseData(int64_t rtpTime, int64_t arrivalTimeUs) {
-    // A RTP time wraps around after UINT32_MAX. We must consider this case.
-    const int64_t UINT32_MSB = 0x80000000;
-    int64_t overflowMask = (mFirstTimeStamp & UINT32_MSB & ~rtpTime) << 1;
-    int64_t tempRtpTime = overflowMask | rtpTime;
+void JitterCalc::putBaseData(uint32_t rtpTime, int64_t arrivalTimeUs) {
+    // An RTP time wraps around after UINT32_MAX, so this subtraction may overflow.
+    uint32_t diff = 0;
+    __builtin_usub_overflow(rtpTime, mFirstTimeStamp, &diff);
 
     // Base jitter implementation can be various
-    int64_t scheduledTimeUs = (tempRtpTime - (int64_t)mFirstTimeStamp) * 1000000ll / mClockRate;
+    int64_t scheduledTimeUs = ((int32_t)diff) * 1000000ll / mClockRate;
     int64_t elapsedTimeUs = arrivalTimeUs - mFirstArrivalTimeUs;
     int64_t correctionTimeUs = elapsedTimeUs - scheduledTimeUs; // additional propagation delay;
     mBaseJitterUs = (mBaseJitterUs * 15 + correctionTimeUs) / 16;
@@ -53,18 +52,13 @@
             (long long)mBaseJitterUs, (long long)correctionTimeUs);
 }
 
-void JitterCalc::putInterArrivalData(int64_t rtpTime, int64_t arrivalTimeUs) {
-    const int64_t UINT32_MSB = 0x80000000;
-    int64_t tempRtpTime = rtpTime;
-    int64_t tempLastTimeStamp = mLastTimeStamp;
-
-    // A RTP time wraps around after UINT32_MAX. We must consider this case.
-    int64_t overflowMask = (mLastTimeStamp ^ rtpTime) & UINT32_MSB;
-    tempRtpTime |= ((overflowMask & ~rtpTime) << 1);
-    tempLastTimeStamp |= ((overflowMask & ~mLastTimeStamp) << 1);
+void JitterCalc::putInterArrivalData(uint32_t rtpTime, int64_t arrivalTimeUs) {
+    // An RTP time wraps around after UINT32_MAX, so this subtraction may overflow.
+    uint32_t diff = 0;
+    __builtin_usub_overflow(rtpTime, mLastTimeStamp, &diff);
 
     // 6.4.1 of RFC3550 defines this interarrival jitter value.
-    int64_t diffTimeStampUs = abs(tempRtpTime - tempLastTimeStamp) * 1000000ll / mClockRate;
+    int64_t diffTimeStampUs = abs((int32_t)diff) * 1000000ll / mClockRate;
     int64_t diffArrivalUs = arrivalTimeUs - mLastArrivalTimeUs; // Can't be minus
     ALOGV("diffTimeStampUs %lld \t\t diffArrivalUs %lld",
             (long long)diffTimeStampUs, (long long)diffArrivalUs);
@@ -72,7 +66,7 @@
     int64_t varianceUs = diffArrivalUs - diffTimeStampUs;
     mInterArrivalJitterUs = (mInterArrivalJitterUs * 15 + abs(varianceUs)) / 16;
 
-    mLastTimeStamp = (uint32_t)rtpTime;
+    mLastTimeStamp = rtpTime;
     mLastArrivalTimeUs = arrivalTimeUs;
 }
 
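Note on the JitterCalculator change above: the rewrite relies on unsigned 32-bit subtraction followed by a signed reinterpretation to obtain a correct (possibly negative) timestamp delta across the UINT32_MAX wrap. A small standalone worked example of that arithmetic (illustrative only; the helper name is hypothetical):

    // Unsigned subtraction modulo 2^32, reinterpreted as signed, handles
    // RTP timestamp wrap-around the same way the JitterCalc code above does.
    #include <cstdint>
    #include <cstdio>

    static int32_t rtpDelta(uint32_t now, uint32_t base) {
        uint32_t diff = 0;
        __builtin_usub_overflow(now, base, &diff);  // diff = (now - base) mod 2^32
        return (int32_t)diff;
    }

    int main() {
        // Timestamp wrapped: base just below UINT32_MAX, "now" just past zero.
        std::printf("%d\n", rtpDelta(10u, 0xFFFFFFF0u));   // prints 26, not a huge value
        // Out-of-order packet: "now" is slightly older than base.
        std::printf("%d\n", rtpDelta(0xFFFFFFF0u, 10u));   // prints -26
        return 0;
    }
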
diff --git a/media/libstagefright/rtsp/MyTransmitter.h b/media/libstagefright/rtsp/MyTransmitter.h
deleted file mode 100644
index bf44aff..0000000
--- a/media/libstagefright/rtsp/MyTransmitter.h
+++ /dev/null
@@ -1,984 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MY_TRANSMITTER_H_
-
-#define MY_TRANSMITTER_H_
-
-#include "ARTPConnection.h"
-
-#include <arpa/inet.h>
-#include <sys/socket.h>
-
-#include <openssl/md5.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/base64.h>
-#include <media/stagefright/foundation/hexdump.h>
-
-#ifdef ANDROID
-#include "VideoSource.h"
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaCodecSource.h>
-#endif
-
-namespace android {
-
-#define TRACK_SUFFIX    "trackid=1"
-#define PT              96
-#define PT_STR          "96"
-
-#define USERNAME        "bcast"
-#define PASSWORD        "test"
-
-static int uniformRand(int limit) {
-    return ((double)rand() * limit) / RAND_MAX;
-}
-
-static bool GetAttribute(const char *s, const char *key, AString *value) {
-    value->clear();
-
-    size_t keyLen = strlen(key);
-
-    for (;;) {
-        const char *colonPos = strchr(s, ';');
-
-        size_t len =
-            (colonPos == NULL) ? strlen(s) : colonPos - s;
-
-        if (len >= keyLen + 1 && s[keyLen] == '=' && !strncmp(s, key, keyLen)) {
-            value->setTo(&s[keyLen + 1], len - keyLen - 1);
-            return true;
-        }
-
-        if (colonPos == NULL) {
-            return false;
-        }
-
-        s = colonPos + 1;
-    }
-}
-
-struct MyTransmitter : public AHandler {
-    MyTransmitter(const char *url, const sp<ALooper> &looper)
-        : mServerURL(url),
-          mLooper(looper),
-          mConn(new ARTSPConnection),
-          mConnected(false),
-          mAuthType(NONE),
-          mRTPSocket(-1),
-          mRTCPSocket(-1),
-          mSourceID(rand()),
-          mSeqNo(uniformRand(65536)),
-          mRTPTimeBase(rand()),
-          mNumSamplesSent(0),
-          mNumRTPSent(0),
-          mNumRTPOctetsSent(0),
-          mLastRTPTime(0),
-          mLastNTPTime(0) {
-        mStreamURL = mServerURL;
-        mStreamURL.append("/bazong.sdp");
-
-        mTrackURL = mStreamURL;
-        mTrackURL.append("/");
-        mTrackURL.append(TRACK_SUFFIX);
-
-        mLooper->registerHandler(this);
-        mLooper->registerHandler(mConn);
-
-        sp<AMessage> reply = new AMessage('conn', this);
-        mConn->connect(mServerURL.c_str(), reply);
-
-#ifdef ANDROID
-        int width = 640;
-        int height = 480;
-
-        sp<MediaSource> source = new VideoSource(width, height);
-
-        sp<AMessage> encMeta = new AMessage;
-        encMeta->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
-        encMeta->setInt32("width", width);
-        encMeta->setInt32("height", height);
-        encMeta->setInt32("frame-rate", 30);
-        encMeta->setInt32("bitrate", 256000);
-        encMeta->setInt32("i-frame-interval", 10);
-
-        sp<ALooper> encLooper = new ALooper;
-        encLooper->setName("rtsp_transmitter");
-        encLooper->start();
-
-        mEncoder = MediaCodecSource::Create(encLooper, encMeta, source);
-
-        mEncoder->start();
-
-        MediaBuffer *buffer;
-        CHECK_EQ(mEncoder->read(&buffer), (status_t)OK);
-        CHECK(buffer != NULL);
-
-        makeH264SPropParamSets(buffer);
-
-        buffer->release();
-        buffer = NULL;
-#endif
-    }
-
-    uint64_t ntpTime() {
-        struct timeval tv;
-        gettimeofday(&tv, NULL);
-
-        uint64_t nowUs = tv.tv_sec * 1000000ll + tv.tv_usec;
-
-        nowUs += ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll;
-
-        uint64_t hi = nowUs / 1000000ll;
-        uint64_t lo = ((1ll << 32) * (nowUs % 1000000ll)) / 1000000ll;
-
-        return (hi << 32) | lo;
-    }
-
-    void issueAnnounce() {
-        AString sdp;
-        sdp = "v=0\r\n";
-
-        sdp.append("o=- ");
-
-        uint64_t ntp = ntpTime();
-        sdp.append(ntp);
-        sdp.append(" ");
-        sdp.append(ntp);
-        sdp.append(" IN IP4 127.0.0.0\r\n");
-
-        sdp.append(
-              "s=Sample\r\n"
-              "i=Playing around with ANNOUNCE\r\n"
-              "c=IN IP4 ");
-
-        struct in_addr addr;
-        addr.s_addr = htonl(mServerIP);
-
-        sdp.append(inet_ntoa(addr));
-
-        sdp.append(
-              "\r\n"
-              "t=0 0\r\n"
-              "a=range:npt=now-\r\n");
-
-#ifdef ANDROID
-        sp<MetaData> meta = mEncoder->getFormat();
-        int32_t width, height;
-        CHECK(meta->findInt32(kKeyWidth, &width));
-        CHECK(meta->findInt32(kKeyHeight, &height));
-
-        sdp.append(
-              "m=video 0 RTP/AVP " PT_STR "\r\n"
-              "b=AS 320000\r\n"
-              "a=rtpmap:" PT_STR " H264/90000\r\n");
-
-        sdp.append("a=cliprect 0,0,");
-        sdp.append(height);
-        sdp.append(",");
-        sdp.append(width);
-        sdp.append("\r\n");
-
-        sdp.append(
-              "a=framesize:" PT_STR " ");
-        sdp.append(width);
-        sdp.append("-");
-        sdp.append(height);
-        sdp.append("\r\n");
-
-        sdp.append(
-              "a=fmtp:" PT_STR " profile-level-id=42C015;sprop-parameter-sets=");
-
-        sdp.append(mSeqParamSet);
-        sdp.append(",");
-        sdp.append(mPicParamSet);
-        sdp.append(";packetization-mode=1\r\n");
-#else
-        sdp.append(
-                "m=audio 0 RTP/AVP " PT_STR "\r\n"
-                "a=rtpmap:" PT_STR " L8/8000/1\r\n");
-#endif
-
-        sdp.append("a=control:" TRACK_SUFFIX "\r\n");
-
-        AString request;
-        request.append("ANNOUNCE ");
-        request.append(mStreamURL);
-        request.append(" RTSP/1.0\r\n");
-
-        addAuthentication(&request, "ANNOUNCE", mStreamURL.c_str());
-
-        request.append("Content-Type: application/sdp\r\n");
-        request.append("Content-Length: ");
-        request.append(sdp.size());
-        request.append("\r\n");
-
-        request.append("\r\n");
-        request.append(sdp);
-
-        sp<AMessage> reply = new AMessage('anno', this);
-        mConn->sendRequest(request.c_str(), reply);
-    }
-
-    void H(const AString &s, AString *out) {
-        out->clear();
-
-        MD5_CTX m;
-        MD5_Init(&m);
-        MD5_Update(&m, s.c_str(), s.size());
-
-        uint8_t key[16];
-        MD5_Final(key, &m);
-
-        for (size_t i = 0; i < 16; ++i) {
-            char nibble = key[i] >> 4;
-            if (nibble <= 9) {
-                nibble += '0';
-            } else {
-                nibble += 'a' - 10;
-            }
-            out->append(&nibble, 1);
-
-            nibble = key[i] & 0x0f;
-            if (nibble <= 9) {
-                nibble += '0';
-            } else {
-                nibble += 'a' - 10;
-            }
-            out->append(&nibble, 1);
-        }
-    }
-
-    void authenticate(const sp<ARTSPResponse> &response) {
-        ssize_t i = response->mHeaders.indexOfKey("www-authenticate");
-        CHECK_GE(i, 0);
-
-        AString value = response->mHeaders.valueAt(i);
-
-        if (!strncmp(value.c_str(), "Basic", 5)) {
-            mAuthType = BASIC;
-        } else {
-            CHECK(!strncmp(value.c_str(), "Digest", 6));
-            mAuthType = DIGEST;
-
-            i = value.find("nonce=");
-            CHECK_GE(i, 0);
-            CHECK_EQ(value.c_str()[i + 6], '\"');
-            ssize_t j = value.find("\"", i + 7);
-            CHECK_GE(j, 0);
-
-            mNonce.setTo(value, i + 7, j - i - 7);
-        }
-
-        issueAnnounce();
-    }
-
-    void addAuthentication(
-            AString *request, const char *method, const char *url) {
-        if (mAuthType == NONE) {
-            return;
-        }
-
-        if (mAuthType == BASIC) {
-            request->append("Authorization: Basic YmNhc3Q6dGVzdAo=\r\n");
-            return;
-        }
-
-        CHECK_EQ((int)mAuthType, (int)DIGEST);
-
-        AString A1;
-        A1.append(USERNAME);
-        A1.append(":");
-        A1.append("Streaming Server");
-        A1.append(":");
-        A1.append(PASSWORD);
-
-        AString A2;
-        A2.append(method);
-        A2.append(":");
-        A2.append(url);
-
-        AString HA1, HA2;
-        H(A1, &HA1);
-        H(A2, &HA2);
-
-        AString tmp;
-        tmp.append(HA1);
-        tmp.append(":");
-        tmp.append(mNonce);
-        tmp.append(":");
-        tmp.append(HA2);
-
-        AString digest;
-        H(tmp, &digest);
-
-        request->append("Authorization: Digest ");
-        request->append("nonce=\"");
-        request->append(mNonce);
-        request->append("\", ");
-        request->append("username=\"" USERNAME "\", ");
-        request->append("uri=\"");
-        request->append(url);
-        request->append("\", ");
-        request->append("response=\"");
-        request->append(digest);
-        request->append("\"");
-        request->append("\r\n");
-    }
-
-    virtual void onMessageReceived(const sp<AMessage> &msg) {
-        switch (msg->what()) {
-            case 'conn':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "connection request completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                if (result != OK) {
-                    (new AMessage('quit', this))->post();
-                    break;
-                }
-
-                mConnected = true;
-
-                CHECK(msg->findInt32("server-ip", (int32_t *)&mServerIP));
-
-                issueAnnounce();
-                break;
-            }
-
-            case 'anno':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "ANNOUNCE completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                if (result == OK) {
-                    response = static_cast<ARTSPResponse *>(obj.get());
-                    CHECK(response != NULL);
-
-                    if (response->mStatusCode == 401) {
-                        if (mAuthType != NONE) {
-                            LOG(INFO) << "FAILED to authenticate";
-                            (new AMessage('quit', this))->post();
-                            break;
-                        }
-
-                        authenticate(response);
-                        break;
-                    }
-                }
-
-                if (result != OK || response->mStatusCode != 200) {
-                    (new AMessage('quit', this))->post();
-                    break;
-                }
-
-                unsigned rtpPort;
-                ARTPConnection::MakePortPair(&mRTPSocket, &mRTCPSocket, &rtpPort);
-
-                // (new AMessage('poll', this))->post();
-
-                AString request;
-                request.append("SETUP ");
-                request.append(mTrackURL);
-                request.append(" RTSP/1.0\r\n");
-
-                addAuthentication(&request, "SETUP", mTrackURL.c_str());
-
-                request.append("Transport: RTP/AVP;unicast;client_port=");
-                request.append(rtpPort);
-                request.append("-");
-                request.append(rtpPort + 1);
-                request.append(";mode=record\r\n");
-                request.append("\r\n");
-
-                sp<AMessage> reply = new AMessage('setu', this);
-                mConn->sendRequest(request.c_str(), reply);
-                break;
-            }
-
-#if 0
-            case 'poll':
-            {
-                fd_set rs;
-                FD_ZERO(&rs);
-                FD_SET(mRTCPSocket, &rs);
-
-                struct timeval tv;
-                tv.tv_sec = 0;
-                tv.tv_usec = 0;
-
-                int res = select(mRTCPSocket + 1, &rs, NULL, NULL, &tv);
-
-                if (res == 1) {
-                    sp<ABuffer> buffer = new ABuffer(65536);
-                    ssize_t n = recv(mRTCPSocket, buffer->data(), buffer->size(), 0);
-
-                    if (n <= 0) {
-                        LOG(ERROR) << "recv returned " << n;
-                    } else {
-                        LOG(INFO) << "recv returned " << n << " bytes of data.";
-
-                        hexdump(buffer->data(), n);
-                    }
-                }
-
-                msg->post(50000);
-                break;
-            }
-#endif
-
-            case 'setu':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "SETUP completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                if (result == OK) {
-                    response = static_cast<ARTSPResponse *>(obj.get());
-                    CHECK(response != NULL);
-                }
-
-                if (result != OK || response->mStatusCode != 200) {
-                    (new AMessage('quit', this))->post();
-                    break;
-                }
-
-                ssize_t i = response->mHeaders.indexOfKey("session");
-                CHECK_GE(i, 0);
-                mSessionID = response->mHeaders.valueAt(i);
-                i = mSessionID.find(";");
-                if (i >= 0) {
-                    // Remove options, i.e. ";timeout=90"
-                    mSessionID.erase(i, mSessionID.size() - i);
-                }
-
-                i = response->mHeaders.indexOfKey("transport");
-                CHECK_GE(i, 0);
-                AString transport = response->mHeaders.valueAt(i);
-
-                LOG(INFO) << "transport = '" << transport << "'";
-
-                AString value;
-                CHECK(GetAttribute(transport.c_str(), "server_port", &value));
-
-                unsigned rtpPort, rtcpPort;
-                CHECK_EQ(sscanf(value.c_str(), "%u-%u", &rtpPort, &rtcpPort), 2);
-
-                CHECK(GetAttribute(transport.c_str(), "source", &value));
-
-                memset(mRemoteAddr.sin_zero, 0, sizeof(mRemoteAddr.sin_zero));
-                mRemoteAddr.sin_family = AF_INET;
-                mRemoteAddr.sin_addr.s_addr = inet_addr(value.c_str());
-                mRemoteAddr.sin_port = htons(rtpPort);
-
-                mRemoteRTCPAddr = mRemoteAddr;
-                mRemoteRTCPAddr.sin_port = htons(rtpPort + 1);
-
-                CHECK_EQ(0, connect(mRTPSocket,
-                                    (const struct sockaddr *)&mRemoteAddr,
-                                    sizeof(mRemoteAddr)));
-
-                CHECK_EQ(0, connect(mRTCPSocket,
-                                    (const struct sockaddr *)&mRemoteRTCPAddr,
-                                    sizeof(mRemoteRTCPAddr)));
-
-                uint32_t x = ntohl(mRemoteAddr.sin_addr.s_addr);
-                LOG(INFO) << "sending data to "
-                     << (x >> 24)
-                     << "."
-                     << ((x >> 16) & 0xff)
-                     << "."
-                     << ((x >> 8) & 0xff)
-                     << "."
-                     << (x & 0xff)
-                     << ":"
-                     << rtpPort;
-
-                AString request;
-                request.append("RECORD ");
-                request.append(mStreamURL);
-                request.append(" RTSP/1.0\r\n");
-
-                addAuthentication(&request, "RECORD", mStreamURL.c_str());
-
-                request.append("Session: ");
-                request.append(mSessionID);
-                request.append("\r\n");
-                request.append("\r\n");
-
-                sp<AMessage> reply = new AMessage('reco', this);
-                mConn->sendRequest(request.c_str(), reply);
-                break;
-            }
-
-            case 'reco':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "RECORD completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                if (result == OK) {
-                    response = static_cast<ARTSPResponse *>(obj.get());
-                    CHECK(response != NULL);
-                }
-
-                if (result != OK) {
-                    (new AMessage('quit', this))->post();
-                    break;
-                }
-
-                (new AMessage('more', this))->post();
-                (new AMessage('sr  ', this))->post();
-                (new AMessage('aliv', this))->post(30000000ll);
-                break;
-            }
-
-            case 'aliv':
-            {
-                if (!mConnected) {
-                    break;
-                }
-
-                AString request;
-                request.append("OPTIONS ");
-                request.append(mStreamURL);
-                request.append(" RTSP/1.0\r\n");
-
-                addAuthentication(&request, "RECORD", mStreamURL.c_str());
-
-                request.append("Session: ");
-                request.append(mSessionID);
-                request.append("\r\n");
-                request.append("\r\n");
-
-                sp<AMessage> reply = new AMessage('opts', this);
-                mConn->sendRequest(request.c_str(), reply);
-                break;
-            }
-
-            case 'opts':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "OPTIONS completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                if (!mConnected) {
-                    break;
-                }
-
-                (new AMessage('aliv', this))->post(30000000ll);
-                break;
-            }
-
-            case 'more':
-            {
-                if (!mConnected) {
-                    break;
-                }
-
-                sp<ABuffer> buffer = new ABuffer(65536);
-                uint8_t *data = buffer->data();
-                data[0] = 0x80;
-                data[1] = (1 << 7) | PT;  // M-bit
-                data[2] = (mSeqNo >> 8) & 0xff;
-                data[3] = mSeqNo & 0xff;
-                data[8] = mSourceID >> 24;
-                data[9] = (mSourceID >> 16) & 0xff;
-                data[10] = (mSourceID >> 8) & 0xff;
-                data[11] = mSourceID & 0xff;
-
-#ifdef ANDROID
-                MediaBuffer *mediaBuf = NULL;
-                for (;;) {
-                    CHECK_EQ(mEncoder->read(&mediaBuf), (status_t)OK);
-                    if (mediaBuf->range_length() > 0) {
-                        break;
-                    }
-                    mediaBuf->release();
-                    mediaBuf = NULL;
-                }
-
-                int64_t timeUs;
-                CHECK(mediaBuf->meta_data()->findInt64(kKeyTime, &timeUs));
-
-                uint32_t rtpTime = mRTPTimeBase + (timeUs * 9 / 100ll);
-
-                const uint8_t *mediaData =
-                    (const uint8_t *)mediaBuf->data() + mediaBuf->range_offset();
-
-                CHECK(!memcmp("\x00\x00\x00\x01", mediaData, 4));
-
-                CHECK_LE(mediaBuf->range_length() - 4 + 12, buffer->size());
-
-                memcpy(&data[12],
-                       mediaData + 4, mediaBuf->range_length() - 4);
-
-                buffer->setRange(0, mediaBuf->range_length() - 4 + 12);
-
-                mediaBuf->release();
-                mediaBuf = NULL;
-#else
-                uint32_t rtpTime = mRTPTimeBase + mNumRTPSent * 128;
-                memset(&data[12], 0, 128);
-                buffer->setRange(0, 12 + 128);
-#endif
-
-                data[4] = rtpTime >> 24;
-                data[5] = (rtpTime >> 16) & 0xff;
-                data[6] = (rtpTime >> 8) & 0xff;
-                data[7] = rtpTime & 0xff;
-
-                ssize_t n = send(
-                        mRTPSocket, data, buffer->size(), 0);
-                if (n < 0) {
-                    LOG(ERROR) << "send failed (" << strerror(errno) << ")";
-                }
-                CHECK_EQ(n, (ssize_t)buffer->size());
-
-                ++mSeqNo;
-
-                ++mNumRTPSent;
-                mNumRTPOctetsSent += buffer->size() - 12;
-
-                mLastRTPTime = rtpTime;
-                mLastNTPTime = ntpTime();
-
-#ifdef ANDROID
-                if (mNumRTPSent < 60 * 25) {  // 60 secs worth
-                    msg->post(40000);
-#else
-                if (mNumRTPOctetsSent < 8000 * 60) {
-                    msg->post(1000000ll * 128 / 8000);
-#endif
-                } else {
-                    LOG(INFO) << "That's enough, pausing.";
-
-                    AString request;
-                    request.append("PAUSE ");
-                    request.append(mStreamURL);
-                    request.append(" RTSP/1.0\r\n");
-
-                    addAuthentication(&request, "PAUSE", mStreamURL.c_str());
-
-                    request.append("Session: ");
-                    request.append(mSessionID);
-                    request.append("\r\n");
-                    request.append("\r\n");
-
-                    sp<AMessage> reply = new AMessage('paus', this);
-                    mConn->sendRequest(request.c_str(), reply);
-                }
-                break;
-            }
-
-            case 'sr  ':
-            {
-                if (!mConnected) {
-                    break;
-                }
-
-                sp<ABuffer> buffer = new ABuffer(65536);
-                buffer->setRange(0, 0);
-
-                addSR(buffer);
-                addSDES(buffer);
-
-                uint8_t *data = buffer->data();
-                ssize_t n = send(
-                        mRTCPSocket, data, buffer->size(), 0);
-                CHECK_EQ(n, (ssize_t)buffer->size());
-
-                msg->post(3000000);
-                break;
-            }
-
-            case 'paus':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "PAUSE completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                AString request;
-                request.append("TEARDOWN ");
-                request.append(mStreamURL);
-                request.append(" RTSP/1.0\r\n");
-
-                addAuthentication(&request, "TEARDOWN", mStreamURL.c_str());
-
-                request.append("Session: ");
-                request.append(mSessionID);
-                request.append("\r\n");
-                request.append("\r\n");
-
-                sp<AMessage> reply = new AMessage('tear', this);
-                mConn->sendRequest(request.c_str(), reply);
-                break;
-            }
-
-            case 'tear':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "TEARDOWN completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                if (result == OK) {
-                    response = static_cast<ARTSPResponse *>(obj.get());
-                    CHECK(response != NULL);
-                }
-
-                (new AMessage('quit', this))->post();
-                break;
-            }
-
-            case 'disc':
-            {
-                LOG(INFO) << "disconnect completed";
-
-                mConnected = false;
-                (new AMessage('quit', this))->post();
-                break;
-            }
-
-            case 'quit':
-            {
-                if (mConnected) {
-                    mConn->disconnect(new AMessage('disc', this));
-                    break;
-                }
-
-                if (mRTPSocket >= 0) {
-                    close(mRTPSocket);
-                    mRTPSocket = -1;
-                }
-
-                if (mRTCPSocket >= 0) {
-                    close(mRTCPSocket);
-                    mRTCPSocket = -1;
-                }
-
-#ifdef ANDROID
-                mEncoder->stop();
-                mEncoder.clear();
-#endif
-
-                mLooper->stop();
-                break;
-            }
-
-            default:
-                TRESPASS();
-        }
-    }
-
-protected:
-    virtual ~MyTransmitter() {
-    }
-
-private:
-    enum AuthType {
-        NONE,
-        BASIC,
-        DIGEST
-    };
-
-    AString mServerURL;
-    AString mTrackURL;
-    AString mStreamURL;
-
-    sp<ALooper> mLooper;
-    sp<ARTSPConnection> mConn;
-    bool mConnected;
-    uint32_t mServerIP;
-    AuthType mAuthType;
-    AString mNonce;
-    AString mSessionID;
-    int mRTPSocket, mRTCPSocket;
-    uint32_t mSourceID;
-    uint32_t mSeqNo;
-    uint32_t mRTPTimeBase;
-    struct sockaddr_in mRemoteAddr;
-    struct sockaddr_in mRemoteRTCPAddr;
-    size_t mNumSamplesSent;
-    uint32_t mNumRTPSent;
-    uint32_t mNumRTPOctetsSent;
-    uint32_t mLastRTPTime;
-    uint64_t mLastNTPTime;
-
-#ifdef ANDROID
-    sp<MediaSource> mEncoder;
-    AString mSeqParamSet;
-    AString mPicParamSet;
-
-    void makeH264SPropParamSets(MediaBuffer *buffer) {
-        static const char kStartCode[] = "\x00\x00\x00\x01";
-
-        const uint8_t *data =
-            (const uint8_t *)buffer->data() + buffer->range_offset();
-        size_t size = buffer->range_length();
-
-        CHECK_GE(size, 0u);
-        CHECK(!memcmp(kStartCode, data, 4));
-
-        data += 4;
-        size -= 4;
-
-        size_t startCodePos = 0;
-        while (startCodePos + 3 < size
-                && memcmp(kStartCode, &data[startCodePos], 4)) {
-            ++startCodePos;
-        }
-
-        CHECK_LT(startCodePos + 3, size);
-
-        encodeBase64(data, startCodePos, &mSeqParamSet);
-
-        encodeBase64(&data[startCodePos + 4], size - startCodePos - 4,
-                     &mPicParamSet);
-    }
-#endif
-
-    void addSR(const sp<ABuffer> &buffer) {
-        uint8_t *data = buffer->data() + buffer->size();
-
-        data[0] = 0x80 | 0;
-        data[1] = 200;  // SR
-        data[2] = 0;
-        data[3] = 6;
-        data[4] = mSourceID >> 24;
-        data[5] = (mSourceID >> 16) & 0xff;
-        data[6] = (mSourceID >> 8) & 0xff;
-        data[7] = mSourceID & 0xff;
-
-        data[8] = mLastNTPTime >> (64 - 8);
-        data[9] = (mLastNTPTime >> (64 - 16)) & 0xff;
-        data[10] = (mLastNTPTime >> (64 - 24)) & 0xff;
-        data[11] = (mLastNTPTime >> 32) & 0xff;
-        data[12] = (mLastNTPTime >> 24) & 0xff;
-        data[13] = (mLastNTPTime >> 16) & 0xff;
-        data[14] = (mLastNTPTime >> 8) & 0xff;
-        data[15] = mLastNTPTime & 0xff;
-
-        data[16] = (mLastRTPTime >> 24) & 0xff;
-        data[17] = (mLastRTPTime >> 16) & 0xff;
-        data[18] = (mLastRTPTime >> 8) & 0xff;
-        data[19] = mLastRTPTime & 0xff;
-
-        data[20] = mNumRTPSent >> 24;
-        data[21] = (mNumRTPSent >> 16) & 0xff;
-        data[22] = (mNumRTPSent >> 8) & 0xff;
-        data[23] = mNumRTPSent & 0xff;
-
-        data[24] = mNumRTPOctetsSent >> 24;
-        data[25] = (mNumRTPOctetsSent >> 16) & 0xff;
-        data[26] = (mNumRTPOctetsSent >> 8) & 0xff;
-        data[27] = mNumRTPOctetsSent & 0xff;
-
-        buffer->setRange(buffer->offset(), buffer->size() + 28);
-    }
-
-    void addSDES(const sp<ABuffer> &buffer) {
-        uint8_t *data = buffer->data() + buffer->size();
-        data[0] = 0x80 | 1;
-        data[1] = 202;  // SDES
-        data[4] = mSourceID >> 24;
-        data[5] = (mSourceID >> 16) & 0xff;
-        data[6] = (mSourceID >> 8) & 0xff;
-        data[7] = mSourceID & 0xff;
-
-        size_t offset = 8;
-
-        data[offset++] = 1;  // CNAME
-
-        static const char *kCNAME = "andih@laptop";
-        data[offset++] = strlen(kCNAME);
-
-        memcpy(&data[offset], kCNAME, strlen(kCNAME));
-        offset += strlen(kCNAME);
-
-        data[offset++] = 7;  // NOTE
-
-        static const char *kNOTE = "Hell's frozen over.";
-        data[offset++] = strlen(kNOTE);
-
-        memcpy(&data[offset], kNOTE, strlen(kNOTE));
-        offset += strlen(kNOTE);
-
-        data[offset++] = 0;
-
-        if ((offset % 4) > 0) {
-            size_t count = 4 - (offset % 4);
-            switch (count) {
-                case 3:
-                    data[offset++] = 0;
-                case 2:
-                    data[offset++] = 0;
-                case 1:
-                    data[offset++] = 0;
-            }
-        }
-
-        size_t numWords = (offset / 4) - 1;
-        data[2] = numWords >> 8;
-        data[3] = numWords & 0xff;
-
-        buffer->setRange(buffer->offset(), buffer->size() + offset);
-    }
-
-    DISALLOW_EVIL_CONSTRUCTORS(MyTransmitter);
-};
-
-}  // namespace android
-
-#endif  // MY_TRANSMITTER_H_
diff --git a/media/libstagefright/rtsp/NetworkUtils.cpp b/media/libstagefright/rtsp/NetworkUtils.cpp
index c053be8..e8ec64d 100644
--- a/media/libstagefright/rtsp/NetworkUtils.cpp
+++ b/media/libstagefright/rtsp/NetworkUtils.cpp
@@ -20,7 +20,7 @@
 #define LOG_TAG "NetworkUtils"
 #include <utils/Log.h>
 
-#include "NetworkUtils.h"
+#include <media/stagefright/rtsp/NetworkUtils.h>
 #include <cutils/qtaguid.h>
 #include <NetdClient.h>
 
diff --git a/media/libstagefright/rtsp/NetworkUtilsForAppProc.cpp b/media/libstagefright/rtsp/NetworkUtilsForAppProc.cpp
index 662159c..30fc38a 100644
--- a/media/libstagefright/rtsp/NetworkUtilsForAppProc.cpp
+++ b/media/libstagefright/rtsp/NetworkUtilsForAppProc.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "NetworkUtils"
 #include <utils/Log.h>
 
-#include "NetworkUtils.h"
+#include <media/stagefright/rtsp/NetworkUtils.h>
 
 // NetworkUtils implementation for application process.
 namespace android {
diff --git a/media/libstagefright/rtsp/QualManager.cpp b/media/libstagefright/rtsp/QualManager.cpp
index 37aa326..f1f8222 100644
--- a/media/libstagefright/rtsp/QualManager.cpp
+++ b/media/libstagefright/rtsp/QualManager.cpp
@@ -21,7 +21,7 @@
 #include <sys/prctl.h>
 #include <utils/Log.h>
 
-#include "QualManager.h"
+#include <media/stagefright/rtsp/QualManager.h>
 
 namespace android {
 
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index e236267..8cd33cf 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -18,9 +18,9 @@
 #define LOG_TAG "SDPLoader"
 #include <utils/Log.h>
 
-#include "include/SDPLoader.h"
+#include <media/stagefright/rtsp/SDPLoader.h>
 
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <datasource/MediaHTTP.h>
 #include <media/MediaHTTPConnection.h>
diff --git a/media/libstagefright/rtsp/UDPPusher.cpp b/media/libstagefright/rtsp/UDPPusher.cpp
index 5c685a1..4e812f5 100644
--- a/media/libstagefright/rtsp/UDPPusher.cpp
+++ b/media/libstagefright/rtsp/UDPPusher.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "UDPPusher"
 #include <utils/Log.h>
 
-#include "UDPPusher.h"
+#include <media/stagefright/rtsp/UDPPusher.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AAMRAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AAMRAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AAMRAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AAMRAssembler.h
diff --git a/media/libstagefright/rtsp/AAVCAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AAVCAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AAVCAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AAVCAssembler.h
diff --git a/media/libstagefright/rtsp/AH263Assembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AH263Assembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AH263Assembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AH263Assembler.h
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AHEVCAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AHEVCAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AHEVCAssembler.h
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG2TSAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AMPEG2TSAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG2TSAssembler.h
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG4AudioAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AMPEG4AudioAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG4AudioAssembler.h
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG4ElementaryAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AMPEG4ElementaryAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG4ElementaryAssembler.h
diff --git a/media/libstagefright/rtsp/APacketSource.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/APacketSource.h
similarity index 100%
rename from media/libstagefright/rtsp/APacketSource.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/APacketSource.h
diff --git a/media/libstagefright/rtsp/ARTPAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPAssembler.h
diff --git a/media/libstagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPConnection.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPConnection.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPConnection.h
diff --git a/media/libstagefright/rtsp/ARTPSession.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSession.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPSession.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSession.h
diff --git a/media/libstagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPSource.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
diff --git a/media/libstagefright/rtsp/ARTPWriter.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPWriter.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPWriter.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPWriter.h
diff --git a/media/libstagefright/rtsp/ARTSPConnection.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTSPConnection.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTSPConnection.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTSPConnection.h
diff --git a/media/libstagefright/rtsp/ARawAudioAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARawAudioAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/ARawAudioAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARawAudioAssembler.h
diff --git a/media/libstagefright/rtsp/ASessionDescription.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ASessionDescription.h
similarity index 100%
rename from media/libstagefright/rtsp/ASessionDescription.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ASessionDescription.h
diff --git a/media/libstagefright/rtsp/JitterCalculator.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/JitterCalculator.h
similarity index 90%
rename from media/libstagefright/rtsp/JitterCalculator.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/JitterCalculator.h
index ff36f1f..4f3b761 100644
--- a/media/libstagefright/rtsp/JitterCalculator.h
+++ b/media/libstagefright/rtsp/include/media/stagefright/rtsp/JitterCalculator.h
@@ -40,8 +40,8 @@
     JitterCalc(int32_t clockRate);
 
     void init(uint32_t rtpTime, int64_t arrivalTimeUs, int32_t base, int32_t inter);
-    void putInterArrivalData(int64_t rtpTime, int64_t arrivalTime);
-    void putBaseData(int64_t rtpTime, int64_t arrivalTimeUs);
+    void putInterArrivalData(uint32_t rtpTime, int64_t arrivalTime);
+    void putBaseData(uint32_t rtpTime, int64_t arrivalTimeUs);
     int32_t getBaseJitterMs();
     int32_t getInterArrivalJitterMs();
 };
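
The putInterArrivalData()/putBaseData() change above narrows rtpTime from int64_t to uint32_t, matching the 32-bit, wrapping RTP timestamp field. A minimal, illustrative sketch of the RFC 3550 interarrival-jitter update (not the JitterCalc implementation; the state struct and helper below are hypothetical) shows why unsigned 32-bit arithmetic is the natural carrier:

#include <cstdint>
#include <cstdlib>

// Hypothetical state for one RTP stream; times are in RTP clock ticks.
struct JitterState {
    uint32_t lastRtpTime = 0;
    int64_t lastArrivalTicks = 0;   // arrival time already converted to ticks
    int64_t jitterTicks = 0;
};

inline void updateJitter(JitterState& s, uint32_t rtpTime, int64_t arrivalTicks) {
    // uint32_t subtraction wraps modulo 2^32, so the delta stays correct even
    // when the 32-bit RTP timestamp rolls over between packets.
    uint32_t sendDelta = rtpTime - s.lastRtpTime;
    int64_t arrivalDelta = arrivalTicks - s.lastArrivalTicks;
    int64_t d = arrivalDelta - static_cast<int64_t>(sendDelta);
    // RFC 3550 section 6.4.1: J += (|D| - J) / 16
    s.jitterTicks += (std::llabs(d) - s.jitterTicks) / 16;
    s.lastRtpTime = rtpTime;
    s.lastArrivalTicks = arrivalTicks;
}
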
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/MyHandler.h
similarity index 100%
rename from media/libstagefright/rtsp/MyHandler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/MyHandler.h
diff --git a/media/libstagefright/rtsp/NetworkUtils.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/NetworkUtils.h
similarity index 100%
rename from media/libstagefright/rtsp/NetworkUtils.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/NetworkUtils.h
diff --git a/media/libstagefright/rtsp/QualManager.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/QualManager.h
similarity index 100%
rename from media/libstagefright/rtsp/QualManager.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/QualManager.h
diff --git a/media/libstagefright/include/SDPLoader.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/SDPLoader.h
similarity index 100%
rename from media/libstagefright/include/SDPLoader.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/SDPLoader.h
diff --git a/media/libstagefright/rtsp/TrafficRecorder.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/TrafficRecorder.h
similarity index 100%
rename from media/libstagefright/rtsp/TrafficRecorder.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/TrafficRecorder.h
diff --git a/media/libstagefright/rtsp/UDPPusher.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/UDPPusher.h
similarity index 100%
rename from media/libstagefright/rtsp/UDPPusher.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/UDPPusher.h
diff --git a/media/libstagefright/rtsp/VideoSource.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/VideoSource.h
similarity index 100%
rename from media/libstagefright/rtsp/VideoSource.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/VideoSource.h
diff --git a/media/libstagefright/rtsp/rtp_test.cpp b/media/libstagefright/rtsp/rtp_test.cpp
index 4590699..1ae4a09 100644
--- a/media/libstagefright/rtsp/rtp_test.cpp
+++ b/media/libstagefright/rtsp/rtp_test.cpp
@@ -27,9 +27,9 @@
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/SimpleDecodingSource.h>
 
-#include "ARTPSession.h"
-#include "ASessionDescription.h"
-#include "UDPPusher.h"
+#include <media/stagefright/rtsp/ARTPSession.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
+#include <media/stagefright/rtsp/UDPPusher.h>
 
 using namespace android;
 
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index a799a13..e6b67ce 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -32,11 +32,6 @@
         "liblog",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/av/media/libstagefright/include",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/tests/HEVC/Android.bp b/media/libstagefright/tests/HEVC/Android.bp
index 91bf385..7a0ba52 100644
--- a/media/libstagefright/tests/HEVC/Android.bp
+++ b/media/libstagefright/tests/HEVC/Android.bp
@@ -44,10 +44,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
index 324a042..c43e1f8 100644
--- a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
+++ b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
@@ -21,7 +21,7 @@
 #include <fstream>
 
 #include <media/stagefright/foundation/ABitReader.h>
-#include "include/HevcUtils.h"
+#include <HevcUtils.h>
 
 #include "HEVCUtilsTestEnvironment.h"
 
diff --git a/media/libstagefright/tests/MediaCodecListOverrides_test.cpp b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
index 0c22a42..20737db 100644
--- a/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
+++ b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
@@ -20,11 +20,10 @@
 
 #include <gtest/gtest.h>
 
-#include "MediaCodecListOverrides.h"
-
 #include <media/MediaCodecInfo.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaCodecListOverrides.h>
 
 #include <vector>
 
diff --git a/media/libstagefright/tests/extractorFactory/Android.bp b/media/libstagefright/tests/extractorFactory/Android.bp
index 13d5b89..a067284 100644
--- a/media/libstagefright/tests/extractorFactory/Android.bp
+++ b/media/libstagefright/tests/extractorFactory/Android.bp
@@ -51,10 +51,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     // TODO: (b/150181583)
     compile_multilib: "first",
 
diff --git a/media/libstagefright/tests/fuzzers/Android.bp b/media/libstagefright/tests/fuzzers/Android.bp
index 0097830..2bcfd67 100644
--- a/media/libstagefright/tests/fuzzers/Android.bp
+++ b/media/libstagefright/tests/fuzzers/Android.bp
@@ -32,9 +32,6 @@
         "liblog",
         "media_permission-aidl-cpp",
     ],
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
 }
 
 cc_fuzz {
@@ -74,6 +71,7 @@
     srcs: [
         "FrameDecoderFuzzer.cpp",
     ],
+    corpus: ["corpus/*"],
     defaults: ["libstagefright_fuzzer_defaults"],
 }
 
@@ -85,10 +83,10 @@
     ],
     dictionary: "dictionaries/formats.dict",
     defaults: ["libstagefright_fuzzer_defaults"],
+    header_libs: [
+        "libstagefright_webm_headers",
+    ],
     static_libs: [
-        "libstagefright_webm",
         "libdatasource",
-        "libstagefright_esds",
-        "libogg",
     ],
 }
diff --git a/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp b/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
index c251479..4218d2d 100644
--- a/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
+++ b/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "include/FrameDecoder.h"
+#include <FrameDecoder.h>
 #include <fuzzer/FuzzedDataProvider.h>
 #include <media/IMediaSource.h>
 #include <media/stagefright/MetaData.h>
@@ -46,12 +46,15 @@
     }
 
     while (fdp.remaining_bytes()) {
-        switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 3)) {
-            case 0:
-                decoder->init(/*frameTimeUs*/ fdp.ConsumeIntegral<int64_t>(),
-                              /*option*/ fdp.ConsumeIntegral<int>(),
-                              /*colorFormat*/ fdp.ConsumeIntegral<int>());
+        uint8_t switchCase = fdp.ConsumeIntegralInRange<uint8_t>(0, 3);
+        switch (switchCase) {
+            case 0: {
+                int64_t frameTimeUs = fdp.ConsumeIntegral<int64_t>();
+                int option = fdp.ConsumeIntegral<int>();
+                int colorFormat = fdp.ConsumeIntegral<int>();
+                decoder->init(frameTimeUs, option, colorFormat);
                 break;
+            }
             case 1:
                 decoder->extractFrame();
                 break;
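
Hoisting the three ConsumeIntegral() calls out of the init() argument list is more than a style change: the evaluation order of function arguments is unspecified in C++, so inline calls may consume the fuzzer's bytes in a compiler-dependent order. A small sketch of the pattern (the dispatch() helper is illustrative, not part of the fuzzer):

#include <fuzzer/FuzzedDataProvider.h>

// Reads are sequenced explicitly so a given input buffer always maps to the
// same argument values, keeping crashes reproducible across toolchains.
void dispatch(FuzzedDataProvider& fdp, void (*f)(int, int)) {
    int first = fdp.ConsumeIntegral<int>();   // always consumed first
    int second = fdp.ConsumeIntegral<int>();  // always consumed second
    f(first, second);
}
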
diff --git a/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp b/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
index 810ae95..d94c8ff 100644
--- a/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
+++ b/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
@@ -23,7 +23,8 @@
 #include <media/stagefright/OggWriter.h>
 
 #include "MediaMimeTypes.h"
-#include "webm/WebmWriter.h"
+
+#include <webm/WebmWriter.h>
 
 namespace android {
 std::string genMimeType(FuzzedDataProvider *dataProvider) {
@@ -121,4 +122,4 @@
     }
     return writer;
 }
-}  // namespace android
\ No newline at end of file
+}  // namespace android
diff --git a/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat b/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat
new file mode 100644
index 0000000..698e21d
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat
Binary files differ
diff --git a/media/libstagefright/tests/writer/Android.bp b/media/libstagefright/tests/writer/Android.bp
index 38d5ecc..49fb569 100644
--- a/media/libstagefright/tests/writer/Android.bp
+++ b/media/libstagefright/tests/writer/Android.bp
@@ -52,10 +52,6 @@
         "libogg",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/timedtext/Android.bp b/media/libstagefright/timedtext/Android.bp
index 6590ef7..619e06b 100644
--- a/media/libstagefright/timedtext/Android.bp
+++ b/media/libstagefright/timedtext/Android.bp
@@ -35,8 +35,16 @@
         cfi: true,
     },
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
+    export_include_dirs: [
+        "include",
+    ],
+
+    local_include_dirs: [
+        "include/timedtext",
+    ],
+
+    header_libs: [
+        "libstagefright_headers",
     ],
 
     shared_libs: ["libmedia"],
diff --git a/media/libstagefright/timedtext/TextDescriptions.h b/media/libstagefright/timedtext/include/timedtext/TextDescriptions.h
similarity index 100%
rename from media/libstagefright/timedtext/TextDescriptions.h
rename to media/libstagefright/timedtext/include/timedtext/TextDescriptions.h
diff --git a/media/libstagefright/timedtext/test/Android.bp b/media/libstagefright/timedtext/test/Android.bp
index 0b632bf..58c68ef 100644
--- a/media/libstagefright/timedtext/test/Android.bp
+++ b/media/libstagefright/timedtext/test/Android.bp
@@ -39,8 +39,8 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
+    header_libs: [
+        "libstagefright_headers",
     ],
 
     shared_libs: [
diff --git a/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp b/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
index d85ae39..f934b54 100644
--- a/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
+++ b/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
@@ -27,7 +27,7 @@
 #include <media/stagefright/foundation/AString.h>
 #include <media/stagefright/foundation/ByteUtils.h>
 
-#include "timedtext/TextDescriptions.h"
+#include <timedtext/TextDescriptions.h>
 
 #include "TimedTextTestEnvironment.h"
 
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 32a22ba..9d5f430 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -33,7 +33,11 @@
         "WebmWriter.cpp",
     ],
 
-    include_dirs: ["frameworks/av/include"],
+    local_include_dirs: [
+        "include/webm",
+    ],
+
+    export_include_dirs: ["include"],
 
     shared_libs: [
         "libdatasource",
@@ -44,7 +48,21 @@
     ],
 
     header_libs: [
+        "av-headers",
         "libmedia_headers",
         "media_ndk_headers",
     ],
 }
+
+
+cc_library_headers {
+    name: "libstagefright_webm_headers",
+    export_include_dirs: ["include"],
+
+    host_supported: true,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+}
diff --git a/media/libstagefright/webm/EbmlUtil.h b/media/libstagefright/webm/include/webm/EbmlUtil.h
similarity index 100%
rename from media/libstagefright/webm/EbmlUtil.h
rename to media/libstagefright/webm/include/webm/EbmlUtil.h
diff --git a/media/libstagefright/webm/LinkedBlockingQueue.h b/media/libstagefright/webm/include/webm/LinkedBlockingQueue.h
similarity index 100%
rename from media/libstagefright/webm/LinkedBlockingQueue.h
rename to media/libstagefright/webm/include/webm/LinkedBlockingQueue.h
diff --git a/media/libstagefright/webm/WebmConstants.h b/media/libstagefright/webm/include/webm/WebmConstants.h
similarity index 100%
rename from media/libstagefright/webm/WebmConstants.h
rename to media/libstagefright/webm/include/webm/WebmConstants.h
diff --git a/media/libstagefright/webm/WebmElement.h b/media/libstagefright/webm/include/webm/WebmElement.h
similarity index 100%
rename from media/libstagefright/webm/WebmElement.h
rename to media/libstagefright/webm/include/webm/WebmElement.h
diff --git a/media/libstagefright/webm/WebmFrame.h b/media/libstagefright/webm/include/webm/WebmFrame.h
similarity index 100%
rename from media/libstagefright/webm/WebmFrame.h
rename to media/libstagefright/webm/include/webm/WebmFrame.h
diff --git a/media/libstagefright/webm/WebmFrameThread.h b/media/libstagefright/webm/include/webm/WebmFrameThread.h
similarity index 100%
rename from media/libstagefright/webm/WebmFrameThread.h
rename to media/libstagefright/webm/include/webm/WebmFrameThread.h
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/include/webm/WebmWriter.h
similarity index 100%
rename from media/libstagefright/webm/WebmWriter.h
rename to media/libstagefright/webm/include/webm/WebmWriter.h
diff --git a/media/libstagefright/webm/tests/Android.bp b/media/libstagefright/webm/tests/Android.bp
index 4443766..629ee47 100644
--- a/media/libstagefright/webm/tests/Android.bp
+++ b/media/libstagefright/webm/tests/Android.bp
@@ -31,8 +31,8 @@
         "WebmFrameThreadUnitTest.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
+    header_libs: [
+        "libstagefright_headers",
     ],
 
     static_libs: [
diff --git a/media/libstagefright/writer_fuzzers/Android.bp b/media/libstagefright/writer_fuzzers/Android.bp
index a33b888..b81f27e 100644
--- a/media/libstagefright/writer_fuzzers/Android.bp
+++ b/media/libstagefright/writer_fuzzers/Android.bp
@@ -119,7 +119,7 @@
         "libstagefright_webm",
         "libdatasource",
     ],
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
+    header_libs: [
+        "libstagefright_headers",
     ],
 }
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index e25658f..537df76 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -47,11 +47,6 @@
         "libregistermsext",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libmediaplayerservice",
-        "frameworks/av/services/mediaresourcemanager",
-    ],
-
     // By default mediaserver runs in 32-bit to save memory, except
     // on 64-bit-only lunch targets.
     // ****************************************************************
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index 58e2d2a..026847a 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -25,9 +25,8 @@
 #include <utils/Log.h>
 #include "RegisterExtensions.h"
 
-// from LOCAL_C_INCLUDES
-#include "MediaPlayerService.h"
-#include "ResourceManagerService.h"
+#include <MediaPlayerService.h>
+#include <ResourceManagerService.h>
 
 using namespace android;
 
diff --git a/media/mtp/OWNERS b/media/mtp/OWNERS
index 1928ba8..54d3d4a 100644
--- a/media/mtp/OWNERS
+++ b/media/mtp/OWNERS
@@ -1,6 +1,5 @@
 set noparent
 
-marcone@google.com
 jsharkey@android.com
 jameswei@google.com
 rmojumder@google.com
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 8d527e9..f50acae 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -145,6 +145,7 @@
         "libgui",
         "libui",
         "libmediandk_utils",
+        "android.hardware.drm-V1-ndk",
     ],
 
     export_header_lib_headers: ["jni_headers"],
@@ -167,7 +168,7 @@
     stubs: {
         symbol_file: "libmediandk.map.txt",
         versions: ["29"],
-    },
+    }
 }
 
 cc_library {
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 1ae2b44..0e2de4e 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -452,17 +452,19 @@
         uint32_t flags) {
     sp<AMessage> nativeFormat;
     AMediaFormat_getFormat(format, &nativeFormat);
-    ALOGV("configure with format: %s", nativeFormat->debugString(0).c_str());
+    // Create our own shallow copy so we aren't affected by any later changes.
+    sp<AMessage> dupNativeFormat = nativeFormat->dup();
+    ALOGV("configure with format: %s", dupNativeFormat->debugString(0).c_str());
     sp<Surface> surface = NULL;
     if (window != NULL) {
         surface = (Surface*) window;
     }
 
-    status_t err = mData->mCodec->configure(nativeFormat, surface,
+    status_t err = mData->mCodec->configure(dupNativeFormat, surface,
             crypto ? crypto->mCrypto : NULL, flags);
     if (err != OK) {
         ALOGE("configure: err(%d), failed with format: %s",
-              err, nativeFormat->debugString(0).c_str());
+              err, dupNativeFormat->debugString(0).c_str());
     }
     return translate_error(err);
 }
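
The dup() above takes a shallow copy of the format's underlying AMessage before handing it to MediaCodec::configure(), so a caller that keeps mutating its AMediaFormat afterwards no longer alters the message the codec was configured with. A minimal sketch of the caller-side pattern this protects (the function and the chosen keys are illustrative; a valid codec and window are assumed):

#include <android/native_window.h>
#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>

void configureThenReuseFormat(AMediaCodec* codec, ANativeWindow* window) {
    AMediaFormat* fmt = AMediaFormat_new();
    AMediaFormat_setString(fmt, AMEDIAFORMAT_KEY_MIME, "video/avc");
    AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_WIDTH, 1280);
    AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_HEIGHT, 720);
    AMediaCodec_configure(codec, fmt, window, nullptr /*crypto*/, 0 /*flags*/);
    // Mutating fmt from here on only affects the caller's copy, not the
    // message captured by configure().
    AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_BIT_RATE, 2000000);
    AMediaFormat_delete(fmt);
}
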
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 6e9945d..59c1103 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -97,6 +97,8 @@
     List<idvec_t> mIds;
     KeyedVector<String8, String8> mQueryResults;
     Vector<uint8_t> mKeyRequest;
+    String8 mDefaultUrl;
+    AMediaDrmKeyRequestType mkeyRequestType;
     Vector<uint8_t> mProvisionRequest;
     String8 mProvisionUrl;
     String8 mPropertyString;
@@ -416,6 +418,21 @@
         const AMediaDrmKeyValue *optionalParameters, size_t numOptionalParameters,
         const uint8_t **keyRequest, size_t *keyRequestSize) {
 
+    return AMediaDrm_getKeyRequestWithDefaultUrlAndType(mObj,
+        scope, init, initSize, mimeType, keyType, optionalParameters,
+        numOptionalParameters, keyRequest,
+        keyRequestSize, NULL, NULL);
+}
+
+EXPORT
+media_status_t AMediaDrm_getKeyRequestWithDefaultUrlAndType(AMediaDrm *mObj,
+        const AMediaDrmScope *scope, const uint8_t *init, size_t initSize,
+        const char *mimeType, AMediaDrmKeyType keyType,
+        const AMediaDrmKeyValue *optionalParameters,
+        size_t numOptionalParameters, const uint8_t **keyRequest,
+        size_t *keyRequestSize, const char **defaultUrl,
+        AMediaDrmKeyRequestType *keyRequestType) {
+
     if (!mObj || mObj->mDrm == NULL) {
         return AMEDIA_ERROR_INVALID_OBJECT;
     }
@@ -449,18 +466,43 @@
         mdOptionalParameters.add(String8(optionalParameters[i].mKey),
                 String8(optionalParameters[i].mValue));
     }
-    String8 defaultUrl;
-    DrmPlugin::KeyRequestType keyRequestType;
+
+    DrmPlugin::KeyRequestType requestType;
     mObj->mKeyRequest.clear();
     status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
-            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
-            &keyRequestType);
+            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, mObj->mDefaultUrl,
+            &requestType);
     if (status != OK) {
         return translateStatus(status);
     } else {
         *keyRequest = mObj->mKeyRequest.array();
         *keyRequestSize = mObj->mKeyRequest.size();
+        if (defaultUrl != NULL)
+            *defaultUrl = mObj->mDefaultUrl.string();
+        switch(requestType) {
+            case DrmPlugin::kKeyRequestType_Initial:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_INITIAL;
+                break;
+            case DrmPlugin::kKeyRequestType_Renewal:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_RENEWAL;
+                break;
+            case DrmPlugin::kKeyRequestType_Release:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_RELEASE;
+                break;
+            case DrmPlugin::kKeyRequestType_None:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_NONE;
+                break;
+            case DrmPlugin::kKeyRequestType_Update:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_UPDATE;
+                break;
+            default:
+                return AMEDIA_ERROR_UNKNOWN;
+        }
+
+        if (keyRequestType != NULL)
+            *keyRequestType = mObj->mkeyRequestType;
     }
+
     return AMEDIA_OK;
 }
 
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index c1793ce..923453a 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -200,8 +200,11 @@
     AString tmp;
     if (mData->mFormat->findString(name, &tmp)) {
         String8 ret(tmp.c_str());
-        mData->mStringCache.add(String8(name), ret);
-        *out = ret.string();
+        ssize_t i = mData->mStringCache.add(String8(name), ret);
+        if (i < 0) {
+            return false;
+        }
+        *out = mData->mStringCache.valueAt(i).string();
         return true;
     }
     return false;
@@ -351,8 +354,14 @@
 EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
 EXPORT const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA = "mpeg-user-data";
 EXPORT const char* AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER = "mpeg2-stream-header";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS = "mpegh-compatible-sets";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION =
+        "mpegh-profile-level-indication";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT =
+        "mpegh-reference-channel-layout";
 EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
 EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
+EXPORT const char* AMEDIAFORMAT_KEY_PICTURE_TYPE = "picture-type";
 EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
 EXPORT const char* AMEDIAFORMAT_KEY_PROFILE = "profile";
 EXPORT const char* AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN = "pcm-big-endian";
@@ -386,6 +395,9 @@
 EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
 EXPORT const char* AMEDIAFORMAT_KEY_TRACK_INDEX = "track-index";
 EXPORT const char* AMEDIAFORMAT_KEY_VALID_SAMPLES = "valid-samples";
+EXPORT const char* AMEDIAFORMAT_KEY_VIDEO_ENCODING_STATISTICS_LEVEL =
+        "video-encoding-statistics-level";
+EXPORT const char* AMEDIAFORMAT_KEY_VIDEO_QP_AVERAGE = "video-qp-average";
 EXPORT const char* AMEDIAFORMAT_VIDEO_QP_B_MAX = "video-qp-b-max";
 EXPORT const char* AMEDIAFORMAT_VIDEO_QP_B_MIN = "video-qp-b-min";
 EXPORT const char* AMEDIAFORMAT_VIDEO_QP_I_MAX = "video-qp-i-max";
diff --git a/media/ndk/OWNERS b/media/ndk/OWNERS
index 9dc441e..83644f0 100644
--- a/media/ndk/OWNERS
+++ b/media/ndk/OWNERS
@@ -1,3 +1,4 @@
-marcone@google.com
+essick@google.com
+lajos@google.com
 # For AImage/AImageReader
 include platform/frameworks/av:/camera/OWNERS
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 849a8f9..4eca3d7 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -112,6 +112,41 @@
 } AMediaDrmKeyType;
 
 /**
+ * Introduced in API 33.
+ */
+typedef enum AMediaDrmKeyRequestType : int32_t {
+    /**
+     * Key request type is initial license request.
+     * An initial license request is necessary to load keys.
+     */
+    KEY_REQUEST_TYPE_INITIAL,
+
+    /**
+     * Key request type is license renewal.
+     * A renewal license request is necessary to prevent the keys from expiring.
+     */
+    KEY_REQUEST_TYPE_RENEWAL,
+
+    /**
+     * Key request type is license release.
+     * A license release request indicates that keys are removed.
+     */
+    KEY_REQUEST_TYPE_RELEASE,
+
+    /**
+     * Keys are already loaded and are available for use. No license request is necessary, and
+     * no key request data is returned.
+     */
+    KEY_REQUEST_TYPE_NONE,
+
+    /**
+     * Keys have been loaded but an additional license request is needed
+     * to update their values.
+     */
+    KEY_REQUEST_TYPE_UPDATE
+} AMediaDrmKeyRequestType;
+
+/**
  *  Data type containing {key, value} pair
  */
 typedef struct AMediaDrmKeyValuePair {
@@ -248,7 +283,10 @@
  * to obtain or release keys used to decrypt encrypted content.
  * AMediaDrm_getKeyRequest is used to obtain an opaque key request byte array that
  * is delivered to the license server.  The opaque key request byte array is
- * returned in KeyRequest.data.
+ * returned in *keyRequest and the number of bytes in the request is
+ * returned in *keyRequestSize.
+ * This API has the same functionality as AMediaDrm_getKeyRequestWithDefaultUrlAndType()
+ * when defaultUrl and keyRequestType are passed in as NULL.
  *
  * After the app has received the key request response from the server,
  * it should deliver to the response to the DRM engine plugin using the method
@@ -280,11 +318,14 @@
  *   by the caller
  *
  * On exit:
+ *   If this returns AMEDIA_OK,
  *   1. The keyRequest pointer will reference the opaque key request data.  It
  *       will reside in memory owned by the AMediaDrm object, and will remain
- *       accessible until the next call to AMediaDrm_getKeyRequest or until the
+ *       accessible until the next call to AMediaDrm_getKeyRequest
+ *       or AMediaDrm_getKeyRequestWithDefaultUrlAndType or until the
  *       MediaDrm object is released.
  *   2. keyRequestSize will be set to the size of the request
+ *   If this does not return AMEDIA_OK, the values of these parameters should not be used.
  *
  * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
  * problem with the device certificate.
@@ -297,6 +338,72 @@
         const uint8_t **keyRequest, size_t *keyRequestSize) __INTRODUCED_IN(21);
 
 /**
+ * A key request/response exchange occurs between the app and a license server
+ * to obtain or release keys used to decrypt encrypted content.
+ * AMediaDrm_getKeyRequestWithDefaultUrlAndType is used to obtain an opaque key
+ * request byte array that is delivered to the license server.  The opaque key
+ * request byte array is returned in *keyRequest and the number of bytes in the
+ * request is returned in *keyRequestSize.
+ *
+ * After the app has received the key request response from the server,
+ * it should deliver the response to the DRM engine plugin using the method
+ * AMediaDrm_provideKeyResponse.
+ *
+ * scope may be a sessionId or a keySetId, depending on the specified keyType.
+ * When the keyType is KEY_TYPE_STREAMING or KEY_TYPE_OFFLINE, scope should be set
+ * to the sessionId the keys will be provided to.  When the keyType is
+ * KEY_TYPE_RELEASE, scope should be set to the keySetId of the keys being released.
+ * Releasing keys from a device invalidates them for all sessions.
+ *
+ * init is container-specific data; its meaning is interpreted based on the mime type
+ * provided in the mimeType parameter.  It could contain, for example, the content
+ * ID, key ID or other data obtained from the content metadata that is required in
+ * generating the key request. init may be null when keyType is KEY_TYPE_RELEASE.
+ *
+ * initSize is the number of bytes of initData
+ *
+ * mimeType identifies the mime type of the content.
+ *
+ * keyType specifies the type of the request. The request may be to acquire keys for
+ *   streaming or offline content, or to release previously acquired keys, which are
+ *   identified by a keySetId.
+ *
+ * optionalParameters are included in the key request message to allow a client
+ *   application to provide additional message parameters to the server.
+ *
+ * numOptionalParameters indicates the number of optional parameters provided
+ *   by the caller
+ *
+ * On exit:
+ *   If this returns AMEDIA_OK,
+ *   1. The keyRequest pointer will reference the opaque key request data.  It
+ *       will reside in memory owned by the AMediaDrm object, and will remain
+ *       accessible until the next call to either AMediaDrm_getKeyRequest
+ *       or AMediaDrm_getKeyRequestWithDefaultUrlAndType or until the
+ *       MediaDrm object is released.
+ *   2. keyRequestSize will be set to the size of the request.
+ *   3. defaultUrl will be set to the recommended URL to deliver the key request.
+ *      The defaultUrl pointer will reference a NULL terminated URL string.
+ *      It will be UTF-8 encoded and have the same lifetime as the key request data
+ *      that the keyRequest pointer references. Passing in NULL means you don't need
+ *      it to be reported.
+ *   4. keyRequestType will be set to the key request type. Passing in NULL means
+ *      you don't need it to be reported.
+ *
+ * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
+ * problem with the device certificate.
+ *
+ * Available since API level 33.
+ */
+media_status_t AMediaDrm_getKeyRequestWithDefaultUrlAndType(AMediaDrm *,
+        const AMediaDrmScope *scope, const uint8_t *init, size_t initSize,
+        const char *mimeType, AMediaDrmKeyType keyType,
+        const AMediaDrmKeyValue *optionalParameters,
+        size_t numOptionalParameters, const uint8_t **keyRequest,
+        size_t *keyRequestSize, const char **defaultUrl,
+        AMediaDrmKeyRequestType *keyRequestType) __INTRODUCED_IN(__ANDROID_API_T__);
+
+/**
  * A key response is received from the license server by the app, then it is
  * provided to the DRM engine plugin using provideKeyResponse.  When the
  * response is for an offline key request, a keySetId is returned that can be
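
A minimal usage sketch for the new entry point, assuming a session has already been opened and initData prepared elsewhere; error handling is trimmed and requestLicense() is an illustrative name:

#include <media/NdkMediaDrm.h>

media_status_t requestLicense(AMediaDrm* drm, const AMediaDrmSessionId* sessionId,
                              const uint8_t* initData, size_t initSize) {
    const uint8_t* request = nullptr;
    size_t requestSize = 0;
    const char* defaultUrl = nullptr;       // NULL-terminated UTF-8, owned by drm
    AMediaDrmKeyRequestType requestType;    // e.g. KEY_REQUEST_TYPE_INITIAL
    media_status_t status = AMediaDrm_getKeyRequestWithDefaultUrlAndType(
            drm, sessionId, initData, initSize, "video/mp4", KEY_TYPE_STREAMING,
            nullptr /*optionalParameters*/, 0 /*numOptionalParameters*/,
            &request, &requestSize, &defaultUrl, &requestType);
    if (status != AMEDIA_OK) return status;
    // request, requestSize and defaultUrl stay valid until the next
    // getKeyRequest* call on drm or until the AMediaDrm object is released;
    // POST the request to defaultUrl, then feed the server's reply to
    // AMediaDrm_provideKeyResponse().
    return AMEDIA_OK;
}
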
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index fbd855d..2195657 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -311,6 +311,10 @@
 extern const char* AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK __INTRODUCED_IN(31);
 extern const char* AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND __INTRODUCED_IN(31);
 
+extern const char* AMEDIAFORMAT_KEY_PICTURE_TYPE __INTRODUCED_IN(33);
+extern const char* AMEDIAFORMAT_KEY_VIDEO_ENCODING_STATISTICS_LEVEL __INTRODUCED_IN(33);
+extern const char* AMEDIAFORMAT_KEY_VIDEO_QP_AVERAGE __INTRODUCED_IN(33);
+
 extern const char* AMEDIAFORMAT_VIDEO_QP_B_MAX __INTRODUCED_IN(31);
 extern const char* AMEDIAFORMAT_VIDEO_QP_B_MIN __INTRODUCED_IN(31);
 extern const char* AMEDIAFORMAT_VIDEO_QP_I_MAX __INTRODUCED_IN(31);
@@ -320,6 +324,34 @@
 extern const char* AMEDIAFORMAT_VIDEO_QP_P_MAX __INTRODUCED_IN(31);
 extern const char* AMEDIAFORMAT_VIDEO_QP_P_MIN __INTRODUCED_IN(31);
 
+/**
+ * MPEG-H audio profile and level compatibility.
+ *
+ * See FDAmd_2 of ISO_IEC_23008-3;2019 MHAProfileAndLevelCompatibilitySetBox.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS __INTRODUCED_IN(32);
+
+/**
+ * MPEG-H audio profile level indication.
+ *
+ * See ISO_IEC_23008-3;2019 MHADecoderConfigurationRecord mpegh3daProfileLevelIndication.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION __INTRODUCED_IN(32);
+
+/**
+ * MPEG-H audio reference channel layout.
+ *
+ * See ISO_IEC_23008-3;2019 MHADecoderConfigurationRecord referenceChannelLayout
+ * and ISO_IEC_23001-8 ChannelConfiguration value.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT __INTRODUCED_IN(32);
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_FORMAT_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 7e9e57e..b228945 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -126,6 +126,9 @@
     AMEDIAFORMAT_KEY_MIME; # var introduced=21
     AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
     AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER; # var introduced=29
+    AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS; # var introduced=32
+    AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION; # var introduced=32
+    AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT; # var introduced=32
     AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
     AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN; # var introduced=29
     AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
@@ -226,6 +229,7 @@
     AMediaDrm_decrypt;
     AMediaDrm_encrypt;
     AMediaDrm_getKeyRequest;
+    AMediaDrm_getKeyRequestWithDefaultUrlAndType; # introduced=Tiramisu
     AMediaDrm_getPropertyByteArray;
     AMediaDrm_getPropertyString;
     AMediaDrm_getProvisionRequest;
diff --git a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
index b17541d..75d73bf 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
+++ b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
@@ -30,7 +30,8 @@
         android:roundIcon="@mipmap/ic_launcher_round"
         android:supportsRtl="true"
         android:theme="@style/AppTheme">
-        <activity android:name="com.android.media.samplevideoencoder.MainActivity">
+        <activity android:name="com.android.media.samplevideoencoder.MainActivity"
+            android:exported="true">
             <intent-filter>
                 <action android:name="android.intent.action.MAIN" />
                 <category android:name="android.intent.category.LAUNCHER" />
@@ -42,4 +43,4 @@
         android:targetPackage="com.android.media.samplevideoencoder"
         android:label="SampleVideoEncoder Test"/>
 
-</manifest>
\ No newline at end of file
+</manifest>
diff --git a/media/tests/benchmark/MediaBenchmarkTest/Android.bp b/media/tests/benchmark/MediaBenchmarkTest/Android.bp
index 2e06da5..4b44dcf 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/Android.bp
+++ b/media/tests/benchmark/MediaBenchmarkTest/Android.bp
@@ -69,7 +69,6 @@
 java_defaults {
     name: "MediaBenchmark-defaults",
 
-    sdk_version: "system_current",
     min_sdk_version: "28",
-    target_sdk_version: "29",
+    target_sdk_version: "30",
 }
diff --git a/media/tests/benchmark/MediaBenchmarkTest/build.gradle b/media/tests/benchmark/MediaBenchmarkTest/build.gradle
index b2aee1a..b222d47 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/build.gradle
+++ b/media/tests/benchmark/MediaBenchmarkTest/build.gradle
@@ -17,21 +17,21 @@
 buildscript {
     repositories {
         google()
-        jcenter()
+        mavenCentral()
     }
     dependencies {
-        classpath 'com.android.tools.build:gradle:3.5.0'
+        classpath 'com.android.tools.build:gradle:4.2.1'
     }
 }
 
 apply plugin: 'com.android.application'
 
 android {
-    compileSdkVersion 29
+    compileSdkVersion 30
     defaultConfig {
         applicationId "com.android.media.benchmark"
         minSdkVersion 28
-        targetSdkVersion 29
+        targetSdkVersion 30
         versionCode 1
         versionName "1.0"
         testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
@@ -57,20 +57,20 @@
     externalNativeBuild {
         cmake {
             path "src/main/cpp/CMakeLists.txt"
-            version "3.10.2"
+            version "3.18.1"
         }
     }
 }
 
 repositories {
     google()
-    jcenter()
+    mavenCentral()
 }
 
 dependencies {
     implementation fileTree(dir: 'libs', include: ['*.jar'])
-    implementation 'androidx.appcompat:appcompat:1.1.0'
-    testImplementation 'junit:junit:4.12'
-    androidTestImplementation 'androidx.test:runner:1.2.0'
-    androidTestImplementation 'androidx.test.ext:junit:1.1.1'
+    implementation 'androidx.appcompat:appcompat:1.3.0'
+    testImplementation 'junit:junit:4.13.2'
+    androidTestImplementation 'androidx.test:runner:1.3.0'
+    androidTestImplementation 'androidx.test.ext:junit:1.1.2'
 }
\ No newline at end of file
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
index af92424..0192d68 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
@@ -9,7 +9,6 @@
 
 cc_test_library {
     name: "libmediabenchmark_jni",
-    sdk_version: "current",
 
     defaults: [
         "libmediabenchmark_common-defaults",
diff --git a/media/tests/benchmark/src/native/common/Android.bp b/media/tests/benchmark/src/native/common/Android.bp
index 6b54c6a..718d217 100644
--- a/media/tests/benchmark/src/native/common/Android.bp
+++ b/media/tests/benchmark/src/native/common/Android.bp
@@ -55,7 +55,6 @@
 
 cc_defaults {
     name: "libmediabenchmark-defaults",
-    sdk_version: "current",
     stl: "c++_shared",
 
     shared_libs: [
diff --git a/media/tests/benchmark/src/native/extractor/Extractor.cpp b/media/tests/benchmark/src/native/extractor/Extractor.cpp
index f0bb3b9..3bdfbad 100644
--- a/media/tests/benchmark/src/native/extractor/Extractor.cpp
+++ b/media/tests/benchmark/src/native/extractor/Extractor.cpp
@@ -124,9 +124,7 @@
 
     int64_t sTime = mStats->getCurTime();
     if (mExtractor) {
-        // TODO: (b/140128505) Multiple calls result in DoS.
-        // Uncomment call to AMediaExtractor_delete() once this is resolved
-        // AMediaExtractor_delete(mExtractor);
+        AMediaExtractor_delete(mExtractor);
         mExtractor = nullptr;
     }
     int64_t eTime = mStats->getCurTime();
diff --git a/media/tests/benchmark/tests/Android.bp b/media/tests/benchmark/tests/Android.bp
index 0fbd20d..9a8caa3 100644
--- a/media/tests/benchmark/tests/Android.bp
+++ b/media/tests/benchmark/tests/Android.bp
@@ -33,7 +33,12 @@
 
     srcs: ["ExtractorTest.cpp"],
 
-    static_libs: ["libmediabenchmark_extractor"]
+    static_libs: ["libmediabenchmark_extractor"],
+
+    shared_libs: [
+        "libbase",
+        "libbinder_ndk",
+    ],
 }
 
 cc_test {
@@ -50,6 +55,11 @@
         "libmediabenchmark_extractor",
         "libmediabenchmark_decoder",
     ],
+
+    shared_libs: [
+        "libbase",
+        "libbinder_ndk",
+    ],
 }
 
 cc_test {
diff --git a/media/tests/benchmark/tests/DecoderTest.cpp b/media/tests/benchmark/tests/DecoderTest.cpp
index 81ef02a..3666724 100644
--- a/media/tests/benchmark/tests/DecoderTest.cpp
+++ b/media/tests/benchmark/tests/DecoderTest.cpp
@@ -21,6 +21,8 @@
 #include <iostream>
 #include <limits>
 
+#include <android/binder_process.h>
+
 #include "BenchmarkTestEnvironment.h"
 #include "Decoder.h"
 
@@ -175,6 +177,7 @@
                                             "c2.android.hevc.decoder", true)));
 
 int main(int argc, char **argv) {
+    ABinderProcess_startThreadPool();
     gEnv = new BenchmarkTestEnvironment();
     ::testing::AddGlobalTestEnvironment(gEnv);
     ::testing::InitGoogleTest(&argc, argv);
diff --git a/media/tests/benchmark/tests/ExtractorTest.cpp b/media/tests/benchmark/tests/ExtractorTest.cpp
index d14d15b..27ee9ba 100644
--- a/media/tests/benchmark/tests/ExtractorTest.cpp
+++ b/media/tests/benchmark/tests/ExtractorTest.cpp
@@ -19,6 +19,8 @@
 
 #include <gtest/gtest.h>
 
+#include <android/binder_process.h>
+
 #include "BenchmarkTestEnvironment.h"
 #include "Extractor.h"
 
@@ -73,6 +75,7 @@
                                                      0)));
 
 int main(int argc, char **argv) {
+    ABinderProcess_startThreadPool();
     gEnv = new BenchmarkTestEnvironment();
     ::testing::AddGlobalTestEnvironment(gEnv);
     ::testing::InitGoogleTest(&argc, argv);
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index bfe73d5..c1c7df5 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -34,6 +34,7 @@
         "SchedulingPolicyService.cpp",
         "ServiceUtilities.cpp",
         "TimeCheck.cpp",
+        "TimerThread.cpp",
     ],
     static_libs: [
         "libc_malloc_debug_backtrace",
@@ -51,6 +52,7 @@
         "libpermission",
         "android.hardware.graphics.bufferqueue@1.0",
         "android.hidl.token@1.0-utils",
+        "packagemanager_aidl-cpp",
     ],
     export_static_lib_headers: [
         "libbatterystats_aidl",
@@ -81,9 +83,48 @@
     export_include_dirs: ["include"],
 }
 
+cc_library {
+    name: "libmediautils_vendor",
+    vendor_available: true,  // required for platform/hardware/interfaces
+    srcs: [
+        "MemoryLeakTrackUtil.cpp",
+    ],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+    shared_libs: [
+        "liblog",
+        "libutils",
+    ],
+
+    static_libs: [
+        "libc_malloc_debug_backtrace",
+    ],
+
+    header_libs: [
+        "bionic_libc_platform_headers",
+    ],
+
+    local_include_dirs: ["include"],
+    export_include_dirs: ["include"],
+}
+
+
 cc_library_headers {
     name: "libmediautils_headers",
     vendor_available: true,  // required for platform/hardware/interfaces
 
     export_include_dirs: ["include"],
 }
+
+cc_test {
+    name: "libmediautils_test",
+    srcs: ["TimerThread-test.cpp"],
+    shared_libs: [
+      "libmediautils",
+      "libutils",
+    ]
+}
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 9c7b863..1ab5bc1 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -45,6 +45,7 @@
 static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
 static const String16 sModifyPhoneState("android.permission.MODIFY_PHONE_STATE");
 static const String16 sModifyAudioRouting("android.permission.MODIFY_AUDIO_ROUTING");
+static const String16 sCallAudioInterception("android.permission.CALL_AUDIO_INTERCEPTION");
 
 static String16 resolveCallingPackage(PermissionController& permissionController,
         const std::optional<String16> opPackageName, uid_t uid) {
@@ -71,6 +72,7 @@
   switch (source) {
     case AUDIO_SOURCE_HOTWORD:
       return AppOpsManager::OP_RECORD_AUDIO_HOTWORD;
+    case AUDIO_SOURCE_ECHO_REFERENCE: // fallthrough
     case AUDIO_SOURCE_REMOTE_SUBMIX:
       return AppOpsManager::OP_RECORD_AUDIO_OUTPUT;
     case AUDIO_SOURCE_VOICE_DOWNLINK:
@@ -101,7 +103,11 @@
     AttributionSourceState myAttributionSource;
     myAttributionSource.uid = VALUE_OR_FATAL(android::legacy2aidl_uid_t_int32_t(getuid()));
     myAttributionSource.pid = VALUE_OR_FATAL(android::legacy2aidl_pid_t_int32_t(getpid()));
-    myAttributionSource.token = sp<BBinder>::make();
+    if (callerAttributionSource.token != nullptr) {
+        myAttributionSource.token = callerAttributionSource.token;
+    } else {
+        myAttributionSource.token = sp<BBinder>::make();
+    }
     myAttributionSource.next.push_back(nextAttributionSource);
 
     return std::optional<AttributionSourceState>{myAttributionSource};
@@ -214,6 +220,17 @@
     return ok;
 }
 
+bool accessUltrasoundAllowed(const AttributionSourceState& attributionSource) {
+    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+    uid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
+    if (isAudioServerOrRootUid(uid)) return true;
+    static const String16 sAccessUltrasound(
+        "android.permission.ACCESS_ULTRASOUND");
+    bool ok = PermissionCache::checkPermission(sAccessUltrasound, pid, uid);
+    if (!ok) ALOGE("Request requires android.permission.ACCESS_ULTRASOUND");
+    return ok;
+}
+
 bool captureHotwordAllowed(const AttributionSourceState& attributionSource) {
     // CAPTURE_AUDIO_HOTWORD permission implies RECORD_AUDIO permission
     bool ok = recordingAllowed(attributionSource);
@@ -304,6 +321,17 @@
     return ok;
 }
 
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource) {
+    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
+
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkPermission(sCallAudioInterception, pid, uid);
+    if (!ok) ALOGV("%s(): android.permission.CALL_AUDIO_INTERCEPTION denied for uid %d",
+        __func__, uid);
+    return ok;
+}
+
 AttributionSourceState getCallingAttributionSource() {
     AttributionSourceState attributionSource = AttributionSourceState();
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 819e146..2b765cc 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -16,13 +16,25 @@
 
 #define LOG_TAG "TimeCheck"
 
-#include <utils/Log.h>
-#include <mediautils/TimeCheck.h>
+#include <optional>
+#include <sstream>
+
 #include <mediautils/EventLog.h>
+#include <mediautils/TimeCheck.h>
+#include <utils/Log.h>
 #include "debuggerd/handler.h"
 
 namespace android {
 
+namespace {
+
+std::string formatTime(std::chrono::system_clock::time_point t) {
+    auto msSinceEpoch = std::chrono::round<std::chrono::milliseconds>(t.time_since_epoch());
+    return (std::ostringstream() << msSinceEpoch.count()).str();
+}
+
+}  // namespace
+
 // Audio HAL server pids vector used to generate audio HAL processes tombstone
 // when audioserver watchdog triggers.
 // We use a lockless storage to avoid potential deadlocks in the context of watchdog
@@ -58,84 +70,39 @@
 }
 
 /* static */
-sp<TimeCheck::TimeCheckThread> TimeCheck::getTimeCheckThread()
-{
-    static sp<TimeCheck::TimeCheckThread> sTimeCheckThread = new TimeCheck::TimeCheckThread();
+TimerThread* TimeCheck::getTimeCheckThread() {
+    static TimerThread* sTimeCheckThread = new TimerThread();
     return sTimeCheckThread;
 }
 
-TimeCheck::TimeCheck(const char *tag, uint32_t timeoutMs)
-    : mEndTimeNs(getTimeCheckThread()->startMonitoring(tag, timeoutMs))
-{
-}
+TimeCheck::TimeCheck(const char* tag, uint32_t timeoutMs)
+    : mTimerHandle(getTimeCheckThread()->scheduleTask(
+              [tag, startTime = std::chrono::system_clock::now()] { crash(tag, startTime); },
+              std::chrono::milliseconds(timeoutMs))) {}
 
 TimeCheck::~TimeCheck() {
-    getTimeCheckThread()->stopMonitoring(mEndTimeNs);
+    getTimeCheckThread()->cancelTask(mTimerHandle);
 }
 
-TimeCheck::TimeCheckThread::~TimeCheckThread()
-{
-    AutoMutex _l(mMutex);
-    requestExit();
-    mMonitorRequests.clear();
-    mCond.signal();
-}
+/* static */
+void TimeCheck::crash(const char* tag, std::chrono::system_clock::time_point startTime) {
+    std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
 
-nsecs_t TimeCheck::TimeCheckThread::startMonitoring(const char *tag, uint32_t timeoutMs) {
-    Mutex::Autolock _l(mMutex);
-    nsecs_t endTimeNs = systemTime() + milliseconds(timeoutMs);
-    for (; mMonitorRequests.indexOfKey(endTimeNs) >= 0; ++endTimeNs);
-    mMonitorRequests.add(endTimeNs, tag);
-    mCond.signal();
-    return endTimeNs;
-}
-
-void TimeCheck::TimeCheckThread::stopMonitoring(nsecs_t endTimeNs) {
-    Mutex::Autolock _l(mMutex);
-    mMonitorRequests.removeItem(endTimeNs);
-    mCond.signal();
-}
-
-bool TimeCheck::TimeCheckThread::threadLoop()
-{
-    status_t status = TIMED_OUT;
-    {
-        AutoMutex _l(mMutex);
-
-        if (exitPending()) {
-            return false;
+    // Generate audio HAL processes tombstones and allow time to complete
+    // before forcing restart
+    std::vector<pid_t> pids = getAudioHalPids();
+    if (pids.size() != 0) {
+        for (const auto& pid : pids) {
+            ALOGI("requesting tombstone for pid: %d", pid);
+            sigqueue(pid, DEBUGGER_SIGNAL, {.sival_int = 0});
         }
-
-        nsecs_t endTimeNs = INT64_MAX;
-        const char *tag = "<unspecified>";
-        // KeyedVector mMonitorRequests is ordered so take first entry as next timeout
-        if (mMonitorRequests.size() != 0) {
-            endTimeNs = mMonitorRequests.keyAt(0);
-            tag = mMonitorRequests.valueAt(0);
-        }
-
-        const nsecs_t waitTimeNs = endTimeNs - systemTime();
-        if (waitTimeNs > 0) {
-            status = mCond.waitRelative(mMutex, waitTimeNs);
-        }
-        if (status != NO_ERROR) {
-            // Generate audio HAL processes tombstones and allow time to complete
-            // before forcing restart
-            std::vector<pid_t> pids = getAudioHalPids();
-            if (pids.size() != 0) {
-                for (const auto& pid : pids) {
-                    ALOGI("requesting tombstone for pid: %d", pid);
-                    sigqueue(pid, DEBUGGER_SIGNAL, {.sival_int = 0});
-                }
-                sleep(1);
-            } else {
-                ALOGI("No HAL process pid available, skipping tombstones");
-            }
-            LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
-            LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
-        }
+        sleep(1);
+    } else {
+        ALOGI("No HAL process pid available, skipping tombstones");
     }
-    return true;
+    LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
+    LOG_ALWAYS_FATAL("TimeCheck timeout for %s (start=%s, end=%s)", tag,
+                     formatTime(startTime).c_str(), formatTime(endTime).c_str());
 }
 
-}; // namespace android
+};  // namespace android
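
With the rewrite, TimeCheck is a thin RAII wrapper over a shared TimerThread: the constructor schedules crash(tag, startTime) and the destructor cancels it. A minimal sketch of the scoped-watchdog pattern (the function name and the 3000 ms budget are illustrative):

#include <mediautils/TimeCheck.h>

void handleSetParameters() {
    // If this scope is still alive after 3 seconds, the scheduled TimerThread
    // task fires: audio HAL tombstones are requested and the process aborts
    // via LOG_ALWAYS_FATAL with the start/end timestamps in the message.
    android::TimeCheck check("setParameters", 3000 /*timeoutMs*/);
    // ... perform the guarded binder work here ...
}   // ~TimeCheck() cancels the pending task when the work completes in time.
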
diff --git a/media/utils/TimerThread-test.cpp b/media/utils/TimerThread-test.cpp
new file mode 100644
index 0000000..ee8a811
--- /dev/null
+++ b/media/utils/TimerThread-test.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <chrono>
+#include <thread>
+#include <gtest/gtest.h>
+#include <mediautils/TimerThread.h>
+
+using namespace std::chrono_literals;
+
+namespace android {
+namespace {
+
+constexpr auto kJitter = 10ms;
+
+TEST(TimerThread, Basic) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms - kJitter);
+    ASSERT_FALSE(taskRan);
+    std::this_thread::sleep_for(2 * kJitter);
+    ASSERT_TRUE(taskRan);
+}
+
+TEST(TimerThread, Cancel) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    TimerThread::Handle handle = thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms - kJitter);
+    ASSERT_FALSE(taskRan);
+    thread.cancelTask(handle);
+    std::this_thread::sleep_for(2 * kJitter);
+    ASSERT_FALSE(taskRan);
+}
+
+TEST(TimerThread, CancelAfterRun) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    TimerThread::Handle handle = thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms + kJitter);
+    ASSERT_TRUE(taskRan);
+    thread.cancelTask(handle);
+}
+
+TEST(TimerThread, MultipleTasks) {
+    std::array<std::atomic<bool>, 6> taskRan{};  // zero-init for defined initial reads
+    TimerThread thread;
+
+    auto startTime = std::chrono::steady_clock::now();
+
+    thread.scheduleTask([&taskRan] { taskRan[0] = true; }, 300ms);
+    thread.scheduleTask([&taskRan] { taskRan[1] = true; }, 100ms);
+    thread.scheduleTask([&taskRan] { taskRan[2] = true; }, 200ms);
+    thread.scheduleTask([&taskRan] { taskRan[3] = true; }, 400ms);
+    auto handle4 = thread.scheduleTask([&taskRan] { taskRan[4] = true; }, 200ms);
+    thread.scheduleTask([&taskRan] { taskRan[5] = true; }, 200ms);
+
+    // Task 1 should trigger around 100ms.
+    std::this_thread::sleep_until(startTime + 100ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_FALSE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 100ms + kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    // Cancel task 4 before it gets a chance to run.
+    thread.cancelTask(handle4);
+
+    // Tasks 2 and 5 should trigger around 200ms.
+    std::this_thread::sleep_until(startTime + 200ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 200ms + kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    // Task 0 should trigger around 300ms.
+    std::this_thread::sleep_until(startTime + 300ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 300ms + kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    // Task 3 should trigger around 400ms.
+    std::this_thread::sleep_until(startTime + 400ms - kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 400ms + kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_TRUE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+}
+
+
+}  // namespace
+}  // namespace android
diff --git a/media/utils/TimerThread.cpp b/media/utils/TimerThread.cpp
new file mode 100644
index 0000000..3c95798
--- /dev/null
+++ b/media/utils/TimerThread.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TimerThread"
+
+#include <optional>
+
+#include <mediautils/TimerThread.h>
+#include <utils/ThreadDefs.h>
+
+namespace android {
+
+TimerThread::TimerThread() : mThread([this] { threadFunc(); }) {
+    pthread_setname_np(mThread.native_handle(), "TimeCheckThread");
+    pthread_setschedprio(mThread.native_handle(), PRIORITY_URGENT_AUDIO);
+}
+
+TimerThread::~TimerThread() {
+    {
+        std::lock_guard _l(mMutex);
+        mShouldExit = true;
+        mCond.notify_all();
+    }
+    mThread.join();
+}
+
+TimerThread::Handle TimerThread::scheduleTaskAtDeadline(std::function<void()>&& func,
+                                                        TimePoint deadline) {
+    std::lock_guard _l(mMutex);
+
+    // To avoid key collisions, advance by 1 tick until the key is unique.
+    for (; mMonitorRequests.find(deadline) != mMonitorRequests.end();
+         deadline += TimePoint::duration(1))
+        ;
+    mMonitorRequests.emplace(deadline, std::move(func));
+    mCond.notify_all();
+    return deadline;
+}
+
+void TimerThread::cancelTask(Handle handle) {
+    std::lock_guard _l(mMutex);
+    mMonitorRequests.erase(handle);
+}
+
+void TimerThread::threadFunc() {
+    std::unique_lock _l(mMutex);
+
+    while (!mShouldExit) {
+        if (!mMonitorRequests.empty()) {
+            TimePoint nextDeadline = mMonitorRequests.begin()->first;
+            if (nextDeadline < std::chrono::steady_clock::now()) {
+                // Deadline expired.
+                mMonitorRequests.begin()->second();
+                mMonitorRequests.erase(mMonitorRequests.begin());
+            }
+            mCond.wait_until(_l, nextDeadline);
+        } else {
+            mCond.wait(_l);
+        }
+    }
+}
+
+}  // namespace android
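
The key-collision handling in scheduleTaskAtDeadline() above works because the map key is a steady_clock::time_point, so an occupied deadline can be nudged forward by one clock tick without meaningfully reordering the queue, and the adjusted deadline then doubles as the cancellation handle. A minimal standalone sketch of that idea, using a hypothetical helper name (insertUnique) rather than anything in this change:

    #include <chrono>
    #include <cstdio>
    #include <functional>
    #include <map>

    using TimePoint = std::chrono::steady_clock::time_point;

    // Insert 'func' keyed by 'deadline', advancing by one tick on collision,
    // mirroring the loop in TimerThread::scheduleTaskAtDeadline().
    TimePoint insertUnique(std::map<TimePoint, std::function<void()>>& requests,
                           std::function<void()> func, TimePoint deadline) {
        while (requests.count(deadline) != 0) {
            deadline += TimePoint::duration(1);  // one tick later keeps the key unique
        }
        requests.emplace(deadline, std::move(func));
        return deadline;  // the adjusted deadline is also the cancellation handle
    }

    int main() {
        std::map<TimePoint, std::function<void()>> requests;
        const TimePoint now = std::chrono::steady_clock::now();
        const TimePoint h1 = insertUnique(requests, [] { std::puts("first"); }, now);
        const TimePoint h2 = insertUnique(requests, [] { std::puts("second"); }, now);
        std::printf("handles distinct: %s\n", h1 != h2 ? "yes" : "no");  // prints "yes"
        return 0;
    }
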
diff --git a/media/utils/fuzzers/Android.bp b/media/utils/fuzzers/Android.bp
index c1698dc..d26e6c2 100644
--- a/media/utils/fuzzers/Android.bp
+++ b/media/utils/fuzzers/Android.bp
@@ -18,6 +18,7 @@
         "libutils",
         "libbinder",
         "framework-permission-aidl-cpp",
+        "packagemanager_aidl-cpp",
     ],
 
     cflags: [
@@ -31,11 +32,6 @@
         "bionic_libc_platform_headers",
         "libmedia_headers",
     ],
-
-    include_dirs: [
-        // For DEBUGGER_SIGNAL
-        "system/core/debuggerd/include",
-    ],
 }
 
 cc_fuzz {
diff --git a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
index 6e52512..51e8d7a 100644
--- a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
+++ b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
@@ -17,7 +17,7 @@
 #include <fcntl.h>
 
 #include <functional>
-#include <type_traits>
+#include  <type_traits>
 
 #include <android/content/AttributionSourceState.h>
 #include "fuzzer/FuzzedDataProvider.h"
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 734313c..de20d55 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -61,6 +61,7 @@
 
 // Used for calls that should come from system server or internal.
 // Note: system server is multiprocess for multiple users.  audioserver is not.
+// Note: if this method is modified, also update the same method in SensorService.h.
 static inline bool isAudioServerOrSystemServerUid(uid_t uid) {
     return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
 }
@@ -95,6 +96,7 @@
 bool captureMediaOutputAllowed(const AttributionSourceState& attributionSource);
 bool captureTunerAudioInputAllowed(const AttributionSourceState& attributionSource);
 bool captureVoiceCommunicationOutputAllowed(const AttributionSourceState& attributionSource);
+bool accessUltrasoundAllowed(const AttributionSourceState& attributionSource);
 bool captureHotwordAllowed(const AttributionSourceState& attributionSource);
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
@@ -104,6 +106,7 @@
 bool dumpAllowed();
 bool modifyPhoneStateAllowed(const AttributionSourceState& attributionSource);
 bool bypassInterruptionPolicyAllowed(const AttributionSourceState& attributionSource);
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource);
 void purgePermissionCache();
 int32_t getOpForSource(audio_source_t source);
 
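
The permission helpers declared in this header all take an android::content::AttributionSourceState describing the caller. As an illustration only (the wrapper below is hypothetical and not part of this change; the uid/pid/packageName field names follow the AttributionSourceState parcelable), a service-side check against one of the declared functions might look like:

    #include <string>

    #include <android/content/AttributionSourceState.h>
    #include <mediautils/ServiceUtilities.h>

    // Hypothetical convenience wrapper: may the given caller capture media output?
    bool callerMayCaptureMediaOutput(int32_t uid, int32_t pid, const std::string& package) {
        android::content::AttributionSourceState attributionSource;
        attributionSource.uid = uid;
        attributionSource.pid = pid;
        attributionSource.packageName = package;
        return android::captureMediaOutputAllowed(attributionSource);
    }
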
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index 5ba6d7c..0d6e80d 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -14,62 +14,33 @@
  * limitations under the License.
  */
 
+#pragma once
 
-#ifndef ANDROID_TIME_CHECK_H
-#define ANDROID_TIME_CHECK_H
-
-#include <utils/KeyedVector.h>
-#include <utils/Thread.h>
 #include <vector>
 
+#include <mediautils/TimerThread.h>
+
 namespace android {
 
 // A class monitoring execution time for a code block (scoped variable) and causing an assert
 // if it exceeds a certain time
 
 class TimeCheck {
-public:
-
+  public:
     // The default timeout is chosen to be less than system server watchdog timeout
     static constexpr uint32_t kDefaultTimeOutMs = 5000;
 
-            TimeCheck(const char *tag, uint32_t timeoutMs = kDefaultTimeOutMs);
-            ~TimeCheck();
-    static  void setAudioHalPids(const std::vector<pid_t>& pids);
-    static  std::vector<pid_t> getAudioHalPids();
+    TimeCheck(const char* tag, uint32_t timeoutMs = kDefaultTimeOutMs);
+    ~TimeCheck();
+    static void setAudioHalPids(const std::vector<pid_t>& pids);
+    static std::vector<pid_t> getAudioHalPids();
 
-private:
-
-    class TimeCheckThread : public Thread {
-    public:
-
-                            TimeCheckThread() {}
-        virtual             ~TimeCheckThread() override;
-
-                nsecs_t     startMonitoring(const char *tag, uint32_t timeoutMs);
-                void        stopMonitoring(nsecs_t endTimeNs);
-
-    private:
-
-                // RefBase
-        virtual void        onFirstRef() override { run("TimeCheckThread", PRIORITY_URGENT_AUDIO); }
-
-                // Thread
-        virtual bool        threadLoop() override;
-
-                Condition           mCond;
-                Mutex               mMutex;
-                // using the end time in ns as key is OK given the risk is low that two entries
-                // are added in such a way that <add time> + <timeout> are the same for both.
-                KeyedVector< nsecs_t, const char*>  mMonitorRequests;
-    };
-
-    static sp<TimeCheckThread> getTimeCheckThread();
+  private:
+    static TimerThread* getTimeCheckThread();
     static void accessAudioHalPids(std::vector<pid_t>* pids, bool update);
+    static void crash(const char* tag, std::chrono::system_clock::time_point startTime);
 
-    const           nsecs_t mEndTimeNs;
+    const TimerThread::Handle mTimerHandle;
 };
 
-}; // namespace android
-
-#endif  // ANDROID_TIME_CHECK_H
+};  // namespace android
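
As the class comment above describes, TimeCheck is meant to be used as a scoped variable: constructing it schedules a watchdog task on the shared TimerThread, and the destructor cancels that task; if the scope is still alive when the timeout fires, the process is aborted so the stall becomes visible. A minimal usage sketch (the surrounding function is hypothetical):

    #include <mediautils/TimeCheck.h>

    // Hypothetical caller: abort the process if the HAL call takes longer than 3 seconds.
    void doBlockingHalCall() {
        android::TimeCheck timeCheck("doBlockingHalCall", 3000 /* timeoutMs */);
        // ... potentially slow work; the watchdog fires if this scope outlives 3s ...
    }   // ~TimeCheck() cancels the pending watchdog when the call returns in time
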
diff --git a/media/utils/include/mediautils/TimerThread.h b/media/utils/include/mediautils/TimerThread.h
new file mode 100644
index 0000000..cf457b8
--- /dev/null
+++ b/media/utils/include/mediautils/TimerThread.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <functional>
+#include <map>
+#include <mutex>
+#include <thread>
+
+#include <android-base/thread_annotations.h>
+
+namespace android {
+
+/**
+ * A thread for deferred execution of tasks, with cancellation.
+ */
+class TimerThread {
+  public:
+    using Handle = std::chrono::steady_clock::time_point;
+
+    TimerThread();
+    ~TimerThread();
+
+    /**
+     * Schedule a task to be executed in the future (`timeout` duration from now).
+     * Returns a handle that can be used for cancellation.
+     */
+    template <typename R, typename P>
+    Handle scheduleTask(std::function<void()>&& func, std::chrono::duration<R, P> timeout) {
+        auto deadline = std::chrono::steady_clock::now()
+                + std::chrono::duration_cast<std::chrono::steady_clock::duration>(timeout);
+        return scheduleTaskAtDeadline(std::move(func), deadline);
+    }
+
+    /**
+     * Cancel a task, previously scheduled with scheduleTask().
+     * If the task has already executed, this is a no-op.
+     */
+    void cancelTask(Handle handle);
+
+  private:
+    using TimePoint = std::chrono::steady_clock::time_point;
+
+    std::condition_variable mCond;
+    std::mutex mMutex;
+    std::thread mThread;
+    std::map<TimePoint, std::function<void()>> mMonitorRequests GUARDED_BY(mMutex);
+    bool mShouldExit GUARDED_BY(mMutex) = false;
+
+    void threadFunc();
+    Handle scheduleTaskAtDeadline(std::function<void()>&& func, TimePoint deadline);
+};
+
+}  // namespace android
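
For reference, a short usage sketch of the interface above, mirroring the unit tests added in media/utils/TimerThread-test.cpp (the durations and the example() wrapper are arbitrary):

    #include <chrono>

    #include <mediautils/TimerThread.h>

    using namespace std::chrono_literals;

    void example() {
        android::TimerThread timer;

        // Runs roughly 100ms from now, on the TimerThread's own thread.
        timer.scheduleTask([] { /* deferred work */ }, 100ms);

        // Schedule and cancel before the deadline; cancelling after the task
        // has already run would be a harmless no-op.
        android::TimerThread::Handle handle =
                timer.scheduleTask([] { /* never runs */ }, 1s);
        timer.cancelTask(handle);
    }
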
diff --git a/services/OWNERS b/services/OWNERS
index f0b5e2f..17e605d 100644
--- a/services/OWNERS
+++ b/services/OWNERS
@@ -1,9 +1,6 @@
-chz@google.com
 elaurent@google.com
 essick@google.com
 etalvala@google.com
-gkasten@google.com
 hunga@google.com
-marcone@google.com
 nchalko@google.com
 quxiangfang@google.com
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index b91f302..763c070 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -41,6 +41,7 @@
         "FastThreadState.cpp",
         "NBAIO_Tee.cpp",
         "PatchPanel.cpp",
+        "PropertyUtils.cpp",
         "SpdifStreamOut.cpp",
         "StateQueue.cpp",
         "Threads.cpp",
@@ -54,6 +55,7 @@
     ],
 
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioflinger-aidl-cpp",
         "audioclient-types-aidl-cpp",
         "av-types-aidl-cpp",
@@ -81,6 +83,7 @@
         "libmedia_helper",
         "libshmemcompat",
         "libvibrator",
+        "packagemanager_aidl-cpp",
     ],
 
     static_libs: [
@@ -90,6 +93,7 @@
     ],
 
     header_libs: [
+        "libaaudio_headers",
         "libaudioclient_headers",
         "libaudiohal_headers",
         "libmedia_headers",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 65a163f..43f79ce 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -31,6 +31,7 @@
 #include <sys/resource.h>
 #include <thread>
 
+#include <android-base/stringprintf.h>
 #include <android/media/IAudioPolicyService.h>
 #include <android/os/IExternalVibratorService.h>
 #include <binder/IPCThreadState.h>
@@ -57,6 +58,7 @@
 
 #include "AudioFlinger.h"
 #include "NBAIO_Tee.h"
+#include "PropertyUtils.h"
 
 #include <media/AudioResamplerPublic.h>
 
@@ -64,6 +66,7 @@
 #include <system/audio_effects/effect_ns.h>
 #include <system/audio_effects/effect_aec.h>
 #include <system/audio_effects/effect_hapticgenerator.h>
+#include <system/audio_effects/effect_spatializer.h>
 
 #include <audio_utils/primitives.h>
 
@@ -102,7 +105,12 @@
 
 namespace android {
 
+#define MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION 7.0
+
+using ::android::base::StringPrintf;
 using media::IEffectClient;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
 using android::content::AttributionSourceState;
 
 static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
@@ -300,6 +308,11 @@
 
     mDevicesFactoryHalCallback = new DevicesFactoryHalCallbackImpl;
     mDevicesFactoryHal->setCallbackOnce(mDevicesFactoryHalCallback);
+
+    if (mDevicesFactoryHal->getHalVersion() <= MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+        mAAudioBurstsPerBuffer = getAAudioMixerBurstCountFromSystemProperty();
+        mAAudioHwBurstMinMicros = getAAudioHardwareBurstMinUsecFromSystemProperty();
+    }
 }
 
 status_t AudioFlinger::setAudioHalPids(const std::vector<pid_t>& pids) {
@@ -335,12 +348,50 @@
     return NO_ERROR;
 }
 
-// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
-const media::AudioVibratorInfo* AudioFlinger::getDefaultVibratorInfo_l() {
-    if (mAudioVibratorInfos.empty()) {
-        return nullptr;
+status_t AudioFlinger::getMmapPolicyInfos(
+            AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    Mutex::Autolock _l(mLock);
+    if (const auto it = mPolicyInfos.find(policyType); it != mPolicyInfos.end()) {
+        *policyInfos = it->second;
+        return NO_ERROR;
     }
-    return &mAudioVibratorInfos.front();
+    if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+        AutoMutex lock(mHardwareLock);
+        for (size_t i = 0; i < mAudioHwDevs.size(); ++i) {
+            AudioHwDevice *dev = mAudioHwDevs.valueAt(i);
+            std::vector<AudioMMapPolicyInfo> infos;
+            status_t status = dev->getMmapPolicyInfos(policyType, &infos);
+            if (status != NO_ERROR) {
+                ALOGE("Failed to query mmap policy info of %d, error %d",
+                      mAudioHwDevs.keyAt(i), status);
+                continue;
+            }
+            policyInfos->insert(policyInfos->end(), infos.begin(), infos.end());
+        }
+        mPolicyInfos[policyType] = *policyInfos;
+    } else {
+        getMmapPolicyInfosFromSystemProperty(policyType, policyInfos);
+        mPolicyInfos[policyType] = *policyInfos;
+    }
+    return NO_ERROR;
+}
+
+int32_t AudioFlinger::getAAudioMixerBurstCount() {
+    Mutex::Autolock _l(mLock);
+    return mAAudioBurstsPerBuffer;
+}
+
+int32_t AudioFlinger::getAAudioHardwareBurstMinUsec() {
+    Mutex::Autolock _l(mLock);
+    return mAAudioHwBurstMinMicros;
+}
+
+// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
+std::optional<media::AudioVibratorInfo> AudioFlinger::getDefaultVibratorInfo_l() {
+    if (mAudioVibratorInfos.empty()) {
+        return {};
+    }
+    return mAudioVibratorInfos.front();
 }
 
 AudioFlinger::~AudioFlinger()
@@ -566,10 +617,12 @@
     String8 result;
 
     result.append("Clients:\n");
+    result.append("   pid    heap_size\n");
     for (size_t i = 0; i < mClients.size(); ++i) {
         sp<Client> client = mClients.valueAt(i).promote();
         if (client != 0) {
-            result.appendFormat("  pid: %d\n", client->pid());
+            result.appendFormat("%6d %12zu\n", client->pid(),
+                    client->heap()->getMemoryHeap()->getSize());
         }
     }
 
@@ -695,7 +748,7 @@
         // dump all hardware devs
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
             sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
-            dev->dump(fd);
+            dev->dump(fd, args);
         }
 
         mPatchPanel.dump(fd);
@@ -997,8 +1050,9 @@
                 }
             }
         }
-
-        setAudioHwSyncForSession_l(thread, sessionId);
+        if ((output.flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) == AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
+            setAudioHwSyncForSession_l(thread, sessionId);
+        }
     }
 
     if (lStatus != NO_ERROR) {
@@ -1873,13 +1927,13 @@
     }
 }
 
-void AudioFlinger::ioConfigChanged(audio_io_config_event event,
+void AudioFlinger::ioConfigChanged(audio_io_config_event_t event,
                                    const sp<AudioIoDescriptor>& ioDesc,
                                    pid_t pid) {
+    media::AudioIoConfigEvent eventAidl = VALUE_OR_FATAL(
+            legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(event));
     media::AudioIoDescriptor descAidl = VALUE_OR_FATAL(
             legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(ioDesc));
-    media::AudioIoConfigEvent eventAidl = VALUE_OR_FATAL(
-            legacy2aidl_audio_io_config_event_AudioIoConfigEvent(event));
 
     Mutex::Autolock _l(mClientLock);
     size_t size = mNotificationClients.size();
@@ -2142,6 +2196,20 @@
             goto Exit;
         }
 
+        if (recordTrack->isFastTrack()) {
+            output.serverConfig = {
+                    thread->sampleRate(),
+                    thread->channelMask(),
+                    thread->format()
+            };
+        } else {
+            output.serverConfig = {
+                    recordTrack->sampleRate(),
+                    recordTrack->channelMask(),
+                    recordTrack->format()
+            };
+        }
+
         // Check if one effect chain was awaiting for an AudioRecord to be created on this
         // session and move it to this thread.
         sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
@@ -2272,6 +2340,17 @@
         mHardwareStatus = AUDIO_HW_IDLE;
     }
 
+    if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+        if (int32_t mixerBursts = dev->getAAudioMixerBurstCount();
+            mixerBursts > mAAudioBurstsPerBuffer) {
+            mAAudioBurstsPerBuffer = mixerBursts;
+        }
+        if (int32_t hwBurstMinMicros = dev->getAAudioHardwareBurstMinUsec();
+            hwBurstMinMicros < mAAudioHwBurstMinMicros || mAAudioHwBurstMinMicros == 0) {
+            mAAudioHwBurstMinMicros = hwBurstMinMicros;
+        }
+    }
+
     mAudioHwDevs.add(handle, audioDevice);
 
     ALOGI("loadHwModule() Loaded %s audio interface, handle %d", name, handle);
@@ -2455,6 +2534,10 @@
         ThreadBase *thread = (ThreadBase *)mRecordThreads.valueAt(i).get();
         thread->systemReady();
     }
+    for (size_t i = 0; i < mMmapThreads.size(); i++) {
+        ThreadBase *thread = (ThreadBase *)mMmapThreads.valueAt(i).get();
+        thread->systemReady();
+    }
     return NO_ERROR;
 }
 
@@ -2501,14 +2584,15 @@
 
 sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                         audio_io_handle_t *output,
-                                                        audio_config_t *config,
+                                                        audio_config_t *halConfig,
+                                                        audio_config_base_t *mixerConfig __unused,
                                                         audio_devices_t deviceType,
                                                         const String8& address,
                                                         audio_output_flags_t flags)
 {
     AudioHwDevice *outHwDev = findSuitableHwDev_l(module, deviceType);
     if (outHwDev == NULL) {
-        return 0;
+        return nullptr;
     }
 
     if (*output == AUDIO_IO_HANDLE_NONE) {
@@ -2517,9 +2601,17 @@
         // Audio Policy does not currently request a specific output handle.
         // If this is ever needed, see openInput_l() for example code.
         ALOGE("openOutput_l requested output handle %d is not AUDIO_IO_HANDLE_NONE", *output);
-        return 0;
+        return nullptr;
     }
 
+#ifndef MULTICHANNEL_EFFECT_CHAIN
+    if (flags & AUDIO_OUTPUT_FLAG_SPATIALIZER) {
+        ALOGE("openOutput_l() cannot create spatializer thread "
+                "without #define MULTICHANNEL_EFFECT_CHAIN");
+        return nullptr;
+    }
+#endif
+
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
     // FOR TESTING ONLY:
@@ -2529,16 +2621,16 @@
         // Check only for Normal Mixing mode
         if (kEnableExtendedPrecision) {
             // Specify format (uncomment one below to choose)
-            //config->format = AUDIO_FORMAT_PCM_FLOAT;
-            //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
-            //config->format = AUDIO_FORMAT_PCM_32_BIT;
-            //config->format = AUDIO_FORMAT_PCM_8_24_BIT;
-            // ALOGV("openOutput_l() upgrading format to %#08x", config->format);
+            //halConfig->format = AUDIO_FORMAT_PCM_FLOAT;
+            //halConfig->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
+            //halConfig->format = AUDIO_FORMAT_PCM_32_BIT;
+            //halConfig->format = AUDIO_FORMAT_PCM_8_24_BIT;
+            // ALOGV("openOutput_l() upgrading format to %#08x", halConfig->format);
         }
         if (kEnableExtendedChannels) {
             // Specify channel mask (uncomment one below to choose)
-            //config->channel_mask = audio_channel_out_mask_from_count(4);  // for USB 4ch
-            //config->channel_mask = audio_channel_mask_from_representation_and_bits(
+            //halConfig->channel_mask = audio_channel_out_mask_from_count(4);  // for USB 4ch
+            //halConfig->channel_mask = audio_channel_mask_from_representation_and_bits(
             //        AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1);  // another 4ch example
         }
     }
@@ -2549,7 +2641,7 @@
             *output,
             deviceType,
             flags,
-            config,
+            halConfig,
             address.string());
 
     mHardwareStatus = AUDIO_HW_IDLE;
@@ -2564,13 +2656,18 @@
             return thread;
         } else {
             sp<PlaybackThread> thread;
-            if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+            if (flags & AUDIO_OUTPUT_FLAG_SPATIALIZER) {
+                thread = new SpatializerThread(this, outputStream, *output,
+                                                    mSystemReady, mixerConfig);
+                ALOGV("openOutput_l() created spatializer output: ID %d thread %p",
+                      *output, thread.get());
+            } else if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
                 thread = new OffloadThread(this, outputStream, *output, mSystemReady);
                 ALOGV("openOutput_l() created offload output: ID %d thread %p",
                       *output, thread.get());
             } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
-                    || !isValidPcmSinkFormat(config->format)
-                    || !isValidPcmSinkChannelMask(config->channel_mask)) {
+                    || !isValidPcmSinkFormat(halConfig->format)
+                    || !isValidPcmSinkChannelMask(halConfig->channel_mask)) {
                 thread = new DirectOutputThread(this, outputStream, *output, mSystemReady);
                 ALOGV("openOutput_l() created direct output: ID %d thread %p",
                       *output, thread.get());
@@ -2589,7 +2686,7 @@
         }
     }
 
-    return 0;
+    return nullptr;
 }
 
 status_t AudioFlinger::openOutput(const media::OpenOutputRequest& request,
@@ -2597,8 +2694,10 @@
 {
     audio_module_handle_t module = VALUE_OR_RETURN_STATUS(
             aidl2legacy_int32_t_audio_module_handle_t(request.module));
-    audio_config_t config = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioConfig_audio_config_t(request.config));
+    audio_config_t halConfig = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioConfig_audio_config_t(request.halConfig, false /*isInput*/));
+    audio_config_base_t mixerConfig = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(request.mixerConfig, false/*isInput*/));
     sp<DeviceDescriptorBase> device = VALUE_OR_RETURN_STATUS(
             aidl2legacy_DeviceDescriptorBase(request.device));
     audio_output_flags_t flags = VALUE_OR_RETURN_STATUS(
@@ -2611,9 +2710,9 @@
               "Channels %#x, flags %#x",
               this, module,
               device->toString().c_str(),
-              config.sample_rate,
-              config.format,
-              config.channel_mask,
+              halConfig.sample_rate,
+              halConfig.format,
+              halConfig.channel_mask,
               flags);
 
     audio_devices_t deviceType = device->type();
@@ -2625,7 +2724,8 @@
 
     Mutex::Autolock _l(mLock);
 
-    sp<ThreadBase> thread = openOutput_l(module, &output, &config, deviceType, address, flags);
+    sp<ThreadBase> thread = openOutput_l(module, &output, &halConfig,
+            &mixerConfig, deviceType, address, flags);
     if (thread != 0) {
         if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
@@ -2650,7 +2750,8 @@
             mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
         }
         response->output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
-        response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+        response->config = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_config_t_AudioConfig(halConfig, false /*isInput*/));
         response->latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(latencyMs));
         response->flags = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
@@ -2736,9 +2837,7 @@
             mMmapThreads.removeItem(output);
             ALOGD("closing mmapThread %p", mmapThread.get());
         }
-        const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
-        ioDesc->mIoHandle = output;
-        ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
+        ioConfigChanged(AUDIO_OUTPUT_CLOSED, sp<AudioIoDescriptor>::make(output));
         mPatchPanel.notifyStreamClosed(output);
     }
     // The thread entity (active unit of execution) is no longer running here,
@@ -2811,16 +2910,16 @@
 {
     Mutex::Autolock _l(mLock);
 
-    if (request.device.type == AUDIO_DEVICE_NONE) {
+    AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceTypeAddress(request.device));
+    if (device.mType == AUDIO_DEVICE_NONE) {
         return BAD_VALUE;
     }
 
     audio_io_handle_t input = VALUE_OR_RETURN_STATUS(
             aidl2legacy_int32_t_audio_io_handle_t(request.input));
     audio_config_t config = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioConfig_audio_config_t(request.config));
-    AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioDeviceTypeAddress(request.device));
+            aidl2legacy_AudioConfig_audio_config_t(request.config, true /*isInput*/));
 
     sp<ThreadBase> thread = openInput_l(
             VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_module_handle_t(request.module)),
@@ -2828,13 +2927,14 @@
             &config,
             device.mType,
             device.address().c_str(),
-            VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSourceType_audio_source_t(request.source)),
+            VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSource_audio_source_t(request.source)),
             VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_input_flags_t_mask(request.flags)),
             AUDIO_DEVICE_NONE,
             String8{});
 
     response->input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
-    response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+    response->config = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
     response->device = request.device;
 
     if (thread != 0) {
@@ -2996,9 +3096,7 @@
             dumpToThreadLog_l(mmapThread);
             mMmapThreads.removeItem(input);
         }
-        const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
-        ioDesc->mIoHandle = input;
-        ioConfigChanged(AUDIO_INPUT_CLOSED, ioDesc);
+        ioConfigChanged(AUDIO_INPUT_CLOSED, sp<AudioIoDescriptor>::make(input));
     }
     // FIXME: calling thread->exit() without mLock held should not be needed anymore now that
     // we have a different lock for notification client
@@ -3729,6 +3827,15 @@
             goto Exit;
         }
 
+        // Only audio policy service can create a spatializer effect
+        if ((memcmp(&descOut.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0) &&
+            (callingUid != AID_AUDIOSERVER || currentPid != getpid())) {
+            ALOGW("%s: attempt to create a spatializer effect from uid/pid %d/%d",
+                    __func__, callingUid, currentPid);
+            lStatus = PERMISSION_DENIED;
+            goto Exit;
+        }
+
         if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
             // if the output returned by getOutputForEffect() is removed before we lock the
             // mutex below, the call to checkPlaybackThread_l(io) below will detect it
@@ -3744,7 +3851,7 @@
             ALOGV("%s device type %#x address %s", __func__, device.mType, device.getAddress());
             handle = mDeviceEffectManager.createEffect_l(
                     &descOut, device, client, effectClient, mPatchPanel.patches_l(),
-                    &enabledOut, &lStatus, probe);
+                    &enabledOut, &lStatus, probe, request.notifyFramesProcessed);
             if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
                 // remove local strong reference to Client with mClientLock held
                 Mutex::Autolock _cl(mClientLock);
@@ -3797,7 +3904,8 @@
                 io = mPlaybackThreads.keyAt(0);
             }
             ALOGV("createEffect() got io %d for effect %s", io, descOut.name);
-        } else if (checkPlaybackThread_l(io) != nullptr) {
+        } else if (checkPlaybackThread_l(io) != nullptr
+                        && sessionId != AUDIO_SESSION_OUTPUT_STAGE) {
             // allow only one effect chain per sessionId on mPlaybackThreads.
             for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
                 const audio_io_handle_t checkIo = mPlaybackThreads.keyAt(i);
@@ -3863,7 +3971,8 @@
             }
         }
         handle = thread->createEffect_l(client, effectClient, priority, sessionId,
-                                        &descOut, &enabledOut, &lStatus, pinned, probe);
+                                        &descOut, &enabledOut, &lStatus, pinned, probe,
+                                        request.notifyFramesProcessed);
         if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
             // remove local strong reference to Client with mClientLock held
             Mutex::Autolock _cl(mClientLock);
@@ -3881,6 +3990,12 @@
 
 Register:
     if (!probe && (lStatus == NO_ERROR || lStatus == ALREADY_EXISTS)) {
+        if (lStatus == ALREADY_EXISTS) {
+            response->alreadyExists = true;
+            lStatus = NO_ERROR;
+        } else {
+            response->alreadyExists = false;
+        }
         // Check CPU and memory usage
         sp<EffectBase> effect = handle->effect().promote();
         if (effect != nullptr) {
@@ -3973,47 +4088,87 @@
     // so that a new chain is created with correct parameters when first effect is added. This is
     // otherwise unnecessary as removeEffect_l() will remove the chain when last effect is
     // removed.
+    // TODO(b/216875016): consider holding the effect chain locks for the duration of the move.
     srcThread->removeEffectChain_l(chain);
 
     // transfer all effects one by one so that new effect chain is created on new thread with
     // correct buffer sizes and audio parameters and effect engines reconfigured accordingly
     sp<EffectChain> dstChain;
-    uint32_t strategy = 0; // prevent compiler warning
     sp<EffectModule> effect = chain->getEffectFromId_l(0);
     Vector< sp<EffectModule> > removed;
     status_t status = NO_ERROR;
-    while (effect != 0) {
+    std::string errorString;
+    while (effect != nullptr) {
         srcThread->removeEffect_l(effect);
         removed.add(effect);
         status = dstThread->addEffect_l(effect);
         if (status != NO_ERROR) {
+            errorString = StringPrintf(
+                    "cannot add effect %p to destination thread", effect.get());
             break;
         }
-        // removeEffect_l() has stopped the effect if it was active so it must be restarted
-        if (effect->state() == EffectModule::ACTIVE ||
-                effect->state() == EffectModule::STOPPING) {
-            effect->start();
-        }
         // if the move request is not received from audio policy manager, the effect must be
-        // re-registered with the new strategy and output
-        if (dstChain == 0) {
+        // re-registered with the new strategy and output.
+
+        // We obtain the dstChain once the effect is on the new thread.
+        if (dstChain == nullptr) {
             dstChain = effect->getCallback()->chain().promote();
-            if (dstChain == 0) {
-                ALOGW("moveEffectChain_l() cannot get chain from effect %p", effect.get());
+            if (dstChain == nullptr) {
+                errorString = StringPrintf("cannot get chain from effect %p", effect.get());
                 status = NO_INIT;
                 break;
             }
-            strategy = dstChain->strategy();
         }
         effect = chain->getEffectFromId_l(0);
     }
 
+    size_t restored = 0;
     if (status != NO_ERROR) {
-        for (size_t i = 0; i < removed.size(); i++) {
-            srcThread->addEffect_l(removed[i]);
+        dstChain.clear(); // dstChain is now from the srcThread (could be recreated).
+        for (const auto& effect : removed) {
+            dstThread->removeEffect_l(effect); // Note: Depending on error location, the last
+                                               // effect may not have been placed on dstThread.
+            if (srcThread->addEffect_l(effect) == NO_ERROR) {
+                ++restored;
+                if (dstChain == nullptr) {
+                    dstChain = effect->getCallback()->chain().promote();
+                }
+            }
         }
     }
 
+    // After all the effects have been moved to the new thread (or put back), we restart them
+    // because removeEffect_l() stops any effect that was active.
+    size_t started = 0;
+    if (dstChain != nullptr && !removed.empty()) {
+        // If we do not take the dstChain lock, it is possible that processing is ongoing
+        // while we are starting the effect.  This can cause glitches with volume,
+        // see b/202360137.
+        dstChain->lock();
+        for (const auto& effect : removed) {
+            if (effect->state() == EffectModule::ACTIVE ||
+                    effect->state() == EffectModule::STOPPING) {
+                ++started;
+                effect->start();
+            }
+        }
+        dstChain->unlock();
+    }
+
+    if (status != NO_ERROR) {
+        if (errorString.empty()) {
+            errorString = StringPrintf("%s: failed status %d", __func__, status);
+        }
+        ALOGW("%s: %s unsuccessful move of session %d from srcThread %p to dstThread %p "
+                "(%zu effects removed from srcThread, %zu effects restored to srcThread, "
+                "%zu effects started)",
+                __func__, errorString.c_str(), sessionId, srcThread, dstThread,
+                removed.size(), restored, started);
+    } else {
+        ALOGD("%s: successful move of session %d from srcThread %p to dstThread %p "
+                "(%zu effects moved, %zu effects started)",
+                __func__, sessionId, srcThread, dstThread, removed.size(), started);
+    }
     return status;
 }
 
@@ -4178,6 +4333,7 @@
         case TransactionCode::LIST_AUDIO_PATCHES:
         case TransactionCode::SET_AUDIO_PORT_CONFIG:
         case TransactionCode::SET_RECORD_SILENCED:
+        case TransactionCode::AUDIO_POLICY_READY:
             ALOGW("%s: transaction %d received from PID %d",
                   __func__, code, IPCThreadState::self()->getCallingPid());
             // return status only for non void methods
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index fff61f8..8c546cc 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -269,6 +269,9 @@
 
     /* Indicate JAVA services are ready (scheduling, power management ...) */
     virtual status_t systemReady();
+    virtual status_t audioPolicyReady() { mAudioPolicyReady.store(true); return NO_ERROR; }
+            bool isAudioPolicyReady() const { return mAudioPolicyReady.load(); }
+
 
     virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
@@ -279,6 +282,14 @@
     virtual status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs);
 
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+    virtual int32_t getAAudioMixerBurstCount();
+
+    virtual int32_t getAAudioHardwareBurstMinUsec();
+
     status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
         const std::function<status_t()>& delegate) override;
 
@@ -309,7 +320,7 @@
     void updateDownStreamPatches_l(const struct audio_patch *patch,
                                    const std::set<audio_io_handle_t> streams);
 
-    const media::AudioVibratorInfo* getDefaultVibratorInfo_l();
+    std::optional<media::AudioVibratorInfo> getDefaultVibratorInfo_l();
 
 private:
     // FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
@@ -735,7 +746,8 @@
                                            const String8& outputDeviceAddress);
               sp<ThreadBase> openOutput_l(audio_module_handle_t module,
                                           audio_io_handle_t *output,
-                                          audio_config_t *config,
+                                          audio_config_t *halConfig,
+                                          audio_config_base_t *mixerConfig,
                                           audio_devices_t deviceType,
                                           const String8& address,
                                           audio_output_flags_t flags);
@@ -746,7 +758,7 @@
               // no range check, AudioFlinger::mLock held
               bool streamMute_l(audio_stream_type_t stream) const
                                 { return mStreamTypes[stream].mute; }
-              void ioConfigChanged(audio_io_config_event event,
+              void ioConfigChanged(audio_io_config_event_t event,
                                    const sp<AudioIoDescriptor>& ioDesc,
                                    pid_t pid = 0);
 
@@ -986,6 +998,7 @@
     DeviceEffectManager mDeviceEffectManager;
 
     bool       mSystemReady;
+    std::atomic_bool mAudioPolicyReady{};
 
     mediautils::UidInfo mUidInfo;
 
@@ -999,6 +1012,11 @@
 
     // Keep in sync with java definition in media/java/android/media/AudioRecord.java
     static constexpr int32_t kMaxSharedAudioHistoryMs = 5000;
+
+    std::map<media::audio::common::AudioMMapPolicyType,
+             std::vector<media::audio::common::AudioMMapPolicyInfo>> mPolicyInfos;
+    int32_t mAAudioBurstsPerBuffer = 0;
+    int32_t mAAudioHwBurstMinMicros = 0;
 };
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 16b25f6..dee6161 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -29,6 +29,9 @@
 
 namespace android {
 
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+
 // ----------------------------------------------------------------------------
 
 status_t AudioHwDevice::openOutputStream(
@@ -102,5 +105,18 @@
     return mHwDevice->getAudioPort(port);
 }
 
+status_t AudioHwDevice::getMmapPolicyInfos(
+            AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) const {
+    return mHwDevice->getMmapPolicyInfos(policyType, policyInfos);
+}
+
+int32_t AudioHwDevice::getAAudioMixerBurstCount() const {
+    return mHwDevice->getAAudioMixerBurstCount();
+}
+
+int32_t AudioHwDevice::getAAudioHardwareBurstMinUsec() const {
+    return mHwDevice->getAAudioHardwareBurstMinUsec();
+}
+
 
 }; // namespace android
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index fc2c693..8c5d239 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -22,6 +22,8 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/audiohal/DeviceHalInterface.h>
 #include <utils/Errors.h>
 #include <system/audio.h>
@@ -85,6 +87,14 @@
 
     status_t getAudioPort(struct audio_port_v7 *port) const;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) const;
+
+    int32_t getAAudioMixerBurstCount() const;
+
+    int32_t getAAudioHardwareBurstMinUsec() const;
+
 private:
     const audio_module_handle_t mHandle;
     const char * const          mModuleName;
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index cecd52b..53ac5cb 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -77,7 +77,8 @@
         const std::map<audio_patch_handle_t, PatchPanel::Patch>& patches,
         int *enabled,
         status_t *status,
-        bool probe) {
+        bool probe,
+        bool notifyFramesProcessed) {
     sp<DeviceEffectProxy> effect;
     sp<EffectHandle> handle;
     status_t lStatus;
@@ -95,10 +96,12 @@
             effect = iter->second;
         } else {
             effect = new DeviceEffectProxy(device, mMyCallback,
-                    descriptor, mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT));
+                    descriptor, mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT),
+                    notifyFramesProcessed);
         }
         // create effect handle and connect it to effect module
-        handle = new EffectHandle(effect, client, effectClient, 0 /*priority*/);
+        handle = new EffectHandle(effect, client, effectClient, 0 /*priority*/,
+                                  notifyFramesProcessed);
         lStatus = handle->initCheck();
         if (lStatus == NO_ERROR) {
             lStatus = effect->addHandle(handle.get());
diff --git a/services/audioflinger/DeviceEffectManager.h b/services/audioflinger/DeviceEffectManager.h
index a05f5fe..d2faa70 100644
--- a/services/audioflinger/DeviceEffectManager.h
+++ b/services/audioflinger/DeviceEffectManager.h
@@ -37,7 +37,8 @@
                 const std::map<audio_patch_handle_t, PatchPanel::Patch>& patches,
                 int *enabled,
                 status_t *status,
-                bool probe);
+                bool probe,
+                bool notifyFramesProcessed);
     void createAudioPatch(audio_patch_handle_t handle, const PatchPanel::Patch& patch);
     void releaseAudioPatch(audio_patch_handle_t handle);
 
@@ -161,10 +162,16 @@
     bool isOffload() const override { return false; }
     bool isOffloadOrDirect() const override { return false; }
     bool isOffloadOrMmap() const override { return false; }
+    bool isSpatializer() const override { return false; }
 
     uint32_t  sampleRate() const override { return 0; }
-    audio_channel_mask_t channelMask() const override { return AUDIO_CHANNEL_NONE; }
-    uint32_t channelCount() const override { return 0; }
+    audio_channel_mask_t inChannelMask(int id __unused) const override {
+        return AUDIO_CHANNEL_NONE;
+    }
+    uint32_t inChannelCount(int id __unused) const override { return 0; }
+    audio_channel_mask_t outChannelMask() const override { return AUDIO_CHANNEL_NONE; }
+    uint32_t outChannelCount() const override { return 0; }
+
     audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
     size_t    frameCount() const override  { return 0; }
     uint32_t  latency() const override  { return 0; }
@@ -190,6 +197,10 @@
 
     wp<EffectChain> chain() const override { return nullptr; }
 
+    bool isAudioPolicyReady() const override {
+        return mManager.audioFlinger().isAudioPolicyReady();
+    }
+
     int newEffectId() { return mManager.audioFlinger().nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT); }
 
     status_t addEffectToHal(audio_port_handle_t deviceId,
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index b267d88..b748f9d 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -24,9 +24,11 @@
 #include "Configuration.h"
 #include <utils/Log.h>
 #include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_downmix.h>
 #include <system/audio_effects/effect_dynamicsprocessing.h>
 #include <system/audio_effects/effect_hapticgenerator.h>
 #include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_spatializer.h>
 #include <system/audio_effects/effect_visualizer.h>
 #include <audio_utils/channels.h>
 #include <audio_utils/primitives.h>
@@ -242,6 +244,12 @@
 
     {
         Mutex::Autolock _l(mLock);
+
+        if ((isInternal_l() && !mPolicyRegistered)
+                || !getCallback()->isAudioPolicyReady()) {
+            return NO_ERROR;
+        }
+
         // register effect when first handle is attached and unregister when last handle is removed
         if (mPolicyRegistered != mHandles.size() > 0) {
             doRegister = true;
@@ -642,6 +650,13 @@
             mState = IDLE;
         }
         break;
+    case ACTIVE:
+        for (size_t i = 0; i < mHandles.size(); i++) {
+            if (!mHandles[i]->disconnected()) {
+                mHandles[i]->framesProcessed(mConfig.inputCfg.buffer.frameCount);
+            }
+        }
+        break;
     default: //IDLE , ACTIVE, DESTROYED
         break;
     }
@@ -875,9 +890,9 @@
     // similar to output EFFECT_FLAG_TYPE_INSERT/REPLACE,
     // in which case input channel masks should be used here.
     callback = getCallback();
-    channelMask = callback->channelMask();
+    channelMask = callback->inChannelMask(mId);
     mConfig.inputCfg.channels = channelMask;
-    mConfig.outputCfg.channels = channelMask;
+    mConfig.outputCfg.channels = callback->outChannelMask();
 
     if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
         if (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_MONO) {
@@ -940,11 +955,7 @@
     // Auxiliary effect:
     //      accumulates in output buffer: input buffer != output buffer
     // Therefore: accumulate <=> input buffer != output buffer
-    if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
-        mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
-    } else {
-        mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-    }
+    mConfig.outputCfg.accessMode = requiredEffectBufferAccessMode();
     mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
     mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
     mConfig.inputCfg.buffer.frameCount = callback->frameCount();
@@ -1600,7 +1611,7 @@
     return status;
 }
 
-status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo)
+status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo)
 {
     if (mStatus != NO_ERROR) {
         return mStatus;
@@ -1610,15 +1621,17 @@
         return INVALID_OPERATION;
     }
 
+    const size_t paramCount = 3;
     std::vector<uint8_t> request(
-            sizeof(effect_param_t) + sizeof(int32_t) + 2 * sizeof(float));
+            sizeof(effect_param_t) + sizeof(int32_t) + paramCount * sizeof(float));
     effect_param_t *param = (effect_param_t*) request.data();
     param->psize = sizeof(int32_t);
-    param->vsize = 2 * sizeof(float);
+    param->vsize = paramCount * sizeof(float);
     *(int32_t*)param->data = HG_PARAM_VIBRATOR_INFO;
     float* vibratorInfoPtr = reinterpret_cast<float*>(param->data + sizeof(int32_t));
-    vibratorInfoPtr[0] = vibratorInfo->resonantFrequency;
-    vibratorInfoPtr[1] = vibratorInfo->qFactor;
+    vibratorInfoPtr[0] = vibratorInfo.resonantFrequency;
+    vibratorInfoPtr[1] = vibratorInfo.qFactor;
+    vibratorInfoPtr[2] = vibratorInfo.maxAmplitude;
     std::vector<uint8_t> response;
     status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
     if (status == NO_ERROR) {
@@ -1708,10 +1721,11 @@
 AudioFlinger::EffectHandle::EffectHandle(const sp<EffectBase>& effect,
                                          const sp<AudioFlinger::Client>& client,
                                          const sp<media::IEffectClient>& effectClient,
-                                         int32_t priority)
+                                         int32_t priority, bool notifyFramesProcessed)
     : BnEffect(),
     mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL),
-    mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false)
+    mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false),
+    mNotifyFramesProcessed(notifyFramesProcessed)
 {
     ALOGV("constructor %p client %p", this, client.get());
 
@@ -2020,6 +2034,13 @@
     }
 }
 
+void AudioFlinger::EffectHandle::framesProcessed(int32_t frames) const
+{
+    if (mEffectClient != 0 && mNotifyFramesProcessed) {
+        mEffectClient->framesProcessed(frames);
+    }
+}
+
 void AudioFlinger::EffectHandle::dumpToBuffer(char* buffer, size_t size)
 {
     bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
@@ -2048,11 +2069,11 @@
       mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX),
       mEffectCallback(new EffectCallback(wp<EffectChain>(this), thread))
 {
-    mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
     sp<ThreadBase> p = thread.promote();
     if (p == nullptr) {
         return;
     }
+    mStrategy = p->getStrategyForStream(AUDIO_STREAM_MUSIC);
     mMaxTailBuffers = ((kProcessTailDurationMs * p->sampleRate()) / 1000) /
                                     p->frameCount();
 }
@@ -2125,8 +2146,8 @@
     if (mInBuffer == NULL) {
         return;
     }
-    const size_t frameSize =
-            audio_bytes_per_sample(EFFECT_BUFFER_FORMAT) * mEffectCallback->channelCount();
+    const size_t frameSize = audio_bytes_per_sample(EFFECT_BUFFER_FORMAT)
+            * mEffectCallback->inChannelCount(mEffects[0]->id());
 
     memset(mInBuffer->audioBuffer()->raw, 0, mEffectCallback->frameCount() * frameSize);
     mInBuffer->commit();
@@ -2212,11 +2233,9 @@
 // addEffect_l() must be called with ThreadBase::mLock and EffectChain::mLock held
 status_t AudioFlinger::EffectChain::addEffect_ll(const sp<EffectModule>& effect)
 {
-    effect_descriptor_t desc = effect->desc();
-    uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
-
     effect->setCallback(mEffectCallback);
 
+    effect_descriptor_t desc = effect->desc();
     if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
         // Auxiliary effects are inserted at the beginning of mEffects vector as
         // they are processed first and accumulated in chain input buffer
@@ -2236,97 +2255,139 @@
                 numSamples * sizeof(int32_t), &halBuffer);
 #endif
         if (result != OK) return result;
+
+        effect->configure();
+
         effect->setInBuffer(halBuffer);
         // auxiliary effects output samples to chain input buffer for further processing
         // by insert effects
         effect->setOutBuffer(mInBuffer);
     } else {
-        // Insert effects are inserted at the end of mEffects vector as they are processed
-        //  after track and auxiliary effects.
-        // Insert effect order as a function of indicated preference:
-        //  if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if
-        //  another effect is present
-        //  else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the
-        //  last effect claiming first position
-        //  else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the
-        //  first effect claiming last position
-        //  else if EFFECT_FLAG_INSERT_ANY insert after first or before last
-        // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
-        // already present
-
-        size_t size = mEffects.size();
-        size_t idx_insert = size;
-        ssize_t idx_insert_first = -1;
-        ssize_t idx_insert_last = -1;
-
-        for (size_t i = 0; i < size; i++) {
-            effect_descriptor_t d = mEffects[i]->desc();
-            uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
-            uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
-            if (iMode == EFFECT_FLAG_TYPE_INSERT) {
-                // check invalid effect chaining combinations
-                if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
-                    iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
-                    ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s",
-                            desc.name, d.name);
-                    return INVALID_OPERATION;
-                }
-                // remember position of first insert effect and by default
-                // select this as insert position for new effect
-                if (idx_insert == size) {
-                    idx_insert = i;
-                }
-                // remember position of last insert effect claiming
-                // first position
-                if (iPref == EFFECT_FLAG_INSERT_FIRST) {
-                    idx_insert_first = i;
-                }
-                // remember position of first insert effect claiming
-                // last position
-                if (iPref == EFFECT_FLAG_INSERT_LAST &&
-                    idx_insert_last == -1) {
-                    idx_insert_last = i;
-                }
-            }
+        ssize_t idx_insert = getInsertIndex(desc);
+        if (idx_insert < 0) {
+            return INVALID_OPERATION;
         }
 
-        // modify idx_insert from first position if needed
-        if (insertPref == EFFECT_FLAG_INSERT_LAST) {
-            if (idx_insert_last != -1) {
-                idx_insert = idx_insert_last;
-            } else {
-                idx_insert = size;
-            }
-        } else {
-            if (idx_insert_first != -1) {
-                idx_insert = idx_insert_first + 1;
-            }
-        }
-
-        // always read samples from chain input buffer
-        effect->setInBuffer(mInBuffer);
-
-        // if last effect in the chain, output samples to chain
-        // output buffer, otherwise to chain input buffer
-        if (idx_insert == size) {
-            if (idx_insert != 0) {
-                mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
-                mEffects[idx_insert-1]->configure();
-            }
-            effect->setOutBuffer(mOutBuffer);
-        } else {
-            effect->setOutBuffer(mInBuffer);
-        }
+        size_t previousSize = mEffects.size();
         mEffects.insertAt(effect, idx_insert);
 
-        ALOGV("addEffect_l() effect %p, added in chain %p at rank %zu", effect.get(), this,
-                idx_insert);
+        effect->configure();
+
+        // - By default:
+        //   All effects read samples from the chain input buffer.
+        //   The last effect in the chain writes samples to the chain output buffer;
+        //   all other effects write to the chain input buffer.
+        // - In the OUTPUT_STAGE chain of a spatializer mixer thread:
+        //   The spatializer effect (first effect) reads samples from the input buffer
+        //   and writes samples to the output buffer.
+        //   All other effects read from and write samples to the output buffer.
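+        //   For example (illustrative): on a spatializer thread with two effects, the
+        //   spatializer at index 0 reads mInBuffer and writes mOutBuffer while a post
+        //   processing effect at index 1 reads and writes mOutBuffer in place; on a
+        //   regular mixer thread the first effects process mInBuffer in place and only
+        //   the last effect writes to mOutBuffer.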
+        if (mEffectCallback->isSpatializer()
+                && mSessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+            effect->setOutBuffer(mOutBuffer);
+            if (idx_insert == 0) {
+                if (previousSize != 0) {
+                    mEffects[1]->configure();
+                    mEffects[1]->setInBuffer(mOutBuffer);
+                    mEffects[1]->updateAccessMode();      // reconfig if needed.
+                }
+                effect->setInBuffer(mInBuffer);
+            } else {
+                effect->setInBuffer(mOutBuffer);
+            }
+        } else {
+            effect->setInBuffer(mInBuffer);
+            if (idx_insert == previousSize) {
+                if (idx_insert != 0) {
+                    mEffects[idx_insert-1]->configure();
+                    mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
+                    mEffects[idx_insert - 1]->updateAccessMode();      // reconfig if needed.
+                }
+                effect->setOutBuffer(mOutBuffer);
+            } else {
+                effect->setOutBuffer(mInBuffer);
+            }
+        }
+        ALOGV("%s effect %p, added in chain %p at rank %zu",
+                __func__, effect.get(), this, idx_insert);
     }
     effect->configure();
 
     return NO_ERROR;
 }
 
+ssize_t AudioFlinger::EffectChain::getInsertIndex(const effect_descriptor_t& desc) {
+    // Insert effects are inserted at the end of mEffects vector as they are processed
+    //  after track and auxiliary effects.
+    // Insert effect order as a function of indicated preference:
+    //  if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if
+    //  another effect is present
+    //  else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the
+    //  last effect claiming first position
+    //  else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the
+    //  first effect claiming last position
+    //  else if EFFECT_FLAG_INSERT_ANY insert after first or before last
+    // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
+    // already present
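+    // For example (illustrative): with a chain [A(INSERT_FIRST), B(INSERT_ANY), C(INSERT_LAST)],
+    // a new INSERT_ANY effect lands right after A (index 1), a new INSERT_LAST effect lands
+    // just before C, and a new INSERT_EXCLUSIVE effect is rejected because other insert
+    // effects are already present.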
+    // Spatializer or Downmixer effects are inserted in first position because
+    // they adapt the channel count for all other effects in the chain
+    if ((memcmp(&desc.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0)
+            || (memcmp(&desc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0)) {
+        return 0;
+    }
+
+    size_t size = mEffects.size();
+    uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
+    ssize_t idx_insert;
+    ssize_t idx_insert_first = -1;
+    ssize_t idx_insert_last = -1;
+
+    idx_insert = size;
+    for (size_t i = 0; i < size; i++) {
+        effect_descriptor_t d = mEffects[i]->desc();
+        uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
+        uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
+        if (iMode == EFFECT_FLAG_TYPE_INSERT) {
+            // check invalid effect chaining combinations
+            if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
+                iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
+                ALOGW("%s could not insert effect %s: exclusive conflict with %s",
+                        __func__, desc.name, d.name);
+                return -1;
+            }
+            // remember position of first insert effect and by default
+            // select this as insert position for new effect
+            if (idx_insert == size) {
+                idx_insert = i;
+            }
+            // remember position of last insert effect claiming
+            // first position
+            if (iPref == EFFECT_FLAG_INSERT_FIRST) {
+                idx_insert_first = i;
+            }
+            // remember position of first insert effect claiming
+            // last position
+            if (iPref == EFFECT_FLAG_INSERT_LAST &&
+                idx_insert_last == -1) {
+                idx_insert_last = i;
+            }
+        }
+    }
+
+    // modify idx_insert from first position if needed
+    if (insertPref == EFFECT_FLAG_INSERT_LAST) {
+        if (idx_insert_last != -1) {
+            idx_insert = idx_insert_last;
+        } else {
+            idx_insert = size;
+        }
+    } else {
+        if (idx_insert_first != -1) {
+            idx_insert = idx_insert_first + 1;
+        }
+    }
+    return idx_insert;
+}
+
 // removeEffect_l() must be called with ThreadBase::mLock held
 size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect,
                                                  bool release)
@@ -2350,14 +2411,23 @@
 
             if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
                 if (i == size - 1 && i != 0) {
-                    mEffects[i - 1]->setOutBuffer(mOutBuffer);
                     mEffects[i - 1]->configure();
+                    mEffects[i - 1]->setOutBuffer(mOutBuffer);
+                    mEffects[i - 1]->updateAccessMode();      // reconfig if needed.
                 }
             }
             mEffects.removeAt(i);
+
+            // make sure the input buffer configuration for the new first effect in the chain
+            // is updated if needed (can switch from HAL channel mask to mixer channel mask)
+            if (i == 0 && size > 1) {
+                mEffects[0]->configure();
+                mEffects[0]->setInBuffer(mInBuffer);
+                mEffects[0]->updateAccessMode();      // reconfig if needed.
+            }
+
             ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
                     this, i);
-
             break;
         }
     }
@@ -2901,27 +2971,26 @@
 }
 
 bool AudioFlinger::EffectChain::EffectCallback::isOffload() const {
-    sp<ThreadBase> t = thread().promote();
-    if (t == nullptr) {
-        return false;
-    }
-    return t->type() == ThreadBase::OFFLOAD;
+    return mThreadType == ThreadBase::OFFLOAD;
 }
 
 bool AudioFlinger::EffectChain::EffectCallback::isOffloadOrDirect() const {
-    sp<ThreadBase> t = thread().promote();
-    if (t == nullptr) {
-        return false;
-    }
-    return t->type() == ThreadBase::OFFLOAD || t->type() == ThreadBase::DIRECT;
+    return mThreadType == ThreadBase::OFFLOAD || mThreadType == ThreadBase::DIRECT;
 }
 
 bool AudioFlinger::EffectChain::EffectCallback::isOffloadOrMmap() const {
-    sp<ThreadBase> t = thread().promote();
-    if (t == nullptr) {
+    switch (mThreadType) {
+    case ThreadBase::OFFLOAD:
+    case ThreadBase::MMAP_PLAYBACK:
+    case ThreadBase::MMAP_CAPTURE:
+        return true;
+    default:
         return false;
     }
-    return t->isOffloadOrMmap();
+}
+
+bool AudioFlinger::EffectChain::EffectCallback::isSpatializer() const {
+    return mThreadType == ThreadBase::SPATIALIZER;
 }
 
 uint32_t AudioFlinger::EffectChain::EffectCallback::sampleRate() const {
@@ -2932,20 +3001,68 @@
     return t->sampleRate();
 }
 
-audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::channelMask() const {
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::inChannelMask(int id) const {
     sp<ThreadBase> t = thread().promote();
     if (t == nullptr) {
         return AUDIO_CHANNEL_NONE;
     }
-    return t->channelMask();
+    sp<EffectChain> c = chain().promote();
+    if (c == nullptr) {
+        return AUDIO_CHANNEL_NONE;
+    }
+
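+    // On a SPATIALIZER thread the effect input may use the wider mixer channel mask
+    // (e.g. 5.1) while the HAL runs a narrower mask (e.g. stereo): the first effect of
+    // the OUTPUT_STAGE chain and effects on spatialized sessions see the mixer mask,
+    // everything else sees the HAL channel mask (the 5.1/stereo values are only examples).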
+    if (mThreadType == ThreadBase::SPATIALIZER) {
+        if (c->sessionId() == AUDIO_SESSION_OUTPUT_STAGE) {
+            if (c->isFirstEffect(id)) {
+                return t->mixerChannelMask();
+            } else {
+                return t->channelMask();
+            }
+        } else if (!audio_is_global_session(c->sessionId())) {
+            if ((t->hasAudioSession_l(c->sessionId()) & ThreadBase::SPATIALIZED_SESSION) != 0) {
+                return t->mixerChannelMask();
+            } else {
+                return t->channelMask();
+            }
+        } else {
+            return t->channelMask();
+        }
+    } else {
+        return t->channelMask();
+    }
 }
 
-uint32_t AudioFlinger::EffectChain::EffectCallback::channelCount() const {
+uint32_t AudioFlinger::EffectChain::EffectCallback::inChannelCount(int id) const {
+    return audio_channel_count_from_out_mask(inChannelMask(id));
+}
+
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::outChannelMask() const {
     sp<ThreadBase> t = thread().promote();
     if (t == nullptr) {
-        return 0;
+        return AUDIO_CHANNEL_NONE;
     }
-    return t->channelCount();
+    sp<EffectChain> c = chain().promote();
+    if (c == nullptr) {
+        return AUDIO_CHANNEL_NONE;
+    }
+
+    if (mThreadType == ThreadBase::SPATIALIZER) {
+        if (!audio_is_global_session(c->sessionId())) {
+            if ((t->hasAudioSession_l(c->sessionId()) & ThreadBase::SPATIALIZED_SESSION) != 0) {
+                return t->mixerChannelMask();
+            } else {
+                return t->channelMask();
+            }
+        } else {
+            return t->channelMask();
+        }
+    } else {
+        return t->channelMask();
+    }
+}
+
+uint32_t AudioFlinger::EffectChain::EffectCallback::outChannelCount() const {
+    return audio_channel_count_from_out_mask(outChannelMask());
 }
 
 audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::hapticChannelMask() const {
@@ -3143,7 +3260,10 @@
         } else {
             mHalEffect->setDevices({mDevice});
         }
-        *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/);
+        mHalEffect->configure();
+
+        *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
+                                   mNotifyFramesProcessed);
         status = (*handle)->initCheck();
         if (status == OK) {
             status = mHalEffect->addHandle((*handle).get());
@@ -3169,7 +3289,8 @@
         int enabled;
         *handle = thread->createEffect_l(nullptr, nullptr, 0, AUDIO_SESSION_DEVICE,
                                          const_cast<effect_descriptor_t *>(&mDescriptor),
-                                         &enabled, &status, false, false /*probe*/);
+                                         &enabled, &status, false, false /*probe*/,
+                                         mNotifyFramesProcessed);
         ALOGV("%s thread->createEffect_l status %d", __func__, status);
     } else {
         status = BAD_VALUE;
@@ -3189,8 +3310,14 @@
 }
 
 void AudioFlinger::DeviceEffectProxy::onReleasePatch(audio_patch_handle_t patchHandle) {
-    Mutex::Autolock _l(mProxyLock);
-    mEffectHandles.erase(patchHandle);
+    sp<EffectHandle> effect;
+    {
+        Mutex::Autolock _l(mProxyLock);
+        if (mEffectHandles.find(patchHandle) != mEffectHandles.end()) {
+            effect = mEffectHandles.at(patchHandle);
+            mEffectHandles.erase(patchHandle);
+        }
+    }
 }
 
 
@@ -3198,6 +3325,7 @@
 {
     Mutex::Autolock _l(mProxyLock);
     if (effect == mHalEffect) {
+        mHalEffect->release_l();
         mHalEffect.clear();
         mDevicePort.id = AUDIO_PORT_HANDLE_NONE;
     }
@@ -3345,7 +3473,7 @@
     if (proxy == nullptr) {
         return NO_INIT;
     }
-    return proxy->addEffectToHal(effect);
+    return proxy->removeEffectFromHal(effect);
 }
 
 bool AudioFlinger::DeviceEffectProxy::ProxyCallback::isOutput() const {
@@ -3364,7 +3492,8 @@
     return proxy->sampleRate();
 }
 
-audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::channelMask() const {
+audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::inChannelMask(
+        int id __unused) const {
     sp<DeviceEffectProxy> proxy = mProxy.promote();
     if (proxy == nullptr) {
         return AUDIO_CHANNEL_OUT_STEREO;
@@ -3372,7 +3501,7 @@
     return proxy->channelMask();
 }
 
-uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::channelCount() const {
+uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::inChannelCount(int id __unused) const {
     sp<DeviceEffectProxy> proxy = mProxy.promote();
     if (proxy == nullptr) {
         return 2;
@@ -3380,4 +3509,38 @@
     return proxy->channelCount();
 }
 
+audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::outChannelMask() const {
+    sp<DeviceEffectProxy> proxy = mProxy.promote();
+    if (proxy == nullptr) {
+        return AUDIO_CHANNEL_OUT_STEREO;
+    }
+    return proxy->channelMask();
+}
+
+uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::outChannelCount() const {
+    sp<DeviceEffectProxy> proxy = mProxy.promote();
+    if (proxy == nullptr) {
+        return 2;
+    }
+    return proxy->channelCount();
+}
+
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectEnable(
+        const sp<EffectBase>& effectBase) {
+    sp<EffectModule> effect = effectBase->asEffectModule();
+    if (effect == nullptr) {
+        return;
+    }
+    effect->start();
+}
+
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectDisable(
+        const sp<EffectBase>& effectBase) {
+    sp<EffectModule> effect = effectBase->asEffectModule();
+    if (effect == nullptr) {
+        return;
+    }
+    effect->stop();
+}
+
 } // namespace android
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index a727e04..e2bea67 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -33,9 +33,12 @@
     virtual bool isOffload() const = 0;
     virtual bool isOffloadOrDirect() const = 0;
     virtual bool isOffloadOrMmap() const = 0;
+    virtual bool isSpatializer() const = 0;
     virtual uint32_t sampleRate() const = 0;
-    virtual audio_channel_mask_t channelMask() const = 0;
-    virtual uint32_t channelCount() const = 0;
+    virtual audio_channel_mask_t inChannelMask(int id) const = 0;
+    virtual uint32_t inChannelCount(int id) const = 0;
+    virtual audio_channel_mask_t outChannelMask() const = 0;
+    virtual uint32_t outChannelCount() const = 0;
     virtual audio_channel_mask_t hapticChannelMask() const = 0;
     virtual size_t frameCount() const = 0;
 
@@ -64,6 +67,8 @@
     virtual void resetVolume() = 0;
 
     virtual wp<EffectChain> chain() const = 0;
+
+    virtual bool isAudioPolicyReady() const = 0;
 };
 
 // EffectBase(EffectModule) and EffectChain classes both have their own mutex to protect
@@ -164,6 +169,16 @@
 
     void             dump(int fd, const Vector<String16>& args);
 
+protected:
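+    // An effect is "internal" when none of its handles is owned by a binder client,
+    // i.e. every handle was created by AudioFlinger itself with a null client, as for
+    // device effect proxy handles.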
+    bool             isInternal_l() const {
+                         for (auto handle : mHandles) {
+                            if (handle->client() != nullptr) {
+                                return false;
+                            }
+                         }
+                         return true;
+                     }
+
 private:
     friend class AudioFlinger;      // for mHandles
     bool             mPinned = false;
@@ -240,6 +255,13 @@
         return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
     }
 
+    // Updates the access mode if it is out of date.  May issue a new effect configure.
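+    // For example (illustrative): when a new effect is appended to the chain, the
+    // previously last effect switches its output from mOutBuffer back to mInBuffer and
+    // becomes in-place, so its access mode must change from ACCUMULATE to WRITE.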
+    void        updateAccessMode() {
+                    if (requiredEffectBufferAccessMode() != mConfig.outputCfg.accessMode) {
+                        configure();
+                    }
+                }
+
     status_t         setDevices(const AudioDeviceTypeAddrVector &devices);
     status_t         setInputDevice(const AudioDeviceTypeAddr &device);
     status_t         setVolume(uint32_t *left, uint32_t *right, bool controller);
@@ -259,7 +281,7 @@
     bool             isHapticGenerator() const;
 
     status_t         setHapticIntensity(int id, int intensity);
-    status_t         setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo);
+    status_t         setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo);
 
     void             dump(int fd, const Vector<String16>& args);
 
@@ -275,6 +297,11 @@
     status_t stop_l();
     status_t removeEffectFromHal_l();
     status_t sendSetAudioDevicesCommand(const AudioDeviceTypeAddrVector &devices, uint32_t cmdCode);
+    effect_buffer_access_e requiredEffectBufferAccessMode() const {
+        return mConfig.inputCfg.buffer.raw == mConfig.outputCfg.buffer.raw
+                ? EFFECT_BUFFER_ACCESS_WRITE : EFFECT_BUFFER_ACCESS_ACCUMULATE;
+    }
+
 
     effect_config_t     mConfig;    // input and output audio configuration
     sp<EffectHalInterface> mEffectInterface; // Effect module HAL
@@ -327,7 +354,7 @@
     EffectHandle(const sp<EffectBase>& effect,
             const sp<AudioFlinger::Client>& client,
             const sp<media::IEffectClient>& effectClient,
-            int32_t priority);
+            int32_t priority, bool notifyFramesProcessed);
     virtual ~EffectHandle();
     virtual status_t initCheck();
 
@@ -342,6 +369,8 @@
     android::binder::Status disconnect() override;
     android::binder::Status getCblk(media::SharedFileRegion* _aidl_return) override;
 
+    sp<Client> client() const { return mClient; }
+
 private:
     void disconnect(bool unpinIfLast);
 
@@ -356,6 +385,8 @@
     void setEnabled(bool enabled);
     bool enabled() const { return mEnabled; }
 
+    void framesProcessed(int32_t frames) const;
+
     // Getters
     wp<EffectBase> effect() const { return mEffect; }
     int id() const {
@@ -389,6 +420,8 @@
     bool mEnabled;                           // cached enable state: needed when the effect is
                                              // restored after being suspended
     bool mDisconnected;                      // Set to true by disconnect()
+    const bool mNotifyFramesProcessed;       // true if the client callback event
+                                             // EVENT_FRAMES_PROCESSED must be generated
 };
 
 // the EffectChain class represents a group of effects associated to one audio session.
@@ -511,6 +544,8 @@
     sp<EffectCallbackInterface> effectCallback() const { return mEffectCallback; }
     wp<ThreadBase> thread() const { return mEffectCallback->thread(); }
 
+    bool isFirstEffect(int id) const { return !mEffects.isEmpty() && id == mEffects[0]->id(); }
+
     void dump(int fd, const Vector<String16>& args);
 
 private:
@@ -530,6 +565,12 @@
             : mChain(owner)
             , mThread(thread)
             , mAudioFlinger(*gAudioFlinger) {
+            sp<ThreadBase> base = thread.promote();
+            if (base != nullptr) {
+                mThreadType = base->type();
+            } else {
+                mThreadType = ThreadBase::MIXER;  // ensure a consistent value.
+            }
         }
 
         status_t createEffectHal(const effect_uuid_t *pEffectUuid,
@@ -542,10 +583,13 @@
         bool isOffload() const override;
         bool isOffloadOrDirect() const override;
         bool isOffloadOrMmap() const override;
+        bool isSpatializer() const override;
 
         uint32_t sampleRate() const override;
-        audio_channel_mask_t channelMask() const override;
-        uint32_t channelCount() const override;
+        audio_channel_mask_t inChannelMask(int id) const override;
+        uint32_t inChannelCount(int id) const override;
+        audio_channel_mask_t outChannelMask() const override;
+        uint32_t outChannelCount() const override;
         audio_channel_mask_t hapticChannelMask() const override;
         size_t frameCount() const override;
         uint32_t latency() const override;
@@ -566,16 +610,22 @@
 
         wp<EffectChain> chain() const override { return mChain; }
 
+        bool isAudioPolicyReady() const override {
+            return mAudioFlinger.isAudioPolicyReady();
+        }
+
         wp<ThreadBase> thread() const { return mThread.load(); }
 
-        void setThread(const wp<ThreadBase>& thread) {
+        void setThread(const sp<ThreadBase>& thread) {
             mThread = thread;
+            mThreadType = thread->type();
         }
 
     private:
         const wp<EffectChain> mChain;
         mediautils::atomic_wp<ThreadBase> mThread;
         AudioFlinger &mAudioFlinger;  // implementation detail: outer instance always exists.
+        ThreadBase::type_t mThreadType;
     };
 
     friend class AudioFlinger;  // for mThread, mEffects
@@ -612,6 +662,8 @@
 
     void setVolumeForOutput_l(uint32_t left, uint32_t right);
 
+    ssize_t getInsertIndex(const effect_descriptor_t& desc);
+
     mutable  Mutex mLock;        // mutex protecting effect list
              Vector< sp<EffectModule> > mEffects; // list of effect modules
              audio_session_t mSessionId; // audio session ID
@@ -643,11 +695,11 @@
 public:
         DeviceEffectProxy (const AudioDeviceTypeAddr& device,
                 const sp<DeviceEffectManagerCallback>& callback,
-                effect_descriptor_t *desc, int id)
+                effect_descriptor_t *desc, int id, bool notifyFramesProcessed)
             : EffectBase(callback, desc, id, AUDIO_SESSION_DEVICE, false),
                 mDevice(device), mManagerCallback(callback),
-                mMyCallback(new ProxyCallback(wp<DeviceEffectProxy>(this),
-                                              callback)) {}
+                mMyCallback(new ProxyCallback(wp<DeviceEffectProxy>(this), callback)),
+                mNotifyFramesProcessed(notifyFramesProcessed) {}
 
     status_t setEnabled(bool enabled, bool fromHandle) override;
     sp<DeviceEffectProxy> asDeviceEffectProxy() override { return this; }
@@ -692,10 +744,13 @@
         bool isOffload() const override { return false; }
         bool isOffloadOrDirect() const override { return false; }
         bool isOffloadOrMmap() const override { return false; }
+        bool isSpatializer() const override { return false; }
 
         uint32_t sampleRate() const override;
-        audio_channel_mask_t channelMask() const override;
-        uint32_t channelCount() const override;
+        audio_channel_mask_t inChannelMask(int id) const override;
+        uint32_t inChannelCount(int id) const override;
+        audio_channel_mask_t outChannelMask() const override;
+        uint32_t outChannelCount() const override;
         audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
         size_t frameCount() const override  { return 0; }
         uint32_t latency() const override  { return 0; }
@@ -711,11 +766,15 @@
         void resetVolume() override {}
         product_strategy_t strategy() const override  { return static_cast<product_strategy_t>(0); }
         int32_t activeTrackCnt() const override { return 0; }
-        void onEffectEnable(const sp<EffectBase>& effect __unused) override {}
-        void onEffectDisable(const sp<EffectBase>& effect __unused) override {}
+        void onEffectEnable(const sp<EffectBase>& effect __unused) override;
+        void onEffectDisable(const sp<EffectBase>& effect __unused) override;
 
         wp<EffectChain> chain() const override { return nullptr; }
 
+        bool isAudioPolicyReady() const override {
+            return mManagerCallback->isAudioPolicyReady();
+        }
+
         int newEffectId();
 
     private:
@@ -734,4 +793,5 @@
     std::map<audio_patch_handle_t, sp<EffectHandle>> mEffectHandles; // protected by mProxyLock
     sp<EffectModule> mHalEffect; // protected by mProxyLock
     struct audio_port_config mDevicePort = { .id = AUDIO_PORT_HANDLE_NONE };
+    const bool mNotifyFramesProcessed;
 };
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 88d4eaf..26bd92d 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -89,6 +89,7 @@
     // TODO: Add channel mask to NBAIO_Format.
     // We assume that the channel mask must be a valid positional channel mask.
     mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
+    mBalance.setChannelMask(mSinkChannelMask);
 
     unsigned i;
     for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
@@ -204,6 +205,8 @@
                 (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
         mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
                 (void *)(uintptr_t)fastTrack->mHapticIntensity);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_MAX_AMPLITUDE,
+                (void *)(&(fastTrack->mHapticMaxAmplitude)));
 
         mMixer->enable(index);
         break;
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 857d3de..ce3cc14 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_AUDIO_FAST_MIXER_STATE_H
 #define ANDROID_AUDIO_FAST_MIXER_STATE_H
 
+#include <math.h>
+
 #include <audio_utils/minifloat.h>
 #include <system/audio.h>
 #include <media/AudioMixer.h>
@@ -51,6 +53,7 @@
     int                     mGeneration;     // increment when any field is assigned
     bool                    mHapticPlaybackEnabled = false; // haptic playback is enabled or not
     os::HapticScale         mHapticIntensity = os::HapticScale::MUTE; // intensity of haptic data
+    float                   mHapticMaxAmplitude = NAN; // max amplitude allowed for haptic data
 };
 
 // Represents a single state of the fast mixer
diff --git a/services/audioflinger/OWNERS b/services/audioflinger/OWNERS
index 034d161..17d4c37 100644
--- a/services/audioflinger/OWNERS
+++ b/services/audioflinger/OWNERS
@@ -1,4 +1,4 @@
-gkasten@google.com
 hunga@google.com
 jmtrivi@google.com
 mnaganov@google.com
+philburk@google.com
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index a381c7d..45dd258 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -258,6 +258,7 @@
                             reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
                 } else {
                     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+                    audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
                     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
                     if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
@@ -276,6 +277,7 @@
                                                             patch->sinks[0].ext.device.hw_module,
                                                             &output,
                                                             &config,
+                                                            &mixerConfig,
                                                             outputDevice,
                                                             outputDeviceAddress,
                                                             flags);
@@ -573,6 +575,12 @@
 
     // create a special playback track to render to playback thread.
     // this track is given the same buffer as the PatchRecord buffer
+
+    // Default behaviour is to start as soon as possible to have the lowest possible latency even if
+    // it might glitch.
+    // Disable this behavior for FM Tuner source if no fast capture/mixer available.
+    const bool isFmBridge = mAudioPatch.sources[0].ext.device.type == AUDIO_DEVICE_IN_FM_TUNER;
+    const size_t frameCountToBeReady = isFmBridge && !usePassthruPatchRecord ? frameCount / 4 : 1;
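+    // Illustrative: with a hypothetical 960-frame patch buffer, the FM tuner bridge in the
+    // non-passthrough case only reports the track ready once 240 frames are buffered,
+    // trading a little startup latency for glitch-free rendering; other bridges keep 1.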
     sp<PlaybackThread::PatchTrack> tempPatchTrack = new PlaybackThread::PatchTrack(
                                            mPlayback.thread().get(),
                                            streamType,
@@ -582,7 +590,9 @@
                                            frameCount,
                                            tempRecordTrack->buffer(),
                                            tempRecordTrack->bufferSize(),
-                                           outputFlags);
+                                           outputFlags,
+                                           {} /*timeout*/,
+                                           frameCountToBeReady);
     status = mPlayback.checkTrack(tempPatchTrack.get());
     if (status != NO_ERROR) {
         return status;
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 0929055..aecd4d3 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -19,6 +19,8 @@
     #error This header file should only be included from AudioFlinger.h
 #endif
 
+#include <math.h>
+
 // Checks and monitors OP_PLAY_AUDIO
 class OpPlayAudioMonitor : public RefBase {
 public:
@@ -161,6 +163,8 @@
             }
             /** Return at what intensity to play haptics, used in mixer. */
             os::HapticScale getHapticIntensity() const { return mHapticIntensity; }
+            /** Return the maximum amplitude allowed for haptics data, used in mixer. */
+            float getHapticMaxAmplitude() const { return mHapticMaxAmplitude; }
             /** Set intensity of haptic playback, should be set after querying vibrator service. */
             void    setHapticIntensity(os::HapticScale hapticIntensity) {
                 if (os::isValidHapticScale(hapticIntensity)) {
@@ -168,6 +172,12 @@
                     setHapticPlaybackEnabled(mHapticIntensity != os::HapticScale::MUTE);
                 }
             }
+            /** Set maximum amplitude allowed for haptic data, should be set after querying
+             *  vibrator service.
+             */
+            void    setHapticMaxAmplitude(float maxAmplitude) {
+                mHapticMaxAmplitude = maxAmplitude;
+            }
             sp<os::ExternalVibration> getExternalVibration() const { return mExternalVibration; }
 
             void    setTeePatches(TeePatches teePatches);
@@ -183,8 +193,15 @@
        }
     }
 
+    static bool checkServerLatencySupported(
+            audio_format_t format, audio_output_flags_t flags) {
+        return audio_is_linear_pcm(format)
+                && (flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) == 0;
+    }
+
     audio_output_flags_t getOutputFlags() const { return mFlags; }
     float getSpeed() const { return mSpeed; }
+
 protected:
     // for numerous
     friend class PlaybackThread;
@@ -282,6 +299,8 @@
     bool                mHapticPlaybackEnabled = false; // indicates haptic playback enabled or not
     // intensity to play haptic data
     os::HapticScale mHapticIntensity = os::HapticScale::MUTE;
+    // max amplitude allowed for haptic data
+    float mHapticMaxAmplitude = NAN;
     class AudioVibrationController : public os::BnExternalVibrationController {
     public:
         explicit AudioVibrationController(Track* track) : mTrack(track) {}
diff --git a/services/audioflinger/PropertyUtils.cpp b/services/audioflinger/PropertyUtils.cpp
new file mode 100644
index 0000000..65e2533
--- /dev/null
+++ b/services/audioflinger/PropertyUtils.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <cutils/properties.h>
+
+#include "PropertyUtils.h"
+
+namespace android {
+
+using media::audio::common::AudioMMapPolicy;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioMMapPolicyInfo;
+
+std::string getMmapPolicyProperty(AudioMMapPolicyType policyType) {
+    switch (policyType) {
+        case AudioMMapPolicyType::DEFAULT:
+            return "aaudio.mmap_policy";
+        case AudioMMapPolicyType::EXCLUSIVE:
+            return "aaudio.mmap_exclusive_policy";
+        default:
+            return "";
+    }
+}
+
+int getDefaultPolicyFromType(AudioMMapPolicyType policyType) {
+    switch (policyType) {
+        case AudioMMapPolicyType::EXCLUSIVE:
+            return AAUDIO_UNSPECIFIED;
+        case AudioMMapPolicyType::DEFAULT:
+        default:
+            return AAUDIO_POLICY_NEVER;
+    }
+}
+
+AudioMMapPolicy legacy2aidl_aaudio_policy_t_AudioMMapPolicy(aaudio_policy_t legacy) {
+    switch (legacy) {
+        case AAUDIO_POLICY_NEVER:
+            return AudioMMapPolicy::NEVER;
+        case AAUDIO_POLICY_AUTO:
+            return AudioMMapPolicy::AUTO;
+        case AAUDIO_POLICY_ALWAYS:
+            return AudioMMapPolicy::ALWAYS;
+        case AAUDIO_UNSPECIFIED:
+            return AudioMMapPolicy::UNSPECIFIED;
+        default:
+            ALOGE("%s unknown aaudio policy: %d", __func__, legacy);
+            return AudioMMapPolicy::UNSPECIFIED;
+    }
+}
+
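+// Illustrative use (hypothetical caller):
+//     std::vector<AudioMMapPolicyInfo> infos;
+//     if (getMmapPolicyInfosFromSystemProperty(AudioMMapPolicyType::DEFAULT, &infos) == NO_ERROR) {
+//         // infos[0].mmapPolicy mirrors the "aaudio.mmap_policy" system property and
+//         // defaults to NEVER when the property is unset.
+//     }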
+status_t getMmapPolicyInfosFromSystemProperty(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    AudioMMapPolicyInfo policyInfo;
+    const std::string propertyStr = getMmapPolicyProperty(policyType);
+    if (propertyStr.empty()) {
+        return BAD_VALUE;
+    }
+    policyInfo.mmapPolicy = legacy2aidl_aaudio_policy_t_AudioMMapPolicy(
+            property_get_int32(propertyStr.c_str(), getDefaultPolicyFromType(policyType)));
+    policyInfos->push_back(policyInfo);
+    return NO_ERROR;
+}
+
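+// For example (illustrative): "aaudio.mixer_bursts"=3 triple-buffers the MMAP mixer,
+// while out-of-range values such as 0 or 2048 fall back to the default of 2.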
+int32_t getAAudioMixerBurstCountFromSystemProperty() {
+    static const int32_t sDefaultBursts = 2; // arbitrary, use 2 for double buffered
+    static const int32_t sMaxBursts = 1024; // arbitrary
+    static const char* sPropMixerBursts = "aaudio.mixer_bursts";
+    int32_t prop = property_get_int32(sPropMixerBursts, sDefaultBursts);
+    if (prop <= 0 || prop > sMaxBursts) {
+        ALOGE("%s: invalid value %d, use default %d", __func__, prop, sDefaultBursts);
+        prop = sDefaultBursts;
+    }
+    return prop;
+}
+
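+// For example (illustrative): "aaudio.hw_burst_min_usec"=2000 enforces a 2 ms minimum
+// hardware burst; values outside (0, 1000000] fall back to the 1000 us default.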
+int32_t getAAudioHardwareBurstMinUsecFromSystemProperty() {
+    static const int32_t sDefaultMicros = 1000; // arbitrary
+    static const int32_t sMaxMicros = 1000 * 1000; // arbitrary
+    static const char* sPropHwBurstMinUsec = "aaudio.hw_burst_min_usec";
+    int32_t prop = property_get_int32(sPropHwBurstMinUsec, sDefaultMicros);
+    if (prop <= 0 || prop > sMaxMicros) {
+        ALOGE("%s invalid value %d, use default %d", __func__, prop, sDefaultMicros);
+        prop = sDefaultMicros;
+    }
+    return prop;
+}
+
+} // namespace android
diff --git a/services/audioflinger/PropertyUtils.h b/services/audioflinger/PropertyUtils.h
new file mode 100644
index 0000000..fbf651a
--- /dev/null
+++ b/services/audioflinger/PropertyUtils.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+
+namespace android {
+
+status_t getMmapPolicyInfosFromSystemProperty(
+        media::audio::common::AudioMMapPolicyType policyType,
+        std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+int32_t getAAudioMixerBurstCountFromSystemProperty();
+
+int32_t getAAudioHardwareBurstMinUsecFromSystemProperty();
+
+} // namespace android
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b9cdab8..dd278f0 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -50,8 +50,10 @@
 #include <audio_utils/format.h>
 #include <audio_utils/minifloat.h>
 #include <audio_utils/safe_math.h>
-#include <system/audio_effects/effect_ns.h>
 #include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_downmix.h>
+#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_spatializer.h>
 #include <system/audio.h>
 
 // NBAIO implementations
@@ -507,6 +509,8 @@
         return "MMAP_PLAYBACK";
     case MMAP_CAPTURE:
         return "MMAP_CAPTURE";
+    case SPATIALIZER:
+        return "SPATIALIZER";
     default:
         return "unknown";
     }
@@ -622,7 +626,7 @@
     return status;
 }
 
-void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event event, pid_t pid,
+void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event_t event, pid_t pid,
                                                  audio_port_handle_t portId)
 {
     Mutex::Autolock _l(mLock);
@@ -630,7 +634,7 @@
 }
 
 // sendIoConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid,
+void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid,
                                                    audio_port_handle_t portId)
 {
     // The audio statistics history is exponentially weighted to forget events
@@ -640,6 +644,7 @@
     mIoJitterMs.reset();
     mLatencyMs.reset();
     mProcessTimeMs.reset();
+    mMonopipePipeDepthStats.reset();
     mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);
 
     sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid, portId);
@@ -722,6 +727,19 @@
     sendConfigEvent_l(configEvent);
 }
 
+void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent()
+{
+    Mutex::Autolock _l(mLock);
+    sendCheckOutputStageEffectsEvent_l();
+}
+
+void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent_l()
+{
+    sp<ConfigEvent> configEvent =
+            (ConfigEvent *)new CheckOutputStageEffectsEvent();
+    sendConfigEvent_l(configEvent);
+}
+
 // post condition: mConfigEvents.isEmpty()
 void AudioFlinger::ThreadBase::processConfigEvents_l()
 {
@@ -784,6 +802,11 @@
                     (ResizeBufferConfigEventData *)event->mData.get();
             resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
         } break;
+
+        case CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS: {
+            setCheckOutputStageEffects();
+        } break;
+
         default:
             ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
             break;
@@ -966,6 +989,12 @@
                 isOutput() ? "write" : "read",
                 mLatencyMs.toString().c_str());
     }
+
+    if (mMonopipePipeDepthStats.getN() > 0) {
+        dprintf(fd, "  Monopipe %s pipe depth stats: %s\n",
+            isOutput() ? "write" : "read",
+            mMonopipePipeDepthStats.toString().c_str());
+    }
 }
 
 void AudioFlinger::ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args)
@@ -1008,6 +1037,8 @@
         return String16("MmapPlayback");
     case MMAP_CAPTURE:
         return String16("MmapCapture");
+    case SPATIALIZER:
+        return String16("AudioSpatial");
     default:
         ALOG_ASSERT(false);
         return String16("AudioUnknown");
@@ -1296,8 +1327,8 @@
 {
     // no preprocessing on playback threads
     if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
-        ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback"
-                " thread %s", desc->name, mThreadName);
+        ALOGW("%s: pre processing effect %s created on playback"
+                " thread %s", __func__, desc->name, mThreadName);
         return BAD_VALUE;
     }
 
@@ -1312,14 +1343,21 @@
         return BAD_VALUE;
     }
 
+    if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+            && mType != SPATIALIZER) {
+        ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
+                __func__, mType);
+        return BAD_VALUE;
+    }
+
     switch (mType) {
     case MIXER: {
 #ifndef MULTICHANNEL_EFFECT_CHAIN
         // Reject any effect on mixer multichannel sinks.
         // TODO: fix both format and multichannel issues with effects.
         if (mChannelCount != FCC_2) {
-            ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d) on MIXER"
-                    " thread %s", desc->name, mChannelCount, mThreadName);
+            ALOGW("%s: effect %s for multichannel(%d) on MIXER thread %s",
+                    __func__, desc->name, mChannelCount, mThreadName);
             return BAD_VALUE;
         }
 #endif
@@ -1333,15 +1371,15 @@
             } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
                 // only post processing on output stage session
                 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
-                    ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
-                            " on output stage session", desc->name);
+                    ALOGW("%s: non post processing effect %s not allowed on output stage session",
+                            __func__, desc->name);
                     return BAD_VALUE;
                 }
             } else if (sessionId == AUDIO_SESSION_DEVICE) {
                 // only post processing on output stage session
                 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
-                    ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
-                            " on device session", desc->name);
+                    ALOGW("%s: non post processing effect %s not allowed on device session",
+                            __func__, desc->name);
                     return BAD_VALUE;
                 }
             } else {
@@ -1352,13 +1390,12 @@
             }
 
             if (flags & AUDIO_OUTPUT_FLAG_RAW) {
-                ALOGW("checkEffectCompatibility_l(): effect %s on playback thread in raw mode",
-                      desc->name);
+                ALOGW("%s: effect %s on playback thread in raw mode", __func__, desc->name);
                 return BAD_VALUE;
             }
             if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
-                ALOGW("checkEffectCompatibility_l(): non HW effect %s on playback thread"
-                        " in fast mode", desc->name);
+                ALOGW("%s: non HW effect %s on playback thread in fast mode",
+                        __func__, desc->name);
                 return BAD_VALUE;
             }
         }
@@ -1372,35 +1409,64 @@
     case DIRECT:
         // Reject any effect on Direct output threads for now, since the format of
         // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
-        ALOGW("checkEffectCompatibility_l(): effect %s on DIRECT output thread %s",
-                desc->name, mThreadName);
+        ALOGW("%s: effect %s on DIRECT output thread %s",
+                __func__, desc->name, mThreadName);
         return BAD_VALUE;
     case DUPLICATING:
 #ifndef MULTICHANNEL_EFFECT_CHAIN
         // Reject any effect on mixer multichannel sinks.
         // TODO: fix both format and multichannel issues with effects.
         if (mChannelCount != FCC_2) {
-            ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d)"
-                    " on DUPLICATING thread %s", desc->name, mChannelCount, mThreadName);
+            ALOGW("%s: effect %s for multichannel(%d) on DUPLICATING thread %s",
+                    __func__, desc->name, mChannelCount, mThreadName);
             return BAD_VALUE;
         }
 #endif
         if (audio_is_global_session(sessionId)) {
-            ALOGW("checkEffectCompatibility_l(): global effect %s on DUPLICATING"
-                    " thread %s", desc->name, mThreadName);
+            ALOGW("%s: global effect %s on DUPLICATING thread %s",
+                    __func__, desc->name, mThreadName);
             return BAD_VALUE;
         }
         if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
-            ALOGW("checkEffectCompatibility_l(): post processing effect %s on"
-                    " DUPLICATING thread %s", desc->name, mThreadName);
+            ALOGW("%s: post processing effect %s on DUPLICATING thread %s",
+                __func__, desc->name, mThreadName);
             return BAD_VALUE;
         }
         if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
-            ALOGW("checkEffectCompatibility_l(): HW tunneled effect %s on"
-                    " DUPLICATING thread %s", desc->name, mThreadName);
+            ALOGW("%s: HW tunneled effect %s on DUPLICATING thread %s",
+                    __func__, desc->name, mThreadName);
             return BAD_VALUE;
         }
         break;
+    case SPATIALIZER:
+        // Global effects (AUDIO_SESSION_OUTPUT_MIX) are not supported on spatializer mixer
+        // as there is no common accumulation buffer for spatialized and non-spatialized tracks.
+        // Post processing effects (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE)
+        // are supported and added after the spatializer.
+        if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+            ALOGW("%s: global effect %s not supported on spatializer thread %s",
+                    __func__, desc->name, mThreadName);
+            return BAD_VALUE;
+        } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+            // only post processing, downmixer or spatializer effects on output stage session
+            if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+                    || memcmp(&desc->type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                break;
+            }
+            if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
+                ALOGW("%s: non post processing effect %s not allowed on output stage session",
+                        __func__, desc->name);
+                return BAD_VALUE;
+            }
+        } else if (sessionId == AUDIO_SESSION_DEVICE) {
+            // only post processing on device session
+            if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
+                ALOGW("%s: non post processing effect %s not allowed on device session",
+                        __func__, desc->name);
+                return BAD_VALUE;
+            }
+        }
+        break;
     default:
         LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
     }
@@ -1418,7 +1484,8 @@
         int *enabled,
         status_t *status,
         bool pinned,
-        bool probe)
+        bool probe,
+        bool notifyFramesProcessed)
 {
     sp<EffectModule> effect;
     sp<EffectHandle> handle;
@@ -1477,18 +1544,19 @@
         if (effect->isHapticGenerator()) {
             // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
             // for the HapticGenerator.
-            const media::AudioVibratorInfo* defaultVibratorInfo =
-                    mAudioFlinger->getDefaultVibratorInfo_l();
-            if (defaultVibratorInfo != nullptr) {
+            const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
+                    std::move(mAudioFlinger->getDefaultVibratorInfo_l());
+            if (defaultVibratorInfo) {
                 // Only set the vibrator info when it is a valid one.
-                effect->setVibratorInfo(defaultVibratorInfo);
+                effect->setVibratorInfo(*defaultVibratorInfo);
             }
         }
         // create effect handle and connect it to effect module
-        handle = new EffectHandle(effect, client, effectClient, priority);
+        handle = new EffectHandle(effect, client, effectClient, priority, notifyFramesProcessed);
         lStatus = handle->initCheck();
         if (lStatus == OK) {
             lStatus = effect->addHandle(handle.get());
+            sendCheckOutputStageEffectsEvent_l();
         }
         if (enabled != NULL) {
             *enabled = (int)effect->isEnabled();
@@ -1531,6 +1599,7 @@
         if (remove) {
             removeEffect_l(effect, true);
         }
+        sendCheckOutputStageEffectsEvent_l();
     }
     if (remove) {
         mAudioFlinger->updateOrphanEffectChains(effect);
@@ -1884,10 +1953,24 @@
         item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
         item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
     }
+    if (mMonopipePipeDepthStats.getN() > 0) {
+        item->setDouble(MM_PREFIX "monopipePipeDepthStats.mean",
+                        mMonopipePipeDepthStats.getMean());
+        item->setDouble(MM_PREFIX "monopipePipeDepthStats.std",
+                        mMonopipePipeDepthStats.getStdDev());
+    }
 
     item->selfrecord();
 }
 
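+// Returns PRODUCT_STRATEGY_NONE while audio policy is not yet ready so that threads do not
+// block on a query to the audio policy service during early boot.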
+product_strategy_t AudioFlinger::ThreadBase::getStrategyForStream(audio_stream_type_t stream) const
+{
+    if (!mAudioFlinger->isAudioPolicyReady()) {
+        return PRODUCT_STRATEGY_NONE;
+    }
+    return AudioSystem::getStrategyForStream(stream);
+}
+
 // ----------------------------------------------------------------------------
 //      Playback
 // ----------------------------------------------------------------------------
@@ -1896,15 +1979,16 @@
                                              AudioStreamOut* output,
                                              audio_io_handle_t id,
                                              type_t type,
-                                             bool systemReady)
+                                             bool systemReady,
+                                             audio_config_base_t *mixerConfig)
     :   ThreadBase(audioFlinger, id, type, systemReady, true /* isOut */),
         mNormalFrameCount(0), mSinkBuffer(NULL),
-        mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
+        mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == SPATIALIZER),
         mMixerBuffer(NULL),
         mMixerBufferSize(0),
         mMixerBufferFormat(AUDIO_FORMAT_INVALID),
         mMixerBufferValid(false),
-        mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
+        mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == SPATIALIZER),
         mEffectBuffer(NULL),
         mEffectBufferSize(0),
         mEffectBufferFormat(AUDIO_FORMAT_INVALID),
@@ -1956,8 +2040,18 @@
                 mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
     }
 
+    if (mixerConfig != nullptr && mixerConfig->channel_mask != AUDIO_CHANNEL_NONE) {
+        mMixerChannelMask = mixerConfig->channel_mask;
+    }
+
     readOutputParameters_l();
 
+    if (mType != SPATIALIZER
+            && mMixerChannelMask != mChannelMask) {
+        LOG_ALWAYS_FATAL("HAL channel mask %#x does not match mixer channel mask %#x",
+                mChannelMask, mMixerChannelMask);
+    }
+
     // TODO: We may also match on address as well as device type for
     // AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
     if (type == MIXER || type == DIRECT || type == OFFLOAD) {
@@ -1986,6 +2080,7 @@
     free(mSinkBuffer);
     free(mMixerBuffer);
     free(mEffectBuffer);
+    free(mPostSpatializerBuffer);
 }
 
 // Thread virtuals
@@ -2080,10 +2175,12 @@
     write(fd, result.string(), result.size());
 }
 
-void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
 {
     dprintf(fd, "  Master volume: %f\n", mMasterVolume);
     dprintf(fd, "  Master mute: %s\n", mMasterMute ? "on" : "off");
+    dprintf(fd, "  Mixer channel Mask: %#x (%s)\n",
+            mMixerChannelMask, channelMaskToString(mMixerChannelMask, true /* output */).c_str());
     if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
         dprintf(fd, "  Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
                 channelMaskToString(mHapticChannelMask, true /* output */).c_str());
@@ -2109,7 +2206,7 @@
     }
     if (output != nullptr) {
         dprintf(fd, "  Hal stream dump:\n");
-        (void)output->stream->dump(fd);
+        (void)output->stream->dump(fd, args);
     }
 }
 
@@ -2219,7 +2316,7 @@
                  "AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
                  frameCount, mFrameCount);
       } else {
-        ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
+        ALOGD("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
                 "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
                 "sampleRate=%u mSampleRate=%u "
                 "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
@@ -2397,11 +2494,11 @@
         // all tracks in same audio session must share the same routing strategy otherwise
         // conflicts will happen when tracks are moved from one output to another by audio policy
         // manager
-        product_strategy_t strategy = AudioSystem::getStrategyForStream(streamType);
+        product_strategy_t strategy = getStrategyForStream(streamType);
         for (size_t i = 0; i < mTracks.size(); ++i) {
             sp<Track> t = mTracks[i];
             if (t != 0 && t->isExternalTrack()) {
-                product_strategy_t actual = AudioSystem::getStrategyForStream(t->streamType());
+                product_strategy_t actual = getStrategyForStream(t->streamType());
                 if (sessionId == t->sessionId() && strategy != actual) {
                     ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
                             strategy, actual);
@@ -2445,7 +2542,7 @@
         if (chain != 0) {
             ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
             track->setMainBuffer(chain->inBuffer());
-            chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
+            chain->setStrategy(getStrategyForStream(track->streamType()));
             chain->incTrackCnt();
         }
 
@@ -2613,8 +2710,19 @@
             mLock.unlock();
             const int intensity = AudioFlinger::onExternalVibrationStart(
                     track->getExternalVibration());
+            std::optional<media::AudioVibratorInfo> vibratorInfo;
+            {
+                // TODO(b/184194780): Use the vibrator information from the vibrator that will be
+                // used to play this track.
+                Mutex::Autolock _l(mAudioFlinger->mLock);
+                vibratorInfo = std::move(mAudioFlinger->getDefaultVibratorInfo_l());
+            }
             mLock.lock();
             track->setHapticIntensity(static_cast<os::HapticScale>(intensity));
+            if (vibratorInfo) {
+                track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
+            }
+
             // Haptic playback should be enabled by vibrator service.
             if (track->getHapticPlaybackEnabled()) {
                 // Disable haptic playback of all active track to ensure only
@@ -2707,36 +2815,26 @@
     return mOutput->stream->selectPresentation(presentationId, programId);
 }
 
-void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
                                                    audio_port_handle_t portId) {
-    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
     ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
-
-    desc->mIoHandle = mId;
-    struct audio_patch patch = mPatch;
-    if (isMsdDevice()) {
-        patch = mDownStreamPatch;
-    }
-
+    sp<AudioIoDescriptor> desc;
+    const struct audio_patch patch = isMsdDevice() ? mDownStreamPatch : mPatch;
     switch (event) {
     case AUDIO_OUTPUT_OPENED:
     case AUDIO_OUTPUT_REGISTERED:
     case AUDIO_OUTPUT_CONFIG_CHANGED:
-        desc->mPatch = patch;
-        desc->mChannelMask = mChannelMask;
-        desc->mSamplingRate = mSampleRate;
-        desc->mFormat = mFormat;
-        desc->mFrameCount = mNormalFrameCount; // FIXME see
-                                             // AudioFlinger::frameCount(audio_io_handle_t)
-        desc->mFrameCountHAL = mFrameCount;
-        desc->mLatency = latency_l();
+        desc = sp<AudioIoDescriptor>::make(mId, patch, false /*isInput*/,
+                mSampleRate, mFormat, mChannelMask,
+                // FIXME AudioFlinger::frameCount(audio_io_handle_t) instead of mNormalFrameCount?
+                mNormalFrameCount, mFrameCount, latency_l());
         break;
     case AUDIO_CLIENT_STARTED:
-        desc->mPatch = patch;
-        desc->mPortId = portId;
+        desc = sp<AudioIoDescriptor>::make(mId, patch, portId);
         break;
     case AUDIO_OUTPUT_CLOSED:
     default:
+        desc = sp<AudioIoDescriptor>::make(mId);
         break;
     }
     mAudioFlinger->ioConfigChanged(event, desc, pid);
@@ -2814,14 +2912,20 @@
     if (!audio_is_output_channel(mChannelMask)) {
         LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
     }
-    if ((mType == MIXER || mType == DUPLICATING)
-            && !isValidPcmSinkChannelMask(mChannelMask)) {
+    if (hasMixer() && !isValidPcmSinkChannelMask(mChannelMask)) {
         LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
                 mChannelMask);
     }
+
+    if (mMixerChannelMask == AUDIO_CHANNEL_NONE) {
+        mMixerChannelMask = mChannelMask;
+    }
+
     mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
     mBalance.setChannelMask(mChannelMask);
 
+    uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mMixerChannelMask);
+
     // Get actual HAL format.
     status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
@@ -2831,8 +2935,7 @@
     if (!audio_is_valid_format(mFormat)) {
         LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
     }
-    if ((mType == MIXER || mType == DUPLICATING)
-            && !isValidPcmSinkFormat(mFormat)) {
+    if (hasMixer() && !isValidPcmSinkFormat(mFormat)) {
         LOG_FATAL("HAL format %#x not supported for mixed output",
                 mFormat);
     }
@@ -2841,7 +2944,7 @@
     LOG_ALWAYS_FATAL_IF(result != OK,
             "Error when retrieving output stream buffer size: %d", result);
     mFrameCount = mBufferSize / mFrameSize;
-    if ((mType == MIXER || mType == DUPLICATING) && (mFrameCount & 15)) {
+    if (hasMixer() && (mFrameCount & 15)) {
         ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
                 mFrameCount);
     }
@@ -2914,7 +3017,7 @@
     }
     mNormalFrameCount = multiplier * mFrameCount;
     // round up to nearest 16 frames to satisfy AudioMixer
-    if (mType == MIXER || mType == DUPLICATING) {
+    if (hasMixer()) {
         mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
     }
     ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames", mFrameCount,
@@ -2930,6 +3033,7 @@
     // Originally this was int16_t[] array, need to remove legacy implications.
     free(mSinkBuffer);
     mSinkBuffer = NULL;
+
     // For sink buffer size, we use the frame size from the downstream sink to avoid problems
     // with non PCM formats for compressed music, e.g. AAC, and Offload threads.
     const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
@@ -2941,7 +3045,7 @@
     mMixerBuffer = NULL;
     if (mMixerBufferEnabled) {
         mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // no longer valid: AUDIO_FORMAT_PCM_16_BIT.
-        mMixerBufferSize = mNormalFrameCount * mChannelCount
+        mMixerBufferSize = mNormalFrameCount * mixerChannelCount
                 * audio_bytes_per_sample(mMixerBufferFormat);
         (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
     }
@@ -2949,15 +3053,24 @@
     mEffectBuffer = NULL;
     if (mEffectBufferEnabled) {
         mEffectBufferFormat = EFFECT_BUFFER_FORMAT;
-        mEffectBufferSize = mNormalFrameCount * mChannelCount
+        mEffectBufferSize = mNormalFrameCount * mixerChannelCount
                 * audio_bytes_per_sample(mEffectBufferFormat);
         (void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
     }
 
+    if (mType == SPATIALIZER) {
+        free(mPostSpatializerBuffer);
+        mPostSpatializerBuffer = nullptr;
+        mPostSpatializerBufferSize = mNormalFrameCount * mChannelCount
+                * audio_bytes_per_sample(mEffectBufferFormat);
+        (void)posix_memalign(&mPostSpatializerBuffer, 32, mPostSpatializerBufferSize);
+    }
+
     mHapticChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
     mChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & ~mHapticChannelMask);
     mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
     mChannelCount -= mHapticChannelCount;
+    mMixerChannelMask = static_cast<audio_channel_mask_t>(mMixerChannelMask & ~mHapticChannelMask);
 
     // force reconfiguration of effect chains and engines to take new buffer size and audio
     // parameters into account
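
For reference, a minimal self-contained sketch (not AudioFlinger code) of the buffer sizing above, under assumed values: a 1024-frame normal mix, a 12-channel spatializer mixer mask, a stereo sink, and float samples. The mixer and effect buffers are sized with the wider mixer channel count, while the post-spatializer buffer only needs the sink channel count; haptic channels are ignored here for simplicity.

    #include <cstddef>
    #include <cstdio>

    int main() {
        // Assumed configuration, for illustration only.
        const size_t normalFrameCount  = 1024;          // mNormalFrameCount
        const size_t mixerChannelCount = 12;            // from mMixerChannelMask (e.g. 7.1.4)
        const size_t sinkChannelCount  = 2;             // from mChannelMask (stereo sink)
        const size_t bytesPerSample    = sizeof(float); // float mixer/effect formats assumed

        // mMixerBuffer and mEffectBuffer use the (wider) mixer channel count.
        const size_t mixerBufferSize  = normalFrameCount * mixerChannelCount * bytesPerSample;
        const size_t effectBufferSize = mixerBufferSize;

        // mPostSpatializerBuffer holds the downmixed result, so it uses the sink channel count.
        const size_t postSpatializerBufferSize =
                normalFrameCount * sinkChannelCount * bytesPerSample;

        std::printf("mixer/effect: %zu bytes each, post-spatializer: %zu bytes\n",
                    effectBufferSize, postSpatializerBufferSize);  // 49152 and 8192
        return 0;
    }
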
@@ -3051,15 +3164,15 @@
     // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
     // it is moved to correct output by audio policy manager when A2DP is connected or disconnected
     if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
-        return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+        return getStrategyForStream(AUDIO_STREAM_MUSIC);
     }
     for (size_t i = 0; i < mTracks.size(); i++) {
         sp<Track> track = mTracks[i];
         if (sessionId == track->sessionId() && !track->isInvalid()) {
-            return AudioSystem::getStrategyForStream(track->streamType());
+            return getStrategyForStream(track->streamType());
         }
     }
-    return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+    return getStrategyForStream(AUDIO_STREAM_MUSIC);
 }
 
 
@@ -3336,23 +3449,34 @@
 {
     audio_session_t session = chain->sessionId();
     sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
-    status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
-            mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
-            mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
-            &halInBuffer);
-    if (result != OK) return result;
-    halOutBuffer = halInBuffer;
-    effect_buffer_t *buffer = reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData());
-    ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
-    if (!audio_is_global_session(session)) {
-        // Only one effect chain can be present in direct output thread and it uses
-        // the sink buffer as input
-        if (mType != DIRECT) {
-            size_t numSamples = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
+    effect_buffer_t *buffer = nullptr; // only used for non global sessions
+
+    if (mType == SPATIALIZER) {
+        if (!audio_is_global_session(session)) {
+            // player sessions on a spatializer output will use a dedicated input buffer and
+            // will either output multi-channel to mEffectBuffer if the track is spatialized
+            // or stereo to mPostSpatializerBuffer if not spatialized.
+            uint32_t channelMask;
+            bool isSessionSpatialized =
+                (hasAudioSession_l(session) & ThreadBase::SPATIALIZED_SESSION) != 0;
+            if (isSessionSpatialized) {
+                channelMask = mMixerChannelMask;
+            } else {
+                channelMask = mChannelMask;
+            }
+            size_t numSamples = mNormalFrameCount
+                    * (audio_channel_count_from_out_mask(channelMask) + mHapticChannelCount);
             status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
                     numSamples * sizeof(effect_buffer_t),
                     &halInBuffer);
             if (result != OK) return result;
+
+            result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+                    isSessionSpatialized ? mEffectBuffer : mPostSpatializerBuffer,
+                    isSessionSpatialized ? mEffectBufferSize : mPostSpatializerBufferSize,
+                    &halOutBuffer);
+            if (result != OK) return result;
+
 #ifdef FLOAT_EFFECT_CHAIN
             buffer = halInBuffer->audioBuffer()->f32;
 #else
@@ -3360,14 +3484,60 @@
 #endif
             ALOGV("addEffectChain_l() creating new input buffer %p session %d",
                     buffer, session);
-        }
+        } else {
+            // A global session on a SPATIALIZER thread is either OUTPUT_STAGE or DEVICE
+            // - OUTPUT_STAGE session uses the mEffectBuffer as input buffer and
+            // mPostSpatializerBuffer as output buffer
+            // - DEVICE session uses the mPostSpatializerBuffer as input and output buffer.
+            status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+                    mEffectBuffer, mEffectBufferSize, &halInBuffer);
+            if (result != OK) return result;
+            result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+                    mPostSpatializerBuffer, mPostSpatializerBufferSize, &halOutBuffer);
+            if (result != OK) return result;
 
+            if (session == AUDIO_SESSION_DEVICE) {
+                halInBuffer = halOutBuffer;
+            }
+        }
+    } else {
+        status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+                mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
+                mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
+                &halInBuffer);
+        if (result != OK) return result;
+        halOutBuffer = halInBuffer;
+        ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
+        if (!audio_is_global_session(session)) {
+            buffer = reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData());
+            // Only one effect chain can be present in direct output thread and it uses
+            // the sink buffer as input
+            if (mType != DIRECT) {
+                size_t numSamples = mNormalFrameCount
+                        * (audio_channel_count_from_out_mask(mMixerChannelMask)
+                                                             + mHapticChannelCount);
+                status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
+                        numSamples * sizeof(effect_buffer_t),
+                        &halInBuffer);
+                if (result != OK) return result;
+#ifdef FLOAT_EFFECT_CHAIN
+                buffer = halInBuffer->audioBuffer()->f32;
+#else
+                buffer = halInBuffer->audioBuffer()->s16;
+#endif
+                ALOGV("addEffectChain_l() creating new input buffer %p session %d",
+                        buffer, session);
+            }
+        }
+    }
+
+    if (!audio_is_global_session(session)) {
         // Attach all tracks with same session ID to this chain.
         for (size_t i = 0; i < mTracks.size(); ++i) {
             sp<Track> track = mTracks[i];
             if (session == track->sessionId()) {
-                ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(),
-                        buffer);
+                ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p",
+                        track.get(), buffer);
                 track->setMainBuffer(buffer);
                 chain->incTrackCnt();
             }
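
The buffer wiring described in the comments above can be summarized with a small illustrative model (a sketch only, not AudioFlinger code; the buffer names refer to the thread members used in this hunk): player sessions get a dedicated allocated input buffer and mirror either mEffectBuffer (spatialized) or mPostSpatializerBuffer (not spatialized) as output, the OUTPUT_STAGE chain runs from mEffectBuffer into mPostSpatializerBuffer, and the DEVICE chain processes mPostSpatializerBuffer in place.

    #include <cstdio>
    #include <utility>

    // Illustrative model of which buffers an effect chain on a SPATIALIZER thread uses.
    enum class ChainKind { SpatializedSession, NonSpatializedSession, OutputStage, Device };

    std::pair<const char*, const char*> spatializerChainBuffers(ChainKind kind) {
        switch (kind) {
        case ChainKind::SpatializedSession:
            return {"dedicated session buffer", "mEffectBuffer"};           // multi-channel out
        case ChainKind::NonSpatializedSession:
            return {"dedicated session buffer", "mPostSpatializerBuffer"};  // stereo out
        case ChainKind::OutputStage:
            return {"mEffectBuffer", "mPostSpatializerBuffer"};             // spatializer stage
        case ChainKind::Device:
            return {"mPostSpatializerBuffer", "mPostSpatializerBuffer"};    // in-place
        }
        return {"", ""};
    }

    int main() {
        const auto [in, out] = spatializerChainBuffers(ChainKind::OutputStage);
        std::printf("OUTPUT_STAGE chain: in=%s out=%s\n", in, out);
        return 0;
    }
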
@@ -3376,11 +3546,13 @@
         // indicate all active tracks in the chain
         for (const sp<Track> &track : mActiveTracks) {
             if (session == track->sessionId()) {
-                ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session);
+                ALOGV("addEffectChain_l() activating track %p on session %d",
+                        track.get(), session);
                 chain->incActiveTrackCnt();
             }
         }
     }
+
     chain->setThread(this);
     chain->setInBuffer(halInBuffer);
     chain->setOutBuffer(halOutBuffer);
@@ -3531,6 +3703,8 @@
 
     audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
 
+    sendCheckOutputStageEffectsEvent();
+
     // loopCount is used for statistics and diagnostics.
     for (int64_t loopCount = 0; !exitPending(); ++loopCount)
     {
@@ -3542,6 +3716,7 @@
 
         Vector< sp<EffectChain> > effectChains;
         audio_session_t activeHapticSessionId = AUDIO_SESSION_NONE;
+        bool isHapticSessionSpatialized = false;
         std::vector<sp<Track>> activeTracks;
 
         // If the device is AUDIO_DEVICE_OUT_BUS, check for downstream latency.
@@ -3587,11 +3762,18 @@
             }
         }
 
+        if (mCheckOutputStageEffects.exchange(false)) {
+            checkOutputStageEffects();
+        }
+
         { // scope for mLock
 
             Mutex::Autolock _l(mLock);
 
             processConfigEvents_l();
+            if (mCheckOutputStageEffects.load()) {
+                continue;
+            }
 
             // See comment at declaration of logString for why this is done under mLock
             if (logString != NULL) {
@@ -3695,16 +3877,21 @@
             // This must be done under the same lock as prepareTracks_l().
             // The haptic data from the effect is at a higher priority than the one from track.
             // TODO: Write haptic data directly to sink buffer when mixing.
-            if (mHapticChannelCount > 0 && effectChains.size() > 0) {
+            if (mHapticChannelCount > 0) {
                 for (const auto& track : mActiveTracks) {
                     sp<EffectChain> effectChain = getEffectChain_l(track->sessionId());
-                    if (effectChain != nullptr && effectChain->containsHapticGeneratingEffect_l()) {
+                    if (effectChain != nullptr
+                            && effectChain->containsHapticGeneratingEffect_l()) {
                         activeHapticSessionId = track->sessionId();
+                        isHapticSessionSpatialized =
+                                mType == SPATIALIZER && track->canBeSpatialized();
                         break;
                     }
-                    if (track->getHapticPlaybackEnabled()) {
+                    if (activeHapticSessionId == AUDIO_SESSION_NONE
+                            && track->getHapticPlaybackEnabled()) {
                         activeHapticSessionId = track->sessionId();
-                        break;
+                        isHapticSessionSpatialized =
+                                mType == SPATIALIZER && track->canBeSpatialized();
                     }
                 }
             }
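
A rough sketch of the selection order introduced above (illustrative only, not AudioFlinger code): a session whose effect chain contains a haptic-generating effect takes priority, otherwise the first track with haptic playback enabled is remembered while the scan continues, and the spatialized flag is recorded for whichever session wins.

    #include <cstdio>
    #include <vector>

    // Illustrative model of the active-haptic-session selection above.
    struct TrackInfo {
        int  sessionId;
        bool hasHapticGeneratingEffect;
        bool hapticPlaybackEnabled;
        bool spatialized;
    };

    constexpr int kNoSession = -1;  // stands in for AUDIO_SESSION_NONE

    int pickActiveHapticSession(const std::vector<TrackInfo>& tracks, bool* outSpatialized) {
        int chosen = kNoSession;
        for (const TrackInfo& t : tracks) {
            if (t.hasHapticGeneratingEffect) {
                *outSpatialized = t.spatialized;
                return t.sessionId;               // effect-generated haptics take priority
            }
            if (chosen == kNoSession && t.hapticPlaybackEnabled) {
                chosen = t.sessionId;             // remember the first haptic-enabled track...
                *outSpatialized = t.spatialized;  // ...but keep scanning for a generating effect
            }
        }
        return chosen;
    }

    int main() {
        bool spatialized = false;
        const std::vector<TrackInfo> tracks = {{1, false, true, false}, {2, true, false, true}};
        const int session = pickActiveHapticSession(tracks, &spatialized);
        std::printf("session %d, spatialized=%d\n", session, spatialized);  // session 2
        return 0;
    }
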
@@ -3754,6 +3941,8 @@
             //
             // mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
             // TODO use mSleepTimeUs == 0 as an additional condition.
+            uint32_t mixerChannelCount = mEffectBufferValid ?
+                        audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
             if (mMixerBufferValid) {
                 void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
                 audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
@@ -3774,7 +3963,7 @@
                 }
 
                 memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
-                        mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+                        mNormalFrameCount * (mixerChannelCount + mHapticChannelCount));
 
                 // If we're going directly to the sink and there are haptic channels,
                 // we should adjust channels as the sample data is partially interleaved
@@ -3807,8 +3996,16 @@
                             && activeHapticSessionId == effectChains[i]->sessionId()) {
                         // Haptic data is active in this case, copy it directly from
                         // in buffer to out buffer.
+                        uint32_t hapticSessionChannelCount = mEffectBufferValid ?
+                                            audio_channel_count_from_out_mask(mMixerChannelMask) :
+                                            mChannelCount;
+                        if (mType == SPATIALIZER && !isHapticSessionSpatialized) {
+                            hapticSessionChannelCount = mChannelCount;
+                        }
+
                         const size_t audioBufferSize = mNormalFrameCount
-                                * audio_bytes_per_frame(mChannelCount, EFFECT_BUFFER_FORMAT);
+                            * audio_bytes_per_frame(hapticSessionChannelCount,
+                                                    EFFECT_BUFFER_FORMAT);
                         memcpy_by_audio_format(
                                 (uint8_t*)effectChains[i]->outBuffer() + audioBufferSize,
                                 EFFECT_BUFFER_FORMAT,
@@ -3834,9 +4031,9 @@
         // TODO use mSleepTimeUs == 0 as an additional condition.
         if (mEffectBufferValid) {
             //ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
-
+            void *effectBuffer = (mType == SPATIALIZER) ? mPostSpatializerBuffer : mEffectBuffer;
             if (requireMonoBlend()) {
-                mono_blend(mEffectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
+                mono_blend(effectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
                            true /*limit*/);
             }
 
@@ -3845,11 +4042,30 @@
                 // We do it here if there is no FastMixer.
                 // mBalance detects zero balance within the class for speed (not needed here).
                 mBalance.setBalance(mMasterBalance.load());
-                mBalance.process((float *)mEffectBuffer, mNormalFrameCount);
+                mBalance.process((float *)effectBuffer, mNormalFrameCount);
             }
 
-            memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
+            // For a SPATIALIZER thread, move haptics channels from mEffectBuffer to
+            // mPostSpatializerBuffer if the haptics track is spatialized.
+            // Otherwise, the haptics channels are already in mPostSpatializerBuffer.
+            // For other thread types, the haptics channels are already in mEffectBuffer.
+            if (mType == SPATIALIZER && isHapticSessionSpatialized) {
+                const size_t srcBufferSize = mNormalFrameCount *
+                        audio_bytes_per_frame(audio_channel_count_from_out_mask(mMixerChannelMask),
+                                              mEffectBufferFormat);
+                const size_t dstBufferSize = mNormalFrameCount
+                        * audio_bytes_per_frame(mChannelCount, mEffectBufferFormat);
+
+                memcpy_by_audio_format((uint8_t*)mPostSpatializerBuffer + dstBufferSize,
+                                       mEffectBufferFormat,
+                                       (uint8_t*)mEffectBuffer + srcBufferSize,
+                                       mEffectBufferFormat,
+                                       mNormalFrameCount * mHapticChannelCount);
+            }
+
+            memcpy_by_audio_format(mSinkBuffer, mFormat, effectBuffer, mEffectBufferFormat,
                     mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+
             // The sample data is partially interleaved when haptic channels exist,
             // we need to adjust channels here.
             if (mHapticChannelCount > 0) {
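
A worked example of the copy offsets above, under assumed values (1024 normal frames, a 12-channel mixer mask, a stereo sink, one haptic channel, float effect buffers): the haptic samples sit after the audio portion of each buffer, so the source offset is derived from the mixer channel count and the destination offset from the sink channel count.

    #include <cstddef>
    #include <cstdio>

    int main() {
        // Assumed values, for illustration only.
        const size_t normalFrameCount   = 1024;
        const size_t mixerChannelCount  = 12;             // from mMixerChannelMask
        const size_t sinkChannelCount   = 2;              // mChannelCount (haptics excluded)
        const size_t hapticChannelCount = 1;
        const size_t bytesPerSample     = sizeof(float);  // EFFECT_BUFFER_FORMAT assumed float

        // Offset of the haptic region in each buffer (audio frames first, haptics after).
        const size_t srcBufferSize = normalFrameCount * mixerChannelCount * bytesPerSample;
        const size_t dstBufferSize = normalFrameCount * sinkChannelCount  * bytesPerSample;

        // memcpy_by_audio_format(dst + dstBufferSize, ..., src + srcBufferSize, ..., count)
        // then copies this many haptic samples:
        const size_t hapticSamples = normalFrameCount * hapticChannelCount;

        std::printf("src offset %zu bytes, dst offset %zu bytes, %zu haptic samples\n",
                    srcBufferSize, dstBufferSize, hapticSamples);
        return 0;
    }
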
@@ -3899,6 +4115,18 @@
                                 Mutex::Autolock _l(mLock);
                                 mIoJitterMs.add(jitterMs);
                                 mProcessTimeMs.add(processMs);
+
+                                if (mPipeSink.get() != nullptr) {
+                                    // Using the Monopipe availableToWrite, we estimate the current
+                                    // buffer size.
+                                    MonoPipe* monoPipe = static_cast<MonoPipe*>(mPipeSink.get());
+                                    const ssize_t
+                                            availableToWrite = mPipeSink->availableToWrite();
+                                    const size_t pipeFrames = monoPipe->maxFrames();
+                                    const size_t
+                                            remainingFrames = pipeFrames - max(availableToWrite, 0);
+                                    mMonopipePipeDepthStats.add(remainingFrames);
+                                }
                             }
 
                             // write blocked detection
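
The pipe depth statistic added above is derived from the writer's view of the MonoPipe: a minimal sketch of the arithmetic, assuming a hypothetical pipe of 2048 frames with 1536 frames still writable.

    #include <algorithm>
    #include <cstdio>
    #include <sys/types.h>   // ssize_t

    int main() {
        // Assumed values, for illustration only.
        const ssize_t availableToWrite = 1536;  // frames the writer could still push
        const size_t  pipeFrames       = 2048;  // MonoPipe::maxFrames()

        // Frames currently sitting in the pipe (clamped in case availableToWrite is negative).
        const size_t remainingFrames = pipeFrames - std::max<ssize_t>(availableToWrite, 0);

        std::printf("estimated pipe depth: %zu frames\n", remainingFrames);  // 512
        return 0;
    }
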
@@ -4448,8 +4676,8 @@
 // ----------------------------------------------------------------------------
 
 AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-        audio_io_handle_t id, bool systemReady, type_t type)
-    :   PlaybackThread(audioFlinger, output, id, type, systemReady),
+        audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t *mixerConfig)
+    :   PlaybackThread(audioFlinger, output, id, type, systemReady, mixerConfig),
         // mAudioMixer below
         // mFastMixer below
         mFastMixerFutex(0),
@@ -4487,26 +4715,25 @@
 
     // initialize fast mixer depending on configuration
     bool initFastMixer;
-    switch (kUseFastMixer) {
-    case FastMixer_Never:
+    if (mType == SPATIALIZER) {
         initFastMixer = false;
-        break;
-    case FastMixer_Always:
-        initFastMixer = true;
-        break;
-    case FastMixer_Static:
-    case FastMixer_Dynamic:
-        // FastMixer was designed to operate with a HAL that pulls at a regular rate,
-        // where the period is less than an experimentally determined threshold that can be
-        // scheduled reliably with CFS. However, the BT A2DP HAL is
-        // bursty (does not pull at a regular rate) and so cannot operate with FastMixer.
-        initFastMixer = mFrameCount < mNormalFrameCount
-                && Intersection(outDeviceTypes(), getAudioDeviceOutAllA2dpSet()).empty();
-        break;
+    } else {
+        switch (kUseFastMixer) {
+        case FastMixer_Never:
+            initFastMixer = false;
+            break;
+        case FastMixer_Always:
+            initFastMixer = true;
+            break;
+        case FastMixer_Static:
+        case FastMixer_Dynamic:
+            initFastMixer = mFrameCount < mNormalFrameCount;
+            break;
+        }
+        ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
+                "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
+                mFrameCount, mNormalFrameCount);
     }
-    ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
-            "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
-            mFrameCount, mNormalFrameCount);
     if (initFastMixer) {
         audio_format_t fastMixerFormat;
         if (mMixerBufferEnabled && mEffectBufferEnabled) {
@@ -4566,6 +4793,7 @@
         fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
         fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
         fastTrack->mHapticIntensity = os::HapticScale::NONE;
+        fastTrack->mHapticMaxAmplitude = NAN;
         fastTrack->mGeneration++;
         state->mFastTracksGen++;
         state->mTrackMask = 1;
@@ -4861,6 +5089,9 @@
         // before effects processing or output.
         if (mMixerBufferValid) {
             memset(mMixerBuffer, 0, mMixerBufferSize);
+            if (mType == SPATIALIZER) {
+                memset(mSinkBuffer, 0, mSinkBufferSize);
+            }
         } else {
             memset(mSinkBuffer, 0, mSinkBufferSize);
         }
@@ -5089,7 +5320,7 @@
                 break;
             case TrackBase::IDLE:
             default:
-                LOG_ALWAYS_FATAL("unexpected track state %d", track->mState);
+                LOG_ALWAYS_FATAL("unexpected track state %d", (int)track->mState);
             }
 
             if (isActive) {
@@ -5103,6 +5334,7 @@
                     fastTrack->mFormat = track->mFormat;
                     fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
                     fastTrack->mHapticIntensity = track->getHapticIntensity();
+                    fastTrack->mHapticMaxAmplitude = track->getHapticMaxAmplitude();
                     fastTrack->mGeneration++;
                     state->mTrackMask |= 1 << j;
                     didModify = true;
@@ -5148,7 +5380,7 @@
                     // TODO Remove the ALOGW when this theory is confirmed.
                     ALOGW("fast track %d should have been active; "
                             "mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
-                            j, track->mState, state->mTrackMask, recentUnderruns,
+                            j, (int)track->mState, state->mTrackMask, recentUnderruns,
                             track->sharedBuffer() != 0);
                     // Since the FastMixer state already has the track inactive, do nothing here.
                 }
@@ -5352,11 +5584,21 @@
                 trackId,
                 AudioMixer::TRACK,
                 AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
-            mAudioMixer->setParameter(
-                trackId,
-                AudioMixer::TRACK,
-                AudioMixer::MIXER_CHANNEL_MASK,
-                (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
+
+            if (mType == SPATIALIZER && !track->canBeSpatialized()) {
+                mAudioMixer->setParameter(
+                    trackId,
+                    AudioMixer::TRACK,
+                    AudioMixer::MIXER_CHANNEL_MASK,
+                    (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
+            } else {
+                mAudioMixer->setParameter(
+                    trackId,
+                    AudioMixer::TRACK,
+                    AudioMixer::MIXER_CHANNEL_MASK,
+                    (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
+            }
+
             // limit track sample rate to 2 x output sample rate, which changes at re-configuration
             uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
             uint32_t reqSampleRate = proxy->getSampleRate();
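
The per-track MIXER_CHANNEL_MASK choice above (and the MAIN_BUFFER choice in the following hunk) follows one rule: on a SPATIALIZER thread, tracks that cannot be spatialized mix at the sink width into mPostSpatializerBuffer, while everything else mixes at the full mixer width into mMixerBuffer. A hedged sketch with placeholder mask values (not the real audio_channel_mask_t constants):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Placeholder masks, for illustration only.
        const uint32_t sinkMask   = 0x3;     // hypothetical stand-in for mChannelMask (stereo)
        const uint32_t mixerMask  = 0xFFF;   // hypothetical stand-in for mMixerChannelMask
        const uint32_t hapticMask = 0x1000;  // hypothetical stand-in for mHapticChannelMask

        const bool isSpatializerThread   = true;
        const bool trackCanBeSpatialized = false;

        // Non-spatializable tracks render at sink width; spatializable tracks at mixer width.
        const uint32_t mixerChannelMaskForTrack =
                (isSpatializerThread && !trackCanBeSpatialized)
                        ? (sinkMask | hapticMask)
                        : (mixerMask | hapticMask);

        std::printf("MIXER_CHANNEL_MASK for this track: %#x\n",
                    static_cast<unsigned>(mixerChannelMaskForTrack));
        return 0;
    }
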
@@ -5393,16 +5635,27 @@
             if (mMixerBufferEnabled
                     && (track->mainBuffer() == mSinkBuffer
                             || track->mainBuffer() == mMixerBuffer)) {
-                mAudioMixer->setParameter(
-                        trackId,
-                        AudioMixer::TRACK,
-                        AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
-                mAudioMixer->setParameter(
-                        trackId,
-                        AudioMixer::TRACK,
-                        AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
-                // TODO: override track->mainBuffer()?
-                mMixerBufferValid = true;
+                if (mType == SPATIALIZER && !track->canBeSpatialized()) {
+                    mAudioMixer->setParameter(
+                            trackId,
+                            AudioMixer::TRACK,
+                            AudioMixer::MIXER_FORMAT, (void *)mEffectBufferFormat);
+                    mAudioMixer->setParameter(
+                            trackId,
+                            AudioMixer::TRACK,
+                            AudioMixer::MAIN_BUFFER, (void *)mPostSpatializerBuffer);
+                } else {
+                    mAudioMixer->setParameter(
+                            trackId,
+                            AudioMixer::TRACK,
+                            AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
+                    mAudioMixer->setParameter(
+                            trackId,
+                            AudioMixer::TRACK,
+                            AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
+                    // TODO: override track->mainBuffer()?
+                    mMixerBufferValid = true;
+                }
             } else {
                 mAudioMixer->setParameter(
                         trackId,
@@ -5425,6 +5678,10 @@
                 trackId,
                 AudioMixer::TRACK,
                 AudioMixer::HAPTIC_INTENSITY, (void *)(uintptr_t)track->getHapticIntensity());
+            mAudioMixer->setParameter(
+                trackId,
+                AudioMixer::TRACK,
+                AudioMixer::HAPTIC_MAX_AMPLITUDE, (void *)(&(track->mHapticMaxAmplitude)));
 
             // reset retry count
             track->mRetryCount = kMaxTrackRetries;
@@ -5575,7 +5832,8 @@
     // remove all the tracks that need to be...
     removeTracks_l(*tracksToRemove);
 
-    if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0) {
+    if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0 ||
+            getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE) != 0) {
         mEffectBufferValid = true;
     }
 
@@ -5583,12 +5841,17 @@
         // as long as there are effects we should clear the effects buffer, to avoid
         // passing a non-clean buffer to the effect chain
         memset(mEffectBuffer, 0, mEffectBufferSize);
+        if (mType == SPATIALIZER) {
+            memset(mPostSpatializerBuffer, 0, mPostSpatializerBufferSize);
+        }
     }
     // sink or mix buffer must be cleared if all tracks are connected to an
     // effect chain as in this case the mixer will not write to the sink or mix buffer
     // and track effects will accumulate into it
-    if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
-            (mixedTracks == 0 && fastTracks > 0))) {
+    // always clear sink buffer for spatializer output as the output of the spatializer
+    // effect will be accumulated into it
+    if ((mBytesRemaining == 0) && (((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+            (mixedTracks == 0 && fastTracks > 0)) || (mType == SPATIALIZER))) {
         // FIXME as a performance optimization, should remember previous zero status
         if (mMixerBufferValid) {
             memset(mMixerBuffer, 0, mMixerBufferSize);
@@ -5626,6 +5889,20 @@
     return trackCount;
 }
 
+bool AudioFlinger::PlaybackThread::checkRunningTimestamp()
+{
+    uint64_t position = 0;
+    struct timespec unused;
+    const status_t ret = mOutput->getPresentationPosition(&position, &unused);
+    if (ret == NO_ERROR) {
+        if (position != mLastCheckedTimestampPosition) {
+            mLastCheckedTimestampPosition = position;
+            return true;
+        }
+    }
+    return false;
+}
+
 // isTrackAllowed_l() must be called with ThreadBase::mLock held
 bool AudioFlinger::MixerThread::isTrackAllowed_l(
         audio_channel_mask_t channelMask, audio_format_t format,
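
checkRunningTimestamp() above factors out the check OffloadThread used to inline: the output only counts as still running when its presentation position has advanced since the previous call, which resets the track retry counter instead of disabling the track. A small self-contained sketch of that stateful pattern (illustrative, not the AudioFlinger implementation, which also handles the getPresentationPosition() error path):

    #include <cstdint>
    #include <cstdio>

    // Illustrative sketch: report "running" only when the reported position advances.
    class RunningChecker {
    public:
        bool checkRunning(uint64_t currentPosition) {
            if (currentPosition != mLastCheckedPosition) {
                mLastCheckedPosition = currentPosition;
                return true;   // position moved since last check: HAL is still consuming data
            }
            return false;      // no progress: treat as a real underrun
        }
    private:
        uint64_t mLastCheckedPosition = 0;
    };

    int main() {
        RunningChecker checker;
        std::printf("%d\n", checker.checkRunning(0));    // 0: matches the initial value
        std::printf("%d\n", checker.checkRunning(480));  // 1: position advanced
        std::printf("%d\n", checker.checkRunning(480));  // 0: stalled
        return 0;
    }
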
@@ -6054,19 +6331,24 @@
                 // fill a buffer, then remove it from active list.
                 // Only consider last track started for mixer state control
                 if (--(track->mRetryCount) <= 0) {
-                    ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
-                    tracksToRemove->add(track);
-                    // indicate to client process that the track was disabled because of underrun;
-                    // it will then automatically call start() when data is available
-                    track->disable();
-                    // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
-                    // unlike mixerthread, HAL can be paused for direct output
-                    ALOGW("pause because of UNDERRUN, framesReady = %zu,"
-                            "minFrames = %u, mFormat = %#x",
-                            framesReady, minFrames, mFormat);
-                    if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
-                        doHwPause = true;
-                        mHwPaused = true;
+                    const bool running = checkRunningTimestamp();
+                    if (running) { // still running, give us more time.
+                        track->mRetryCount = kMaxTrackRetriesOffload;
+                    } else {
+                        ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
+                        tracksToRemove->add(track);
+                        // indicate to client process that the track was disabled because of
+                        // underrun; it will then automatically call start() when data is available
+                        track->disable();
+                        // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
+                        // unlike mixerthread, HAL can be paused for direct output
+                        ALOGW("pause because of UNDERRUN, framesReady = %zu,"
+                                "minFrames = %u, mFormat = %#x",
+                                framesReady, minFrames, mFormat);
+                        if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
+                            doHwPause = true;
+                            mHwPaused = true;
+                        }
                     }
                 } else if (last) {
                     mixerStatus = MIXER_TRACKS_ENABLED;
@@ -6277,6 +6559,7 @@
 
 void AudioFlinger::DirectOutputThread::flushHw_l()
 {
+    PlaybackThread::flushHw_l();
     mOutput->flush();
     mHwPaused = false;
     mFlushPending = false;
@@ -6412,8 +6695,7 @@
 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
         AudioStreamOut* output, audio_io_handle_t id, bool systemReady)
     :   DirectOutputThread(audioFlinger, output, id, OFFLOAD, systemReady),
-        mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true),
-        mOffloadUnderrunPosition(~0LL)
+        mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
 {
     //FIXME: mStandby should be set to true by ThreadBase constructor
     mStandby = true;
@@ -6630,19 +6912,7 @@
                 // No buffers for this track. Give it a few chances to
                 // fill a buffer, then remove it from active list.
                 if (--(track->mRetryCount) <= 0) {
-                    bool running = false;
-                    uint64_t position = 0;
-                    struct timespec unused;
-                    // The running check restarts the retry counter at least once.
-                    status_t ret = mOutput->stream->getPresentationPosition(&position, &unused);
-                    if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
-                        running = true;
-                        mOffloadUnderrunPosition = position;
-                    }
-                    if (ret == NO_ERROR) {
-                        ALOGVV("underrun counter, running(%d): %lld vs %lld", running,
-                                (long long)position, (long long)mOffloadUnderrunPosition);
-                    }
+                    const bool running = checkRunningTimestamp();
                     if (running) { // still running, give us more time.
                         track->mRetryCount = kMaxTrackRetriesOffload;
                     } else {
@@ -6713,7 +6983,6 @@
     mPausedBytesRemaining = 0;
     // reset bytes written count to reflect that DSP buffers are empty after flush.
     mBytesWritten = 0;
-    mOffloadUnderrunPosition = ~0LL;
 
     if (mUseAsyncWrite) {
         // discard any pending drain or write ack by incrementing sequence
@@ -6971,6 +7240,69 @@
     MixerThread::cacheParameters_l();
 }
 
+// ----------------------------------------------------------------------------
+
+AudioFlinger::SpatializerThread::SpatializerThread(const sp<AudioFlinger>& audioFlinger,
+                                                             AudioStreamOut* output,
+                                                             audio_io_handle_t id,
+                                                             bool systemReady,
+                                                             audio_config_base_t *mixerConfig)
+    : MixerThread(audioFlinger, output, id, systemReady, SPATIALIZER, mixerConfig)
+{
+}
+
+void AudioFlinger::SpatializerThread::checkOutputStageEffects()
+{
+    bool hasVirtualizer = false;
+    bool hasDownMixer = false;
+    sp<EffectHandle> finalDownMixer;
+    {
+        Mutex::Autolock _l(mLock);
+        sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
+        if (chain != 0) {
+            hasVirtualizer = chain->getEffectFromType_l(FX_IID_SPATIALIZER) != nullptr;
+            hasDownMixer = chain->getEffectFromType_l(EFFECT_UIID_DOWNMIX) != nullptr;
+        }
+
+        finalDownMixer = mFinalDownMixer;
+        mFinalDownMixer.clear();
+    }
+
+    if (hasVirtualizer) {
+        if (finalDownMixer != nullptr) {
+            int32_t ret;
+            finalDownMixer->disable(&ret);
+        }
+        finalDownMixer.clear();
+    } else if (!hasDownMixer) {
+        std::vector<effect_descriptor_t> descriptors;
+        status_t status = mAudioFlinger->mEffectsFactoryHal->getDescriptors(
+                                                        EFFECT_UIID_DOWNMIX, &descriptors);
+        if (status != NO_ERROR) {
+            return;
+        }
+        ALOG_ASSERT(!descriptors.empty(),
+                "%s getDescriptors() returned no error but empty list", __func__);
+
+        finalDownMixer = createEffect_l(nullptr /*client*/, nullptr /*effectClient*/,
+                0 /*priority*/, AUDIO_SESSION_OUTPUT_STAGE, &descriptors[0], nullptr /*enabled*/,
+                &status, false /*pinned*/, false /*probe*/, false /*notifyFramesProcessed*/);
+
+        if (finalDownMixer == nullptr || (status != NO_ERROR && status != ALREADY_EXISTS)) {
+            ALOGW("%s error creating downmixer %d", __func__, status);
+            finalDownMixer.clear();
+        } else {
+            int32_t ret;
+            finalDownMixer->enable(&ret);
+        }
+    }
+
+    {
+        Mutex::Autolock _l(mLock);
+        mFinalDownMixer = finalDownMixer;
+    }
+}
+
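
The output stage maintenance above reduces to a three-way decision; the following is an illustrative summary only, not AudioFlinger code: a spatializer/virtualizer in the OUTPUT_STAGE chain makes the fallback downmixer unnecessary (any previously created one is disabled and released), an existing downmix effect in the chain is left alone, and otherwise a DOWNMIX effect is created on AUDIO_SESSION_OUTPUT_STAGE and enabled.

    #include <cstdio>

    // Illustrative decision table for SpatializerThread::checkOutputStageEffects() above.
    enum class DownmixerAction { DisableAndRelease, KeepExisting, CreateAndEnable };

    DownmixerAction decideFinalDownmixer(bool hasVirtualizer, bool hasDownMixer) {
        if (hasVirtualizer) {
            return DownmixerAction::DisableAndRelease;  // spatializer handles the downmix itself
        }
        if (hasDownMixer) {
            return DownmixerAction::KeepExisting;       // a downmix effect is already in the chain
        }
        return DownmixerAction::CreateAndEnable;        // fall back to a DOWNMIX effect
    }

    int main() {
        std::printf("%d\n", static_cast<int>(decideFinalDownmixer(false, false)));  // CreateAndEnable
        return 0;
    }
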
 
 // ----------------------------------------------------------------------------
 //      Record
@@ -7445,6 +7777,7 @@
 
             const ssize_t availableToRead = mPipeSource->availableToRead();
             if (availableToRead >= 0) {
+                mMonopipePipeDepthStats.add(availableToRead);
                 // PipeSource is the primary clock.  It is up to the AudioRecord client to keep up.
                 LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
                         "more frames to read than fifo size, %zd > %zu",
@@ -7873,6 +8206,7 @@
       if (
             // we formerly checked for a callback handler (non-0 tid),
             // but that is no longer required for TRANSFER_OBTAIN mode
+            // No need to match hardware format; format conversion is done on the client side.
             //
             // Frame count is not specified (0), or is less than or equal the pipe depth.
             // It is OK to provide a higher capacity than requested.
@@ -7880,8 +8214,6 @@
             (frameCount <= mPipeFramesP2) &&
             // PCM data
             audio_is_linear_pcm(format) &&
-            // hardware format
-            (format == mFormat) &&
             // hardware channel mask
             (channelMask == mChannelMask) &&
             // hardware sample rate
@@ -8041,7 +8373,7 @@
                 ALOGV("active record track PAUSING -> ACTIVE");
                 recordTrack->mState = TrackBase::ACTIVE;
             } else {
-                ALOGV("active record track state %d", recordTrack->mState);
+                ALOGV("active record track state %d", (int)recordTrack->mState);
             }
             return status;
         }
@@ -8067,7 +8399,7 @@
             }
             if (recordTrack->mState != TrackBase::STARTING_1) {
                 ALOGW("%s(%d): unsynchronized mState:%d change",
-                    __func__, recordTrack->id(), recordTrack->mState);
+                    __func__, recordTrack->id(), (int)recordTrack->mState);
                 // Someone else has changed state, let them take over,
                 // leave mState in the new state.
                 recordTrack->clearSyncStartEvent();
@@ -8631,30 +8963,22 @@
     return String8();
 }
 
-void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
                                                  audio_port_handle_t portId) {
-    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
-
-    desc->mIoHandle = mId;
-
+    sp<AudioIoDescriptor> desc;
     switch (event) {
     case AUDIO_INPUT_OPENED:
     case AUDIO_INPUT_REGISTERED:
     case AUDIO_INPUT_CONFIG_CHANGED:
-        desc->mPatch = mPatch;
-        desc->mChannelMask = mChannelMask;
-        desc->mSamplingRate = mSampleRate;
-        desc->mFormat = mFormat;
-        desc->mFrameCount = mFrameCount;
-        desc->mFrameCountHAL = mFrameCount;
-        desc->mLatency = 0;
+        desc = sp<AudioIoDescriptor>::make(mId, mPatch, true /*isInput*/,
+                mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
         break;
     case AUDIO_CLIENT_STARTED:
-        desc->mPatch = mPatch;
-        desc->mPortId = portId;
+        desc = sp<AudioIoDescriptor>::make(mId, mPatch, portId);
         break;
     case AUDIO_INPUT_CLOSED:
     default:
+        desc = sp<AudioIoDescriptor>::make(mId);
         break;
     }
     mAudioFlinger->ioConfigChanged(event, desc, pid);
@@ -9278,7 +9602,7 @@
     mActiveTracks.add(track);
     sp<EffectChain> chain = getEffectChain_l(mSessionId);
     if (chain != 0) {
-        chain->setStrategy(AudioSystem::getStrategyForStream(streamType()));
+        chain->setStrategy(getStrategyForStream(streamType()));
         chain->incTrackCnt();
         chain->incActiveTrackCnt();
     }
@@ -9497,31 +9821,26 @@
     return String8();
 }
 
-void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
                                                audio_port_handle_t portId __unused) {
-    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
-
-    desc->mIoHandle = mId;
-
+    sp<AudioIoDescriptor> desc;
+    bool isInput = false;
     switch (event) {
     case AUDIO_INPUT_OPENED:
     case AUDIO_INPUT_REGISTERED:
     case AUDIO_INPUT_CONFIG_CHANGED:
+        isInput = true;
+        FALLTHROUGH_INTENDED;
     case AUDIO_OUTPUT_OPENED:
     case AUDIO_OUTPUT_REGISTERED:
     case AUDIO_OUTPUT_CONFIG_CHANGED:
-        desc->mPatch = mPatch;
-        desc->mChannelMask = mChannelMask;
-        desc->mSamplingRate = mSampleRate;
-        desc->mFormat = mFormat;
-        desc->mFrameCount = mFrameCount;
-        desc->mFrameCountHAL = mFrameCount;
-        desc->mLatency = 0;
+        desc = sp<AudioIoDescriptor>::make(mId, mPatch, isInput,
+                mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
         break;
-
     case AUDIO_INPUT_CLOSED:
     case AUDIO_OUTPUT_CLOSED:
     default:
+        desc = sp<AudioIoDescriptor>::make(mId);
         break;
     }
     mAudioFlinger->ioConfigChanged(event, desc, pid);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 16082a9..982893d 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -32,6 +32,7 @@
         OFFLOAD,            // Thread class is OffloadThread
         MMAP_PLAYBACK,      // Thread class for MMAP playback stream
         MMAP_CAPTURE,       // Thread class for MMAP capture stream
+        SPATIALIZER,        // Thread class is SpatializerThread
         // If you add any values here, also update ThreadBase::threadTypeToString()
     };
 
@@ -53,7 +54,8 @@
         CFG_EVENT_CREATE_AUDIO_PATCH,
         CFG_EVENT_RELEASE_AUDIO_PATCH,
         CFG_EVENT_UPDATE_OUT_DEVICE,
-        CFG_EVENT_RESIZE_BUFFER
+        CFG_EVENT_RESIZE_BUFFER,
+        CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS
     };
 
     class ConfigEventData: public RefBase {
@@ -87,7 +89,13 @@
     public:
         virtual ~ConfigEvent() {}
 
-        void dump(char *buffer, size_t size) { mData->dump(buffer, size); }
+        void dump(char *buffer, size_t size) {
+            snprintf(buffer, size, "Event type: %d\n", mType);
+            if (mData != nullptr) {
+                snprintf(buffer, size, "Data:\n");
+                mData->dump(buffer, size);
+            }
+        }
 
         const int mType; // event type e.g. CFG_EVENT_IO
         Mutex mLock;     // mutex associated with mCond
@@ -105,22 +113,22 @@
 
     class IoConfigEventData : public ConfigEventData {
     public:
-        IoConfigEventData(audio_io_config_event event, pid_t pid,
+        IoConfigEventData(audio_io_config_event_t event, pid_t pid,
                           audio_port_handle_t portId) :
             mEvent(event), mPid(pid), mPortId(portId) {}
 
         virtual  void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "IO event: event %d\n", mEvent);
+            snprintf(buffer, size, "- IO event: event %d\n", mEvent);
         }
 
-        const audio_io_config_event mEvent;
+        const audio_io_config_event_t mEvent;
         const pid_t                 mPid;
         const audio_port_handle_t   mPortId;
     };
 
     class IoConfigEvent : public ConfigEvent {
     public:
-        IoConfigEvent(audio_io_config_event event, pid_t pid, audio_port_handle_t portId) :
+        IoConfigEvent(audio_io_config_event_t event, pid_t pid, audio_port_handle_t portId) :
             ConfigEvent(CFG_EVENT_IO) {
             mData = new IoConfigEventData(event, pid, portId);
         }
@@ -133,7 +141,7 @@
             mPid(pid), mTid(tid), mPrio(prio), mForApp(forApp) {}
 
         virtual  void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d, for app? %d\n",
+            snprintf(buffer, size, "- Prio event: pid %d, tid %d, prio %d, for app? %d\n",
                     mPid, mTid, mPrio, mForApp);
         }
 
@@ -158,7 +166,7 @@
             mKeyValuePairs(keyValuePairs) {}
 
         virtual  void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "KeyValue: %s\n", mKeyValuePairs.string());
+            snprintf(buffer, size, "- KeyValue: %s\n", mKeyValuePairs.string());
         }
 
         const String8 mKeyValuePairs;
@@ -181,7 +189,7 @@
             mPatch(patch), mHandle(handle) {}
 
         virtual  void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "Patch handle: %u\n", mHandle);
+            snprintf(buffer, size, "- Patch handle: %u\n", mHandle);
         }
 
         const struct audio_patch mPatch;
@@ -205,7 +213,7 @@
             mHandle(handle) {}
 
         virtual  void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "Patch handle: %u\n", mHandle);
+            snprintf(buffer, size, "- Patch handle: %u\n", mHandle);
         }
 
         audio_patch_handle_t mHandle;
@@ -227,7 +235,7 @@
             mOutDevices(outDevices) {}
 
         virtual void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "Devices: %s", android::toString(mOutDevices).c_str());
+            snprintf(buffer, size, "- Devices: %s", android::toString(mOutDevices).c_str());
         }
 
         DeviceDescriptorBaseVector mOutDevices;
@@ -249,7 +257,7 @@
             mMaxSharedAudioHistoryMs(maxSharedAudioHistoryMs) {}
 
         virtual void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
+            snprintf(buffer, size, "- mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
         }
 
         int32_t mMaxSharedAudioHistoryMs;
@@ -265,6 +273,16 @@
         virtual ~ResizeBufferConfigEvent() {}
     };
 
+    class CheckOutputStageEffectsEvent : public ConfigEvent {
+    public:
+        CheckOutputStageEffectsEvent() :
+            ConfigEvent(CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS) {
+        }
+
+        virtual ~CheckOutputStageEffectsEvent() {}
+    };
+
+
     class PMDeathRecipient : public IBinder::DeathRecipient {
     public:
         explicit    PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {}
@@ -290,8 +308,11 @@
                 // dynamic externally-visible
                 uint32_t    sampleRate() const { return mSampleRate; }
                 audio_channel_mask_t channelMask() const { return mChannelMask; }
+    virtual     audio_channel_mask_t mixerChannelMask() const { return mChannelMask; }
+
                 audio_format_t format() const { return mHALFormat; }
                 uint32_t channelCount() const { return mChannelCount; }
+
                 // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
                 // and returns the [normal mix] buffer's frame count.
     virtual     size_t      frameCount() const = 0;
@@ -311,15 +332,15 @@
                                                     status_t& status) = 0;
     virtual     status_t    setParameters(const String8& keyValuePairs);
     virtual     String8     getParameters(const String8& keys) = 0;
-    virtual     void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+    virtual     void        ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
                                         audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE) = 0;
                 // sendConfigEvent_l() must be called with ThreadBase::mLock held
                 // Can temporarily release the lock if waiting for a reply from
                 // processConfigEvents_l().
                 status_t    sendConfigEvent_l(sp<ConfigEvent>& event);
-                void        sendIoConfigEvent(audio_io_config_event event, pid_t pid = 0,
+                void        sendIoConfigEvent(audio_io_config_event_t event, pid_t pid = 0,
                                               audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
-                void        sendIoConfigEvent_l(audio_io_config_event event, pid_t pid = 0,
+                void        sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid = 0,
                                             audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
                 void        sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp);
                 void        sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio, bool forApp);
@@ -330,7 +351,11 @@
                 status_t    sendUpdateOutDeviceConfigEvent(
                                     const DeviceDescriptorBaseVector& outDevices);
                 void        sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs);
+                void        sendCheckOutputStageEffectsEvent();
+                void        sendCheckOutputStageEffectsEvent_l();
+
                 void        processConfigEvents_l();
+    virtual     void        setCheckOutputStageEffects() {}
     virtual     void        cacheParameters_l() = 0;
     virtual     status_t    createAudioPatch_l(const struct audio_patch *patch,
                                                audio_patch_handle_t *handle) = 0;
@@ -383,7 +408,8 @@
                                     int *enabled,
                                     status_t *status /*non-NULL*/,
                                     bool pinned,
-                                    bool probe);
+                                    bool probe,
+                                    bool notifyFramesProcessed);
 
                 // return values for hasAudioSession (bit field)
                 enum effect_state {
@@ -391,8 +417,10 @@
                                             // effect
                     TRACK_SESSION = 0x2,    // the audio session corresponds to at least one
                                             // track
-                    FAST_SESSION = 0x4      // the audio session corresponds to at least one
+                    FAST_SESSION = 0x4,     // the audio session corresponds to at least one
                                             // fast track
+                    SPATIALIZED_SESSION = 0x8 // the audio session corresponds to at least one
+                                              // spatialized track
                 };
 
                 // get effect chain corresponding to session Id.
@@ -433,6 +461,7 @@
                 // - EFFECT_SESSION if effects on this audio session exist in one chain
                 // - TRACK_SESSION if tracks on this audio session exist
                 // - FAST_SESSION if fast tracks on this audio session exist
+                // - SPATIALIZED_SESSION if spatialized tracks on this audio session exist
     virtual     uint32_t hasAudioSession_l(audio_session_t sessionId) const = 0;
                 uint32_t hasAudioSession(audio_session_t sessionId) const {
                     Mutex::Autolock _l(mLock);
@@ -454,6 +483,9 @@
                             if (track->isFastTrack()) {
                                 result |= FAST_SESSION;  // caution, only represents first track.
                             }
+                            if (track->canBeSpatialized()) {
+                                result |= SPATIALIZED_SESSION;  // caution, only first track.
+                            }
                             break;
                         }
                     }
@@ -574,6 +606,8 @@
                                 return INVALID_OPERATION;
                             }
 
+                product_strategy_t getStrategyForStream(audio_stream_type_t stream) const;
+
     virtual     void        dumpInternals_l(int fd __unused, const Vector<String16>& args __unused)
                             { }
     virtual     void        dumpTracks_l(int fd __unused, const Vector<String16>& args __unused) { }
@@ -657,6 +691,7 @@
                 audio_utils::Statistics<double> mIoJitterMs{0.995 /* alpha */};
                 audio_utils::Statistics<double> mProcessTimeMs{0.995 /* alpha */};
                 audio_utils::Statistics<double> mLatencyMs{0.995 /* alpha */};
+                audio_utils::Statistics<double> mMonopipePipeDepthStats{0.999 /* alpha */};
 
                 // Save the last count when we delivered statistics to mediametrics.
                 int64_t                 mLastRecordedTimestampVerifierN = 0;
@@ -824,7 +859,8 @@
     static const nsecs_t kMaxNextBufferDelayNs = 100000000;
 
     PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-                   audio_io_handle_t id, type_t type, bool systemReady);
+                   audio_io_handle_t id, type_t type, bool systemReady,
+                   audio_config_base_t *mixerConfig = nullptr);
     virtual             ~PlaybackThread();
 
     // Thread virtuals
@@ -881,6 +917,8 @@
                                 mActiveTracks.updatePowerState(this, true /* force */);
                             }
 
+    virtual     void        checkOutputStageEffects() {}
+
                 void        dumpInternals_l(int fd, const Vector<String16>& args) override;
                 void        dumpTracks_l(int fd, const Vector<String16>& args) override;
 
@@ -942,7 +980,7 @@
                                 { return android_atomic_acquire_load(&mSuspended) > 0; }
 
     virtual     String8     getParameters(const String8& keys);
-    virtual     void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+    virtual     void        ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
                                             audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
                 status_t    getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
                 // Consider also removing and passing an explicit mMainBuffer initialization
@@ -973,6 +1011,10 @@
 
     virtual     size_t      frameCount() const { return mNormalFrameCount; }
 
+                audio_channel_mask_t mixerChannelMask() const override {
+                    return mMixerChannelMask;
+                }
+
                 status_t    getTimestamp_l(AudioTimestamp& timestamp);
 
                 void        addPatchTrack(const sp<PatchTrack>& track);
@@ -1015,6 +1057,9 @@
 
                 PlaybackThread::Track* getTrackById_l(audio_port_handle_t trackId);
 
+                bool hasMixer() const {
+                    return mType == MIXER || mType == DUPLICATING || mType == SPATIALIZER;
+                }
 protected:
     // updated by readOutputParameters_l()
     size_t                          mNormalFrameCount;  // normal mixer and effects
@@ -1084,6 +1129,15 @@
     // for any processing (including output processing).
     bool                            mEffectBufferValid;
 
+    // Frame size aligned buffer used as input and output to all post processing effects
+    // except the Spatializer in a SPATIALIZER thread. Non spatialized tracks are mixed into
+    // this buffer so that post processing effects can be applied.
+    void*                           mPostSpatializerBuffer = nullptr;
+
+    // Size of mPostSpatializerBuffer in bytes
+    size_t                          mPostSpatializerBufferSize;
+
     // suspend count, > 0 means suspended.  While suspended, the thread continues to pull from
     // tracks and mix, but doesn't write to HAL.  A2DP and SCO HAL implementations can't handle
     // concurrent use of both of them, so Audio Policy Service suspends one of the threads to
@@ -1101,6 +1155,9 @@
     // haptic playback.
     audio_channel_mask_t            mHapticChannelMask = AUDIO_CHANNEL_NONE;
     uint32_t                        mHapticChannelCount = 0;
+
+    audio_channel_mask_t            mMixerChannelMask = AUDIO_CHANNEL_NONE;
+
 private:
     // mMasterMute is in both PlaybackThread and in AudioFlinger.  When a
     // PlaybackThread needs to find out if master-muted, it checks it's local
@@ -1134,6 +1191,9 @@
 
     // Cache various calculated values, at threadLoop() entry and after a parameter change
     virtual     void        cacheParameters_l();
+                void        setCheckOutputStageEffects() override {
+                                mCheckOutputStageEffects.store(true);
+                            }
 
     virtual     uint32_t    correctLatency_l(uint32_t latency) const;
 
@@ -1314,6 +1374,16 @@
                 // audio patch used by the downstream software patch.
                 // Only used if ThreadBase::mIsMsdDevice is true.
                 struct audio_patch mDownStreamPatch;
+
+                std::atomic_bool mCheckOutputStageEffects{};
+
+                // A differential check on the timestamps to see if there is a change in the
+                // timestamp frame position since the last call to checkRunningTimestamp.
+                uint64_t mLastCheckedTimestampPosition = ~0LL;
+
+                bool checkRunningTimestamp();
+
+    virtual     void flushHw_l() { mLastCheckedTimestampPosition = ~0LL; }
 };
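As an aside on the new mLastCheckedTimestampPosition / checkRunningTimestamp() pair above: the implementation is not part of this header change, so the following is only a hedged, standalone sketch of the differential-check pattern the comment describes (names and exact semantics are assumptions, not the real Threads.cpp code).

    #include <cstdint>

    // Sketch: remember the last observed frame position, report whether it changed since the
    // previous check, and let a flush reset the reference (mirrors flushHw_l() above).
    struct TimestampProgressCheck {
        uint64_t lastPosition = ~0ULL;   // ~0 means "no reference yet", as in the header

        bool checkRunning(uint64_t currentPosition) {
            const bool changed = (lastPosition == ~0ULL) || (currentPosition != lastPosition);
            lastPosition = currentPosition;
            return changed;
        }

        void flush() { lastPosition = ~0ULL; }
    };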
 
 class MixerThread : public PlaybackThread {
@@ -1322,7 +1392,8 @@
                 AudioStreamOut* output,
                 audio_io_handle_t id,
                 bool systemReady,
-                type_t type = MIXER);
+                type_t type = MIXER,
+                audio_config_base_t *mixerConfig = nullptr);
     virtual             ~MixerThread();
 
     // Thread virtuals
@@ -1430,7 +1501,7 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
 
-    virtual     void        flushHw_l();
+                void        flushHw_l() override;
 
                 void        setMasterBalance(float balance) override;
 
@@ -1495,7 +1566,7 @@
     OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
                   audio_io_handle_t id, bool systemReady);
     virtual                 ~OffloadThread() {};
-    virtual     void        flushHw_l();
+                void        flushHw_l() override;
 
 protected:
     // threadLoop snippets
@@ -1512,10 +1583,6 @@
     size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
     size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
     bool        mKeepWakeLock;          // keep wake lock while waiting for write callback
-    uint64_t    mOffloadUnderrunPosition; // Current frame position for offloaded playback
-                                          // used and valid only during underrun.  ~0 if
-                                          // no underrun has occurred during playback and
-                                          // is not reset on standby.
 };
 
 class AsyncCallbackThread : public Thread {
@@ -1611,6 +1678,24 @@
     }
 };
 
+class SpatializerThread : public MixerThread {
+public:
+    SpatializerThread(const sp<AudioFlinger>& audioFlinger,
+                           AudioStreamOut* output,
+                           audio_io_handle_t id,
+                           bool systemReady,
+                           audio_config_base_t *mixerConfig);
+            ~SpatializerThread() override {}
+
+            bool hasFastMixer() const override { return false; }
+
+protected:
+            void checkOutputStageEffects() override;
+
+private:
+            sp<EffectHandle> mFinalDownMixer;
+};
+
 // record thread
 class RecordThread : public ThreadBase
 {
@@ -1721,7 +1806,7 @@
                                                status_t& status);
     virtual void        cacheParameters_l() {}
     virtual String8     getParameters(const String8& keys);
-    virtual void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+    virtual void        ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
                                         audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
     virtual status_t    createAudioPatch_l(const struct audio_patch *patch,
                                            audio_patch_handle_t *handle);
@@ -1779,6 +1864,8 @@
 
             bool        isTimestampCorrectionEnabled() const override {
                             // checks popcount for exactly one device.
+                            // Is currently disabled. Before enabling,
+                            // verify compressed record timestamps.
                             return audio_is_input_device(mTimestampCorrectedDevice)
                                     && inDeviceType() == mTimestampCorrectedDevice;
                         }
@@ -1930,7 +2017,7 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                     status_t& status);
     virtual     String8     getParameters(const String8& keys);
-    virtual     void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+    virtual     void        ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
                                             audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
                 void        readHalParameters_l();
     virtual     void        cacheParameters_l() {}
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 92f129c..b582b3a 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -23,7 +23,7 @@
 class TrackBase : public ExtendedAudioBufferProvider, public RefBase {
 
 public:
-    enum track_state {
+    enum track_state : int32_t {
         IDLE,
         FLUSHED,        // for PlaybackTracks only
         STOPPED,
@@ -107,6 +107,9 @@
 
     audio_attributes_t  attributes() const { return mAttr; }
 
+            bool canBeSpatialized() const { return mIsOut && (mAttr.flags
+                    & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) == 0; }
+
 #ifdef TEE_SINK
            void         dumpTee(int fd, const std::string &reason) const {
                                 mTee.dump(fd, reason);
@@ -271,6 +274,7 @@
 
     void releaseCblk() {
         if (mCblk != nullptr) {
+            mState.clear();
             mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
             if (mClient == 0) {
                 free(mCblk);
@@ -355,7 +359,7 @@
                                     // except for OutputTrack when it is in local memory
     size_t              mBufferSize; // size of mBuffer in bytes
     // we don't really need a lock for these
-    track_state         mState;
+    MirroredVariable<track_state>  mState;
     const audio_attributes_t mAttr;
     const uint32_t      mSampleRate;    // initial sample rate only; for tracks which
                         // support dynamic rates, the current value is in control block
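For context on the MirroredVariable<track_state> change above: the template itself is not shown in this diff, so the following is an assumption-level sketch of the mirror pattern it implies — a server-side value with an optional write-through into the shared control block via setMirror()/clear(). This is a hypothetical stand-in, not the real MirroredVariable.

    template <typename T>
    class MirroredVariableSketch {
    public:
        void setMirror(T* mirror) { mMirror = mirror; write(); }
        void clear() { mMirror = nullptr; }    // detach before the control block is destroyed
        MirroredVariableSketch& operator=(const T& value) {
            mValue = value;
            write();                           // every server-side write is reflected to the client
            return *this;
        }
        operator T() const { return mValue; }  // reads always use the local copy
    private:
        void write() const { if (mMirror != nullptr) *mMirror = mValue; }
        T mValue{};
        T* mMirror = nullptr;
    };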
diff --git a/services/audioflinger/TrackMetrics.h b/services/audioflinger/TrackMetrics.h
index 7fb69be..30d69ab 100644
--- a/services/audioflinger/TrackMetrics.h
+++ b/services/audioflinger/TrackMetrics.h
@@ -116,6 +116,21 @@
         mDeviceStartupMs.add(startupMs);
     }
 
+    void updateMinMaxVolume(int64_t durationNs, double deviceVolume) {
+        if (deviceVolume > mMaxVolume) {
+            mMaxVolume = deviceVolume;
+            mMaxVolumeDurationNs = durationNs;
+        } else if (deviceVolume == mMaxVolume) {
+            mMaxVolumeDurationNs += durationNs;
+        }
+        if (deviceVolume < mMinVolume) {
+            mMinVolume = deviceVolume;
+            mMinVolumeDurationNs = durationNs;
+        } else if (deviceVolume == mMinVolume) {
+            mMinVolumeDurationNs += durationNs;
+        }
+    }
+
     // may be called multiple times during an interval
     void logVolume(float volume) {
         const int64_t timeNs = systemTime();
@@ -123,10 +138,13 @@
         if (mStartVolumeTimeNs == 0) {
             mDeviceVolume = mVolume = volume;
             mLastVolumeChangeTimeNs = mStartVolumeTimeNs = timeNs;
+            updateMinMaxVolume(0, mVolume);
             return;
         }
+        const int64_t durationNs = timeNs - mLastVolumeChangeTimeNs;
+        updateMinMaxVolume(durationNs, mVolume);
         mDeviceVolume = (mDeviceVolume * (mLastVolumeChangeTimeNs - mStartVolumeTimeNs) +
-            mVolume * (timeNs - mLastVolumeChangeTimeNs)) / (timeNs - mStartVolumeTimeNs);
+            mVolume * durationNs) / (timeNs - mStartVolumeTimeNs);
         mVolume = volume;
         mLastVolumeChangeTimeNs = timeNs;
     }
@@ -157,7 +175,11 @@
                 .set(AMEDIAMETRICS_PROP_EVENT, eventName)
                 .set(AMEDIAMETRICS_PROP_INTERVALCOUNT, (int32_t)mIntervalCount);
             if (mIsOut) {
-                item.set(AMEDIAMETRICS_PROP_DEVICEVOLUME, mDeviceVolume);
+                item.set(AMEDIAMETRICS_PROP_DEVICEVOLUME, mDeviceVolume)
+                    .set(AMEDIAMETRICS_PROP_DEVICEMAXVOLUMEDURATIONNS, mMaxVolumeDurationNs)
+                    .set(AMEDIAMETRICS_PROP_DEVICEMAXVOLUME, mMaxVolume)
+                    .set(AMEDIAMETRICS_PROP_DEVICEMINVOLUMEDURATIONNS, mMinVolumeDurationNs)
+                    .set(AMEDIAMETRICS_PROP_DEVICEMINVOLUME, mMinVolume);
             }
             if (mDeviceLatencyMs.getN() > 0) {
                 item.set(AMEDIAMETRICS_PROP_DEVICELATENCYMS, mDeviceLatencyMs.getMean())
@@ -185,6 +207,10 @@
         mDeviceVolume = 0.f;
         mStartVolumeTimeNs = 0;
         mLastVolumeChangeTimeNs = 0;
+        mMinVolume = AMEDIAMETRICS_INITIAL_MIN_VOLUME;
+        mMaxVolume = AMEDIAMETRICS_INITIAL_MAX_VOLUME;
+        mMinVolumeDurationNs = 0;
+        mMaxVolumeDurationNs = 0;
 
         mDeviceLatencyMs.reset();
         mDeviceStartupMs.reset();
@@ -214,6 +240,12 @@
     int64_t           mStartVolumeTimeNs GUARDED_BY(mLock) = 0;
     int64_t           mLastVolumeChangeTimeNs GUARDED_BY(mLock) = 0;
 
+    // Min/Max volume
+    double            mMinVolume GUARDED_BY(mLock) = AMEDIAMETRICS_INITIAL_MIN_VOLUME;
+    double            mMaxVolume GUARDED_BY(mLock) = AMEDIAMETRICS_INITIAL_MAX_VOLUME;
+    int64_t           mMinVolumeDurationNs GUARDED_BY(mLock) = 0;
+    int64_t           mMaxVolumeDurationNs GUARDED_BY(mLock) = 0;
+
     // latency and startup for each interval.
     audio_utils::Statistics<double> mDeviceLatencyMs GUARDED_BY(mLock);
     audio_utils::Statistics<double> mDeviceStartupMs GUARDED_BY(mLock);
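The TrackMetrics additions above track min/max device volume together with the time spent at each extreme, alongside the existing time-weighted average. A minimal standalone sketch of the same bookkeeping, with simplified names, explicit timestamps instead of systemTime(), and numeric_limits in place of the AMEDIAMETRICS_INITIAL_* constants:

    #include <cstdint>
    #include <limits>

    struct VolumeStats {
        double volume = 0.;                 // last volume set
        double average = 0.;                // time-weighted average since the interval start
        double minVolume = std::numeric_limits<double>::max();
        double maxVolume = std::numeric_limits<double>::lowest();
        int64_t minDurNs = 0, maxDurNs = 0; // time spent at the min/max volume
        int64_t startNs = 0, lastNs = 0;

        void updateMinMax(int64_t durationNs, double v) {
            if (v > maxVolume) { maxVolume = v; maxDurNs = durationNs; }
            else if (v == maxVolume) { maxDurNs += durationNs; }
            if (v < minVolume) { minVolume = v; minDurNs = durationNs; }
            else if (v == minVolume) { minDurNs += durationNs; }
        }

        void logVolume(double v, int64_t nowNs) {
            if (startNs == 0) {             // first sample starts the interval
                average = volume = v;
                startNs = lastNs = nowNs;
                updateMinMax(0, v);
                return;
            }
            const int64_t durationNs = nowNs - lastNs;   // time spent at the previous volume
            updateMinMax(durationNs, volume);
            // running time-weighted average over [startNs, nowNs]
            average = (average * (lastNs - startNs) + volume * durationNs) / (nowNs - startNs);
            volume = v;
            lastNs = nowNs;
        }
    };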
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index d2a30b1..279ff3d 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -234,7 +234,11 @@
 #ifdef TEE_SINK
         mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
 #endif
-
+        // mState is mirrored for the client to read.
+        mState.setMirror(&mCblk->mState);
+        // ensure our state matches up until we consolidate the enumeration.
+        static_assert(CBLK_STATE_IDLE == IDLE);
+        static_assert(CBLK_STATE_PAUSING == PAUSING);
     }
 }
 
@@ -709,8 +713,7 @@
         thread->mFastTrackAvailMask &= ~(1 << i);
     }
 
-    mServerLatencySupported = thread->type() == ThreadBase::MIXER
-            || thread->type() == ThreadBase::DUPLICATING;
+    mServerLatencySupported = checkServerLatencySupported(format, flags);
 #ifdef TEE_SINK
     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
             + "_" + std::to_string(mId) + "_T");
@@ -933,7 +936,7 @@
     buffer->raw = buf.mRaw;
     if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
         ALOGV("%s(%d): underrun,  framesReady(%zu) < framesDesired(%zd), state: %d",
-                __func__, mId, buf.mFrameCount, desiredFrames, mState);
+                __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
         mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
     } else {
         mAudioTrackServerProxy->tallyUnderrunFrames(0);
@@ -1401,6 +1404,60 @@
             .content_type = mAttr.content_type,
             .gain = mFinalVolume,
     };
+
+    // When attributes are undefined, derive default values from stream type.
+    // See AudioAttributes.java, usageForStreamType() and Builder.setInternalLegacyStreamType()
+    if (mAttr.usage == AUDIO_USAGE_UNKNOWN) {
+        switch (mStreamType) {
+        case AUDIO_STREAM_VOICE_CALL:
+            metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+            break;
+        case AUDIO_STREAM_SYSTEM:
+            metadata.base.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_RING:
+            metadata.base.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_MUSIC:
+            metadata.base.usage = AUDIO_USAGE_MEDIA;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+            break;
+        case AUDIO_STREAM_ALARM:
+            metadata.base.usage = AUDIO_USAGE_ALARM;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_NOTIFICATION:
+            metadata.base.usage = AUDIO_USAGE_NOTIFICATION;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_DTMF:
+            metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_ACCESSIBILITY:
+            metadata.base.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+            break;
+        case AUDIO_STREAM_ASSISTANT:
+            metadata.base.usage = AUDIO_USAGE_ASSISTANT;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+            break;
+        case AUDIO_STREAM_REROUTING:
+            metadata.base.usage = AUDIO_USAGE_VIRTUAL_SOURCE;
+            // unknown content type
+            break;
+        case AUDIO_STREAM_CALL_ASSISTANT:
+            metadata.base.usage = AUDIO_USAGE_CALL_ASSISTANT;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+            break;
+        default:
+            break;
+        }
+    }
+
     metadata.channel_mask = mChannelMask,
     strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
     *backInserter++ = metadata;
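The switch above backfills usage and content type from the legacy stream type when the track's attributes carry AUDIO_USAGE_UNKNOWN. A condensed sketch of the same mapping as a standalone helper (only a few cases shown; assumes the AOSP <system/audio.h> enums):

    #include <system/audio.h>
    #include <utility>

    static std::pair<audio_usage_t, audio_content_type_t>
    defaultsForStream(audio_stream_type_t stream) {
        switch (stream) {
        case AUDIO_STREAM_VOICE_CALL:
            return {AUDIO_USAGE_VOICE_COMMUNICATION, AUDIO_CONTENT_TYPE_SPEECH};
        case AUDIO_STREAM_MUSIC:
            return {AUDIO_USAGE_MEDIA, AUDIO_CONTENT_TYPE_MUSIC};
        case AUDIO_STREAM_ALARM:
            return {AUDIO_USAGE_ALARM, AUDIO_CONTENT_TYPE_SONIFICATION};
        default:
            return {AUDIO_USAGE_UNKNOWN, AUDIO_CONTENT_TYPE_UNKNOWN};
        }
    }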
@@ -1590,7 +1647,7 @@
                                       (mState == STOPPED)))) {
         ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
               __func__, mId,
-              mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
+              (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
         event->cancel();
         return INVALID_OPERATION;
     }
@@ -2578,6 +2635,8 @@
     // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
     mKernelFrameTime.store(ft);
     if (!audio_is_linear_pcm(mFormat)) {
+        // Stream is direct, return provided timestamp with no conversion
+        mServerProxy->setTimestamp(timestamp);
         return;
     }
 
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 2e49e71..e8e478b 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -18,6 +18,7 @@
 #define ANDROID_AUDIOPOLICY_INTERFACE_H
 
 #include <media/AudioCommonTypes.h>
+#include <media/AudioContainers.h>
 #include <media/AudioDeviceTypeAddr.h>
 #include <media/AudioSystem.h>
 #include <media/AudioPolicy.h>
@@ -31,30 +32,42 @@
 
 // ----------------------------------------------------------------------------
 
-// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces
-// between the platform specific audio policy manager and Android generic audio policy manager.
-// The platform specific audio policy manager must implement methods of the AudioPolicyInterface class.
+// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication
+// interfaces between the platform specific audio policy manager and Android generic audio policy
+// manager.
+// The platform specific audio policy manager must implement methods of the AudioPolicyInterface
+// class.
 // This implementation makes use of the AudioPolicyClientInterface to control the activity and
 // configuration of audio input and output streams.
 //
 // The platform specific audio policy manager is in charge of the audio routing and volume control
 // policies for a given platform.
 // The main roles of this module are:
-//   - keep track of current system state (removable device connections, phone state, user requests...).
-//   System state changes and user actions are notified to audio policy manager with methods of the AudioPolicyInterface.
+//   - keep track of current system state (removable device connections, phone state,
+//   user requests...).
+//   System state changes and user actions are notified to the audio policy manager through
+//   methods of the AudioPolicyInterface.
 //   - process getOutput() queries received when AudioTrack objects are created: Those queries
-//   return a handler on an output that has been selected, configured and opened by the audio policy manager and that
-//   must be used by the AudioTrack when registering to the AudioFlinger with the createTrack() method.
-//   When the AudioTrack object is released, a putOutput() query is received and the audio policy manager can decide
-//   to close or reconfigure the output depending on other streams using this output and current system state.
-//   - similarly process getInput() and putInput() queries received from AudioRecord objects and configure audio inputs.
-//   - process volume control requests: the stream volume is converted from an index value (received from UI) to a float value
-//   applicable to each output as a function of platform specific settings and current output route (destination device). It
-//   also make sure that streams are not muted if not allowed (e.g. camera shutter sound in some countries).
+//   return a handle on an output that has been selected, configured and opened by the audio
+//   policy manager and that must be used by the AudioTrack when registering to the AudioFlinger
+//   with the createTrack() method.
+//   When the AudioTrack object is released, a putOutput() query is received and the audio policy
+//   manager can decide to close or reconfigure the output depending on other streams using this
+//   output and current system state.
+//   - similarly process getInput() and putInput() queries received from AudioRecord objects and
+//   configure audio inputs.
+//   - process volume control requests: the stream volume is converted from an index value
+//   (received from UI) to a float value applicable to each output as a function of platform
+//   specific settings and current output route (destination device). It also makes sure that
+//   streams are not muted if not allowed (e.g. camera shutter sound in some countries).
 //
-// The platform specific audio policy manager is provided as a shared library by platform vendors (as for libaudio.so)
-// and is linked with libaudioflinger.so
-
+// The platform specific audio policy manager is provided as a shared library by platform vendors
+// (as for libaudio.so) and is linked with libaudioflinger.so
+//
+// NOTE: by convention, the implementation of the AudioPolicyInterface in AudioPolicyManager does
+// not have to perform any nullptr check on input arguments: The caller of this API is
+// AudioPolicyService running in the same process and in charge of validating arguments received
+// from incoming binder calls before calling AudioPolicyManager.
 
 //    Audio Policy Manager Interface
 class AudioPolicyInterface
@@ -99,7 +112,7 @@
                                               audio_format_t encodedFormat) = 0;
     // retrieve a device connection status
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
-                                                                          const char *device_address) = 0;
+                                                              const char *device_address) = 0;
     // indicate a change in device configuration
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
@@ -133,9 +146,11 @@
                                         audio_port_handle_t *portId,
                                         std::vector<audio_io_handle_t> *secondaryOutputs,
                                         output_type_t *outputType) = 0;
-    // indicates to the audio policy manager that the output starts being used by corresponding stream.
+    // indicates to the audio policy manager that the output starts being used by the
+    // corresponding stream.
     virtual status_t startOutput(audio_port_handle_t portId) = 0;
-    // indicates to the audio policy manager that the output stops being used by corresponding stream.
+    // indicates to the audio policy manager that the output stops being used by the
+    // corresponding stream.
     virtual status_t stopOutput(audio_port_handle_t portId) = 0;
     // releases the output, return true if the output descriptor is reopened.
     virtual bool releaseOutput(audio_port_handle_t portId) = 0;
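A hedged usage sketch of the start/stop/release ordering documented above, as the policy service would drive it for one playback client. "apm" and "portId" are placeholders, the output-selection step and error handling are omitted, and this assumes the AOSP headers (AudioPolicyInterface.h, utils/Errors.h, system/audio.h); it is not the actual AudioPolicyService code.

    void playbackLifecycle(android::AudioPolicyInterface* apm, audio_port_handle_t portId) {
        if (apm->startOutput(portId) != android::NO_ERROR) {  // output becomes active for this client
            return;
        }
        // ... the AudioTrack plays through AudioFlinger on the selected output ...
        apm->stopOutput(portId);      // client stops, output may become inactive
        apm->releaseOutput(portId);   // client released; returns true if the output was reopened
    }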
@@ -198,7 +213,7 @@
     virtual product_strategy_t getStrategyForStream(audio_stream_type_t stream) = 0;
 
     // return the enabled output devices for the given stream type
-    virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
+    virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream) = 0;
 
     // retrieves the list of enabled output devices for the given audio attributes
     virtual status_t getDevicesForAttributes(const audio_attributes_t &attr,
@@ -285,8 +300,10 @@
 
     virtual bool     isHapticPlaybackSupported() = 0;
 
-    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
-                std::vector<audio_format_t> *formats) = 0;
+    virtual bool     isUltrasoundSupported() = 0;
+
+    virtual status_t getHwOffloadFormatsSupportedForBluetoothMedia(
+                audio_devices_t device, std::vector<audio_format_t> *formats) = 0;
 
     virtual void     setAppState(audio_port_handle_t portId, app_state_t state) = 0;
 
@@ -333,8 +350,67 @@
     virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
                                                        device_role_t role,
                                                        AudioDeviceTypeAddrVector &devices) = 0;
-};
 
+    /**
+     * Queries if some kind of spatialization will be performed if the audio playback context
+     * described by the provided arguments is present.
+     * The context is made of:
+     * - The audio attributes describing the playback use case.
+     * - The audio configuration describing the audio format, channels, sampling rate ...
+     * - The devices describing the sink audio device selected for playback.
+     * All arguments are optional and only the specified arguments are used to match against
+     * supported criteria. For instance, supplying no argument will tell if spatialization is
+     * supported or not in general.
+     * @param attr audio attributes describing the playback use case
+     * @param config audio configuration describing the audio format, channels, sampling rate...
+     * @param devices the sink audio device selected for playback
+     * @return true if spatialization is enabled for this context,
+     *        false otherwise
+     */
+    virtual bool canBeSpatialized(const audio_attributes_t *attr,
+                                  const audio_config_t *config,
+                                  const AudioDeviceTypeAddrVector &devices) const = 0;
+
+    /**
+     * Opens a specialized spatializer output if supported by the platform.
+     * If several spatializer output profiles exist, the one supporting the sink device
+     * corresponding to the provided audio attributes will be selected.
+     * Only one spatializer output stream can be opened at a time and an error is returned
+     * if one already exists.
+     * @param config audio format, channel mask and sampling rate to be used as the mixer
+     *        configuration for the spatializer mixer created.
+     * @param attr audio attributes describing the playback use case that will drive the
+     *        sink device selection
+     * @param output the IO handle of the output opened
+     * @return NO_ERROR if an output was opened, INVALID_OPERATION or BAD_VALUE otherwise
+     */
+    virtual status_t getSpatializerOutput(const audio_config_base_t *config,
+                                            const audio_attributes_t *attr,
+                                            audio_io_handle_t *output) = 0;
+
+    /**
+     * Closes a previously opened specialized spatializer output.
+     * @param output the IO handle of the output to close.
+     * @return NO_ERROR if an output was closed, INVALID_OPERATION or BAD_VALUE otherwise
+     */
+    virtual status_t releaseSpatializerOutput(audio_io_handle_t output) = 0;
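A hedged sketch tying the three spatializer entry points above together: query support, open the single spatializer mixer output, and later release it. The caller shown is a placeholder (in practice this is driven from the spatializer management code in AudioPolicyService), and the snippet assumes the surrounding AOSP headers.

    audio_io_handle_t openSpatializerIfSupported(android::AudioPolicyInterface* apm,
                                                 const audio_attributes_t* attr,
                                                 const audio_config_t* config,
                                                 const android::AudioDeviceTypeAddrVector& devices) {
        if (!apm->canBeSpatialized(attr, config, devices)) {
            return AUDIO_IO_HANDLE_NONE;                 // nothing to do for this context
        }
        audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
        audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
        if (apm->getSpatializerOutput(&mixerConfig, attr, &output) != android::NO_ERROR) {
            return AUDIO_IO_HANDLE_NONE;                 // e.g. a spatializer output already exists
        }
        return output;   // pair with apm->releaseSpatializerOutput(output) when done
    }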
+
+    /**
+     * Query how the direct playback is currently supported on the device.
+     * @param attr audio attributes describing the playback use case
+     * @param config audio configuration for the playback
+     * @return a combination of audio_direct_mode_t flags describing how direct playback is
+     *         currently supported on the device for the given attributes and configuration.
+     */
+    virtual audio_direct_mode_t getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                                         const audio_config_t *config) = 0;
+
+    // retrieves the list of available direct audio profiles for the given audio attributes
+    virtual status_t getDirectProfilesForAttributes(const audio_attributes_t* attr,
+                                                    AudioProfileVector& audioProfiles) = 0;
+};
 
 // Audio Policy client Interface
 class AudioPolicyClientInterface
@@ -353,23 +429,29 @@
     // Audio output Control functions
     //
 
-    // opens an audio output with the requested parameters. The parameter values can indicate to use the default values
-    // in case the audio policy manager has no specific requirements for the output being opened.
-    // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
-    // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
+    // opens an audio output with the requested parameters. The parameter values can indicate to
+    // use the default values in case the audio policy manager has no specific requirements for the
+    // output being opened.
+    // When the function returns, the parameter values reflect the actual values used by the audio
+    // hardware output stream.
+    // The audio policy manager can check if the proposed parameters are suitable or not and act
+    // accordingly.
     virtual status_t openOutput(audio_module_handle_t module,
                                 audio_io_handle_t *output,
-                                audio_config_t *config,
+                                audio_config_t *halConfig,
+                                audio_config_base_t *mixerConfig,
                                 const sp<DeviceDescriptorBase>& device,
                                 uint32_t *latencyMs,
                                 audio_output_flags_t flags) = 0;
-    // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
-    // a special mixer thread in the AudioFlinger.
-    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0;
+    // creates a special output that is duplicated to the two outputs passed as arguments.
+    // The duplication is performed by a special mixer thread in the AudioFlinger.
+    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+                                                  audio_io_handle_t output2) = 0;
     // closes the output stream
     virtual status_t closeOutput(audio_io_handle_t output) = 0;
-    // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
-    // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
+    // suspends the output. When an output is suspended, the corresponding audio hardware output
+    // stream is placed in standby and the AudioTracks attached to the mixer thread are still
+    // processed but the output mix is discarded.
     virtual status_t suspendOutput(audio_io_handle_t output) = 0;
     // restores a suspended output.
     virtual status_t restoreOutput(audio_io_handle_t output) = 0;
@@ -392,16 +474,21 @@
     // misc control functions
     //
 
-    // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes
+    // set a stream volume for a particular output. For the same user setting, a given stream type
+    // can have different volumes
     // for each output (destination device) it is attached to.
-    virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0) = 0;
+    virtual status_t setStreamVolume(audio_stream_type_t stream, float volume,
+                                     audio_io_handle_t output, int delayMs = 0) = 0;
 
     // invalidate a stream type, causing a reroute to an unspecified new output
     virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
 
-    // function enabling to send proprietary informations directly from audio policy manager to audio hardware interface.
-    virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0) = 0;
-    // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
+    // function allowing the audio policy manager to send proprietary information directly to the
+    // audio hardware interface.
+    virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs,
+                               int delayMs = 0) = 0;
+    // function allowing the audio policy manager to receive proprietary information directly
+    // from the audio hardware interface.
     virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
 
     // set down link audio volume.
@@ -464,7 +551,8 @@
     // These are the signatures of createAudioPolicyManager/destroyAudioPolicyManager
     // methods respectively, expected by AudioPolicyService, needs to be exposed by
     // libaudiopolicymanagercustom.
-    using CreateAudioPolicyManagerInstance = AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
+    using CreateAudioPolicyManagerInstance =
+            AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
     using DestroyAudioPolicyManagerInstance = void (*)(AudioPolicyInterface*);
 
 } // namespace android
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 736f8b2..f0636a0 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -127,6 +127,7 @@
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
         case AUDIO_DEVICE_OUT_USB_HEADSET:
         case AUDIO_DEVICE_OUT_BLE_HEADSET:
+        case AUDIO_DEVICE_OUT_BLE_BROADCAST:
             return DEVICE_CATEGORY_HEADSET;
         case AUDIO_DEVICE_OUT_HEARING_AID:
             return DEVICE_CATEGORY_HEARING_AID;
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 577f641..3d3e0cf 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -133,6 +133,7 @@
  * - AUDIO_SOURCE_FM_TUNER
  * - AUDIO_SOURCE_VOICE_RECOGNITION
  * - AUDIO_SOURCE_HOTWORD
+ * - AUDIO_SOURCE_ULTRASOUND
  *
  * @return the corresponding input source priority or 0 if priority is irrelevant for this source.
  *      This happens when the specified source cannot share a given input stream (e.g remote submix)
@@ -142,22 +143,24 @@
 {
     switch (inputSource) {
     case AUDIO_SOURCE_VOICE_COMMUNICATION:
-        return 9;
+        return 10;
     case AUDIO_SOURCE_CAMCORDER:
-        return 8;
+        return 9;
     case AUDIO_SOURCE_VOICE_PERFORMANCE:
-        return 7;
+        return 8;
     case AUDIO_SOURCE_UNPROCESSED:
-        return 6;
+        return 7;
     case AUDIO_SOURCE_MIC:
-        return 5;
+        return 6;
     case AUDIO_SOURCE_ECHO_REFERENCE:
-        return 4;
+        return 5;
     case AUDIO_SOURCE_FM_TUNER:
-        return 3;
+        return 4;
     case AUDIO_SOURCE_VOICE_RECOGNITION:
-        return 2;
+        return 3;
     case AUDIO_SOURCE_HOTWORD:
+        return 2;
+    case AUDIO_SOURCE_ULTRASOUND:
         return 1;
     default:
         break;
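A small usage sketch for the reordered priorities above (assuming the helper is the source_priority() function this hunk modifies): when several capture clients share one input, the source with the highest non-zero priority wins, and AUDIO_SOURCE_ULTRASOUND now ranks just below AUDIO_SOURCE_HOTWORD.

    #include <system/audio.h>
    #include <vector>

    static audio_source_t topPrioritySource(const std::vector<audio_source_t>& activeSources) {
        audio_source_t best = AUDIO_SOURCE_DEFAULT;
        int bestPriority = 0;
        for (const audio_source_t s : activeSources) {
            const int p = source_priority(s);  // 10 = VOICE_COMMUNICATION ... 1 = ULTRASOUND, 0 = n/a
            if (p > bestPriority) {
                bestPriority = p;
                best = s;
            }
        }
        return best;  // AUDIO_SOURCE_DEFAULT if no active source carries a sharing priority
    }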
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index 227c2d8..1f23ae3 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -30,6 +30,7 @@
     ],
     shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "libcutils",
         "libhidlbase",
         "liblog",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index a40f6aa..856ae66 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -54,7 +54,7 @@
     DeviceVector supportedDevices() const  {
         return mProfile != nullptr ? mProfile->getSupportedDevices() :  DeviceVector(); }
 
-    void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo) const override;
 
     audio_io_handle_t   mIoHandle = AUDIO_IO_HANDLE_NONE; // input handle
     wp<AudioPolicyMix>  mPolicyMix;                   // non NULL when used by a dynamic policy
@@ -93,8 +93,10 @@
     audio_patch_handle_t getPatchHandle() const override;
     void setPatchHandle(audio_patch_handle_t handle) override;
     bool isMmap() override {
-        if (getPolicyAudioPort() != nullptr) {
-            return getPolicyAudioPort()->isMmap();
+        if (const auto policyPort = getPolicyAudioPort(); policyPort != nullptr) {
+            if (const auto port = policyPort->asAudioPort(); port != nullptr) {
+                return port->isMmap();
+            }
         }
         return false;
     }
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 1f9b535..69082ac 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -149,7 +149,7 @@
                           AudioPolicyClientInterface *clientInterface);
     virtual ~AudioOutputDescriptor() {}
 
-    void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
     void        log(const char* indent);
 
     virtual DeviceVector devices() const { return mDevices; }
@@ -270,8 +270,10 @@
     audio_patch_handle_t getPatchHandle() const override;
     void setPatchHandle(audio_patch_handle_t handle) override;
     bool isMmap() override {
-        if (getPolicyAudioPort() != nullptr) {
-            return getPolicyAudioPort()->isMmap();
+        if (const auto policyPort = getPolicyAudioPort(); policyPort != nullptr) {
+            if (const auto port = policyPort->asAudioPort(); port != nullptr) {
+                return port->isMmap();
+            }
         }
         return false;
     }
@@ -307,6 +309,8 @@
     DeviceVector mDevices; /**< current devices this output is routed to */
     wp<AudioPolicyMix> mPolicyMix;  // non NULL when used by a dynamic policy
 
+    virtual uint32_t getRecommendedMuteDurationMs() const { return 0; }
+
 protected:
     const sp<PolicyAudioPort> mPolicyAudioPort;
     AudioPolicyClientInterface * const mClientInterface;
@@ -332,7 +336,7 @@
                             AudioPolicyClientInterface *clientInterface);
     virtual ~SwAudioOutputDescriptor() {}
 
-            void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
     virtual DeviceVector devices() const;
     void setDevices(const DeviceVector &devices) { mDevices = devices; }
     bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
@@ -362,7 +366,8 @@
                            const struct audio_port_config *srcConfig = NULL) const;
     virtual void toAudioPort(struct audio_port_v7 *port) const;
 
-        status_t open(const audio_config_t *config,
+        status_t open(const audio_config_t *halConfig,
+                      const audio_config_base_t *mixerConfig,
                       const DeviceVector &devices,
                       audio_stream_type_t stream,
                       audio_output_flags_t flags,
@@ -414,6 +419,8 @@
      */
     DeviceVector filterSupportedDevices(const DeviceVector &devices) const;
 
+    uint32_t getRecommendedMuteDurationMs() const override;
+
     const sp<IOProfile> mProfile;          // I/O profile this output derives from
     audio_io_handle_t mIoHandle;           // output handle
     uint32_t mLatency;                  //
@@ -423,6 +430,7 @@
     uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
     audio_session_t mDirectClientSession; // session id of the direct output client
     bool mPendingReopenToQueryProfiles = false;
+    audio_channel_mask_t mMixerChannelMask = AUDIO_CHANNEL_NONE;
 };
 
 // Audio output driven by an input device directly.
@@ -433,7 +441,7 @@
                             AudioPolicyClientInterface *clientInterface);
     virtual ~HwAudioOutputDescriptor() {}
 
-            void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo) const override;
 
     virtual bool setVolume(float volumeDb,
                            VolumeSource volumeSource, const StreamTypeVector &streams,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
index a5de655..955b0cf 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
@@ -41,7 +41,7 @@
 
     void setUid(uid_t uid) { mUid = uid; }
 
-    void dump(String8 *dst, int spaces, int index) const;
+    void dump(String8 *dst, int spaces) const;
 
     struct audio_patch mPatch;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 74b3405..dc2403c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -16,19 +16,21 @@
 
 #pragma once
 
-#include <vector>
-#include <map>
-#include <unistd.h>
 #include <sys/types.h>
+#include <unistd.h>
 
-#include <system/audio.h>
+#include <map>
+#include <vector>
+
+#include <android-base/stringprintf.h>
 #include <audiomanager/AudioManager.h>
 #include <media/AudioProductStrategy.h>
+#include <policy.h>
+#include <system/audio.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
 #include <utils/String8.h>
-#include <policy.h>
 #include <Volume.h>
 #include "AudioPatch.h"
 #include "EffectDescriptor.h"
@@ -52,7 +54,7 @@
         mPreferredDeviceForExclusiveUse(isPreferredDeviceForExclusiveUse){}
     ~ClientDescriptor() override = default;
 
-    virtual void dump(String8 *dst, int spaces, int index) const;
+    virtual void dump(String8 *dst, int spaces) const;
     virtual std::string toShortString() const;
 
     audio_port_handle_t portId() const { return mPortId; }
@@ -100,7 +102,7 @@
     ~TrackClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
     std::string toShortString() const override;
 
     audio_output_flags_t flags() const { return mFlags; }
@@ -168,7 +170,7 @@
     ~RecordClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
 
     audio_unique_id_t riid() const { return mRIId; }
     audio_source_t source() const { return mSource; }
@@ -219,7 +221,7 @@
     void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
 
  private:
     audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -269,10 +271,13 @@
     size_t getClientCount() const {
         return mClients.size();
     }
-    virtual void dump(String8 *dst) const {
+    virtual void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const {
+        (void)extraInfo;
         size_t index = 0;
         for (const auto& client: getClientIterable()) {
-            client->dump(dst, 2, index++);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", ++index);
+            dst->appendFormat("%s", prefix.c_str());
+            client->dump(dst, prefix.size());
         }
     }
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 20b4044..4adc920 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -43,7 +43,7 @@
     DeviceDescriptor(const AudioDeviceTypeAddr &deviceTypeAddr, const std::string &tagName = "",
             const FormatVector &encodedFormats = FormatVector{});
 
-    virtual ~DeviceDescriptor() {}
+    virtual ~DeviceDescriptor() = default;
 
     virtual void addAudioProfile(const sp<AudioProfile> &profile) {
         addAudioProfileAndSort(mProfiles, profile);
@@ -51,8 +51,6 @@
 
     virtual const std::string getTagName() const { return mTagName; }
 
-    const FormatVector& encodedFormats() const { return mEncodedFormats; }
-
     audio_format_t getEncodedFormat() { return mCurrentEncodedFormat; }
 
     void setEncodedFormat(audio_format_t format) {
@@ -63,8 +61,6 @@
 
     bool hasCurrentEncodedFormat() const;
 
-    bool supportsFormat(audio_format_t format);
-
     void setDynamic() { mIsDynamic = true; }
     bool isDynamic() const { return mIsDynamic; }
 
@@ -95,7 +91,7 @@
 
     void setEncapsulationInfoFromHal(AudioPolicyClientInterface *clientInterface);
 
-    void dump(String8 *dst, int spaces, int index, bool verbose = true) const;
+    void dump(String8 *dst, int spaces, bool verbose = true) const;
 
 private:
     template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
@@ -106,7 +102,6 @@
     }
 
     std::string mTagName; // Unique human readable identifier for a device port found in conf file.
-    FormatVector        mEncodedFormats;
     audio_format_t      mCurrentEncodedFormat;
     bool                mIsDynamic = false;
     const std::string   mDeclaredAddress; // Original device address
@@ -168,6 +163,10 @@
     DeviceVector getDevicesFromDeviceTypeAddrVec(
             const AudioDeviceTypeAddrVector& deviceTypeAddrVector) const;
 
+    // Return an AudioDeviceTypeAddrVector containing the AudioDeviceTypeAddr of each device
+    // descriptor in this vector
+    AudioDeviceTypeAddrVector toTypeAddrVector() const;
+
     // If there are devices with the given type and the devices to add is not empty,
     // remove all the devices with the given type and add all the devices to add.
     void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 9ba745a..436fcc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -80,6 +80,7 @@
 
     sp<DeviceDescriptor> getRouteSinkDevice(const sp<AudioRoute> &route) const;
     DeviceVector getRouteSourceDevices(const sp<AudioRoute> &route) const;
+    const AudioRouteVector& getRoutes() const { return mRoutes; }
     void setRoutes(const AudioRouteVector &routes);
 
     status_t addOutputProfile(const sp<IOProfile> &profile);
@@ -114,7 +115,7 @@
                        const sp<PolicyAudioPort> &dstPort) const;
 
     // TODO remove from here (split serialization)
-    void dump(String8 *dst) const;
+    void dump(String8 *dst, int spaces) const;
 
 private:
     void refreshSupportedDevices();
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index a74cefa..90b812d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -37,9 +37,7 @@
 public:
     IOProfile(const std::string &name, audio_port_role_t role)
         : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
-          maxOpenCount(1),
           curOpenCount(0),
-          maxActiveCount(1),
           curActiveCount(0) {}
 
     virtual ~IOProfile() = default;
@@ -59,11 +57,12 @@
     // Once capture clients are tracked individually and not per session this can be removed
     // MMAP no IRQ input streams do not have the default limitation of one active client
     // max as they can be used in shared mode by the same application.
+    // NOTE: Please consider moving to AudioPort when addressing the FIXME
     // NOTE: this works for explicit values set in audio_policy_configuration.xml because
     // flags are parsed before maxActiveCount by the serializer.
     void setFlags(uint32_t flags) override
     {
-        PolicyAudioPort::setFlags(flags);
+        AudioPort::setFlags(flags);
         if (getRole() == AUDIO_PORT_ROLE_SINK && (flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
             maxActiveCount = 0;
         }
@@ -98,7 +97,7 @@
                              uint32_t flags,
                              bool exactMatchRequiredForInputFlags = false) const;
 
-    void dump(String8 *dst) const;
+    void dump(String8 *dst, int spaces) const;
     void log();
 
     bool hasSupportedDevices() const { return !mSupportedDevices.isEmpty(); }
@@ -194,16 +193,8 @@
         return false;
     }
 
-    // Maximum number of input or output streams that can be simultaneously opened for this profile.
-    // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for output
-    // profiles and 0 for input profiles
-    uint32_t     maxOpenCount;
     // Number of streams currently opened for this profile.
     uint32_t     curOpenCount;
-    // Maximum number of input or output streams that can be simultaneously active for this profile.
-    // By convention 0 means no limit. To respect legacy behavior, initialized to 0 for output
-    // profiles and 1 for input profiles
-    uint32_t     maxActiveCount;
     // Number of streams currently active for this profile. This is not the number of active clients
     // (AudioTrack or AudioRecord) but the number of active HAL streams.
     uint32_t     curActiveCount;
diff --git a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
index ab33b38..acf787b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
@@ -36,7 +36,7 @@
 class PolicyAudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
 {
 public:
-    PolicyAudioPort() : mFlags(AUDIO_OUTPUT_FLAG_NONE) {}
+    PolicyAudioPort() = default;
 
     virtual ~PolicyAudioPort() = default;
 
@@ -49,19 +49,6 @@
 
     virtual sp<AudioPort> asAudioPort() const = 0;
 
-    virtual void setFlags(uint32_t flags)
-    {
-        //force direct flag if offload flag is set: offloading implies a direct output stream
-        // and all common behaviors are driven by checking only the direct flag
-        // this should normally be set appropriately in the policy configuration file
-        if (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE &&
-                (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
-            flags |= AUDIO_OUTPUT_FLAG_DIRECT;
-        }
-        mFlags = flags;
-    }
-    uint32_t getFlags() const { return mFlags; }
-
     virtual void attach(const sp<HwModule>& module);
     virtual void detach();
     bool isAttached() { return mModule != 0; }
@@ -105,22 +92,6 @@
     const char *getModuleName() const;
     sp<HwModule> getModule() const { return mModule; }
 
-    inline bool isDirectOutput() const
-    {
-        return (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX) &&
-                (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE) &&
-                (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD));
-    }
-
-    inline bool isMmap() const
-    {
-        return (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX)
-                && (((asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE) &&
-                        ((mFlags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0))
-                    || ((asAudioPort()->getRole() == AUDIO_PORT_ROLE_SINK) &&
-                        ((mFlags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0)));
-    }
-
     void addRoute(const sp<AudioRoute> &route) { mRoutes.add(route); }
     const AudioRouteVector &getRoutes() const { return mRoutes; }
 
@@ -129,7 +100,6 @@
                          const ChannelMaskSet &channelMasks) const;
     void pickSamplingRate(uint32_t &rate, const SampleRateSet &samplingRates) const;
 
-    uint32_t mFlags; // attribute flags mask (e.g primary output, direct output...).
     sp<HwModule> mModule;     // audio HW module exposing this I/O stream
     AudioRouteVector mRoutes; // Routes involving this port
 };
@@ -141,27 +111,18 @@
 
     virtual sp<PolicyAudioPort> getPolicyAudioPort() const = 0;
 
-    status_t validationBeforeApplyConfig(const struct audio_port_config *config) const;
-
-    void applyPolicyAudioPortConfig(const struct audio_port_config *config) {
-        if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
-            mFlags = config->flags;
-        }
+    status_t validationBeforeApplyConfig(const struct audio_port_config *config) const {
+        sp<PolicyAudioPort> policyAudioPort = getPolicyAudioPort();
+        return policyAudioPort ? policyAudioPort->checkExactAudioProfile(config) : NO_INIT;
     }
 
-    void toPolicyAudioPortConfig(
-            struct audio_port_config *dstConfig,
-            const struct audio_port_config *srcConfig = NULL) const;
-
-
-    virtual bool hasSameHwModuleAs(const sp<PolicyAudioPortConfig>& other) const {
+    bool hasSameHwModuleAs(const sp<PolicyAudioPortConfig>& other) const {
         return (other.get() != nullptr) && (other->getPolicyAudioPort().get() != nullptr) &&
                 (getPolicyAudioPort().get() != nullptr) &&
                 (other->getPolicyAudioPort()->getModuleHandle() ==
                         getPolicyAudioPort()->getModuleHandle());
     }
 
-    union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
 };
 
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index cd10010..580938e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioCollections"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include "AudioCollections.h"
 #include "AudioRoute.h"
 #include "HwModule.h"
@@ -40,10 +42,11 @@
     if (audioRouteVector.isEmpty()) {
         return;
     }
-    dst->appendFormat("\n%*sAudio Routes (%zu):\n", spaces, "", audioRouteVector.size());
+    dst->appendFormat("%*s- Audio Routes (%zu):\n", spaces - 2, "", audioRouteVector.size());
     for (size_t i = 0; i < audioRouteVector.size(); i++) {
-        dst->appendFormat("%*s- Route %zu:\n", spaces, "", i + 1);
-        audioRouteVector.itemAt(i)->dump(dst, 4);
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+        dst->append(prefix.c_str());
+        audioRouteVector.itemAt(i)->dump(dst, prefix.size());
     }
 }
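
For reference, a minimal standalone sketch of the numbered-prefix dump pattern that the refactored dump() methods adopt throughout this change: each entry prints "N. " and the child dump indents by the printed prefix length so nested lines align under their parent. All types, names, and values below are illustrative placeholders, not part of the patch.

// Standalone illustration of the "numbered prefix" dump pattern used above.
// Child lines are indented by the printed prefix length so nested dumps align
// under their parent entry. Names below are illustrative only.
#include <cstdio>
#include <string>
#include <vector>

struct Route {
    std::string type;
    std::string sink;
    void dump(std::string* dst, int spaces) const {
        dst->append(type + "; Sink: \"" + sink + "\"\n");
        // Further fields would be printed with %*s-style padding of `spaces`.
        (void)spaces;
    }
};

static void dumpRoutes(const std::vector<Route>& routes, std::string* dst, int spaces) {
    char buf[64];
    snprintf(buf, sizeof(buf), "%*s- Audio Routes (%zu):\n", spaces - 2, "", routes.size());
    dst->append(buf);
    for (size_t i = 0; i < routes.size(); ++i) {
        snprintf(buf, sizeof(buf), "%*s %zu. ", spaces, "", i + 1);
        const std::string prefix = buf;
        dst->append(prefix);
        routes[i].dump(dst, static_cast<int>(prefix.size()));
    }
}

int main() {
    std::string out;
    dumpRoutes({{"Mix", "Speaker"}, {"Mux", "BT A2DP Out"}}, &out, 4);
    printf("%s", out.c_str());
    return 0;
}
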
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 7016a08..966b8cb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioInputDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include <audiomanager/AudioManager.h>
 #include <media/AudioPolicy.h>
 #include <policy.h>
@@ -62,7 +64,6 @@
     toAudioPortConfig(&localBackupConfig);
     if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
         AudioPortConfig::applyAudioPortConfig(config, backupConfig);
-        applyPolicyAudioPortConfig(config);
     }
 
     if (backupConfig != NULL) {
@@ -83,7 +84,6 @@
     }
 
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-    toPolicyAudioPortConfig(dstConfig, srcConfig);
 
     dstConfig->role = AUDIO_PORT_ROLE_SINK;
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
@@ -510,17 +510,20 @@
     }
 }
 
-void AudioInputDescriptor::dump(String8 *dst) const
+void AudioInputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" ID: %d\n", getId());
-    dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
-    dst->appendFormat(" Format: %d\n", mFormat);
-    dst->appendFormat(" Channels: %08x\n", mChannelMask);
-    dst->appendFormat(" Devices %s\n", mDevice->toString(true /*includeSensitiveInfo*/).c_str());
-    mEnabledEffects.dump(dst, 1 /*spaces*/, false /*verbose*/);
-    dst->append(" AudioRecord Clients:\n");
-    ClientMapHandler<RecordClientDescriptor>::dump(dst);
-    dst->append("\n");
+    dst->appendFormat("Port ID: %d%s%s\n",
+            getId(), extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mFormat), mSamplingRate, mChannelMask);
+    dst->appendFormat("%*sDevices: %s\n", spaces, "",
+            mDevice->toString(true /*includeSensitiveInfo*/).c_str());
+    mEnabledEffects.dump(dst, spaces /*spaces*/, false /*verbose*/);
+    if (getClientCount() != 0) {
+        dst->appendFormat("%*sAudioRecord Clients (%zu):\n", spaces, "", getClientCount());
+        ClientMapHandler<RecordClientDescriptor>::dump(dst, spaces);
+        dst->append("\n");
+    }
 }
 
 bool AudioInputCollection::isSourceActive(audio_source_t source) const
@@ -608,10 +611,12 @@
 
 void AudioInputCollection::dump(String8 *dst) const
 {
-    dst->append("\nInputs dump:\n");
+    dst->appendFormat("\n Inputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Input %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 6b08f7c..663c80a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioOutputDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include <AudioPolicyInterface.h>
 #include "AudioOutputDescriptor.h"
 #include "AudioPolicyMix.h"
@@ -188,7 +190,6 @@
     toAudioPortConfig(&localBackupConfig);
     if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
         AudioPortConfig::applyAudioPortConfig(config, backupConfig);
-        applyPolicyAudioPortConfig(config);
     }
 
     if (backupConfig != NULL) {
@@ -207,7 +208,6 @@
         dstConfig->config_mask |= srcConfig->config_mask;
     }
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-    toPolicyAudioPortConfig(dstConfig, srcConfig);
 
     dstConfig->role = AUDIO_PORT_ROLE_SOURCE;
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
@@ -245,32 +245,45 @@
         return client->volumeSource() != volumeSourceToIgnore; }) != end(mActiveClients);
 }
 
-void AudioOutputDescriptor::dump(String8 *dst) const
+void AudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" ID: %d\n", mId);
-    dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
-    dst->appendFormat(" Format: %08x\n", mFormat);
-    dst->appendFormat(" Channels: %08x\n", mChannelMask);
-    dst->appendFormat(" Devices: %s\n", devices().toString(true /*includeSensitiveInfo*/).c_str());
-    dst->appendFormat(" Global active count: %u\n", mGlobalActiveCount);
-    for (const auto &iter : mRoutingActivities) {
-        dst->appendFormat(" Product Strategy id: %d", iter.first);
-        iter.second.dump(dst, 4);
+    dst->appendFormat("Port ID: %d%s%s\n",
+            mId, extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mFormat), mSamplingRate, mChannelMask);
+    dst->appendFormat("%*sDevices: %s\n", spaces, "",
+            devices().toString(true /*includeSensitiveInfo*/).c_str());
+    dst->appendFormat("%*sGlobal active count: %u\n", spaces, "", mGlobalActiveCount);
+    if (!mRoutingActivities.empty()) {
+        dst->appendFormat("%*s- Product Strategies (%zu):\n", spaces - 2, "",
+                mRoutingActivities.size());
+        for (const auto &iter : mRoutingActivities) {
+            dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
+            iter.second.dump(dst, 0);
+        }
     }
-    for (const auto &iter : mVolumeActivities) {
-        dst->appendFormat(" Volume Activities id: %d", iter.first);
-        iter.second.dump(dst, 4);
+    if (!mVolumeActivities.empty()) {
+        dst->appendFormat("%*s- Volume Activities (%zu):\n", spaces - 2, "",
+                mVolumeActivities.size());
+        for (const auto &iter : mVolumeActivities) {
+            dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
+            iter.second.dump(dst, 0);
+        }
     }
-    dst->append(" AudioTrack Clients:\n");
-    ClientMapHandler<TrackClientDescriptor>::dump(dst);
-    dst->append("\n");
+    if (getClientCount() != 0) {
+        dst->appendFormat("%*s- AudioTrack clients (%zu):\n", spaces - 2, "", getClientCount());
+        ClientMapHandler<TrackClientDescriptor>::dump(dst, spaces);
+    }
     if (!mActiveClients.empty()) {
-        dst->append(" AudioTrack active (stream) clients:\n");
+        dst->appendFormat("%*s- AudioTrack active (stream) clients (%zu):\n", spaces - 2, "",
+                mActiveClients.size());
         size_t index = 0;
         for (const auto& client : mActiveClients) {
-            client->dump(dst, 2, index++);
+            const std::string prefix = base::StringPrintf(
+                    "%*sid %zu: ", spaces + 1, "", ++index);
+            dst->appendFormat("%s", prefix.c_str());
+            client->dump(dst, prefix.size());
         }
-        dst->append(" \n");
     }
 }
 
@@ -294,11 +307,18 @@
     }
 }
 
-void SwAudioOutputDescriptor::dump(String8 *dst) const
+void SwAudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" Latency: %d\n", mLatency);
-    dst->appendFormat(" Flags %08x\n", mFlags);
-    AudioOutputDescriptor::dump(dst);
+    String8 allExtraInfo;
+    if (extraInfo != nullptr) {
+        allExtraInfo.appendFormat("%s; ", extraInfo);
+    }
+    std::string flagsLiteral = toString(mFlags);
+    allExtraInfo.appendFormat("Latency: %d; 0x%04x", mLatency, mFlags);
+    if (!flagsLiteral.empty()) {
+        allExtraInfo.appendFormat(" (%s)", flagsLiteral.c_str());
+    }
+    AudioOutputDescriptor::dump(dst, spaces, allExtraInfo.c_str());
 }
 
 DeviceVector SwAudioOutputDescriptor::devices() const
@@ -491,7 +511,8 @@
     return true;
 }
 
-status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+status_t SwAudioOutputDescriptor::open(const audio_config_t *halConfig,
+                                       const audio_config_base_t *mixerConfig,
                                        const DeviceVector &devices,
                                        audio_stream_type_t stream,
                                        audio_output_flags_t flags,
@@ -504,42 +525,60 @@
                         "with the requested devices, all device types: %s",
                         __func__, dumpDeviceTypes(devices.types()).c_str());
 
-    audio_config_t lConfig;
-    if (config == nullptr) {
-        lConfig = AUDIO_CONFIG_INITIALIZER;
-        lConfig.sample_rate = mSamplingRate;
-        lConfig.channel_mask = mChannelMask;
-        lConfig.format = mFormat;
+    audio_config_t lHalConfig;
+    if (halConfig == nullptr) {
+        lHalConfig = AUDIO_CONFIG_INITIALIZER;
+        lHalConfig.sample_rate = mSamplingRate;
+        lHalConfig.channel_mask = mChannelMask;
+        lHalConfig.format = mFormat;
     } else {
-        lConfig = *config;
+        lHalConfig = *halConfig;
     }
 
     // if the selected profile is offloaded and no offload info was specified,
     // create a default one
     if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
-            lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+            lHalConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
-        lConfig.offload_info = AUDIO_INFO_INITIALIZER;
-        lConfig.offload_info.sample_rate = lConfig.sample_rate;
-        lConfig.offload_info.channel_mask = lConfig.channel_mask;
-        lConfig.offload_info.format = lConfig.format;
-        lConfig.offload_info.stream_type = stream;
-        lConfig.offload_info.duration_us = -1;
-        lConfig.offload_info.has_video = true; // conservative
-        lConfig.offload_info.is_streaming = true; // likely
-        lConfig.offload_info.encapsulation_mode = lConfig.offload_info.encapsulation_mode;
-        lConfig.offload_info.content_id = lConfig.offload_info.content_id;
-        lConfig.offload_info.sync_id = lConfig.offload_info.sync_id;
+        lHalConfig.offload_info = AUDIO_INFO_INITIALIZER;
+        lHalConfig.offload_info.sample_rate = lHalConfig.sample_rate;
+        lHalConfig.offload_info.channel_mask = lHalConfig.channel_mask;
+        lHalConfig.offload_info.format = lHalConfig.format;
+        lHalConfig.offload_info.stream_type = stream;
+        lHalConfig.offload_info.duration_us = -1;
+        lHalConfig.offload_info.has_video = true; // conservative
+        lHalConfig.offload_info.is_streaming = true; // likely
+        lHalConfig.offload_info.encapsulation_mode = lHalConfig.offload_info.encapsulation_mode;
+        lHalConfig.offload_info.content_id = lHalConfig.offload_info.content_id;
+        lHalConfig.offload_info.sync_id = lHalConfig.offload_info.sync_id;
+    }
+
+    audio_config_base_t lMixerConfig;
+    if (mixerConfig == nullptr) {
+        lMixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+        lMixerConfig.sample_rate = lHalConfig.sample_rate;
+        lMixerConfig.channel_mask = lHalConfig.channel_mask;
+        lMixerConfig.format = lHalConfig.format;
+    } else {
+        lMixerConfig = *mixerConfig;
     }
 
     mFlags = (audio_output_flags_t)(mFlags | flags);
 
+    // If no mixer config is specified for a spatializer output, default to 5.1 for proper
+    // configuration of the final downmixer or spatializer
+    if ((mFlags & AUDIO_OUTPUT_FLAG_SPATIALIZER) != 0
+            && mixerConfig == nullptr) {
+        lMixerConfig.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
+    }
+
     ALOGV("opening output for device %s profile %p name %s",
           mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());
 
     status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
                                                    output,
-                                                   &lConfig,
+                                                   &lHalConfig,
+                                                   &lMixerConfig,
                                                    device,
                                                    &mLatency,
                                                    mFlags);
@@ -550,9 +589,10 @@
                             "selected device %s for opening",
                             __FUNCTION__, *output, devices.toString().c_str(),
                             device->toString().c_str());
-        mSamplingRate = lConfig.sample_rate;
-        mChannelMask = lConfig.channel_mask;
-        mFormat = lConfig.format;
+        mSamplingRate = lHalConfig.sample_rate;
+        mChannelMask = lHalConfig.channel_mask;
+        mFormat = lHalConfig.format;
+        mMixerChannelMask = lMixerConfig.channel_mask;
         mId = PolicyAudioPort::getNextUniqueId();
         mIoHandle = *output;
         mProfile->curOpenCount++;
@@ -651,6 +691,15 @@
     return NO_ERROR;
 }
 
+uint32_t SwAudioOutputDescriptor::getRecommendedMuteDurationMs() const
+{
+    if (isDuplicated()) {
+        return std::max(mOutput1->getRecommendedMuteDurationMs(),
+                mOutput2->getRecommendedMuteDurationMs());
+    }
+    return mProfile->recommendedMuteDurationMs;
+}
+
 // HwAudioOutputDescriptor implementation
 HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
@@ -659,11 +708,11 @@
 {
 }
 
-void HwAudioOutputDescriptor::dump(String8 *dst) const
+void HwAudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    AudioOutputDescriptor::dump(dst);
-    dst->append("Source:\n");
-    mSource->dump(dst, 0, 0);
+    AudioOutputDescriptor::dump(dst, spaces, extraInfo);
+    dst->appendFormat("%*sSource:\n", spaces, "");
+    mSource->dump(dst, spaces);
 }
 
 void HwAudioOutputDescriptor::toAudioPortConfig(
@@ -836,10 +885,12 @@
 
 void SwAudioOutputCollection::dump(String8 *dst) const
 {
-    dst->append("\nOutputs dump:\n");
+    dst->appendFormat("\n Outputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Output %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
 
@@ -858,10 +909,12 @@
 
 void HwAudioOutputCollection::dump(String8 *dst) const
 {
-    dst->append("\nOutputs dump:\n");
+    dst->appendFormat("\n Outputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Output %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
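
A simplified, self-contained model of the new halConfig/mixerConfig split in SwAudioOutputDescriptor::open(): when no mixer config is supplied it is derived from the HAL config, except that spatializer outputs default the mixer channel mask to 5.1 so the final downmixer or spatializer gets a multichannel bed. The constants and types below are placeholders, not the real audio_output_flags_t or channel mask values.

// Minimal sketch (not the patched method itself) of the mixer-config defaulting rule.
#include <cstdint>
#include <cstdio>

struct MixerConfig { uint32_t sample_rate; uint32_t channel_mask; int format; };

constexpr uint32_t kChannelOutStereo = 0x3;     // placeholder, not the real mask
constexpr uint32_t kChannelOut5Point1 = 0x3f;   // placeholder, not the real mask
constexpr uint32_t kFlagSpatializer = 1u << 14; // placeholder flag bit

MixerConfig resolveMixerConfig(const MixerConfig* requested, const MixerConfig& hal,
                               uint32_t outputFlags) {
    // A missing mixer config is derived from the HAL config...
    MixerConfig result = requested != nullptr ? *requested : hal;
    // ...except that spatializer outputs default to 5.1.
    if (requested == nullptr && (outputFlags & kFlagSpatializer) != 0) {
        result.channel_mask = kChannelOut5Point1;
    }
    return result;
}

int main() {
    MixerConfig hal{48000, kChannelOutStereo, 1};
    MixerConfig m = resolveMixerConfig(nullptr, hal, kFlagSpatializer);
    printf("mixer channel mask: 0x%x\n", m.channel_mask);
    return 0;
}
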
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index d79110a..4f03db9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -20,7 +20,9 @@
 #include "AudioPatch.h"
 #include "TypeConverter.h"
 
+#include <android-base/stringprintf.h>
 #include <log/log.h>
+#include <media/AudioDeviceTypeAddr.h>
 #include <utils/String8.h>
 
 namespace android {
@@ -37,20 +39,21 @@
 {
     for (int i = 0; i < count; ++i) {
         const audio_port_config &cfg = cfgs[i];
-        dst->appendFormat("%*s  [%s %d] ", spaces, "", prefix, i + 1);
+        dst->appendFormat("%*s[%s %d] ", spaces, "", prefix, i + 1);
         if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
-            dst->appendFormat("Device ID %d %s", cfg.id, toString(cfg.ext.device.type).c_str());
+            AudioDeviceTypeAddr device(cfg.ext.device.type, cfg.ext.device.address);
+            dst->appendFormat("Device Port ID: %d; {%s}",
+                    cfg.id, device.toString(true /*includeSensitiveInfo*/).c_str());
         } else {
-            dst->appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
+            dst->appendFormat("Mix Port ID: %d; I/O handle: %d;", cfg.id, cfg.ext.mix.handle);
         }
         dst->append("\n");
     }
 }
 
-void AudioPatch::dump(String8 *dst, int spaces, int index) const
+void AudioPatch::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sPatch %d: owner uid %4d, handle %2d, af handle %2d\n",
-            spaces, "", index + 1, mUid, mHandle, mAfPatchHandle);
+    dst->appendFormat("owner uid %4d; handle %2d; af handle %2d\n", mUid, mHandle, mAfPatchHandle);
     dumpPatchEndpoints(dst, spaces, "src ", mPatch.num_sources, mPatch.sources);
     dumpPatchEndpoints(dst, spaces, "sink", mPatch.num_sinks, mPatch.sinks);
 }
@@ -135,9 +138,11 @@
 
 void AudioPatchCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio Patches:\n");
+    dst->appendFormat("\n Audio Patches (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->dump(dst, 2, i);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index b209a88..546f56b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -675,7 +675,7 @@
 
 void AudioPolicyMixCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio Policy Mix:\n");
+    dst->append("\n Audio Policy Mix:\n");
     for (size_t i = 0; i < size(); i++) {
         itemAt(i)->dump(dst, 2, i);
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 866417e..53cc473 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -25,15 +25,16 @@
 
 void AudioRoute::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*s- Type: %s\n", spaces, "", mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix");
-    dst->appendFormat("%*s- Sink: %s\n", spaces, "", mSink->getTagName().c_str());
+    dst->appendFormat("%s; Sink: \"%s\"\n",
+            mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix", mSink->getTagName().c_str());
     if (mSources.size() != 0) {
-        dst->appendFormat("%*s- Sources: \n", spaces, "");
+        dst->appendFormat("%*sSources: ", spaces, "");
         for (size_t i = 0; i < mSources.size(); i++) {
-            dst->appendFormat("%*s%s \n", spaces + 4, "", mSources[i]->getTagName().c_str());
+            dst->appendFormat("\"%s\"", mSources[i]->getTagName().c_str());
+            if (i + 1 < mSources.size()) dst->append(", ");
         }
+        dst->append("\n");
     }
-    dst->append("\n");
 }
 
 bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index afc4d01..035bef2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -18,9 +18,12 @@
 //#define LOG_NDEBUG 0
 
 #include <sstream>
+
+#include <android-base/stringprintf.h>
+#include <TypeConverter.h>
 #include <utils/Log.h>
 #include <utils/String8.h>
-#include <TypeConverter.h>
+
 #include "AudioOutputDescriptor.h"
 #include "AudioPatch.h"
 #include "AudioPolicyMix.h"
@@ -39,35 +42,36 @@
     return ss.str();
 }
 
-void ClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void ClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sClient %d:\n", spaces, "", index+1);
-    dst->appendFormat("%*s- Port Id: %d Session Id: %d UID: %d\n", spaces, "",
-             mPortId, mSessionId, mUid);
-    dst->appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
-             mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
-    dst->appendFormat("%*s- Attributes: %s\n", spaces, "", toString(mAttributes).c_str());
-    dst->appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
-    dst->appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
+    dst->appendFormat("Port ID: %d; Session ID: %d; uid %d; State: %s\n",
+            mPortId, mSessionId, mUid, mActive ? "Active" : "Inactive");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mConfig.format), mConfig.sample_rate, mConfig.channel_mask);
+    dst->appendFormat("%*sAttributes: %s\n", spaces, "", toString(mAttributes).c_str());
+    if (mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE) {
+        dst->appendFormat("%*sPreferred Device Port ID: %d;\n", spaces, "", mPreferredDeviceId);
+    }
 }
 
-void TrackClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void TrackClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    ClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
-    dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
-    dst->appendFormat("%*s- DAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
-    dst->appendFormat("%*s- DAP Secondary Outputs:\n", spaces, "");
-    for (auto desc : mSecondaryOutputs) {
-        dst->appendFormat("%*s  - %d\n", spaces, "",
-                desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+    ClientDescriptor::dump(dst, spaces);
+    dst->appendFormat("%*sStream: %d; Flags: %08x; Refcount: %d\n", spaces, "",
+            mStream, mFlags, mActivityCount);
+    dst->appendFormat("%*sDAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
+    if (!mSecondaryOutputs.empty()) {
+        dst->appendFormat("%*sDAP Secondary Outputs: ", spaces - 2, "");
+        for (auto desc : mSecondaryOutputs) {
+            dst->appendFormat("%d, ", desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+        }
+        dst->append("\n");
     }
 }
 
 std::string TrackClientDescriptor::toShortString() const
 {
     std::stringstream ss;
-
     ss << ClientDescriptor::toShortString() << " Stream: " << mStream;
     return ss.str();
 }
@@ -81,10 +85,10 @@
     }
 }
 
-void RecordClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void RecordClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    ClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+    ClientDescriptor::dump(dst, spaces);
+    dst->appendFormat("%*sSource: %d; Flags: %08x\n", spaces, "", mSource, mFlags);
     mEnabledEffects.dump(dst, spaces + 2 /*spaces*/, false /*verbose*/);
 }
 
@@ -109,18 +113,21 @@
     mHwOutput = hwOutput;
 }
 
-void SourceClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void SourceClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    TrackClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Device:\n", spaces, "");
-    mSrcDevice->dump(dst, 2, 0);
+    TrackClientDescriptor::dump(dst, spaces);
+    const std::string prefix = base::StringPrintf("%*sDevice: ", spaces, "");
+    dst->appendFormat("%s", prefix.c_str());
+    mSrcDevice->dump(dst, prefix.size());
 }
 
 void SourceClientCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio sources:\n");
+    dst->appendFormat("\n Audio sources (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->dump(dst, 2, i);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index a92d31e..a909331 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -19,10 +19,11 @@
 
 #include <set>
 
-#include <AudioPolicyInterface.h>
+#include <android-base/stringprintf.h>
 #include <audio_utils/string.h>
 #include <media/AudioParameter.h>
 #include <media/TypeConverter.h>
+#include <AudioPolicyInterface.h>
 #include "DeviceDescriptor.h"
 #include "TypeConverter.h"
 #include "HwModule.h"
@@ -54,19 +55,10 @@
 DeviceDescriptor::DeviceDescriptor(const AudioDeviceTypeAddr &deviceTypeAddr,
                                    const std::string &tagName,
                                    const FormatVector &encodedFormats) :
-        DeviceDescriptorBase(deviceTypeAddr), mTagName(tagName), mEncodedFormats(encodedFormats),
+        DeviceDescriptorBase(deviceTypeAddr, encodedFormats), mTagName(tagName),
         mDeclaredAddress(DeviceDescriptorBase::address())
 {
     mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
-    /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
-     * FIXME: APM should know the version of the HAL and don't add the formats for V5.0.
-     * For now, the workaround to remove AC3 and IEC61937 support on HDMI is to declare
-     * something like 'encodedFormats="AUDIO_FORMAT_PCM_16_BIT"' on the HDMI devicePort.
-     */
-    if (mDeviceTypeAddr.mType == AUDIO_DEVICE_OUT_HDMI && mEncodedFormats.empty()) {
-        mEncodedFormats.push_back(AUDIO_FORMAT_AC3);
-        mEncodedFormats.push_back(AUDIO_FORMAT_IEC61937);
-    }
 }
 
 void DeviceDescriptor::attach(const sp<HwModule>& module)
@@ -118,20 +110,6 @@
     return (mCurrentEncodedFormat != AUDIO_FORMAT_DEFAULT);
 }
 
-bool DeviceDescriptor::supportsFormat(audio_format_t format)
-{
-    if (mEncodedFormats.empty()) {
-        return true;
-    }
-
-    for (const auto& devFormat : mEncodedFormats) {
-        if (devFormat == format) {
-            return true;
-        }
-    }
-    return false;
-}
-
 status_t DeviceDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
                                                 audio_port_config *backupConfig)
 {
@@ -141,7 +119,6 @@
     toAudioPortConfig(&localBackupConfig);
     if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
         AudioPortConfig::applyAudioPortConfig(config, backupConfig);
-        applyPolicyAudioPortConfig(config);
     }
 
     if (backupConfig != NULL) {
@@ -154,8 +131,6 @@
                                          const struct audio_port_config *srcConfig) const
 {
     DeviceDescriptorBase::toAudioPortConfig(dstConfig, srcConfig);
-    toPolicyAudioPortConfig(dstConfig, srcConfig);
-
     dstConfig->ext.device.hw_module = getModuleHandle();
 }
 
@@ -202,15 +177,15 @@
     }
 }
 
-void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
+void DeviceDescriptor::dump(String8 *dst, int spaces, bool verbose) const
 {
     String8 extraInfo;
     if (!mTagName.empty()) {
-        extraInfo.appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.c_str());
+        extraInfo.appendFormat("\"%s\"", mTagName.c_str());
     }
 
     std::string descBaseDumpStr;
-    DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, index, extraInfo.string(), verbose);
+    DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, extraInfo.string(), verbose);
     dst->append(descBaseDumpStr.c_str());
 }
 
@@ -451,6 +426,14 @@
     return devices;
 }
 
+AudioDeviceTypeAddrVector DeviceVector::toTypeAddrVector() const {
+    AudioDeviceTypeAddrVector result;
+    for (const auto& device : *this) {
+        result.push_back(AudioDeviceTypeAddr(device->type(), device->address()));
+    }
+    return result;
+}
+
 void DeviceVector::replaceDevicesByType(
         audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
     DeviceVector devicesToRemove = getDevicesFromType(typeToRemove);
@@ -465,9 +448,11 @@
     if (isEmpty()) {
         return;
     }
-    dst->appendFormat("%*s- %s devices:\n", spaces, "", tag.string());
+    dst->appendFormat("%*s%s devices (%zu):\n", spaces, "", tag.string(), size());
     for (size_t i = 0; i < size(); i++) {
-        itemAt(i)->dump(dst, spaces + 2, i, verbose);
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        itemAt(i)->dump(dst, prefix.size(), verbose);
     }
 }
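
A hypothetical, simplified mirror of the new DeviceVector::toTypeAddrVector() helper, showing its intended use: flattening device descriptors into plain {type, address} pairs for APIs that only accept AudioDeviceTypeAddr lists. The types below are stand-ins, not the real android classes.

#include <cstdio>
#include <string>
#include <vector>

struct DeviceTypeAddr { int type; std::string address; };  // stand-in for AudioDeviceTypeAddr
struct Device { int type; std::string address; };          // stand-in for DeviceDescriptor

std::vector<DeviceTypeAddr> toTypeAddrVector(const std::vector<Device>& devices) {
    std::vector<DeviceTypeAddr> result;
    result.reserve(devices.size());
    for (const auto& d : devices) {
        result.push_back({d.type, d.address});  // keep only the type/address pair
    }
    return result;
}

int main() {
    const auto flattened = toTypeAddrVector({{2, "card=0;device=0"}, {4, ""}});
    printf("%zu devices flattened\n", flattened.size());
    return 0;
}
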
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 843f5da..3f9c8b0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "APM::EffectDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
 #include "EffectDescriptor.h"
 #include <utils/String8.h>
 
@@ -24,13 +25,11 @@
 
 void EffectDescriptor::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sID: %d\n", spaces, "", mId);
-    dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
-    dst->appendFormat("%*sMusic Effect: %s\n", spaces, "", isMusicEffect()? "yes" : "no");
-    dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
-    dst->appendFormat("%*sName: %s\n", spaces, "",  mDesc.name);
-    dst->appendFormat("%*s%s\n", spaces, "",  mEnabled ? "Enabled" : "Disabled");
-    dst->appendFormat("%*s%s\n", spaces, "",  mSuspended ? "Suspended" : "Active");
+    dst->appendFormat("Effect ID: %d; Attached to I/O handle: %d; Session: %d;\n",
+            mId, mIo, mSession);
+    dst->appendFormat("%*sMusic Effect? %s; \"%s\"; %s; %s\n", spaces, "",
+            isMusicEffect() ? "yes" : "no", mDesc.name,
+            mEnabled ? "Enabled" : "Disabled", mSuspended ? "Suspended" : "Active");
 }
 
 EffectDescriptorCollection::EffectDescriptorCollection() :
@@ -237,10 +236,14 @@
             mTotalEffectsMemory,
             mTotalEffectsMemoryMaxUsed);
     }
-    dst->appendFormat("%*sEffects:\n", spaces, "");
-    for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("%*s- Effect %d:\n", spaces, "", keyAt(i));
-        valueAt(i)->dump(dst, spaces + 2);
+    if (size() > 0) {
+        if (spaces > 1) spaces -= 2;
+        dst->appendFormat("%*s- Effects (%zu):\n", spaces, "", size());
+        for (size_t i = 0; i < size(); i++) {
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+            dst->appendFormat("%s", prefix.c_str());
+            valueAt(i)->dump(dst, prefix.size());
+        }
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 3a143b0..418b7eb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -17,11 +17,13 @@
 #define LOG_TAG "APM::HwModule"
 //#define LOG_NDEBUG 0
 
-#include "HwModule.h"
-#include "IOProfile.h"
+#include <android-base/stringprintf.h>
 #include <policy.h>
 #include <system/audio.h>
 
+#include "HwModule.h"
+#include "IOProfile.h"
+
 namespace android {
 
 HwModule::HwModule(const char *name, uint32_t halVersionMajor, uint32_t halVersionMinor)
@@ -247,28 +249,28 @@
     return false;
 }
 
-void HwModule::dump(String8 *dst) const
+void HwModule::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("  - name: %s\n", getName());
-    dst->appendFormat("  - handle: %d\n", mHandle);
-    dst->appendFormat("  - version: %u.%u\n", getHalVersionMajor(), getHalVersionMinor());
+    dst->appendFormat("Handle: %d; \"%s\"\n", mHandle, getName());
     if (mOutputProfiles.size()) {
-        dst->append("  - outputs:\n");
+        dst->appendFormat("%*s- Output MixPorts (%zu):\n", spaces - 2, "", mOutputProfiles.size());
         for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-            dst->appendFormat("    output %zu:\n", i);
-            mOutputProfiles[i]->dump(dst);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+            dst->append(prefix.c_str());
+            mOutputProfiles[i]->dump(dst, prefix.size());
         }
     }
     if (mInputProfiles.size()) {
-        dst->append("  - inputs:\n");
+        dst->appendFormat("%*s- Input MixPorts (%zu):\n", spaces - 2, "", mInputProfiles.size());
         for (size_t i = 0; i < mInputProfiles.size(); i++) {
-            dst->appendFormat("    input %zu:\n", i);
-            mInputProfiles[i]->dump(dst);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+            dst->append(prefix.c_str());
+            mInputProfiles[i]->dump(dst, prefix.size());
         }
     }
-    mDeclaredDevices.dump(dst, String8("Declared"), 2, true);
-    mDynamicDevices.dump(dst, String8("Dynamic"),  2, true);
-    dumpAudioRouteVector(mRoutes, dst, 2);
+    mDeclaredDevices.dump(dst, String8("- Declared"), spaces - 2, true);
+    mDynamicDevices.dump(dst, String8("- Dynamic"),  spaces - 2, true);
+    dumpAudioRouteVector(mRoutes, dst, spaces);
 }
 
 sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
@@ -462,10 +464,11 @@
 
 void HwModuleCollection::dump(String8 *dst) const
 {
-    dst->append("\nHW Modules dump:\n");
+    dst->appendFormat("\n Hardware modules (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- HW Module %zu:\n", i + 1);
-        itemAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->append(prefix.c_str());
+        itemAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 09b614d..21f2018 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -116,27 +116,30 @@
                 return device == deviceDesc && deviceDesc->hasCurrentEncodedFormat(); }) == 1;
 }
 
-void IOProfile::dump(String8 *dst) const
+void IOProfile::dump(String8 *dst, int spaces) const
 {
-    std::string portStr;
-    AudioPort::dump(&portStr, 4);
-    dst->append(portStr.c_str());
-
-    dst->appendFormat("    - flags: 0x%04x", getFlags());
+    String8 extraInfo;
+    extraInfo.appendFormat("0x%04x", getFlags());
     std::string flagsLiteral =
             getRole() == AUDIO_PORT_ROLE_SINK ?
             toString(static_cast<audio_input_flags_t>(getFlags())) :
             getRole() == AUDIO_PORT_ROLE_SOURCE ?
             toString(static_cast<audio_output_flags_t>(getFlags())) : "";
     if (!flagsLiteral.empty()) {
-        dst->appendFormat(" (%s)", flagsLiteral.c_str());
+        extraInfo.appendFormat(" (%s)", flagsLiteral.c_str());
     }
-    dst->append("\n");
-    mSupportedDevices.dump(dst, String8("Supported"), 4, false);
-    dst->appendFormat("\n    - maxOpenCount: %u - curOpenCount: %u\n",
-             maxOpenCount, curOpenCount);
-    dst->appendFormat("    - maxActiveCount: %u - curActiveCount: %u\n",
-             maxActiveCount, curActiveCount);
+
+    std::string portStr;
+    AudioPort::dump(&portStr, spaces, extraInfo.c_str());
+    dst->append(portStr.c_str());
+
+    mSupportedDevices.dump(dst, String8("- Supported"), spaces - 2, false);
+    dst->appendFormat("%*s- maxOpenCount: %u; curOpenCount: %u\n",
+            spaces - 2, "", maxOpenCount, curOpenCount);
+    dst->appendFormat("%*s- maxActiveCount: %u; curActiveCount: %u\n",
+            spaces - 2, "", maxActiveCount, curActiveCount);
+    dst->appendFormat("%*s- recommendedMuteDurationMs: %u ms\n",
+            spaces - 2, "", recommendedMuteDurationMs);
 }
 
 void IOProfile::log()
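
An illustrative sketch of how the new recommendedMuteDurationMs profile field is meant to propagate: a regular output reports its profile's value, while a duplicated output reports the maximum of its two halves, as in SwAudioOutputDescriptor::getRecommendedMuteDurationMs() earlier in this patch. Names below are placeholders.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <memory>

// Illustrative only: mirrors the "duplicated output takes the max of its two halves" rule.
struct Output {
    uint32_t recommendedMuteDurationMs = 0;
    std::shared_ptr<Output> output1;  // set only for duplicated outputs
    std::shared_ptr<Output> output2;

    bool isDuplicated() const { return output1 && output2; }

    uint32_t getRecommendedMuteDurationMs() const {
        if (isDuplicated()) {
            return std::max(output1->getRecommendedMuteDurationMs(),
                            output2->getRecommendedMuteDurationMs());
        }
        return recommendedMuteDurationMs;
    }
};

int main() {
    auto a = std::make_shared<Output>();
    a->recommendedMuteDurationMs = 40;
    auto b = std::make_shared<Output>();
    b->recommendedMuteDurationMs = 100;
    Output dup;
    dup.output1 = a;
    dup.output2 = b;
    printf("duplicated output mute duration: %u ms\n", dup.getRecommendedMuteDurationMs());
    return 0;
}
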
diff --git a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
index 8c61b90..ce8178f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
@@ -30,9 +30,9 @@
 // --- PolicyAudioPort class implementation
 void PolicyAudioPort::attach(const sp<HwModule>& module)
 {
+    mModule = module;
     ALOGV("%s: attaching module %s to port %s",
             __FUNCTION__, getModuleName(), asAudioPort()->getName().c_str());
-    mModule = module;
 }
 
 void PolicyAudioPort::detach()
@@ -87,7 +87,7 @@
     // For direct outputs, pick minimum sampling rate: this helps ensuring that the
     // channel count / sampling rate combination chosen will be supported by the connected
     // sink
-    if (isDirectOutput()) {
+    if (asAudioPort()->isDirectOutput()) {
         uint32_t samplingRate = UINT_MAX;
         for (const auto rate : samplingRates) {
             if ((rate < samplingRate) && (rate > 0)) {
@@ -122,7 +122,7 @@
     // For direct outputs, pick minimum channel count: this helps ensuring that the
     // channel count / sampling rate combination chosen will be supported by the connected
     // sink
-    if (isDirectOutput()) {
+    if (asAudioPort()->isDirectOutput()) {
         uint32_t channelCount = UINT_MAX;
         for (const auto channelMask : channelMasks) {
             uint32_t cnlCount;
@@ -236,7 +236,7 @@
     audio_format_t bestFormat = sPcmFormatCompareTable[ARRAY_SIZE(sPcmFormatCompareTable) - 1];
     // For mixed output and inputs, use best mixer output format.
     // Do not limit format otherwise
-    if ((asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) || isDirectOutput()) {
+    if ((asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) || asAudioPort()->isDirectOutput()) {
         bestFormat = AUDIO_FORMAT_INVALID;
     }
 
@@ -266,29 +266,4 @@
             asAudioPort()->getName().c_str(), samplingRate, channelMask, format);
 }
 
-// --- PolicyAudioPortConfig class implementation
-
-status_t PolicyAudioPortConfig::validationBeforeApplyConfig(
-        const struct audio_port_config *config) const
-{
-    sp<PolicyAudioPort> policyAudioPort = getPolicyAudioPort();
-    return policyAudioPort ? policyAudioPort->checkExactAudioProfile(config) : NO_INIT;
-}
-
-void PolicyAudioPortConfig::toPolicyAudioPortConfig(struct audio_port_config *dstConfig,
-                                                    const struct audio_port_config *srcConfig) const
-{
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
-        if ((srcConfig != nullptr) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS)) {
-            dstConfig->flags = srcConfig->flags;
-        } else {
-            dstConfig->flags = mFlags;
-        }
-    } else {
-        dstConfig->flags = { AUDIO_INPUT_FLAG_NONE };
-    }
-}
-
-
-
 } // namespace android
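
A standalone sketch of the "pick the minimum sampling rate for direct outputs" policy referenced in the comments above: a direct/offload stream bypasses the mixer, so the smallest rate offered by the sink is the safest rate/channel-count combination. The container and the non-direct branch below are simplified stand-ins, not the policy manager's actual selection logic.

#include <climits>
#include <cstdint>
#include <cstdio>
#include <set>

uint32_t pickSamplingRate(const std::set<uint32_t>& rates, bool isDirectOutput) {
    if (rates.empty()) {
        return 0;
    }
    if (isDirectOutput) {
        // Direct outputs: pick the minimum non-zero rate supported by the sink.
        uint32_t minRate = UINT_MAX;
        for (uint32_t rate : rates) {
            if (rate > 0 && rate < minRate) {
                minRate = rate;
            }
        }
        return minRate == UINT_MAX ? 0 : minRate;
    }
    // Mixed outputs prefer a high rate; returning the maximum here is a simplification.
    return *rates.rbegin();
}

int main() {
    std::set<uint32_t> rates = {44100, 48000, 96000};
    printf("direct: %u, mixed: %u\n",
           pickSamplingRate(rates, true), pickSamplingRate(rates, false));
    return 0;
}
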
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 84ed656..d446e96 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -123,6 +123,7 @@
         static constexpr const char *flags = "flags";
         static constexpr const char *maxOpenCount = "maxOpenCount";
         static constexpr const char *maxActiveCount = "maxActiveCount";
+        static constexpr const char *recommendedMuteDurationMs = "recommendedMuteDurationMs";
     };
 
     // Children: GainTraits
@@ -482,7 +483,14 @@
     if (!flags.empty()) {
         // Source role
         if (portRole == AUDIO_PORT_ROLE_SOURCE) {
-            mixPort->setFlags(OutputFlagConverter::maskFromString(flags, mFlagsSeparator.c_str()));
+            // TODO: b/193496180 use the spatializer flag at the audio HAL when available.
+            // Until then, use the DEEP_BUFFER+FAST flag combo to indicate the spatializer
+            // output profile.
+            uint32_t intFlags =
+                    OutputFlagConverter::maskFromString(flags, mFlagsSeparator.c_str());
+            if (intFlags == (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) {
+                intFlags = AUDIO_OUTPUT_FLAG_SPATIALIZER;
+            }
+            mixPort->setFlags(intFlags);
         } else {
             // Sink role
             mixPort->setFlags(InputFlagConverter::maskFromString(flags, mFlagsSeparator.c_str()));
@@ -496,6 +504,13 @@
     if (!maxActiveCount.empty()) {
         convertTo(maxActiveCount, mixPort->maxActiveCount);
     }
+
+    std::string recommendedMuteDurationMsLiteral =
+            getXmlAttribute(child, Attributes::recommendedMuteDurationMs);
+    if (!recommendedMuteDurationMsLiteral.empty()) {
+        convertTo(recommendedMuteDurationMsLiteral, mixPort->recommendedMuteDurationMs);
+    }
+
     // Deserialize children
     AudioGainTraits::Collection gains;
     status = deserializeCollection<AudioGainTraits>(child, &gains, NULL);
@@ -861,10 +876,10 @@
         ALOGE("%s: No version found in root node %s", __func__, rootName);
         return BAD_VALUE;
     }
-    if (version == "7.0") {
+    if (version == "7.0" || version == "7.1") {
         mChannelMasksSeparator = mSamplingRatesSeparator = mFlagsSeparator = " ";
     } else if (version != "1.0") {
-        ALOGE("%s: Version does not match; expected \"1.0\" or \"7.0\" got \"%s\"",
+        ALOGE("%s: Version does not match; expected \"1.0\", \"7.0\", or \"7.1\" got \"%s\"",
                 __func__, version.c_str());
         return BAD_VALUE;
     }
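
A sketch of the temporary flag translation added above: until the HAL exposes a spatializer flag, a mixPort declared with exactly DEEP_BUFFER|FAST is reinterpreted as the spatializer output profile. The flag bit values below are placeholders, not the real audio_output_flags_t constants.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kFlagFast = 1u << 2;         // placeholder bit
constexpr uint32_t kFlagDeepBuffer = 1u << 3;   // placeholder bit
constexpr uint32_t kFlagSpatializer = 1u << 14; // placeholder bit

uint32_t translateMixPortFlags(uint32_t parsedFlags) {
    // The exact DEEP_BUFFER|FAST combination is reserved to mark the spatializer profile.
    if (parsedFlags == (kFlagFast | kFlagDeepBuffer)) {
        return kFlagSpatializer;
    }
    return parsedFlags;
}

int main() {
    printf("0x%x -> 0x%x\n", kFlagFast | kFlagDeepBuffer,
           translateMixPortFlags(kFlagFast | kFlagDeepBuffer));
    return 0;
}
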
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
index 98415b7..ce78eb0 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -10,18 +10,6 @@
                      samplingRates="24000,16000"
                      channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
         </mixPort>
-        <!-- Le Audio Audio Ports -->
-        <mixPort name="le audio output" role="source">
-            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
-        </mixPort>
     </mixPorts>
     <devicePorts>
         <!-- A2DP Audio Ports -->
@@ -42,13 +30,6 @@
         </devicePort>
         <!-- Hearing AIDs Audio Ports -->
         <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
-        <!-- BLE Audio Ports -->
-        <!-- Note that these device types are not valid in HAL versions < 7. Any device
-             running pre-V7 HAL and using this file will not pass VTS. Need to use
-             bluetooth_audio_policy_configuration_7_0.xml instead.
-        -->
-        <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
-        <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
     </devicePorts>
     <routes>
         <route type="mix" sink="BT A2DP Out"
@@ -59,9 +40,5 @@
                sources="a2dp output"/>
         <route type="mix" sink="BT Hearing Aid Out"
                sources="hearing aid output"/>
-        <route type="mix" sink="BLE Headset Out"
-               sources="le audio output"/>
-        <route type="mix" sink="BLE Speaker Out"
-               sources="le audio output"/>
     </routes>
 </module>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
index fbe7571..2dffe02 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
@@ -10,18 +10,6 @@
                      samplingRates="24000 16000"
                      channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
         </mixPort>
-        <!-- Le Audio Audio Ports -->
-        <mixPort name="le audio output" role="source">
-            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
-        </mixPort>
     </mixPorts>
     <devicePorts>
         <!-- A2DP Audio Ports -->
@@ -42,9 +30,6 @@
         </devicePort>
         <!-- Hearing AIDs Audio Ports -->
         <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
-        <!-- BLE Audio Ports -->
-        <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
-        <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
     </devicePorts>
     <routes>
         <route type="mix" sink="BT A2DP Out"
@@ -55,9 +40,5 @@
                sources="a2dp output"/>
         <route type="mix" sink="BT Hearing Aid Out"
                sources="hearing aid output"/>
-        <route type="mix" sink="BLE Headset Out"
-               sources="le audio output"/>
-        <route type="mix" sink="BLE Speaker Out"
-               sources="le audio output"/>
     </routes>
 </module>
diff --git a/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml
new file mode 100644
index 0000000..22ff954
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml
@@ -0,0 +1,81 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+    <mixPorts>
+        <!-- A2DP Audio Ports -->
+        <mixPort name="a2dp output" role="source"/>
+        <!-- Hearing AIDs Audio Ports -->
+        <mixPort name="hearing aid output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="24000,16000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <!-- Le Audio Audio Ports -->
+        <mixPort name="le audio output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="le audio input" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+        </mixPort>
+    </mixPorts>
+    <devicePorts>
+        <!-- A2DP Audio Ports -->
+        <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <!-- Hearing AIDs Audio Ports -->
+        <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+        <!-- BLE Audio Ports -->
+        <!-- Note that these device types are not valid in HAL versions < 7. Any device
+             running a pre-V7 HAL and using this file will not pass VTS. Use
+             bluetooth_with_le_audio_policy_configuration_7_0.xml instead.
+        -->
+        <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
+        <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+        <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="BT A2DP Out"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Headphones"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Speaker"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT Hearing Aid Out"
+               sources="hearing aid output"/>
+        <route type="mix" sink="BLE Headset Out"
+               sources="le audio output"/>
+        <route type="mix" sink="le audio input"
+               sources="BLE Headset In"/>
+        <route type="mix" sink="BLE Speaker Out"
+               sources="le audio output"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..aad00d6
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+    <mixPorts>
+        <!-- A2DP Audio Ports -->
+        <mixPort name="a2dp output" role="source"/>
+        <!-- Hearing AIDs Audio Ports -->
+        <mixPort name="hearing aid output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="24000 16000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <!-- Le Audio Audio Ports -->
+        <mixPort name="le audio output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="le audio input" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+        </mixPort>
+    </mixPorts>
+    <devicePorts>
+        <!-- A2DP Audio Ports -->
+        <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100 48000 88200 96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100 48000 88200 96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100 48000 88200 96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <!-- Hearing AIDs Audio Ports -->
+        <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+        <!-- BLE Audio Ports -->
+        <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
+        <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+        <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="BT A2DP Out"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Headphones"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Speaker"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT Hearing Aid Out"
+               sources="hearing aid output"/>
+        <route type="mix" sink="BLE Headset Out"
+               sources="le audio output"/>
+        <route type="mix" sink="le audio input"
+               sources="BLE Headset In"/>
+        <route type="mix" sink="BLE Speaker Out"
+               sources="le audio output"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/config/le_audio_policy_configuration.xml b/services/audiopolicy/config/le_audio_policy_configuration.xml
index a3dc72b..dcdd805 100644
--- a/services/audiopolicy/config/le_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/le_audio_policy_configuration.xml
@@ -7,13 +7,20 @@
                      samplingRates="8000,16000,24000,32000,44100,48000"
                      channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
         </mixPort>
+        <mixPort name="le audio input" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT,AUDIO_FORMAT_PCM_24_BIT,AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+        </mixPort>
     </mixPorts>
     <devicePorts>
         <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
         <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+        <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
     </devicePorts>
     <routes>
         <route type="mix" sink="BLE Headset Out" sources="le audio output"/>
         <route type="mix" sink="BLE Speaker Out" sources="le audio output"/>
+        <route type="mix" sink="le audio input" sources="BLE Headset In"/>
     </routes>
 </module>
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 150a9a8..7a06206 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -71,7 +71,10 @@
                                               audio_policy_dev_state_t state)
 {
     audio_devices_t deviceType = devDesc->type();
-    if ((deviceType != AUDIO_DEVICE_NONE) && audio_is_output_device(deviceType)) {
+    if ((deviceType != AUDIO_DEVICE_NONE) && audio_is_output_device(deviceType)
+            && deviceType != AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET) {
+        // The USB dock does not follow the "last removable device connected wins" rule.
+        // It is only used if no removable device is connected, or if it is set as the preferred device.
         mLastRemovableMediaDevices.setRemovableMediaDevices(devDesc, state);
     }
 
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index d39eff6..b036e12 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -69,12 +69,6 @@
           {
               {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT,
                AUDIO_FLAG_NONE, ""},
-              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
-               AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
-               AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
-               AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
               {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_EVENT,
                AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
           }
@@ -128,8 +122,12 @@
     {"STRATEGY_TRANSMITTED_THROUGH_SPEAKER",
      {
          {"", AUDIO_STREAM_TTS, "AUDIO_STREAM_TTS",
-          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
-            AUDIO_FLAG_BEACON, ""}}
+          {
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+                AUDIO_FLAG_BEACON, ""},
+              {AUDIO_CONTENT_TYPE_ULTRASOUND, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+                AUDIO_FLAG_NONE, ""}
+          }
          }
      },
     }
diff --git a/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp b/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
index b3f8947..06cc799 100644
--- a/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
+++ b/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
@@ -85,6 +85,7 @@
     case AUDIO_DEVICE_OUT_HEARING_AID:
     case AUDIO_DEVICE_OUT_BLE_HEADSET:
     case AUDIO_DEVICE_OUT_BLE_SPEAKER:
+    case AUDIO_DEVICE_OUT_BLE_BROADCAST:
         return GROUP_BT_A2DP;
     default:
         return GROUP_NONE;
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index b3d144f..fbfcf72 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -150,12 +150,8 @@
 void ProductStrategy::dump(String8 *dst, int spaces) const
 {
     dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
-    std::string deviceLiteral;
-    if (!deviceTypesToString(mApplicableDevices, deviceLiteral)) {
-        ALOGE("%s: failed to convert device %s",
-              __FUNCTION__, dumpDeviceTypes(mApplicableDevices).c_str());
-    }
-    dst->appendFormat("%*sSelected Device: {type:%s, @:%s}\n", spaces + 2, "",
+    std::string deviceLiteral = deviceTypesToString(mApplicableDevices);
+    dst->appendFormat("%*sSelected Device: {%s, @:%s}\n", spaces + 2, "",
                        deviceLiteral.c_str(), mDeviceAddress.c_str());
 
     for (const auto &attr : mAttributesVector) {
@@ -333,4 +329,3 @@
     dst->appendFormat("\n");
 }
 }
-
diff --git a/services/audiopolicy/engine/config/include/EngineConfig.h b/services/audiopolicy/engine/config/include/EngineConfig.h
index c565926..2ebb7df 100644
--- a/services/audiopolicy/engine/config/include/EngineConfig.h
+++ b/services/audiopolicy/engine/config/include/EngineConfig.h
@@ -70,7 +70,7 @@
 
 using ProductStrategies = std::vector<ProductStrategy>;
 
-using ValuePair = std::pair<uint32_t, std::string>;
+using ValuePair = std::tuple<uint64_t, uint32_t, std::string>;
 using ValuePairs = std::vector<ValuePair>;
 
 struct CriterionType
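
For orientation: the widened ValuePair carries three fields, and the wrapper code later in this change reads them positionally (std::get<0> = 64-bit criterion value, std::get<1> = Android device type taken from the new android_type attribute, std::get<2> = literal). A minimal, self-contained sketch of that access pattern; dumpPairs is illustrative and not part of the change:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <tuple>
    #include <vector>

    using ValuePair = std::tuple<uint64_t, uint32_t, std::string>;
    using ValuePairs = std::vector<ValuePair>;

    // Print each pair the way ParameterManagerWrapper::addCriterion() consumes it.
    void dumpPairs(const ValuePairs &pairs) {
        for (const auto &pair : pairs) {
            const uint64_t criterionValue = std::get<0>(pair);  // numerical criterion bit(s)
            const uint32_t androidType = std::get<1>(pair);     // android_type attribute
            const std::string &literal = std::get<2>(pair);
            std::printf("%s -> criterion 0x%llx, android type 0x%x\n", literal.c_str(),
                        static_cast<unsigned long long>(criterionValue), androidType);
        }
    }
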
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 81e803f..6f560d5 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -80,6 +80,7 @@
     struct Attributes {
         static constexpr const char *literal = "literal";
         static constexpr const char *numerical = "numerical";
+        static constexpr const char *androidType = "android_type";
     };
 
     static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
@@ -349,7 +350,16 @@
         ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
         return BAD_VALUE;
     }
-    uint32_t numerical = 0;
+    uint32_t androidType = 0;
+    std::string androidTypeLiteral = getXmlAttribute(child, Attributes::androidType);
+    if (!androidTypeLiteral.empty()) {
+        ALOGV("%s: androidType %s", __FUNCTION__, androidTypeLiteral.c_str());
+        if (!convertTo(androidTypeLiteral, androidType)) {
+            ALOGE("%s: Invalid android_type value (%s)", __FUNCTION__, androidTypeLiteral.c_str());
+            return BAD_VALUE;
+        }
+    }
+    uint64_t numerical = 0;
     std::string numericalTag = getXmlAttribute(child, Attributes::numerical);
     if (numericalTag.empty()) {
         ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
@@ -359,7 +369,7 @@
         ALOGE("%s: : Invalid value(%s)", __FUNCTION__, numericalTag.c_str());
         return BAD_VALUE;
     }
-    values.push_back({numerical, literal});
+    values.push_back({numerical, androidType, literal});
     return NO_ERROR;
 }
 
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index a747822..dc8d9cf 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -41,8 +41,9 @@
         "libaudiopolicyengineconfigurable_pfwwrapper",
 
     ],
-    shared_libs: [
+    shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
index bc32416..0ddf66d 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -57,9 +57,6 @@
     <ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
         <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
             <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
             <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
         </AttributesGroup>
     </ProductStrategy>
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
index 1fc2264..9fd8b8e 100644
--- a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
+++ b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
@@ -77,12 +77,12 @@
      * Set the input device to be used by an input source.
      *
      * @param[in] inputSource: name of the input source for which the device to use has to be set
-     * @param[in] devices; mask of devices to be used for the given input source.
+     * @param[in] devices: mask of devices to be used for the given input source.
      *
     * @return true if the devices were set correctly for this input source, false otherwise.
      */
     virtual bool setDeviceForInputSource(const audio_source_t &inputSource,
-                                         audio_devices_t device) = 0;
+                                         uint64_t device) = 0;
 
     virtual void setDeviceAddressForProductStrategy(product_strategy_t strategy,
                                                     const std::string &address) = 0;
@@ -91,12 +91,12 @@
      * Set the device to be used by a product strategy.
      *
      * @param[in] strategy: name of the product strategy for which the device to use has to be set
-     * @param[in] devices; mask of devices to be used for the given strategy.
+     * @param[in] devices: mask of devices to be used for the given strategy.
      *
     * @return true if the devices were set correctly for this strategy, false otherwise.
      */
     virtual bool setDeviceTypesForProductStrategy(product_strategy_t strategy,
-                                                  audio_devices_t devices) = 0;
+                                                  uint64_t devices) = 0;
 
     virtual product_strategy_t getProductStrategyByName(const std::string &address) = 0;
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem-CommonTypes.xml.in b/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem-CommonTypes.xml.in
index 2e9f37e..2c4c7b5 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem-CommonTypes.xml.in
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem-CommonTypes.xml.in
@@ -10,8 +10,8 @@
      <!--#################### GLOBAL COMPONENTS END ####################-->
 
     <!-- Automatically filled from audio-base.h file -->
-    <ComponentType Name="OutputDevicesMask" Description="32th bit is not allowed as dedicated for input devices detection">
-        <BitParameterBlock Name="mask" Size="32">
+    <ComponentType Name="OutputDevicesMask" Description="64bit representation of devices">
+        <BitParameterBlock Name="mask" Size="64">
         </BitParameterBlock>
     </ComponentType>
 
@@ -19,8 +19,8 @@
     profile. It must match with the Input device enum parameter.
     -->
     <!-- Automatically filled from audio-base.h file -->
-    <ComponentType Name="InputDevicesMask">
-        <BitParameterBlock Name="mask" Size="32">
+    <ComponentType Name="InputDevicesMask" Description="64bit representation of devices">
+        <BitParameterBlock Name="mask" Size="64">
         </BitParameterBlock>
     </ComponentType>
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
index f8a6fc0..df4e3e9 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
@@ -45,7 +45,7 @@
 
 bool InputSource::sendToHW(string & /*error*/)
 {
-    audio_devices_t applicableInputDevice;
+    uint64_t applicableInputDevice;
     blackboardRead(&applicableInputDevice, sizeof(applicableInputDevice));
     return mPolicyPluginInterface->setDeviceForInputSource(mId, applicableInputDevice);
 }
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
index 6c8eb65..e65946e 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
@@ -32,7 +32,7 @@
 
     struct Device
     {
-        audio_devices_t applicableDevice; /**< applicable device for this strategy. */
+        uint64_t applicableDevice; /**< applicable device for this strategy. */
         char deviceAddress[mMaxStringSize]; /**< device address associated with this strategy. */
     } __attribute__((packed));
 
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index b0c376a..3d74920 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -36,6 +36,8 @@
 
 #include <media/TypeConverter.h>
 
+#include <cinttypes>
+
 using std::string;
 using std::map;
 
@@ -166,16 +168,13 @@
 status_t Engine::setDeviceConnectionState(const sp<DeviceDescriptor> device,
                                           audio_policy_dev_state_t state)
 {
-    mPolicyParameterMgr->setDeviceConnectionState(
-                device->type(), device->address().c_str(), state);
+    mPolicyParameterMgr->setDeviceConnectionState(device->type(), device->address(), state);
     if (audio_is_output_device(device->type())) {
-        // FIXME: Use DeviceTypeSet when the interface is ready
         return mPolicyParameterMgr->setAvailableOutputDevices(
-                    deviceTypesToBitMask(getApmObserver()->getAvailableOutputDevices().types()));
+                    getApmObserver()->getAvailableOutputDevices().types());
     } else if (audio_is_input_device(device->type())) {
-        // FIXME: Use DeviceTypeSet when the interface is ready
         return mPolicyParameterMgr->setAvailableInputDevices(
-                    deviceTypesToBitMask(getApmObserver()->getAvailableInputDevices().types()));
+                    getApmObserver()->getAvailableInputDevices().types());
     }
     return EngineBase::setDeviceConnectionState(device, state);
 }
@@ -299,8 +298,13 @@
     if (device != nullptr) {
         return DeviceVector(device);
     }
+    return fromCache? getCachedDevices(strategy) : getDevicesForProductStrategy(strategy);
+}
 
-    return fromCache? mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
+DeviceVector Engine::getCachedDevices(product_strategy_t ps) const
+{
+    return mDevicesForStrategies.find(ps) != mDevicesForStrategies.end() ?
+                mDevicesForStrategies.at(ps) : DeviceVector{};
 }
 
 DeviceVector Engine::getOutputDevicesForStream(audio_stream_type_t stream, bool fromCache) const
@@ -369,17 +373,28 @@
     getProductStrategies().at(strategy)->setDeviceAddress(address);
 }
 
-bool Engine::setDeviceTypesForProductStrategy(product_strategy_t strategy, audio_devices_t devices)
+bool Engine::setDeviceTypesForProductStrategy(product_strategy_t strategy, uint64_t devices)
 {
     if (getProductStrategies().find(strategy) == getProductStrategies().end()) {
-        ALOGE("%s: set device %d on invalid strategy %d", __FUNCTION__, devices, strategy);
+        ALOGE("%s: set device %" PRId64 " on invalid strategy %d", __FUNCTION__, devices, strategy);
         return false;
     }
-    // FIXME: stop using deviceTypesFromBitMask when the interface is ready
-    getProductStrategies().at(strategy)->setDeviceTypes(deviceTypesFromBitMask(devices));
+    // Here the device matches the criterion value; rebuild the Android device types from it.
+    DeviceTypeSet types =
+            mPolicyParameterMgr->convertDeviceCriterionValueToDeviceTypes(devices, true /*isOut*/);
+    getProductStrategies().at(strategy)->setDeviceTypes(types);
     return true;
 }
 
+bool Engine::setDeviceForInputSource(const audio_source_t &inputSource, uint64_t device)
+{
+    DeviceTypeSet types = mPolicyParameterMgr->convertDeviceCriterionValueToDeviceTypes(
+                device, false /*isOut*/);
+    ALOG_ASSERT(types.size() <= 1, "one input device expected at most");
+    audio_devices_t deviceType = types.empty() ? AUDIO_DEVICE_IN_DEFAULT : *types.begin();
+    return setPropertyForKey<audio_devices_t, audio_source_t>(deviceType, inputSource);
+}
+
 template <>
 EngineInterface *Engine::queryInterface()
 {
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index d8e2742..4b559f0 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -82,15 +82,12 @@
     bool setVolumeProfileForStream(const audio_stream_type_t &stream,
                                    const audio_stream_type_t &volumeProfile) override;
 
-    bool setDeviceForInputSource(const audio_source_t &inputSource, audio_devices_t device) override
-    {
-        return setPropertyForKey<audio_devices_t, audio_source_t>(device, inputSource);
-    }
+    bool setDeviceForInputSource(const audio_source_t &inputSource, uint64_t device) override;
+
     void setDeviceAddressForProductStrategy(product_strategy_t strategy,
                                                     const std::string &address) override;
 
-    bool setDeviceTypesForProductStrategy(product_strategy_t strategy,
-                                                  audio_devices_t devices) override;
+    bool setDeviceTypesForProductStrategy(product_strategy_t strategy, uint64_t devices) override;
 
     product_strategy_t getProductStrategyByName(const std::string &name) override
     {
@@ -126,6 +123,7 @@
     status_t loadAudioPolicyEngineConfig();
 
     DeviceVector getDevicesForProductStrategy(product_strategy_t strategy) const;
+    DeviceVector getCachedDevices(product_strategy_t ps) const;
 
     /**
      * Policy Parameter Manager hidden through a wrapper.
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.cpp b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
index f4645e6..6fd2b70 100644
--- a/services/audiopolicy/engineconfigurable/src/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
@@ -26,7 +26,8 @@
 status_t Element<audio_source_t>::setIdentifier(audio_source_t identifier)
 {
     if (identifier > AUDIO_SOURCE_MAX && identifier != AUDIO_SOURCE_HOTWORD
-        && identifier != AUDIO_SOURCE_FM_TUNER && identifier != AUDIO_SOURCE_ECHO_REFERENCE) {
+        && identifier != AUDIO_SOURCE_FM_TUNER && identifier != AUDIO_SOURCE_ECHO_REFERENCE
+        && identifier != AUDIO_SOURCE_ULTRASOUND) {
         return BAD_VALUE;
     }
     mIdentifier = identifier;
@@ -46,12 +47,6 @@
 template <>
 status_t Element<audio_source_t>::set(audio_devices_t devices)
 {
-    if (devices == AUDIO_DEVICE_NONE) {
-        // Reset
-        mApplicableDevices = devices;
-        return NO_ERROR;
-    }
-    devices = static_cast<audio_devices_t>(devices | AUDIO_DEVICE_BIT_IN);
     if (!audio_is_input_device(devices)) {
         ALOGE("%s: trying to set an invalid device 0x%X for input source %s",
               __FUNCTION__, devices, getName().c_str());
diff --git a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
index 43b3dd2..86ac76f 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
@@ -55,7 +55,7 @@
     while i < decimal:
         i = i << 1
         pos = pos + 1
-        if pos == 32:
+        if pos == 64:
             return -1
 
     # TODO: b/168065706. This is just to fix the build. That the problem of devices with
@@ -132,6 +132,9 @@
 
     logging.info("Checking Android Header file {}".format(androidaudiobaseheaderFile))
 
+    multi_bit_output_device_shift = 32
+    multi_bit_input_device_shift = 32
+
     for line_number, line in enumerate(androidaudiobaseheaderFile):
         match = criteria_pattern.match(line)
         if match:
@@ -143,16 +146,36 @@
 
             component_type_numerical_value = match.groupdict()['values']
 
-            # for AUDIO_DEVICE_IN: need to remove sign bit / rename default to stub
+            # for AUDIO_DEVICE_IN: rename default to stub
             if component_type_name == "InputDevicesMask":
-                component_type_numerical_value = str(int(component_type_numerical_value, 0) & ~2147483648)
+                component_type_numerical_value = str(int(component_type_numerical_value, 0))
                 if component_type_literal == "default":
                     component_type_literal = "stub"
 
+                string_int = int(component_type_numerical_value, 0)
+                num_bits = bin(string_int).count("1")
+                if num_bits > 1:
+                    logging.info("The value {} is for criterion {} binary rep {} has {} bits sets"
+                        .format(component_type_numerical_value, component_type_name, bin(string_int), num_bits))
+                    string_int = 2**multi_bit_input_device_shift
+                    logging.info("new val assigned is {} {}" .format(string_int, bin(string_int)))
+                    multi_bit_input_device_shift += 1
+                    component_type_numerical_value = str(string_int)
+
             if component_type_name == "OutputDevicesMask":
                 if component_type_literal == "default":
                     component_type_literal = "stub"
 
+                string_int = int(component_type_numerical_value, 0)
+                num_bits = bin(string_int).count("1")
+                if num_bits > 1:
+                    logging.info("The value {} is for criterion {} binary rep {} has {} bits sets"
+                        .format(component_type_numerical_value, component_type_name, bin(string_int), num_bits))
+                    string_int = 2**multi_bit_output_device_shift
+                    logging.info("new val assigned is {} {}" .format(string_int, bin(string_int)))
+                    multi_bit_output_device_shift += 1
+                    component_type_numerical_value = str(string_int)
+
             # Remove duplicated numerical values
             if int(component_type_numerical_value, 0) in all_component_types[component_type_name].values():
                 logging.info("The value {}:{} is duplicated for criterion {}, KEEPING LATEST".format(component_type_numerical_value, component_type_literal, component_type_name))
diff --git a/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py b/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
index 76c35c1..a15a6ba 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
@@ -85,6 +85,9 @@
     return argparser.parse_args()
 
 
+output_devices_type_value = {}
+input_devices_type_value = {}
+
 def generateXmlCriterionTypesFile(criterionTypes, addressCriteria, criterionTypesFile, outputFile):
 
     logging.info("Importing criterionTypesFile {}".format(criterionTypesFile))
@@ -102,6 +105,11 @@
                     value_node.set('numerical', str(value))
                     value_node.set('literal', key)
 
+                    if criterion_type.get('name') == "OutputDevicesMaskType":
+                        value_node.set('android_type', output_devices_type_value[key])
+                    if criterion_type.get('name') == "InputDevicesMaskType":
+                        value_node.set('android_type', input_devices_type_value[key])
+
     if addressCriteria:
         for criterion_name, values_list in addressCriteria.items():
             for criterion_type in criterion_types_root.findall('criterion_type'):
@@ -200,10 +208,8 @@
     #
     ignored_values = ['CNT', 'MAX', 'ALL', 'NONE']
 
-    #
-    # Reaching 32 bit limit for inclusive criterion out devices: removing
-    #
-    ignored_output_device_values = ['BleSpeaker', 'BleHeadset']
+    multi_bit_outputdevice_shift = 32
+    multi_bit_inputdevice_shift = 32
 
     criteria_pattern = re.compile(
         r"\s*V\((?P<type>(?:"+'|'.join(criterion_mapping_table.keys()) + "))_" \
@@ -223,28 +229,59 @@
                 ''.join((w.capitalize() for w in match.groupdict()['literal'].split('_')))
             criterion_numerical_value = match.groupdict()['values']
 
-            # for AUDIO_DEVICE_IN: need to remove sign bit / rename default to stub
+            # for AUDIO_DEVICE_IN: rename default to stub
             if criterion_name == "InputDevicesMaskType":
                 if criterion_literal == "Default":
                     criterion_numerical_value = str(int("0x40000000", 0))
+                    input_devices_type_value[criterion_literal] = "0xC0000000"
                 else:
                     try:
                         string_int = int(criterion_numerical_value, 0)
+                        # OR in AUDIO_DEVICE_BIT_IN (0x80000000) so the android_type tag keeps the input direction bit
+                        input_devices_type_value[criterion_literal] = hex(string_int | 2147483648)
+
+                        num_bits = bin(string_int).count("1")
+                        if num_bits > 1:
+                            logging.info("The value {}:{} is for criterion {} binary rep {} has {} bits sets"
+                                .format(criterion_numerical_value, criterion_literal, criterion_name, bin(string_int), num_bits))
+                            string_int = 2**multi_bit_inputdevice_shift
+                            logging.info("new val assigned is {} {}" .format(string_int, bin(string_int)))
+                            multi_bit_inputdevice_shift += 1
+                            criterion_numerical_value = str(string_int)
+
                     except ValueError:
                         # Handle the exception
                         logging.info("value {}:{} for criterion {} is not a number, ignoring"
                             .format(criterion_numerical_value, criterion_literal, criterion_name))
                         continue
-                    criterion_numerical_value = str(int(criterion_numerical_value, 0) & ~2147483648)
 
             if criterion_name == "OutputDevicesMaskType":
                 if criterion_literal == "Default":
                     criterion_numerical_value = str(int("0x40000000", 0))
-                if criterion_literal in ignored_output_device_values:
-                    logging.info("OutputDevicesMaskType skipping {}".format(criterion_literal))
-                    continue
+                    output_devices_type_value[criterion_literal] = "0x40000000"
+                else:
+                    try:
+                        string_int = int(criterion_numerical_value, 0)
+                        output_devices_type_value[criterion_literal] = criterion_numerical_value
+
+                        num_bits = bin(string_int).count("1")
+                        if num_bits > 1:
+                            logging.info("The value {}:{} is for criterion {} binary rep {} has {} bits sets"
+                                .format(criterion_numerical_value, criterion_literal, criterion_name, bin(string_int), num_bits))
+                            string_int = 2**multi_bit_outputdevice_shift
+                            logging.info("new val assigned is {} {}" .format(string_int, bin(string_int)))
+                            multi_bit_outputdevice_shift += 1
+                            criterion_numerical_value = str(string_int)
+
+                    except ValueError:
+                        # Handle the exception
+                        logging.info("The value {}:{} is for criterion {} is not a number, ignoring"
+                            .format(criterion_numerical_value, criterion_literal, criterion_name))
+                        continue
+
             try:
                 string_int = int(criterion_numerical_value, 0)
+
             except ValueError:
                 # Handle the exception
                 logging.info("The value {}:{} is for criterion {} is not a number, ignoring"
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.bp b/services/audiopolicy/engineconfigurable/wrapper/Android.bp
index 3e04b68..0ef0b82 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.bp
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.bp
@@ -19,6 +19,7 @@
     header_libs: [
         "libbase_headers",
         "libaudiopolicycommon",
+        "libaudiofoundation_headers",
     ],
     shared_libs: [
         "liblog",
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index 63990ac..099d55d 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -23,6 +23,7 @@
 #include <SelectionCriterionInterface.h>
 #include <media/convert.h>
 #include <algorithm>
+#include <cutils/bitops.h>
 #include <cutils/config_utils.h>
 #include <cutils/misc.h>
 #include <fstream>
@@ -31,6 +32,7 @@
 #include <string>
 #include <vector>
 #include <stdint.h>
+#include <cinttypes>
 #include <cmath>
 #include <utils/Log.h>
 
@@ -124,9 +126,22 @@
 
     for (auto pair : pairs) {
         std::string error;
-        ALOGV("%s: Adding pair %d,%s for criterionType %s", __FUNCTION__, pair.first,
-              pair.second.c_str(), name.c_str());
-        criterionType->addValuePair(pair.first, pair.second, error);
+        ALOGV("%s: Adding pair %" PRIu64", %s for criterionType %s", __func__, std::get<0>(pair),
+              std::get<2>(pair).c_str(), name.c_str());
+        criterionType->addValuePair(std::get<0>(pair), std::get<2>(pair), error);
+
+        if (name == gOutputDeviceCriterionName) {
+            ALOGV("%s: Adding mOutputDeviceToCriterionTypeMap %d %" PRIu64" for criterionType %s",
+                  __func__, std::get<1>(pair), std::get<0>(pair), name.c_str());
+            audio_devices_t androidType = static_cast<audio_devices_t>(std::get<1>(pair));
+            mOutputDeviceToCriterionTypeMap[androidType] = std::get<0>(pair);
+        }
+        if (name == gInputDeviceCriterionName) {
+            ALOGV("%s: Adding mInputDeviceToCriterionTypeMap %d %" PRIu64" for criterionType %s",
+                  __func__, std::get<1>(pair), std::get<0>(pair), name.c_str());
+            audio_devices_t androidType = static_cast<audio_devices_t>(std::get<1>(pair));
+            mInputDeviceToCriterionTypeMap[androidType] = std::get<0>(pair);
+        }
     }
     ALOG_ASSERT(mPolicyCriteria.find(name) == mPolicyCriteria.end(),
                 "%s: Criterion %s already added", __FUNCTION__, name.c_str());
@@ -135,7 +150,7 @@
     mPolicyCriteria[name] = criterion;
 
     if (not defaultValue.empty()) {
-        int numericalValue = 0;
+        uint64_t numericalValue = 0;
         if (not criterionType->getNumericalValue(defaultValue.c_str(), numericalValue)) {
             ALOGE("%s; trying to apply invalid default literal value (%s)", __FUNCTION__,
                   defaultValue.c_str());
@@ -263,7 +278,7 @@
 }
 
 status_t ParameterManagerWrapper::setDeviceConnectionState(
-        audio_devices_t type, const std::string address, audio_policy_dev_state_t state)
+        audio_devices_t type, const std::string &address, audio_policy_dev_state_t state)
 {
     std::string criterionName = audio_is_output_device(type) ?
                 gOutputDeviceAddressCriterionName : gInputDeviceAddressCriterionName;
@@ -279,7 +294,7 @@
     }
 
     auto criterionType = criterion->getCriterionType();
-    int deviceAddressId;
+    uint64_t deviceAddressId;
     if (not criterionType->getNumericalValue(address.c_str(), deviceAddressId)) {
         ALOGW("%s: unknown device address reported (%s) for criterion %s", __FUNCTION__,
               address.c_str(), criterionName.c_str());
@@ -296,28 +311,28 @@
     return NO_ERROR;
 }
 
-status_t ParameterManagerWrapper::setAvailableInputDevices(audio_devices_t inputDevices)
+status_t ParameterManagerWrapper::setAvailableInputDevices(const DeviceTypeSet &types)
 {
     ISelectionCriterionInterface *criterion =
             getElement<ISelectionCriterionInterface>(gInputDeviceCriterionName, mPolicyCriteria);
     if (criterion == NULL) {
-        ALOGE("%s: no criterion found for %s", __FUNCTION__, gInputDeviceCriterionName);
+        ALOGE("%s: no criterion found for %s", __func__, gInputDeviceCriterionName);
         return DEAD_OBJECT;
     }
-    criterion->setCriterionState(inputDevices & ~AUDIO_DEVICE_BIT_IN);
+    criterion->setCriterionState(convertDeviceTypesToCriterionValue(types));
     applyPlatformConfiguration();
     return NO_ERROR;
 }
 
-status_t ParameterManagerWrapper::setAvailableOutputDevices(audio_devices_t outputDevices)
+status_t ParameterManagerWrapper::setAvailableOutputDevices(const DeviceTypeSet &types)
 {
     ISelectionCriterionInterface *criterion =
             getElement<ISelectionCriterionInterface>(gOutputDeviceCriterionName, mPolicyCriteria);
     if (criterion == NULL) {
-        ALOGE("%s: no criterion found for %s", __FUNCTION__, gOutputDeviceCriterionName);
+        ALOGE("%s: no criterion found for %s", __func__, gOutputDeviceCriterionName);
         return DEAD_OBJECT;
     }
-    criterion->setCriterionState(outputDevices);
+    criterion->setCriterionState(convertDeviceTypesToCriterionValue(types));
     applyPlatformConfiguration();
     return NO_ERROR;
 }
@@ -327,5 +342,45 @@
     mPfwConnector->applyConfigurations();
 }
 
+uint64_t ParameterManagerWrapper::convertDeviceTypeToCriterionValue(audio_devices_t type) const {
+    bool isOut = audio_is_output_devices(type);
+    uint32_t typeMask = isOut ? type : (type & ~AUDIO_DEVICE_BIT_IN);
+
+    const auto &adapters = isOut ? mOutputDeviceToCriterionTypeMap : mInputDeviceToCriterionTypeMap;
+    // Only multibit devices need adaptation.
+    if (popcount(typeMask) > 1) {
+        const auto &adapter = adapters.find(type);
+        if (adapter != adapters.end()) {
+            ALOGV("%s: multibit device %d converted to criterion %" PRIu64, __func__, type,
+                  adapter->second);
+            return adapter->second;
+        }
+        ALOGE("%s: failed to find map for multibit device %d", __func__, type);
+        return 0;
+    }
+    return typeMask;
+}
+
+uint64_t ParameterManagerWrapper::convertDeviceTypesToCriterionValue(
+        const DeviceTypeSet &types) const {
+    uint64_t criterionValue = 0;
+    for (const auto &type : types) {
+        criterionValue += convertDeviceTypeToCriterionValue(type);
+    }
+    return criterionValue;
+}
+
+DeviceTypeSet ParameterManagerWrapper::convertDeviceCriterionValueToDeviceTypes(
+        uint64_t criterionValue, bool isOut) const {
+    DeviceTypeSet deviceTypes;
+    const auto &adapters = isOut ? mOutputDeviceToCriterionTypeMap : mInputDeviceToCriterionTypeMap;
+    for (const auto &adapter : adapters) {
+        if ((adapter.second & criterionValue) == adapter.second) {
+            deviceTypes.insert(adapter.first);
+        }
+    }
+    return deviceTypes;
+}
+
 } // namespace audio_policy
 } // namespace android
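
A short usage sketch of the new conversion helpers, assuming a wrapper whose criterion maps were already populated from the parsed android_type attributes and that ParameterManagerWrapper.h and media/AudioContainers.h are on the include path; exampleRoundTrip is illustrative, not part of the change:

    // DeviceTypeSet -> 64-bit criterion value -> DeviceTypeSet round trip.
    void exampleRoundTrip(const android::audio_policy::ParameterManagerWrapper &wrapper,
                          const android::DeviceTypeSet &availableOutputDevices) {
        // Single-bit device types pass through unchanged; multi-bit types go through
        // the per-direction adaptation maps built in addCriterion().
        const uint64_t criterionValue =
                wrapper.convertDeviceTypesToCriterionValue(availableOutputDevices);

        // Reverse direction, e.g. when the parameter framework reports a strategy's
        // applicable devices as a criterion value and the engine needs Android types again.
        const android::DeviceTypeSet roundTrip =
                wrapper.convertDeviceCriterionValueToDeviceTypes(criterionValue, true /*isOut*/);
        (void)roundTrip;
    }
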
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index 62b129a..fa4ae1e 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -16,6 +16,7 @@
 
 #pragma once
 
+#include <media/AudioContainers.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
 #include <utils/Errors.h>
@@ -35,7 +36,8 @@
 namespace android {
 namespace audio_policy {
 
-using ValuePair = std::pair<uint32_t, std::string>;
+using ValuePair = std::tuple<uint64_t, uint32_t, std::string>;
+using DeviceToCriterionTypeAdapter = std::map<audio_devices_t, uint64_t>;
 using ValuePairs = std::vector<ValuePair>;
 
 class ParameterManagerWrapper
@@ -105,7 +107,7 @@
      *
      * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
      */
-    status_t setAvailableInputDevices(audio_devices_t inputDevices);
+    status_t setAvailableInputDevices(const DeviceTypeSet &inputDeviceTypes);
 
     /**
      * Set the available output devices i.e. set the associated policy parameter framework criterion
@@ -114,7 +116,7 @@
      *
      * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
      */
-    status_t setAvailableOutputDevices(audio_devices_t outputDevices);
+    status_t setAvailableOutputDevices(const DeviceTypeSet &outputDeviceTypes);
 
     /**
      * @brief setDeviceConnectionState propagates a state event on a given device(s)
@@ -124,7 +126,7 @@
     * @return NO_ERROR if new state correctly propagated to Engine Parameter-Framework, error
      * code otherwise.
      */
-    status_t setDeviceConnectionState(audio_devices_t type, const std::string address,
+    status_t setDeviceConnectionState(audio_devices_t type, const std::string &address,
                                       audio_policy_dev_state_t state);
 
     /**
@@ -138,6 +140,13 @@
     status_t addCriterion(const std::string &name, bool isInclusive, ValuePairs pairs,
                           const std::string &defaultValue);
 
+    uint64_t convertDeviceTypeToCriterionValue(audio_devices_t type) const;
+
+    uint64_t convertDeviceTypesToCriterionValue(const DeviceTypeSet &types) const;
+
+    DeviceTypeSet convertDeviceCriterionValueToDeviceTypes(
+            uint64_t criterionValue, bool isOut) const;
+
 private:
     /**
      * Apply the configuration of the platform on the policy parameter manager.
@@ -211,6 +220,9 @@
     template <typename T>
     struct parameterManagerElementSupported;
 
+    DeviceToCriterionTypeAdapter mOutputDeviceToCriterionTypeMap;
+    DeviceToCriterionTypeAdapter mInputDeviceToCriterionTypeMap;
+
     static const char *const mPolicyPfwDefaultConfFileName; /**< Default Policy PFW top file name.*/
     static const char *const mPolicyPfwVendorConfFileName; /**< Vendor Policy PFW top file name.*/
 };
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 7f9c0ac..4671fe9 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -31,6 +31,7 @@
     ],
     shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
index bc32416..0ddf66d 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -57,9 +57,6 @@
     <ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
         <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
             <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
             <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
         </AttributesGroup>
     </ProductStrategy>
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index c73c17d..dc34a38 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -272,7 +272,8 @@
         devices = availableOutputDevices.getFirstDevicesFromTypes(
                                           getLastRemovableMediaDevices());
         if (!devices.isEmpty()) break;
-        devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_EARPIECE);
+        devices = availableOutputDevices.getFirstDevicesFromTypes({
+                AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_EARPIECE});
     } break;
 
     case STRATEGY_SONIFICATION:
@@ -364,7 +365,8 @@
                     AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET);
         }
         if (devices2.isEmpty()) {
-            devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
+            devices2 = availableOutputDevices.getFirstDevicesFromTypes({
+                        AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_SPEAKER});
         }
         DeviceVector devices3;
         if (strategy == STRATEGY_MEDIA) {
@@ -460,6 +462,7 @@
         case AUDIO_SOURCE_HOTWORD:
         case AUDIO_SOURCE_CAMCORDER:
         case AUDIO_SOURCE_VOICE_PERFORMANCE:
+        case AUDIO_SOURCE_ULTRASOUND:
             inputSource = AUDIO_SOURCE_VOICE_COMMUNICATION;
             break;
         default:
@@ -586,6 +589,10 @@
         device = availableDevices.getDevice(
                 AUDIO_DEVICE_IN_ECHO_REFERENCE, String8(""), AUDIO_FORMAT_DEFAULT);
         break;
+    case AUDIO_SOURCE_ULTRASOUND:
+        device = availableDevices.getFirstExistingDevice({
+                AUDIO_DEVICE_IN_BUILTIN_MIC, AUDIO_DEVICE_IN_BACK_MIC});
+        break;
     default:
         ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
         break;
@@ -645,7 +652,7 @@
         // there is a preferred device, is it available?
         preferredAvailableDevVec =
                 availableOutputDevices.getDevicesFromDeviceTypeAddrVec(preferredStrategyDevices);
-        if (preferredAvailableDevVec.size() == preferredAvailableDevVec.size()) {
+        if (preferredAvailableDevVec.size() == preferredStrategyDevices.size()) {
             ALOGVV("%s using pref device %s for strategy %u",
                    __func__, preferredAvailableDevVec.toString().c_str(), strategy);
             return preferredAvailableDevVec;
diff --git a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
index 7000cd9..dff36e2 100644
--- a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
+++ b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
@@ -30,6 +30,7 @@
 #include <libxml/parser.h>
 #include <libxml/xinclude.h>
 #include <media/AudioPolicy.h>
+#include <media/AudioProfile.h>
 #include <media/PatchBuilder.h>
 #include <media/RecordingActivityTracker.h>
 
@@ -163,7 +164,9 @@
     AUDIO_FLAG_BYPASS_MUTE,    AUDIO_FLAG_LOW_LATENCY,
     AUDIO_FLAG_DEEP_BUFFER,    AUDIO_FLAG_NO_MEDIA_PROJECTION,
     AUDIO_FLAG_MUTE_HAPTIC,    AUDIO_FLAG_NO_SYSTEM_CAPTURE,
-    AUDIO_FLAG_CAPTURE_PRIVATE};
+    AUDIO_FLAG_CAPTURE_PRIVATE, AUDIO_FLAG_CONTENT_SPATIALIZED,
+    AUDIO_FLAG_NEVER_SPATIALIZE,
+    };
 
 std::vector<audio_policy_dev_state_t> kAudioPolicyDeviceStates = {
     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
@@ -839,6 +842,9 @@
         : AudioPolicyManagerFuzzerWithConfigurationFile(fdp){};
     void process() override;
 
+    void fuzzGetDirectPlaybackSupport();
+    void fuzzGetDirectProfilesForAttributes();
+
    protected:
     void setDeviceConnectionState();
     void explicitlyRoutingAfterConnection();
@@ -889,10 +895,41 @@
     }
 }
 
+void AudioPolicyManagerFuzzerDeviceConnection::fuzzGetDirectPlaybackSupport() {
+    const uint32_t numTestCases = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+    for (uint32_t i = 0; i < numTestCases; ++i) {
+        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
+        attr.content_type = getValueFromVector<audio_content_type_t>(mFdp, kAudioContentTypes);
+        attr.usage = getValueFromVector<audio_usage_t>(mFdp, kAudioUsages);
+        attr.source = getValueFromVector<audio_source_t>(mFdp, kAudioSources);
+        attr.flags = getValueFromVector<audio_flags_mask_t>(mFdp, kAudioFlagMasks);
+        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+        config.channel_mask = getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelOutMasks);
+        config.format = getValueFromVector<audio_format_t>(mFdp, kAudioFormats);
+        config.sample_rate = getValueFromVector<uint32_t>(mFdp, kSamplingRates);
+        mManager->getDirectPlaybackSupport(&attr, &config);
+    }
+}
+
+void AudioPolicyManagerFuzzerDeviceConnection::fuzzGetDirectProfilesForAttributes() {
+    const uint32_t numTestCases = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+    for (uint32_t i = 0; i < numTestCases; ++i) {
+        AudioProfileVector audioProfiles;
+        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
+        attr.content_type = getValueFromVector<audio_content_type_t>(mFdp, kAudioContentTypes);
+        attr.usage = getValueFromVector<audio_usage_t>(mFdp, kAudioUsages);
+        attr.source = getValueFromVector<audio_source_t>(mFdp, kAudioSources);
+        attr.flags = getValueFromVector<audio_flags_mask_t>(mFdp, kAudioFlagMasks);
+        mManager->getDirectProfilesForAttributes(&attr, audioProfiles);
+    }
+}
+
 void AudioPolicyManagerFuzzerDeviceConnection::process() {
     if (initialize()) {
         setDeviceConnectionState();
         explicitlyRoutingAfterConnection();
+        fuzzGetDirectPlaybackSupport();
+        fuzzGetDirectProfilesForAttributes();
         fuzzPatchCreation();
     }
 }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index cc2d8e8..bd295ce 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -246,8 +246,8 @@
                     sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
                     // close unused outputs after device disconnection or direct outputs that have
                     // been opened by checkOutputsForDevice() to query dynamic parameters
-                    if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
-                            (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+                    if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE)
+                            || (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
                                 (desc->mDirectOpenCount == 0))) {
                         clearAudioSourcesForOutput(output);
                         closeOutput(output);
@@ -525,10 +525,10 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::getHwOffloadEncodingFormatsSupportedForA2DP(
-                                    std::vector<audio_format_t> *formats)
+status_t AudioPolicyManager::getHwOffloadFormatsSupportedForBluetoothMedia(
+                                    audio_devices_t device, std::vector<audio_format_t> *formats)
 {
-    ALOGV("getHwOffloadEncodingFormatsSupportedForA2DP()");
+    ALOGV("getHwOffloadFormatsSupportedForBluetoothMedia()");
     status_t status = NO_ERROR;
     std::unordered_set<audio_format_t> formatSet;
     sp<HwModule> primaryModule =
@@ -537,8 +537,23 @@
         ALOGE("%s() unable to get primary module", __func__);
         return NO_INIT;
     }
+
+    DeviceTypeSet audioDeviceSet;
+
+    switch(device) {
+    case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
+        audioDeviceSet = getAudioDeviceOutAllA2dpSet();
+        break;
+    case AUDIO_DEVICE_OUT_BLE_HEADSET:
+        audioDeviceSet = getAudioDeviceOutAllBleSet();
+        break;
+    default:
+        ALOGE("%s() device type 0x%08x not supported", __func__, device);
+        return BAD_VALUE;
+    }
+
     DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypes(
-            getAudioDeviceOutAllA2dpSet());
+            audioDeviceSet);
     for (const auto& device : declaredDevices) {
         formatSet.insert(device->encodedFormats().begin(), device->encodedFormats().end());
     }
@@ -925,6 +940,32 @@
     return profile;
 }
 
+sp<IOProfile> AudioPolicyManager::getSpatializerOutputProfile(
+        const audio_config_t *config __unused, const AudioDeviceTypeAddrVector &devices) const
+{
+    for (const auto& hwModule : mHwModules) {
+        for (const auto& curProfile : hwModule->getOutputProfiles()) {
+            if (curProfile->getFlags() != AUDIO_OUTPUT_FLAG_SPATIALIZER) {
+                continue;
+            }
+            // reject profiles not corresponding to a device currently available
+            DeviceVector supportedDevices = curProfile->getSupportedDevices();
+            if (!mAvailableOutputDevices.containsAtLeastOne(supportedDevices)) {
+                continue;
+            }
+            if (!devices.empty()) {
+                if (supportedDevices.getDevicesFromDeviceTypeAddrVec(devices).size()
+                        != devices.size()) {
+                    continue;
+                }
+            }
+            ALOGV("%s found profile %s", __func__, curProfile->getName().c_str());
+            return curProfile;
+        }
+    }
+    return nullptr;
+}
+
 audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
 {
     DeviceVector devices = mEngine->getOutputDevicesForStream(stream, false /*fromCache*/);
@@ -1094,7 +1135,7 @@
 
     *output = AUDIO_IO_HANDLE_NONE;
     if (!msdDevices.isEmpty()) {
-        *output = getOutputForDevices(msdDevices, session, *stream, config, flags);
+        *output = getOutputForDevices(msdDevices, session, resultAttr, config, flags);
         if (*output != AUDIO_IO_HANDLE_NONE && setMsdOutputPatches(&outputDevices) == NO_ERROR) {
             ALOGV("%s() Using MSD devices %s instead of devices %s",
                   __func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
@@ -1103,7 +1144,7 @@
         }
     }
     if (*output == AUDIO_IO_HANDLE_NONE) {
-        *output = getOutputForDevices(outputDevices, session, *stream, config,
+        *output = getOutputForDevices(outputDevices, session, resultAttr, config,
                 flags, resultAttr->flags & AUDIO_FLAG_MUTE_HAPTIC);
     }
     if (*output == AUDIO_IO_HANDLE_NONE) {
@@ -1265,7 +1306,8 @@
     // all MSD patches to prioritize this request over any active output on MSD.
     releaseMsdOutputPatches(devices);
 
-    status_t status = outputDesc->open(config, devices, stream, flags, output);
+    status_t status =
+            outputDesc->open(config, nullptr /* mixerConfig */, devices, stream, flags, output);
 
     // only accept an output with the requested parameters
     if (status != NO_ERROR ||
@@ -1300,7 +1342,7 @@
 audio_io_handle_t AudioPolicyManager::getOutputForDevices(
         const DeviceVector &devices,
         audio_session_t session,
-        audio_stream_type_t stream,
+        const audio_attributes_t *attr,
         const audio_config_t *config,
         audio_output_flags_t *flags,
         bool forceMutingHaptic)
@@ -1322,6 +1364,9 @@
     if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
         *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
     }
+
+    audio_stream_type_t stream = mEngine->getStreamTypeForAttributes(*attr);
+
     // only allow deep buffering for music stream type
     if (stream != AUDIO_STREAM_MUSIC) {
         *flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
@@ -1341,6 +1386,17 @@
         ALOGV("Set VoIP and Direct output flags for PCM format");
     }
 
+    // Attach the ultrasound flag for AUDIO_CONTENT_TYPE_ULTRASOUND content
+    if (attr->content_type == AUDIO_CONTENT_TYPE_ULTRASOUND) {
+        *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_ULTRASOUND);
+    }
+
+    if (mSpatializerOutput != nullptr
+            && canBeSpatializedInt(attr, config,
+                    devices.toTypeAddrVector(), false /* allowCurrentOutputReconfig */)) {
+        return mSpatializerOutput->mIoHandle;
+    }
+
     audio_config_t directConfig = *config;
     directConfig.channel_mask = channelMask;
     status_t status = openDirectOutput(stream, session, &directConfig, *flags, devices, &output);
@@ -1633,7 +1689,8 @@
     // other criteria
     static const audio_output_flags_t kFunctionalFlags = (audio_output_flags_t)
         (AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_INCALL_MUSIC |
-            AUDIO_OUTPUT_FLAG_TTS | AUDIO_OUTPUT_FLAG_DIRECT_PCM);
+            AUDIO_OUTPUT_FLAG_TTS | AUDIO_OUTPUT_FLAG_DIRECT_PCM | AUDIO_OUTPUT_FLAG_ULTRASOUND |
+            AUDIO_OUTPUT_FLAG_SPATIALIZER);
     // Flags expressing a performance request: have lower priority than serving
     // requested sampling rate or channel mask
     static const audio_output_flags_t kPerformanceFlags = (audio_output_flags_t)
@@ -1652,6 +1709,8 @@
     // The priority is as follows:
     // 1: the output supporting haptic playback when requesting haptic playback
     // 2: the output with the highest number of requested functional flags
+    //    with tiebreak preferring the minimum number of extra functional flags
+    //    (see b/200293124, the incorrect selection of AUDIO_OUTPUT_FLAG_VOIP_RX).
     // 3: the output supporting the exact channel mask
     // 4: the output with a higher channel count than requested
     // 5: the output with a higher sampling rate than requested
@@ -1693,7 +1752,12 @@
         }
 
         // functional flags match
-        currentMatchCriteria[1] = popcount(outputDesc->mFlags & functionalFlags);
+        const int matchingFunctionalFlags =
+                __builtin_popcount(outputDesc->mFlags & functionalFlags);
+        const int totalFunctionalFlags =
+                __builtin_popcount(outputDesc->mFlags & kFunctionalFlags);
+        // Prefer matching functional flags, but subtract unnecessary functional flags.
+        currentMatchCriteria[1] = 100 * (matchingFunctionalFlags + 1) - totalFunctionalFlags;
 
         // channel mask and channel count match
         uint32_t outputChannelCount = audio_channel_count_from_out_mask(
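
A minimal, self-contained sketch of the tie-break arithmetic above; the flag constants here are hypothetical stand-ins for two of the functional flag bits, not the real audio_output_flags_t values:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for two of the bits grouped in kFunctionalFlags.
    constexpr uint32_t kVoipRx      = 1u << 0;
    constexpr uint32_t kIncallMusic = 1u << 1;
    constexpr uint32_t kFunctional  = kVoipRx | kIncallMusic;

    // Mirrors currentMatchCriteria[1]: reward requested functional flags, penalize extra ones.
    static int score(uint32_t outputFlags, uint32_t requestedFunctional) {
        const int matching = __builtin_popcount(outputFlags & requestedFunctional);
        const int total = __builtin_popcount(outputFlags & kFunctional);
        return 100 * (matching + 1) - total;
    }

    int main() {
        const uint32_t requested = kVoipRx;
        printf("output A (VOIP_RX only):         %d\n", score(kVoipRx, requested));                // 199
        printf("output B (VOIP_RX|INCALL_MUSIC): %d\n", score(kVoipRx | kIncallMusic, requested)); // 198
        // A wins: same number of requested functional flags, fewer unnecessary ones (b/200293124).
        return 0;
    }
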
@@ -2123,8 +2187,9 @@
                                              audio_port_handle_t *portId)
 {
     ALOGV("%s() source %d, sampling rate %d, format %#x, channel mask %#x, session %d, "
-          "flags %#x attributes=%s", __func__, attr->source, config->sample_rate,
-          config->format, config->channel_mask, session, flags, toString(*attr).c_str());
+          "flags %#x attributes=%s requested device ID %d",
+          __func__, attr->source, config->sample_rate, config->format, config->channel_mask,
+          session, flags, toString(*attr).c_str(), *selectedDeviceId);
 
     status_t status = NO_ERROR;
     audio_source_t halInputSource;
@@ -2147,7 +2212,7 @@
     }
 
     // Explicit routing?
-    sp<DeviceDescriptor> explicitRoutingDevice = 
+    sp<DeviceDescriptor> explicitRoutingDevice =
             mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
 
     // special case for mmap capture: if an input IO handle is specified, we reuse this input if
@@ -2313,6 +2378,10 @@
         flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
     }
 
+    if (attributes.source == AUDIO_SOURCE_ULTRASOUND) {
+        flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_ULTRASOUND);
+    }
+
     // find a compatible input profile (not necessarily identical in parameters)
     sp<IOProfile> profile;
     // sampling rate and flags may be updated by getInputProfile
@@ -2329,11 +2398,11 @@
             break; // success
         } else if (profileFlags & AUDIO_INPUT_FLAG_RAW) {
             profileFlags = (audio_input_flags_t) (profileFlags & ~AUDIO_INPUT_FLAG_RAW); // retry
-        } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
+        } else if (profileFlags != AUDIO_INPUT_FLAG_NONE && audio_is_linear_pcm(config->format)) {
             profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
         } else { // fail
             ALOGW("%s could not find profile for device %s, sampling rate %u, format %#x, "
-                  "channel mask 0x%X, flags %#x", __func__, device->toString().c_str(), 
+                  "channel mask 0x%X, flags %#x", __func__, device->toString().c_str(),
                   config->sample_rate, config->format, config->channel_mask, flags);
             return input;
         }
@@ -3532,7 +3601,7 @@
 void AudioPolicyManager::dump(String8 *dst) const
 {
     dst->appendFormat("\nAudioPolicyManager Dump: %p\n", this);
-    dst->appendFormat(" Primary Output: %d\n",
+    dst->appendFormat(" Primary Output I/O handle: %d\n",
              hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
     std::string stateLiteral;
     AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
@@ -3557,12 +3626,14 @@
     dst->appendFormat(" Communnication Strategy: %d\n", mCommunnicationStrategy);
     dst->appendFormat(" Config source: %s\n", mConfig.getSource().c_str()); // getConfig not const
 
-    mAvailableOutputDevices.dump(dst, String8("Available output"));
-    mAvailableInputDevices.dump(dst, String8("Available input"));
+    dst->append("\n");
+    mAvailableOutputDevices.dump(dst, String8("Available output"), 1);
+    dst->append("\n");
+    mAvailableInputDevices.dump(dst, String8("Available input"), 1);
     mHwModulesAll.dump(dst);
     mOutputs.dump(dst);
     mInputs.dump(dst);
-    mEffects.dump(dst);
+    mEffects.dump(dst, 1);
     mAudioPatches.dump(dst);
     mPolicyMixes.dump(dst);
     mAudioSources.dump(dst);
@@ -3602,53 +3673,7 @@
      offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us,
      offloadInfo.has_video);
 
-    if (mMasterMono) {
-        return AUDIO_OFFLOAD_NOT_SUPPORTED; // no offloading if mono is set.
-    }
-
-    // Check if offload has been disabled
-    if (property_get_bool("audio.offload.disable", false /* default_value */)) {
-        ALOGV("%s: offload disabled by audio.offload.disable", __func__);
-        return AUDIO_OFFLOAD_NOT_SUPPORTED;
-    }
-
-    // Check if stream type is music, then only allow offload as of now.
-    if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
-    {
-        ALOGV("%s: stream_type != MUSIC, returning false", __func__);
-        return AUDIO_OFFLOAD_NOT_SUPPORTED;
-    }
-
-    //TODO: enable audio offloading with video when ready
-    const bool allowOffloadWithVideo =
-            property_get_bool("audio.offload.video", false /* default_value */);
-    if (offloadInfo.has_video && !allowOffloadWithVideo) {
-        ALOGV("%s: has_video == true, returning false", __func__);
-        return AUDIO_OFFLOAD_NOT_SUPPORTED;
-    }
-
-    //If duration is less than minimum value defined in property, return false
-    const int min_duration_secs = property_get_int32(
-            "audio.offload.min.duration.secs", -1 /* default_value */);
-    if (min_duration_secs >= 0) {
-        if (offloadInfo.duration_us < min_duration_secs * 1000000LL) {
-            ALOGV("%s: Offload denied by duration < audio.offload.min.duration.secs(=%d)",
-                    __func__, min_duration_secs);
-            return AUDIO_OFFLOAD_NOT_SUPPORTED;
-        }
-    } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
-        ALOGV("%s: Offload denied by duration < default min(=%u)",
-                __func__, OFFLOAD_DEFAULT_MIN_DURATION_SECS);
-        return AUDIO_OFFLOAD_NOT_SUPPORTED;
-    }
-
-    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
-    // creating an offloaded track and tearing it down immediately after start when audioflinger
-    // detects there is an active non offloadable effect.
-    // FIXME: We should check the audio session here but we do not have it in this context.
-    // This may prevent offloading in rare situations where effects are left active by apps
-    // in the background.
-    if (mEffects.isNonOffloadableEffectEnabled()) {
+    if (!isOffloadPossible(offloadInfo)) {
         return AUDIO_OFFLOAD_NOT_SUPPORTED;
     }
 
@@ -3676,7 +3701,8 @@
                                                  const audio_attributes_t& attributes) {
     audio_output_flags_t output_flags = AUDIO_OUTPUT_FLAG_NONE;
     audio_flags_to_audio_output_flags(attributes.flags, &output_flags);
-    sp<IOProfile> profile = getProfileForOutput(DeviceVector() /*ignore device */,
+    DeviceVector outputDevices = mEngine->getOutputDevicesForAttributes(attributes);
+    sp<IOProfile> profile = getProfileForOutput(outputDevices,
                                             config.sample_rate,
                                             config.format,
                                             config.channel_mask,
@@ -3690,6 +3716,164 @@
     return (profile != 0);
 }
 
+bool AudioPolicyManager::isOffloadPossible(const audio_offload_info_t &offloadInfo,
+                                           bool durationIgnored) {
+    if (mMasterMono) {
+        return false; // no offloading if mono is set.
+    }
+
+    // Check if offload has been disabled
+    if (property_get_bool("audio.offload.disable", false /* default_value */)) {
+        ALOGV("%s: offload disabled by audio.offload.disable", __func__);
+        return false;
+    }
+
+    // Check if stream type is music, then only allow offload as of now.
+    if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
+    {
+        ALOGV("%s: stream_type != MUSIC, returning false", __func__);
+        return false;
+    }
+
+    //TODO: enable audio offloading with video when ready
+    const bool allowOffloadWithVideo =
+            property_get_bool("audio.offload.video", false /* default_value */);
+    if (offloadInfo.has_video && !allowOffloadWithVideo) {
+        ALOGV("%s: has_video == true, returning false", __func__);
+        return false;
+    }
+
+    //If duration is less than minimum value defined in property, return false
+    const int min_duration_secs = property_get_int32(
+            "audio.offload.min.duration.secs", -1 /* default_value */);
+    if (!durationIgnored) {
+        if (min_duration_secs >= 0) {
+            if (offloadInfo.duration_us < min_duration_secs * 1000000LL) {
+                ALOGV("%s: Offload denied by duration < audio.offload.min.duration.secs(=%d)",
+                      __func__, min_duration_secs);
+                return false;
+            }
+        } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
+            ALOGV("%s: Offload denied by duration < default min(=%u)",
+                  __func__, OFFLOAD_DEFAULT_MIN_DURATION_SECS);
+            return false;
+        }
+    }
+
+    // Do not allow offloading if a non-offloadable effect is enabled. This prevents
+    // creating an offloaded track and tearing it down immediately after start when audioflinger
+    // detects there is an active non-offloadable effect.
+    // FIXME: We should check the audio session here but we do not have it in this context.
+    // This may prevent offloading in rare situations where effects are left active by apps
+    // in the background.
+    if (mEffects.isNonOffloadableEffectEnabled()) {
+        return false;
+    }
+
+    return true;
+}
+
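
A worked example of the duration gate above: with audio.offload.min.duration.secs set to 30, a 20-second clip (duration_us = 20,000,000) is denied offload because 20,000,000 < 30 * 1,000,000; with the property unset (default -1), the comparison falls back to OFFLOAD_DEFAULT_MIN_DURATION_SECS. When durationIgnored is true, as in getDirectPlaybackSupport() below, the whole duration check is skipped.
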
+audio_direct_mode_t AudioPolicyManager::getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                                                 const audio_config_t *config) {
+    audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+    offloadInfo.format = config->format;
+    offloadInfo.sample_rate = config->sample_rate;
+    offloadInfo.channel_mask = config->channel_mask;
+    offloadInfo.stream_type = mEngine->getStreamTypeForAttributes(*attr);
+    offloadInfo.has_video = false;
+    offloadInfo.is_streaming = false;
+    const bool offloadPossible = isOffloadPossible(offloadInfo, true /*durationIgnored*/);
+
+    audio_direct_mode_t directMode = AUDIO_DIRECT_NOT_SUPPORTED;
+    audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
+    audio_flags_to_audio_output_flags(attr->flags, &flags);
+    // only retain flags that will drive compressed offload or passthrough
+    uint32_t relevantFlags = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    if (offloadPossible) {
+        relevantFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+    }
+    flags = (audio_output_flags_t)((flags & relevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+
+    DeviceVector outputDevices = mEngine->getOutputDevicesForAttributes(*attr);
+    for (const auto& hwModule : mHwModules) {
+        for (const auto& curProfile : hwModule->getOutputProfiles()) {
+            if (!curProfile->isCompatibleProfile(outputDevices,
+                    config->sample_rate, nullptr /*updatedSamplingRate*/,
+                    config->format, nullptr /*updatedFormat*/,
+                    config->channel_mask, nullptr /*updatedChannelMask*/,
+                    flags)) {
+                continue;
+            }
+            // reject profiles not corresponding to a device currently available
+            if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) {
+                continue;
+            }
+            if ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+                        != AUDIO_OUTPUT_FLAG_NONE) {
+                if ((directMode & AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED)
+                        != AUDIO_DIRECT_NOT_SUPPORTED) {
+                    // Already reports offload gapless supported. No need to report offload support.
+                    continue;
+                }
+                if ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD)
+                        != AUDIO_OUTPUT_FLAG_NONE) {
+                    // If offload gapless is reported, no need to report offload support.
+                    directMode = (audio_direct_mode_t) ((directMode &
+                            ~AUDIO_DIRECT_OFFLOAD_SUPPORTED) |
+                            AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED);
+                } else {
+                    directMode = (audio_direct_mode_t)(directMode |AUDIO_DIRECT_OFFLOAD_SUPPORTED);
+                }
+            } else {
+                directMode = (audio_direct_mode_t) (directMode |
+                                                    AUDIO_DIRECT_BITSTREAM_SUPPORTED);
+            }
+        }
+    }
+    return directMode;
+}
+
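
A minimal sketch of how a caller might decode the returned bitmask; the enum values below are hypothetical stand-ins for the audio_direct_mode_t bits used above, not the real platform constants:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the audio_direct_mode_t bits referenced above.
    enum : uint32_t {
        kDirectNotSupported            = 0,
        kDirectOffloadSupported        = 1u << 0,
        kDirectOffloadGaplessSupported = 1u << 1,
        kDirectBitstreamSupported      = 1u << 2,
    };

    // Prints a human-readable summary of a direct playback support bitmask.
    static void describeDirectSupport(uint32_t mode) {
        if (mode == kDirectNotSupported) {
            printf("no direct path for this attributes/config pair\n");
            return;
        }
        if (mode & kDirectOffloadGaplessSupported) {
            printf("compressed offload with gapless transitions\n");  // implies offload support
        } else if (mode & kDirectOffloadSupported) {
            printf("compressed offload\n");
        }
        if (mode & kDirectBitstreamSupported) {
            printf("direct bitstream (passthrough)\n");
        }
    }

    int main() {
        describeDirectSupport(kDirectOffloadGaplessSupported | kDirectBitstreamSupported);
        return 0;
    }
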
+status_t AudioPolicyManager::getDirectProfilesForAttributes(const audio_attributes_t* attr,
+                                                AudioProfileVector& audioProfilesVector) {
+    AudioDeviceTypeAddrVector devices;
+    status_t status = getDevicesForAttributes(*attr, &devices);
+    if (status != OK) {
+        return status;
+    }
+    ALOGV("%s: found %zu output devices for attributes.", __func__, devices.size());
+    if (devices.empty()) {
+        return OK; // no output devices for the attributes
+    }
+
+    for (const auto& hwModule : mHwModules) {
+        for (const auto& curProfile : hwModule->getOutputProfiles()) {
+            if (!curProfile->asAudioPort()->isDirectOutput()) {
+                continue;
+            }
+            // Allow only profiles that support all the available and routed devices
+            DeviceVector supportedDevices = curProfile->getSupportedDevices();
+            if (supportedDevices.getDevicesFromDeviceTypeAddrVec(devices).size()
+                    != devices.size()) {
+                continue;
+            }
+
+            const auto audioProfiles = curProfile->asAudioPort()->getAudioProfiles();
+            ALOGV("%s: found direct profile (%s) with %zu audio profiles.",
+                __func__, curProfile->getTagName().c_str(), audioProfiles.size());
+            for (const auto& audioProfile : audioProfiles) {
+                if (audioProfile->isValid() && !audioProfilesVector.contains(audioProfile)
+                // TODO - why do we have the same PCM format with both dynamic and non-dynamic formats
+                    && audioProfile->isDynamicFormat()) {
+                    ALOGV("%s: adding audio profile with encoding (%d).",
+                        __func__, audioProfile->getFormat());
+                    audioProfilesVector.add(audioProfile);
+                }
+            }
+        }
+    }
+    return NO_ERROR;
+}
+
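
A hedged usage sketch, assuming an AudioPolicyManager instance named apm and the Android audio policy headers; it lists the formats of the direct profiles reported for a media use case:

    // Sketch only: "apm" and the surrounding setup are assumptions, not part of this patch.
    audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
    attr.usage = AUDIO_USAGE_MEDIA;
    AudioProfileVector directProfiles;
    if (apm->getDirectProfilesForAttributes(&attr, directProfiles) == NO_ERROR) {
        for (const auto& profile : directProfiles) {
            ALOGV("direct profile: format %#x", profile->getFormat());
        }
    }
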
 status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role,
                                             audio_port_type_t type,
                                             unsigned int *num_ports,
@@ -4065,7 +4249,7 @@
                     }
                     if (outputDesc != nullptr) {
                         audio_port_config srcMixPortConfig = {};
-                        outputDesc->toAudioPortConfig(&srcMixPortConfig, &patch->sources[0]);
+                        outputDesc->toAudioPortConfig(&srcMixPortConfig, nullptr);
                         // for volume control, we may need a valid stream
                         srcMixPortConfig.ext.mix.usecase.stream = sourceDesc != nullptr ?
                                     sourceDesc->stream() : AUDIO_STREAM_PATCH;
@@ -4749,6 +4933,37 @@
     return false;
 }
 
+bool AudioPolicyManager::isUltrasoundSupported()
+{
+    bool hasUltrasoundOutput = false;
+    bool hasUltrasoundInput = false;
+    for (const auto& hwModule : mHwModules) {
+        const OutputProfileCollection &outputProfiles = hwModule->getOutputProfiles();
+        if (!hasUltrasoundOutput) {
+            for (const auto &outProfile : outputProfiles) {
+                if (outProfile->getFlags() & AUDIO_OUTPUT_FLAG_ULTRASOUND) {
+                    hasUltrasoundOutput = true;
+                    break;
+                }
+            }
+        }
+
+        const InputProfileCollection &inputProfiles = hwModule->getInputProfiles();
+        if (!hasUltrasoundInput) {
+            for (const auto &inputProfile : inputProfiles) {
+                if (inputProfile->getFlags() & AUDIO_INPUT_FLAG_ULTRASOUND) {
+                    hasUltrasoundInput = true;
+                    break;
+                }
+            }
+        }
+
+        if (hasUltrasoundOutput && hasUltrasoundInput)
+            return true;
+    }
+    return false;
+}
+
 bool AudioPolicyManager::isCallScreenModeSupported()
 {
     return getConfig().isCallScreenModeSupported();
@@ -4802,6 +5017,200 @@
     return source;
 }
 
+/* static */
+bool AudioPolicyManager::isChannelMaskSpatialized(audio_channel_mask_t channels) {
+    switch (channels) {
+        case AUDIO_CHANNEL_OUT_5POINT1:
+        case AUDIO_CHANNEL_OUT_5POINT1POINT2:
+        case AUDIO_CHANNEL_OUT_5POINT1POINT4:
+        case AUDIO_CHANNEL_OUT_7POINT1:
+        case AUDIO_CHANNEL_OUT_7POINT1POINT2:
+        case AUDIO_CHANNEL_OUT_7POINT1POINT4:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool AudioPolicyManager::canBeSpatializedInt(const audio_attributes_t *attr,
+                                      const audio_config_t *config,
+                                      const AudioDeviceTypeAddrVector &devices,
+                                      bool allowCurrentOutputReconfig)  const
+{
+    // The caller can have the audio attributes criteria ignored by either passing a null ptr or
+    // the AUDIO_ATTRIBUTES_INITIALIZER value.
+    // If attributes are specified, current policy is to only allow spatialization for media
+    // and game usages.
+    if (attr != nullptr && *attr != AUDIO_ATTRIBUTES_INITIALIZER) {
+        if (attr->usage != AUDIO_USAGE_MEDIA && attr->usage != AUDIO_USAGE_GAME) {
+            return false;
+        }
+        if ((attr->flags & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) != 0) {
+            return false;
+        }
+    }
+
+    // The caller can have the devices criteria ignored by passing an empty vector, and
+    // getSpatializerOutputProfile() will ignore the devices when looking for a match.
+    // Otherwise an output profile supporting a spatializer effect that can be routed
+    // to the specified devices must exist.
+    sp<IOProfile> profile =
+            getSpatializerOutputProfile(config, devices);
+    if (profile == nullptr) {
+        return false;
+    }
+
+    // The caller can have the audio config criteria ignored by either passing a null ptr or
+    // the AUDIO_CONFIG_INITIALIZER value.
+    // If an audio config is specified, current policy is to only allow spatialization for
+    // some positional channel masks.
+    // If the spatializer output is already opened, only channel masks included in the
+    // spatializer output mixer channel mask are allowed.
+
+    if (config != nullptr && *config != AUDIO_CONFIG_INITIALIZER) {
+        if (!isChannelMaskSpatialized(config->channel_mask)) {
+            return false;
+        }
+        if (!allowCurrentOutputReconfig && mSpatializerOutput != nullptr
+                && mSpatializerOutput->mProfile == profile) {
+            if ((config->channel_mask & mSpatializerOutput->mMixerChannelMask)
+                    != config->channel_mask) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
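
A hedged sketch of the "ignore" conventions described above, exercised through the public canBeSpatialized() wrapper declared in AudioPolicyManager.h (which forwards here); apm is an assumed AudioPolicyManager instance:

    // Sketch only: "apm" is assumed; the constants come from the system audio headers.
    audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
    attr.usage = AUDIO_USAGE_GAME;
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
    AudioDeviceTypeAddrVector anyDevice;        // empty vector: device criterion ignored

    // Attributes + channel mask considered, any routable device.
    bool ok = apm->canBeSpatialized(&attr, &config, anyDevice);
    // Every criterion ignored: "is spatialization supported at all?"
    bool supportedAtAll = apm->canBeSpatialized(nullptr, nullptr, anyDevice);
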
+void AudioPolicyManager::checkVirtualizerClientRoutes() {
+    std::set<audio_stream_type_t> streamsToInvalidate;
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        const sp<SwAudioOutputDescriptor>& desc = mOutputs[i];
+        for (const sp<TrackClientDescriptor>& client : desc->getClientIterable()) {
+            audio_attributes_t attr = client->attributes();
+            DeviceVector devices = mEngine->getOutputDevicesForAttributes(attr, nullptr, false);
+            AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
+            audio_config_base_t clientConfig = client->config();
+            audio_config_t config = audio_config_initializer(&clientConfig);
+            if (desc != mSpatializerOutput
+                    && canBeSpatializedInt(&attr, &config,
+                            devicesTypeAddress, false /* allowCurrentOutputReconfig */)) {
+                streamsToInvalidate.insert(client->stream());
+            }
+        }
+    }
+
+    for (audio_stream_type_t stream : streamsToInvalidate) {
+        mpClientInterface->invalidateStream(stream);
+    }
+}
+
+status_t AudioPolicyManager::getSpatializerOutput(const audio_config_base_t *mixerConfig,
+                                                        const audio_attributes_t *attr,
+                                                        audio_io_handle_t *output) {
+    *output = AUDIO_IO_HANDLE_NONE;
+
+    DeviceVector devices = mEngine->getOutputDevicesForAttributes(*attr, nullptr, false);
+    AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
+    audio_config_t *configPtr = nullptr;
+    audio_config_t config;
+    if (mixerConfig != nullptr) {
+        config = audio_config_initializer(mixerConfig);
+        configPtr = &config;
+    }
+    if (!canBeSpatializedInt(
+            attr, configPtr, devicesTypeAddress)) {
+        ALOGW("%s provided attributes or mixer config cannot be spatialized", __func__);
+        return BAD_VALUE;
+    }
+
+    sp<IOProfile> profile =
+            getSpatializerOutputProfile(configPtr, devicesTypeAddress);
+    if (profile == nullptr) {
+        ALOGW("%s no suitable output profile for provided attributes or mixer config", __func__);
+        return BAD_VALUE;
+    }
+
+    if (mSpatializerOutput != nullptr && mSpatializerOutput->mProfile == profile
+            && configPtr != nullptr
+            && configPtr->channel_mask == mSpatializerOutput->mMixerChannelMask) {
+        *output = mSpatializerOutput->mIoHandle;
+        ALOGV("%s returns current spatializer output %d", __func__, *output);
+        return NO_ERROR;
+    }
+    mSpatializerOutput.clear();
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+        if (!desc->isDuplicated() && desc->mProfile == profile) {
+            ALOGV("%s found output %d for spatializer profile", __func__, desc->mIoHandle);
+            mSpatializerOutput = desc;
+            break;
+        }
+    }
+    if (mSpatializerOutput == nullptr) {
+        ALOGW("%s no opened spatializer output for profile %s",
+                __func__, profile->getName().c_str());
+        return BAD_VALUE;
+    }
+
+    if (configPtr != nullptr
+            && configPtr->channel_mask != mSpatializerOutput->mMixerChannelMask) {
+        audio_config_base_t savedMixerConfig = {
+            .sample_rate = mSpatializerOutput->getSamplingRate(),
+            .format = mSpatializerOutput->getFormat(),
+            .channel_mask = mSpatializerOutput->mMixerChannelMask,
+        };
+        DeviceVector savedDevices = mSpatializerOutput->devices();
+
+        ALOGV("%s reopening spatializer output to match channel mask %#x (current mask %#x)",
+            __func__, configPtr->channel_mask, mSpatializerOutput->mMixerChannelMask);
+
+        closeOutput(mSpatializerOutput->mIoHandle);
+        // from now on mSpatializerOutput is null
+
+        sp<SwAudioOutputDescriptor> desc =
+                openOutputWithProfileAndDevice(profile, devices, mixerConfig);
+        if (desc == nullptr) {
+            // reopen the spatializer output with the previous channel mask
+            desc = openOutputWithProfileAndDevice(profile, savedDevices, &savedMixerConfig);
+            if (desc == nullptr) {
+                ALOGE("%s failed to restore mSpatializerOutput with previous config", __func__);
+            } else {
+                mSpatializerOutput = desc;
+            }
+            mPreviousOutputs = mOutputs;
+            mpClientInterface->onAudioPortListUpdate();
+            *output = AUDIO_IO_HANDLE_NONE;
+            ALOGW("%s could not open spatializer output with requested config", __func__);
+            return BAD_VALUE;
+        }
+        mSpatializerOutput = desc;
+        mPreviousOutputs = mOutputs;
+        mpClientInterface->onAudioPortListUpdate();
+    }
+
+    checkVirtualizerClientRoutes();
+
+    *output = mSpatializerOutput->mIoHandle;
+    ALOGV("%s returns new spatializer output %d", __func__, *output);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::releaseSpatializerOutput(audio_io_handle_t output) {
+    if (mSpatializerOutput == nullptr) {
+        return INVALID_OPERATION;
+    }
+    if (mSpatializerOutput->mIoHandle != output) {
+        return BAD_VALUE;
+    }
+
+    mSpatializerOutput.clear();
+
+    checkVirtualizerClientRoutes();
+
+    return NO_ERROR;
+}
+
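
A hedged end-to-end sketch, assuming an AudioPolicyManager instance apm and standard system audio headers: request the spatializer output for multichannel media, then release it once the spatializer mixer is torn down:

    // Sketch only: "apm" is assumed; error handling reduced to the essentials.
    audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
    attr.usage = AUDIO_USAGE_MEDIA;
    audio_config_base_t mixerConfig = {
        .sample_rate = 48000,
        .format = AUDIO_FORMAT_PCM_FLOAT,
        .channel_mask = AUDIO_CHANNEL_OUT_5POINT1,
    };
    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
    if (apm->getSpatializerOutput(&mixerConfig, &attr, &output) == NO_ERROR) {
        // ... attach the spatializer effect chain to "output" ...
        apm->releaseSpatializerOutput(output);
    }
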
 // ----------------------------------------------------------------------------
 // AudioPolicyManager
 // ----------------------------------------------------------------------------
@@ -4953,9 +5362,8 @@
             continue;
         }
         mHwModules.push_back(hwModule);
-        // open all output streams needed to access attached devices
-        // except for direct output streams that are only opened when they are actually
-        // required by an app.
+        // open all output streams needed to access attached devices.
+        // Direct outputs are closed immediately after checking the availability of attached devices.
         // This also validates mAvailableOutputDevices list
         for (const auto& outProfile : hwModule->getOutputProfiles()) {
             if (!outProfile->canOpenNewIo()) {
@@ -4967,7 +5375,8 @@
                 ALOGW("Output profile contains no device on module %s", hwModule->getName());
                 continue;
             }
-            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0) {
+            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0 ||
+                (outProfile->getFlags() & AUDIO_OUTPUT_FLAG_ULTRASOUND) != 0) {
                 mTtsOutputAvailable = true;
             }
 
@@ -4990,7 +5399,8 @@
             sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
                                                                                  mpClientInterface);
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
+            status_t status = outputDesc->open(nullptr /* halConfig */, nullptr /* mixerConfig */,
+                                               DeviceVector(supportedDevice),
                                                AUDIO_STREAM_DEFAULT,
                                                AUDIO_OUTPUT_FLAG_NONE, &output);
             if (status != NO_ERROR) {
@@ -5351,7 +5761,7 @@
             } // endif input != 0
 
             if (input == AUDIO_IO_HANDLE_NONE) {
-                ALOGW("%s could not open input for device %s", __func__,  
+                ALOGW("%s could not open input for device %s", __func__,
                        device->toString().c_str());
                 profiles.removeAt(profile_index);
                 profile_index--;
@@ -5445,6 +5855,9 @@
 
     removeOutput(output);
     mPreviousOutputs = mOutputs;
+    if (closingOutput == mSpatializerOutput) {
+        mSpatializerOutput.clear();
+    }
 
     // MSD patches may have been released to support a non-MSD direct output. Reset MSD patch if
     // no direct outputs are open.
@@ -5720,14 +6133,20 @@
                     client->getSecondaryOutputs().begin(),
                     client->getSecondaryOutputs().end(),
                     secondaryDescs.begin(), secondaryDescs.end())) {
-                std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
-                std::vector<audio_io_handle_t> secondaryOutputIds;
-                for (const auto& secondaryDesc : secondaryDescs) {
-                    secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
-                    weakSecondaryDescs.push_back(secondaryDesc);
+                if (!audio_is_linear_pcm(client->config().format)) {
+                    // If the format is not PCM, the tracks should be invalidated to get correct
+                    // behavior when the secondary output is changed.
+                    streamsToInvalidate.insert(client->stream());
+                } else {
+                    std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
+                    std::vector<audio_io_handle_t> secondaryOutputIds;
+                    for (const auto &secondaryDesc: secondaryDescs) {
+                        secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
+                        weakSecondaryDescs.push_back(secondaryDesc);
+                    }
+                    trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
+                    client->setSecondaryOutputs(std::move(weakSecondaryDescs));
                 }
-                trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
-                client->setSecondaryOutputs(std::move(weakSecondaryDescs));
             }
         }
     }
@@ -5843,7 +6262,8 @@
             return hasVoiceStream(streams) && (outputDesc == mPrimaryOutput ||
                 outputDesc->isActive(toVolumeSource(AUDIO_STREAM_VOICE_CALL))) &&
                 (isInCall() ||
-                 mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc));
+                 mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc)) &&
+                !isStreamActive(AUDIO_STREAM_ENFORCED_AUDIBLE, 0);
         };
 
         // With low-latency playing on speaker, music on WFD, when the first low-latency
@@ -5895,11 +6315,11 @@
     uid_t uid;
     sp<RecordClientDescriptor> topClient = inputDesc->getHighestPriorityClient();
     if (topClient != nullptr) {
-      attributes = topClient->attributes();
-      uid = topClient->uid();
+        attributes = topClient->attributes();
+        uid = topClient->uid();
     } else {
-      attributes = { .source = AUDIO_SOURCE_DEFAULT };
-      uid = 0;
+        attributes = { .source = AUDIO_SOURCE_DEFAULT };
+        uid = 0;
     }
 
     if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
@@ -5917,13 +6337,13 @@
     return (stream1 == stream2);
 }
 
-audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
+DeviceTypeSet AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
     // By checking the range of stream before calling getStrategy, we avoid
     // getOutputDevicesForStream's behavior for invalid streams.
     // engine's getOutputDevicesForStream would fallback on its default behavior (most probably
     // device for music stream), but we want to return the empty set.
     if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
-        return AUDIO_DEVICE_NONE;
+        return DeviceTypeSet{};
     }
     DeviceVector activeDevices;
     DeviceVector devices;
@@ -5954,10 +6374,10 @@
         devices.merge(mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER));
         devices.remove(speakerSafeDevices);
     }
-    // FIXME: use DeviceTypeSet when Java layer is ready for it.
-    return deviceTypesToBitMask(devices.types());
+    return devices.types();
 }
 
+// TODO - consider MSD routes b/214971780
 status_t AudioPolicyManager::getDevicesForAttributes(
         const audio_attributes_t &attr, AudioDeviceTypeAddrVector *devices) {
     if (devices == nullptr) {
@@ -6119,11 +6539,18 @@
     // different per device volumes
     if (outputDesc->isActive() && (devices != prevDevices)) {
         uint32_t tempMuteWaitMs = outputDesc->latency() * 2;
-        // temporary mute duration is conservatively set to 4 times the reported latency
-        uint32_t tempMuteDurationMs = outputDesc->latency() * 4;
+
         if (muteWaitMs < tempMuteWaitMs) {
             muteWaitMs = tempMuteWaitMs;
         }
+
+        // If the HAL reports a recommended mute duration, use it as the temporary mute duration
+        // to avoid truncated notifications at the beginning of playback; the required duration
+        // depends on how long the HAL takes to change the audio path.
+        // Otherwise, temporary mute duration is conservatively set to 4 times the reported latency.
+        uint32_t tempRecommendedMuteDuration = outputDesc->getRecommendedMuteDurationMs();
+        uint32_t tempMuteDurationMs = tempRecommendedMuteDuration > 0 ?
+                tempRecommendedMuteDuration : outputDesc->latency() * 4;
+
         for (const auto &activeVs : outputDesc->getActiveVolumeSources()) {
             // make sure that we do not start the temporary mute period too early in case of
             // delayed device change
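
With hypothetical numbers for the change above: an output reporting 40 ms latency and no HAL recommendation keeps the previous behavior, tempMuteDurationMs = 4 * 40 = 160 ms, while a getRecommendedMuteDurationMs() of 250 makes the temporary mute last 250 ms regardless of latency.
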
@@ -6548,7 +6975,7 @@
     outputDesc->setVolume(
             volumeDb, volumeSource, curves.getStreamTypes(), deviceTypes, delayMs, force);
 
-    if (isVoiceVolSrc || isBtScoVolSrc) {
+    if (outputDesc == mPrimaryOutput && (isVoiceVolSrc || isBtScoVolSrc)) {
         float voiceVolume;
         // Force voice volume to max or mute for Bluetooth SCO as other attenuations are managed by the headset
         if (isVoiceVolSrc) {
@@ -6693,8 +7120,8 @@
 {
     audio_mode_t mode = mEngine->getPhoneState();
     return (mode == AUDIO_MODE_IN_CALL)
-            || (mode == AUDIO_MODE_IN_COMMUNICATION)
-            || (mode == AUDIO_MODE_CALL_SCREEN);
+            || (mode == AUDIO_MODE_CALL_SCREEN)
+            || (mode == AUDIO_MODE_CALL_REDIRECT);
 }
 
 void AudioPolicyManager::cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc)
@@ -6985,7 +7412,8 @@
 }
 
 sp<SwAudioOutputDescriptor> AudioPolicyManager::openOutputWithProfileAndDevice(
-        const sp<IOProfile>& profile, const DeviceVector& devices)
+        const sp<IOProfile>& profile, const DeviceVector& devices,
+        const audio_config_base_t *mixerConfig)
 {
     for (const auto& device : devices) {
         // TODO: This should be checking if the profile supports the device combo.
@@ -6995,7 +7423,7 @@
     }
     sp<SwAudioOutputDescriptor> desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-    status_t status = desc->open(nullptr, devices,
+    status_t status = desc->open(nullptr /* halConfig */, mixerConfig, devices,
             AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
     if (status != NO_ERROR) {
         return nullptr;
@@ -7025,7 +7453,7 @@
         config.offload_info.channel_mask = config.channel_mask;
         config.offload_info.format = config.format;
 
-        status = desc->open(&config, devices,
+        status = desc->open(&config, mixerConfig, devices,
                             AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
         if (status != NO_ERROR) {
             return nullptr;
@@ -7033,6 +7461,7 @@
     }
 
     addOutput(output, desc);
+
     if (audio_is_remote_submix_device(deviceType) && address != "0") {
         sp<AudioPolicyMix> policyMix;
         if (mPolicyMixes.getAudioPolicyMix(deviceType, address, policyMix) == NO_ERROR) {
@@ -7043,9 +7472,13 @@
                     address.string());
         }
 
-    } else if (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) && hasPrimaryOutput()) {
-        // no duplicated output for direct outputs and
-        // outputs used by dynamic policy mixes
+    } else if (hasPrimaryOutput() && profile->getModule()
+                != mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)
+            && ((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0)) {
+        // no duplicated output for:
+        // - direct outputs
+        // - outputs used by dynamic policy mixes
+        // - outputs opened on the primary HW module
         audio_io_handle_t duplicatedOutput = AUDIO_IO_HANDLE_NONE;
 
         //TODO: configure audio effect output stage here
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 98f96d1..165ac13 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -193,7 +193,7 @@
         }
 
         // return the enabled output devices for the given stream type
-        virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+        virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
 
         virtual status_t getDevicesForAttributes(
                 const audio_attributes_t &attributes,
@@ -320,13 +320,15 @@
                                                     audio_format_t *surroundFormats);
         virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
 
-        virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
-                    std::vector<audio_format_t> *formats);
+        virtual status_t getHwOffloadFormatsSupportedForBluetoothMedia(
+                    audio_devices_t device, std::vector<audio_format_t> *formats);
 
         virtual void setAppState(audio_port_handle_t portId, app_state_t state);
 
         virtual bool isHapticPlaybackSupported();
 
+        virtual bool isUltrasoundSupported();
+
         virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies)
         {
             return mEngine->listAudioProductStrategies(strategies);
@@ -356,6 +358,24 @@
                     BAD_VALUE : NO_ERROR;
         }
 
+        virtual bool canBeSpatialized(const audio_attributes_t *attr,
+                                      const audio_config_t *config,
+                                      const AudioDeviceTypeAddrVector &devices) const {
+            return canBeSpatializedInt(attr, config, devices);
+        }
+
+        virtual status_t getSpatializerOutput(const audio_config_base_t *config,
+                                                const audio_attributes_t *attr,
+                                                audio_io_handle_t *output);
+
+        virtual status_t releaseSpatializerOutput(audio_io_handle_t output);
+
+        virtual audio_direct_mode_t getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                                             const audio_config_t *config);
+
+        virtual status_t getDirectProfilesForAttributes(const audio_attributes_t* attr,
+                                                         AudioProfileVector& audioProfiles);
+
         bool isCallScreenModeSupported() override;
 
         void onNewAudioModulesAvailable() override;
@@ -797,6 +817,8 @@
         sp<SwAudioOutputDescriptor> mPrimaryOutput;     // primary output descriptor
         // list of descriptors for outputs currently opened
 
+        sp<SwAudioOutputDescriptor> mSpatializerOutput;
+
         SwAudioOutputCollection mOutputs;
         // copy of mOutputs before setDeviceConnectionState() opens new outputs
         // reset to mOutputs when updateDevicesAndOutputs() is called.
@@ -842,7 +864,8 @@
         uint32_t mBeaconMuteRefCount;   // ref count for stream that would mute beacon
         uint32_t mBeaconPlayingRefCount;// ref count for the playing beacon streams
         bool mBeaconMuted;              // has STREAM_TTS been muted
-        bool mTtsOutputAvailable;       // true if a dedicated output for TTS stream is available
+        // true if a dedicated output for TTS stream or Ultrasound is available
+        bool mTtsOutputAvailable;
 
         bool mMasterMono;               // true if we wish to force all outputs to mono
         AudioPolicyMixCollection mPolicyMixes; // list of registered mixes
@@ -933,7 +956,7 @@
         audio_io_handle_t getOutputForDevices(
                 const DeviceVector &devices,
                 audio_session_t session,
-                audio_stream_type_t stream,
+                const audio_attributes_t *attr,
                 const audio_config_t *config,
                 audio_output_flags_t *flags,
                 bool forceMutingHaptic = false);
@@ -948,6 +971,38 @@
                 audio_output_flags_t flags,
                 const DeviceVector &devices,
                 audio_io_handle_t *output);
+
+        /**
+         * @brief Queries if some kind of spatialization will be performed if the audio playback
+         * context described by the provided arguments is present.
+         * The context is made of:
+         * - The audio attributes describing the playback use case.
+         * - The audio configuration describing the audio format, channels, sampling rate ...
+         * - The devices describing the sink audio device selected for playback.
+         * All arguments are optional and only the specified arguments are used to match against
+         * supported criteria. For instance, supplying no argument will tell if spatialization is
+         * supported or not in general.
+         * @param attr audio attributes describing the playback use case
+         * @param config audio configuration describing the audio format, channels, sample rate...
+         * @param devices the sink audio device selected for playback
+         * @param allowCurrentOutputReconfig if true, the result will be considering it is possible
+         *      to close and reopen an existing spatializer output stream to match the requested
+         *      criteria. If false, the criteria must be compatible with the opened sptializer
+         *      output.
+         * @return true if spatialization is possible for this context, false otherwise.
+         */
+        virtual bool canBeSpatializedInt(const audio_attributes_t *attr,
+                                      const audio_config_t *config,
+                                      const AudioDeviceTypeAddrVector &devices,
+                                      bool allowCurrentOutputReconfig = true) const;
+
+        sp<IOProfile> getSpatializerOutputProfile(const audio_config_t *config,
+                                                  const AudioDeviceTypeAddrVector &devices) const;
+
+        static bool isChannelMaskSpatialized(audio_channel_mask_t channels);
+
+        void checkVirtualizerClientRoutes();
+
         /**
          * @brief getInputForDevice selects an input handle for a given input device and
          * requester context
@@ -1036,8 +1091,23 @@
 
         bool areAllActiveTracksRerouted(const sp<SwAudioOutputDescriptor>& output);
 
-        sp<SwAudioOutputDescriptor> openOutputWithProfileAndDevice(const sp<IOProfile>& profile,
-                                                                   const DeviceVector& devices);
+        /**
+         * @brief Opens an output stream from the supplied IOProfile and route it to the
+         * supplied audio devices. If a mixer config is specified, it is forwarded to audio
+         * flinger. If not, a default config is derived from the output stream config.
+         * Also opens a duplicating output if needed and queries the audio HAL for supported
+         * audio profiles if the IOProfile is dynamic.
+         * @param[in] profile IOProfile to use as template
+         * @param[in] devices initial route to apply to this output stream
+         * @param[in] mixerConfig if not null, use this to configure the mixer
+         * @return an output descriptor for the newly opened stream or null in case of error.
+         */
+        sp<SwAudioOutputDescriptor> openOutputWithProfileAndDevice(
+                const sp<IOProfile>& profile, const DeviceVector& devices,
+                const audio_config_base_t *mixerConfig = nullptr);
+
+        bool isOffloadPossible(const audio_offload_info_t& offloadInfo,
+                               bool durationIgnored = false);
 
 };
 
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index 454c020..cdad9a6 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -16,6 +16,8 @@
         "AudioPolicyInterfaceImpl.cpp",
         "AudioPolicyService.cpp",
         "CaptureStateNotifier.cpp",
+        "Spatializer.cpp",
+        "SpatializerPoseController.cpp",
     ],
 
     include_dirs: [
@@ -27,6 +29,7 @@
         "libaudioclient",
         "libaudioclient_aidl_conversion",
         "libaudiofoundation",
+        "libaudiohal",
         "libaudiopolicy",
         "libaudiopolicymanagerdefault",
         "libaudioutils",
@@ -34,19 +37,27 @@
         "libcutils",
         "libeffectsconfig",
         "libhardware_legacy",
+        "libheadtracking",
+        "libheadtracking-binding",
         "liblog",
         "libmedia_helper",
         "libmediametrics",
         "libmediautils",
         "libpermission",
+        "libsensor",
         "libsensorprivacy",
+        "libshmemcompat",
         "libutils",
+        "libstagefright_foundation",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
         "audiopolicy-types-aidl-cpp",
         "capture_state_listener-aidl-cpp",
         "framework-permission-aidl-cpp",
+        "packagemanager_aidl-cpp",
+        "spatializer-aidl-cpp",
     ],
 
     static_libs: [
@@ -55,6 +66,7 @@
     ],
 
     header_libs: [
+        "libaudiohal_headers",
         "libaudiopolicycommon",
         "libaudiopolicyengine_interface_headers",
         "libaudiopolicymanager_interface_headers",
@@ -70,6 +82,8 @@
 
     export_shared_lib_headers: [
         "libactivitymanager_aidl",
+        "libheadtracking",
+        "libheadtracking-binding",
         "libsensorprivacy",
         "framework-permission-aidl-cpp",
     ],
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index cd53073..aaf6fba 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -40,7 +40,8 @@
 
 status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
-                                                           audio_config_t *config,
+                                                           audio_config_t *halConfig,
+                                                           audio_config_base_t *mixerConfig,
                                                            const sp<DeviceDescriptorBase>& device,
                                                            uint32_t *latencyMs,
                                                            audio_output_flags_t flags)
@@ -55,14 +56,18 @@
     media::OpenOutputResponse response;
 
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
-    request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
+    request.halConfig = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(*halConfig, false /*isInput*/));
+    request.mixerConfig = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(*mixerConfig, false /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
 
     status_t status = af->openOutput(request, &response);
     if (status == OK) {
         *output = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.output));
-        *config = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioConfig_audio_config_t(response.config));
+        *halConfig = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioConfig_audio_config_t(response.config, false /*isInput*/));
         *latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(response.latencyMs));
     }
     return status;
@@ -131,9 +136,10 @@
     media::OpenInputRequest request;
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
     request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
-    request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
+    request.config = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(*config, true /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
-    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSource(source));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
 
     media::OpenInputResponse response;
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 3f01de9..70fdfcb 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -345,7 +345,8 @@
             (source > AUDIO_SOURCE_MAX &&
              source != AUDIO_SOURCE_HOTWORD &&
              source != AUDIO_SOURCE_FM_TUNER &&
-             source != AUDIO_SOURCE_ECHO_REFERENCE)) {
+             source != AUDIO_SOURCE_ECHO_REFERENCE &&
+             source != AUDIO_SOURCE_ULTRASOUND)) {
         ALOGE("addSourceDefaultEffect(): Unsupported source type %d", source);
         return BAD_VALUE;
     }
@@ -386,7 +387,7 @@
         return res;
     }
     EffectDesc *effect = new EffectDesc(
-            descriptor.name, *type, opPackageName, *uuid, priority, *id);
+            descriptor.name, descriptor.type, opPackageName, descriptor.uuid, priority, *id);
     desc->mEffects.add(effect);
     // TODO(b/71813697): Support setting params as well.
 
@@ -451,7 +452,7 @@
         return res;
     }
     EffectDesc *effect = new EffectDesc(
-            descriptor.name, *type, opPackageName, *uuid, priority, *id);
+            descriptor.name, descriptor.type, opPackageName, descriptor.uuid, priority, *id);
     desc->mEffects.add(effect);
     // TODO(b/71813697): Support setting params as well.
 
@@ -544,6 +545,7 @@
     CAMCORDER_SRC_TAG,
     VOICE_REC_SRC_TAG,
     VOICE_COMM_SRC_TAG,
+    REMOTE_SUBMIX_SRC_TAG,
     UNPROCESSED_SRC_TAG,
     VOICE_PERFORMANCE_SRC_TAG
 };
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 77223b6..87a350f 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -32,6 +32,9 @@
        if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
+#define RETURN_BINDER_STATUS_IF_ERROR(x) \
+    if (status_t _tmp = (x); _tmp != OK) return aidl_utils::binderStatusFromStatusT(_tmp);
+
 #define RETURN_IF_BINDER_ERROR(x)      \
     {                                  \
         binder::Status _tmp = (x);     \
@@ -44,6 +47,19 @@
 using binder::Status;
 using aidl_utils::binderStatusFromStatusT;
 using content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::Int;
 
 const std::vector<audio_usage_t>& SYSTEM_USAGES = {
     AUDIO_USAGE_CALL_ASSISTANT,
@@ -63,15 +79,22 @@
         != std::end(mSupportedSystemUsages);
 }
 
-status_t AudioPolicyService::validateUsage(audio_usage_t usage) {
-     return validateUsage(usage, getCallingAttributionSource());
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr) {
+     return validateUsage(attr, getCallingAttributionSource());
 }
 
-status_t AudioPolicyService::validateUsage(audio_usage_t usage,
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr,
         const AttributionSourceState& attributionSource) {
-    if (isSystemUsage(usage)) {
-        if (isSupportedSystemUsage(usage)) {
-            if (!modifyAudioRoutingAllowed(attributionSource)) {
+    if (isSystemUsage(attr.usage)) {
+        if (isSupportedSystemUsage(attr.usage)) {
+            if (attr.usage == AUDIO_USAGE_CALL_ASSISTANT
+                    && ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)) {
+                if (!callAudioInterceptionAllowed(attributionSource)) {
+                    ALOGE(("permission denied: modify audio routing not allowed "
+                           "for attributionSource %s"), attributionSource.toString().c_str());
+                    return PERMISSION_DENIED;
+                }
+            } else if (!modifyAudioRoutingAllowed(attributionSource)) {
                 ALOGE(("permission denied: modify audio routing not allowed "
                        "for attributionSource %s"), attributionSource.toString().c_str());
                 return PERMISSION_DENIED;
@@ -96,16 +119,18 @@
 }
 
 Status AudioPolicyService::setDeviceConnectionState(
-        const media::AudioDevice& deviceAidl,
+        const AudioDevice& deviceAidl,
         media::AudioPolicyDeviceState stateAidl,
         const std::string& deviceNameAidl,
-        media::audio::common::AudioFormat encodedFormatAidl) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+        const AudioFormatDescription& encodedFormatAidl) {
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     audio_policy_dev_state_t state = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioPolicyDeviceState_audio_policy_dev_state_t(stateAidl));
     audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioFormat_audio_format_t(encodedFormatAidl));
+            aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
 
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
@@ -121,39 +146,45 @@
     ALOGV("setDeviceConnectionState()");
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return binderStatusFromStatusT(
-            mAudioPolicyManager->setDeviceConnectionState(device, state,
-                                                          deviceAidl.address.c_str(),
-                                                          deviceNameAidl.c_str(),
-                                                          encodedFormat));
+    status_t status = mAudioPolicyManager->setDeviceConnectionState(
+            device, state, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
+    if (status == NO_ERROR) {
+        onCheckSpatializer_l();
+    }
+    return binderStatusFromStatusT(status);
 }
 
-Status AudioPolicyService::getDeviceConnectionState(const media::AudioDevice& deviceAidl,
+Status AudioPolicyService::getDeviceConnectionState(const AudioDevice& deviceAidl,
                                                     media::AudioPolicyDeviceState* _aidl_return) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     if (mAudioPolicyManager == NULL) {
         *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
                 legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
         return Status::ok();
     }
+    Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
-                    mAudioPolicyManager->getDeviceConnectionState(device,
-                                                                  deviceAidl.address.c_str())));
+                    mAudioPolicyManager->getDeviceConnectionState(
+                            device, address.c_str())));
     return Status::ok();
 }
 
 Status AudioPolicyService::handleDeviceConfigChange(
-        const media::AudioDevice& deviceAidl,
+        const AudioDevice& deviceAidl,
         const std::string& deviceNameAidl,
-        media::audio::common::AudioFormat encodedFormatAidl) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+        const AudioFormatDescription& encodedFormatAidl) {
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioFormat_audio_format_t(encodedFormatAidl));
+            aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
 
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
@@ -165,12 +196,16 @@
     ALOGV("handleDeviceConfigChange()");
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return binderStatusFromStatusT(
-            mAudioPolicyManager->handleDeviceConfigChange(device, deviceAidl.address.c_str(),
-                                                          deviceNameAidl.c_str(), encodedFormat));
+    status_t status = mAudioPolicyManager->handleDeviceConfigChange(
+            device, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
+
+    if (status == NO_ERROR) {
+        onCheckSpatializer_l();
+    }
+    return binderStatusFromStatusT(status);
 }
 
-Status AudioPolicyService::setPhoneState(media::AudioMode stateAidl, int32_t uidAidl)
+Status AudioPolicyService::setPhoneState(AudioMode stateAidl, int32_t uidAidl)
 {
     audio_mode_t state = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioMode_audio_mode_t(stateAidl));
@@ -192,7 +227,15 @@
     // can be interleaved).
     Mutex::Autolock _l(mLock);
     // TODO: check if it is more appropriate to do it in platform specific policy manager
-    AudioSystem::setMode(state);
+
+    // Audio HAL mode conversion for call redirect modes
+    audio_mode_t halMode = state;
+    if (state == AUDIO_MODE_CALL_REDIRECT) {
+        halMode = AUDIO_MODE_CALL_SCREEN;
+    } else if (state == AUDIO_MODE_COMMUNICATION_REDIRECT) {
+        halMode = AUDIO_MODE_NORMAL;
+    }
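+    // The HAL receives the remapped mode, while the policy manager below still sees the
+    // original redirect mode.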
+    AudioSystem::setMode(halMode);
 
     AutoCallerClear acc;
     mAudioPolicyManager->setPhoneState(state);
@@ -202,7 +245,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getPhoneState(media::AudioMode* _aidl_return) {
+Status AudioPolicyService::getPhoneState(AudioMode* _aidl_return) {
     Mutex::Autolock _l(mLock);
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_mode_t_AudioMode(mPhoneState));
     return Status::ok();
@@ -234,6 +277,7 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     mAudioPolicyManager->setForceUse(usage, config);
+    onCheckSpatializer_l();
     return Status::ok();
 }
 
@@ -257,7 +301,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getOutput(media::AudioStreamType streamAidl, int32_t* _aidl_return)
+Status AudioPolicyService::getOutput(AudioStreamType streamAidl, int32_t* _aidl_return)
 {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -281,7 +325,7 @@
 Status AudioPolicyService::getOutputForAttr(const media::AudioAttributesInternal& attrAidl,
                                             int32_t sessionAidl,
                                             const AttributionSourceState& attributionSource,
-                                            const media::AudioConfig& configAidl,
+                                            const AudioConfig& configAidl,
                                             int32_t flagsAidl,
                                             int32_t selectedDeviceIdAidl,
                                             media::GetOutputForAttrResponse* _aidl_return)
@@ -292,7 +336,7 @@
             aidl2legacy_int32_t_audio_session_t(sessionAidl));
     audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
     audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioConfig_audio_config_t(configAidl));
+            aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
     audio_output_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_output_flags_t_mask(flagsAidl));
     audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
@@ -308,7 +352,7 @@
 
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(AudioValidator::validateAudioAttributes(attr, "68953950")));
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage, attributionSource)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
 
     ALOGV("%s()", __func__);
     Mutex::Autolock _l(mLock);
@@ -333,6 +377,15 @@
         attr.flags = static_cast<audio_flags_mask_t>(
                 attr.flags & ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE));
     }
+
+    if (attr.content_type == AUDIO_CONTENT_TYPE_ULTRASOUND) {
+        if (!accessUltrasoundAllowed(adjAttributionSource)) {
+            ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
+                    __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+            return binderStatusFromStatusT(PERMISSION_DENIED);
+        }
+    }
+
     AutoCallerClear acc;
     AudioPolicyInterface::output_type_t outputType;
     status_t result = mAudioPolicyManager->getOutputForAttr(&attr, &output, session,
@@ -350,7 +403,12 @@
         case AudioPolicyInterface::API_OUTPUT_LEGACY:
             break;
         case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
-            if (!modifyPhoneStateAllowed(adjAttributionSource)) {
+            if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
+                && !callAudioInterceptionAllowed(adjAttributionSource)) {
+                ALOGE("%s() permission denied: call redirection not allowed for uid %d",
+                    __func__, adjAttributionSource.uid);
+                result = PERMISSION_DENIED;
+            } else if (!modifyPhoneStateAllowed(adjAttributionSource)) {
                 ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
                     __func__, adjAttributionSource.uid);
                 result = PERMISSION_DENIED;
@@ -510,7 +568,7 @@
                                            int32_t riidAidl,
                                            int32_t sessionAidl,
                                            const AttributionSourceState& attributionSource,
-                                           const media::AudioConfigBase& configAidl,
+                                           const AudioConfigBase& configAidl,
                                            int32_t flagsAidl,
                                            int32_t selectedDeviceIdAidl,
                                            media::GetInputForAttrResponse* _aidl_return) {
@@ -523,7 +581,7 @@
     audio_session_t session = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_session_t(sessionAidl));
     audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
+            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, true /*isInput*/));
     audio_input_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_input_flags_t_mask(flagsAidl));
     audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
@@ -548,7 +606,8 @@
             || (inputSource >= AUDIO_SOURCE_CNT
                 && inputSource != AUDIO_SOURCE_HOTWORD
                 && inputSource != AUDIO_SOURCE_FM_TUNER
-                && inputSource != AUDIO_SOURCE_ECHO_REFERENCE)) {
+                && inputSource != AUDIO_SOURCE_ECHO_REFERENCE
+                && inputSource != AUDIO_SOURCE_ULTRASOUND)) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
 
@@ -577,32 +636,43 @@
         adjAttributionSource.pid = callingPid;
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage,
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr,
             adjAttributionSource)));
 
     // check calling permissions.
-    // Capturing from FM_TUNER source is controlled by captureTunerAudioInputAllowed() and
-    // captureAudioOutputAllowed() (deprecated) as this does not affect users privacy
-    // as does capturing from an actual microphone.
-    if (!(recordingAllowed(adjAttributionSource, attr.source)
-            || attr.source == AUDIO_SOURCE_FM_TUNER)) {
+    // Capturing from the following sources does not require the RECORD_AUDIO permission,
+    // as the captured audio does not come from a microphone:
+    // - FM_TUNER source is controlled by captureTunerAudioInputAllowed() or
+    //   captureAudioOutputAllowed() (deprecated).
+    // - REMOTE_SUBMIX source is controlled by captureAudioOutputAllowed() if the input
+    //   type is API_INPUT_MIX_EXT_POLICY_REROUTE, and by AudioService if a media projection
+    //   is used and the input type is API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK.
+    // - ECHO_REFERENCE source is controlled by captureAudioOutputAllowed().
+    if (!(recordingAllowed(adjAttributionSource, inputSource)
+            || inputSource == AUDIO_SOURCE_FM_TUNER
+            || inputSource == AUDIO_SOURCE_REMOTE_SUBMIX
+            || inputSource == AUDIO_SOURCE_ECHO_REFERENCE)) {
         ALOGE("%s permission denied: recording not allowed for %s",
                 __func__, adjAttributionSource.toString().c_str());
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
     bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
-    if ((inputSource == AUDIO_SOURCE_VOICE_UPLINK ||
-        inputSource == AUDIO_SOURCE_VOICE_DOWNLINK ||
-        inputSource == AUDIO_SOURCE_VOICE_CALL ||
-        inputSource == AUDIO_SOURCE_ECHO_REFERENCE)
-        && !canCaptureOutput) {
+    bool canInterceptCallAudio = callAudioInterceptionAllowed(adjAttributionSource);
+    bool isCallAudioSource = inputSource == AUDIO_SOURCE_VOICE_UPLINK
+             || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
+             || inputSource == AUDIO_SOURCE_VOICE_CALL;
+
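+    // Call audio sources are allowed if the caller may either intercept call audio or
+    // capture audio output.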
+    if (isCallAudioSource && !canInterceptCallAudio && !canCaptureOutput) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
-
+    if (inputSource == AUDIO_SOURCE_ECHO_REFERENCE
+            && !canCaptureOutput) {
+        return binderStatusFromStatusT(PERMISSION_DENIED);
+    }
     if (inputSource == AUDIO_SOURCE_FM_TUNER
-        && !captureTunerAudioInputAllowed(adjAttributionSource)
-        && !canCaptureOutput) {
+        && !canCaptureOutput
+        && !captureTunerAudioInputAllowed(adjAttributionSource)) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
@@ -618,6 +688,14 @@
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
+    if (attr.source == AUDIO_SOURCE_ULTRASOUND) {
+        if (!accessUltrasoundAllowed(adjAttributionSource)) {
+            ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
+                    __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+            return binderStatusFromStatusT(PERMISSION_DENIED);
+        }
+    }
+
     sp<AudioPolicyEffects>audioPolicyEffects;
     {
         status_t status;
@@ -644,23 +722,30 @@
             case AudioPolicyInterface::API_INPUT_LEGACY:
                 break;
             case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
+                if ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+                        && canInterceptCallAudio) {
+                    break;
+                }
                 // FIXME: use the same permission as for remote submix for now.
+                FALLTHROUGH_INTENDED;
             case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
                 if (!canCaptureOutput) {
-                    ALOGE("getInputForAttr() permission denied: capture not allowed");
+                    ALOGE("%s permission denied: capture not allowed", __func__);
                     status = PERMISSION_DENIED;
                 }
                 break;
             case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
-                if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
-                    ALOGE("getInputForAttr() permission denied: modify audio routing not allowed");
+                if (!(modifyAudioRoutingAllowed(adjAttributionSource)
+                        || ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+                            && canInterceptCallAudio))) {
+                    ALOGE("%s permission denied for remote submix capture", __func__);
                     status = PERMISSION_DENIED;
                 }
                 break;
             case AudioPolicyInterface::API_INPUT_INVALID:
             default:
-                LOG_ALWAYS_FATAL("getInputForAttr() encountered an invalid input type %d",
-                        (int)inputType);
+                LOG_ALWAYS_FATAL("%s encountered an invalid input type %d",
+                        __func__, (int)inputType);
             }
         }
 
@@ -730,8 +815,10 @@
 
     // check calling permissions
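+    // As in getInputForAttr(), FM_TUNER, REMOTE_SUBMIX and ECHO_REFERENCE do not capture
+    // from a microphone, so the startRecording() check is bypassed for these sources.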
     if (!(startRecording(client->attributionSource, String16(msg.str().c_str()),
-        client->attributes.source)
-            || client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
+                         client->attributes.source)
+            || client->attributes.source == AUDIO_SOURCE_FM_TUNER
+            || client->attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX
+            || client->attributes.source == AUDIO_SOURCE_ECHO_REFERENCE)) {
         ALOGE("%s permission denied: recording not allowed for attribution source %s",
                 __func__, client->attributionSource.toString().c_str());
         return binderStatusFromStatusT(PERMISSION_DENIED);
@@ -901,7 +988,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::initStreamVolume(media::AudioStreamType streamAidl,
+Status AudioPolicyService::initStreamVolume(AudioStreamType streamAidl,
                                             int32_t indexMinAidl,
                                             int32_t indexMaxAidl) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
@@ -924,13 +1011,14 @@
     return binderStatusFromStatusT(NO_ERROR);
 }
 
-Status AudioPolicyService::setStreamVolumeIndex(media::AudioStreamType streamAidl,
-                                                int32_t deviceAidl, int32_t indexAidl) {
+Status AudioPolicyService::setStreamVolumeIndex(AudioStreamType streamAidl,
+                                                const AudioDeviceDescription& deviceAidl,
+                                                int32_t indexAidl) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
     int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
 
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
@@ -948,12 +1036,13 @@
                                                                              device));
 }
 
-Status AudioPolicyService::getStreamVolumeIndex(media::AudioStreamType streamAidl,
-                                                int32_t deviceAidl, int32_t* _aidl_return) {
+Status AudioPolicyService::getStreamVolumeIndex(AudioStreamType streamAidl,
+                                                const AudioDeviceDescription& deviceAidl,
+                                                int32_t* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
     int index;
 
     if (mAudioPolicyManager == NULL) {
@@ -971,12 +1060,13 @@
 }
 
 Status AudioPolicyService::setVolumeIndexForAttributes(
-        const media::AudioAttributesInternal& attrAidl, int32_t deviceAidl, int32_t indexAidl) {
+        const media::AudioAttributesInternal& attrAidl,
+        const AudioDeviceDescription& deviceAidl, int32_t indexAidl) {
     audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
     int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             AudioValidator::validateAudioAttributes(attributes, "169572641")));
 
@@ -993,11 +1083,12 @@
 }
 
 Status AudioPolicyService::getVolumeIndexForAttributes(
-        const media::AudioAttributesInternal& attrAidl, int32_t deviceAidl, int32_t* _aidl_return) {
+        const media::AudioAttributesInternal& attrAidl,
+        const AudioDeviceDescription& deviceAidl, int32_t* _aidl_return) {
     audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
     int index;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             AudioValidator::validateAudioAttributes(attributes, "169572641")));
@@ -1051,7 +1142,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getStrategyForStream(media::AudioStreamType streamAidl,
+Status AudioPolicyService::getStrategyForStream(AudioStreamType streamAidl,
                                                 int32_t* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -1075,14 +1166,14 @@
 
 //audio policy: use audio_device_t appropriately
 
-Status AudioPolicyService::getDevicesForStream(media::AudioStreamType streamAidl,
-                                               int32_t* _aidl_return) {
+Status AudioPolicyService::getDevicesForStream(
+        AudioStreamType streamAidl,
+        std::vector<AudioDeviceDescription>* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
 
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-                legacy2aidl_audio_devices_t_int32_t(AUDIO_DEVICE_NONE));
+        *_aidl_return = std::vector<AudioDeviceDescription>{};
         return Status::ok();
     }
     if (mAudioPolicyManager == NULL) {
@@ -1091,12 +1182,14 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            legacy2aidl_audio_devices_t_int32_t(mAudioPolicyManager->getDevicesForStream(stream)));
+            convertContainer<std::vector<AudioDeviceDescription>>(
+                    mAudioPolicyManager->getDevicesForStream(stream),
+                    legacy2aidl_audio_devices_t_AudioDeviceDescription));
     return Status::ok();
 }
 
 Status AudioPolicyService::getDevicesForAttributes(const media::AudioAttributesEx& attrAidl,
-                                                   std::vector<media::AudioDevice>* _aidl_return)
+                                                   std::vector<AudioDevice>* _aidl_return)
 {
     AudioAttributes aa = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesEx_AudioAttributes(attrAidl));
@@ -1110,8 +1203,8 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForAttributes(aa.getAttributes(), &devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return Status::ok();
 }
 
@@ -1197,7 +1290,7 @@
     return binderStatusFromStatusT(mAudioPolicyManager->moveEffectsToIo(ids, io));
 }
 
-Status AudioPolicyService::isStreamActive(media::AudioStreamType streamAidl, int32_t inPastMsAidl,
+Status AudioPolicyService::isStreamActive(AudioStreamType streamAidl, int32_t inPastMsAidl,
                                           bool* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -1216,7 +1309,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::isStreamActiveRemotely(media::AudioStreamType streamAidl,
+Status AudioPolicyService::isStreamActiveRemotely(AudioStreamType streamAidl,
                                                   int32_t inPastMsAidl,
                                                   bool* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1236,9 +1329,9 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::isSourceActive(media::AudioSourceType sourceAidl, bool* _aidl_return) {
+Status AudioPolicyService::isSourceActive(AudioSource sourceAidl, bool* _aidl_return) {
     audio_source_t source = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(sourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(sourceAidl));
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1266,7 +1359,7 @@
 
 Status AudioPolicyService::queryDefaultPreProcessing(
         int32_t audioSessionAidl,
-        media::Int* countAidl,
+        Int* countAidl,
         std::vector<media::EffectDescriptor>* _aidl_return) {
     audio_session_t audioSession = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_session_t(audioSessionAidl));
@@ -1290,11 +1383,11 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::addSourceDefaultEffect(const media::AudioUuid& typeAidl,
+Status AudioPolicyService::addSourceDefaultEffect(const AudioUuid& typeAidl,
                                                   const std::string& opPackageNameAidl,
-                                                  const media::AudioUuid& uuidAidl,
+                                                  const AudioUuid& uuidAidl,
                                                   int32_t priority,
-                                                  media::AudioSourceType sourceAidl,
+                                                  AudioSource sourceAidl,
                                                   int32_t* _aidl_return) {
     effect_uuid_t type = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(typeAidl));
@@ -1303,7 +1396,7 @@
     effect_uuid_t uuid = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(uuidAidl));
     audio_source_t source = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(sourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(sourceAidl));
     audio_unique_id_t id;
 
     sp<AudioPolicyEffects>audioPolicyEffects;
@@ -1317,10 +1410,10 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::addStreamDefaultEffect(const media::AudioUuid& typeAidl,
+Status AudioPolicyService::addStreamDefaultEffect(const AudioUuid& typeAidl,
                                                   const std::string& opPackageNameAidl,
-                                                  const media::AudioUuid& uuidAidl,
-                                                  int32_t priority, media::AudioUsage usageAidl,
+                                                  const AudioUuid& uuidAidl,
+                                                  int32_t priority, AudioUsage usageAidl,
                                                   int32_t* _aidl_return) {
     effect_uuid_t type = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(typeAidl));
@@ -1368,7 +1461,7 @@
 }
 
 Status AudioPolicyService::setSupportedSystemUsages(
-        const std::vector<media::AudioUsage>& systemUsagesAidl) {
+        const std::vector<AudioUsage>& systemUsagesAidl) {
     size_t size = systemUsagesAidl.size();
     if (size > MAX_ITEMS_PER_LIST) {
         size = MAX_ITEMS_PER_LIST;
@@ -1407,7 +1500,7 @@
             mAudioPolicyManager->setAllowedCapturePolicy(uid, capturePolicy));
 }
 
-Status AudioPolicyService::getOffloadSupport(const media::AudioOffloadInfo& infoAidl,
+Status AudioPolicyService::getOffloadSupport(const AudioOffloadInfo& infoAidl,
                                              media::AudioOffloadMode* _aidl_return) {
     audio_offload_info_t info = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioOffloadInfo_audio_offload_info_t(infoAidl));
@@ -1423,11 +1516,11 @@
 }
 
 Status AudioPolicyService::isDirectOutputSupported(
-        const media::AudioConfigBase& configAidl,
+        const AudioConfigBase& configAidl,
         const media::AudioAttributesInternal& attributesAidl,
         bool* _aidl_return) {
     audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
+            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, false /*isInput*/));
     audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(attributesAidl));
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
@@ -1438,7 +1531,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
 
     Mutex::Autolock _l(mLock);
     *_aidl_return = mAudioPolicyManager->isDirectOutputSupported(config, attributes);
@@ -1447,7 +1540,7 @@
 
 
 Status AudioPolicyService::listAudioPorts(media::AudioPortRole roleAidl,
-                                          media::AudioPortType typeAidl, media::Int* count,
+                                          media::AudioPortType typeAidl, Int* count,
                                           std::vector<media::AudioPort>* portsAidl,
                                           int32_t* _aidl_return) {
     audio_port_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1535,7 +1628,7 @@
                                                    IPCThreadState::self()->getCallingUid()));
 }
 
-Status AudioPolicyService::listAudioPatches(media::Int* count,
+Status AudioPolicyService::listAudioPatches(Int* count,
                                             std::vector<media::AudioPatch>* patchesAidl,
                                             int32_t* _aidl_return) {
     unsigned int num_patches = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1602,7 +1695,7 @@
     _aidl_return->ioHandle = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
     _aidl_return->device = VALUE_OR_RETURN_BINDER_STATUS(
-            legacy2aidl_audio_devices_t_int32_t(device));
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     return Status::ok();
 }
 
@@ -1672,7 +1765,7 @@
 
 Status AudioPolicyService::setUidDeviceAffinities(
         int32_t uidAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -1705,7 +1798,7 @@
 
 Status AudioPolicyService::setUserIdDeviceAffinities(
         int32_t userIdAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     int userId = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(userIdAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -1754,7 +1847,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
 
     // startAudioSource should be created as the calling uid
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
@@ -1802,13 +1895,14 @@
 }
 
 
-Status AudioPolicyService::getStreamVolumeDB(media::AudioStreamType streamAidl, int32_t indexAidl,
-                                             int32_t deviceAidl, float* _aidl_return) {
+Status AudioPolicyService::getStreamVolumeDB(
+        AudioStreamType streamAidl, int32_t indexAidl,
+        const AudioDeviceDescription& deviceAidl, float* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
     int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
 
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
@@ -1819,8 +1913,8 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getSurroundFormats(media::Int* count,
-        std::vector<media::audio::common::AudioFormat>* formats,
+Status AudioPolicyService::getSurroundFormats(Int* count,
+        std::vector<AudioFormatDescription>* formats,
         std::vector<bool>* formatsEnabled) {
     unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
             convertIntegral<unsigned int>(count->value));
@@ -1842,7 +1936,8 @@
     numSurroundFormatsReq = std::min(numSurroundFormats, numSurroundFormatsReq);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             convertRange(surroundFormats.get(), surroundFormats.get() + numSurroundFormatsReq,
-                         std::back_inserter(*formats), legacy2aidl_audio_format_t_AudioFormat)));
+                         std::back_inserter(*formats),
+                         legacy2aidl_audio_format_t_AudioFormatDescription)));
     formatsEnabled->insert(
             formatsEnabled->begin(),
             surroundFormatsEnabled.get(),
@@ -1852,7 +1947,7 @@
 }
 
 Status AudioPolicyService::getReportedSurroundFormats(
-        media::Int* count, std::vector<media::audio::common::AudioFormat>* formats) {
+        Int* count, std::vector<AudioFormatDescription>* formats) {
     unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
             convertIntegral<unsigned int>(count->value));
     if (numSurroundFormats > MAX_ITEMS_PER_LIST) {
@@ -1872,13 +1967,15 @@
     numSurroundFormatsReq = std::min(numSurroundFormats, numSurroundFormatsReq);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             convertRange(surroundFormats.get(), surroundFormats.get() + numSurroundFormatsReq,
-                         std::back_inserter(*formats), legacy2aidl_audio_format_t_AudioFormat)));
+                         std::back_inserter(*formats),
+                         legacy2aidl_audio_format_t_AudioFormatDescription)));
     count->value = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<uint32_t>(numSurroundFormats));
     return Status::ok();
 }
 
-Status AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
-        std::vector<media::audio::common::AudioFormat>* _aidl_return) {
+Status AudioPolicyService::getHwOffloadFormatsSupportedForBluetoothMedia(
+        const AudioDeviceDescription& deviceAidl,
+        std::vector<AudioFormatDescription>* _aidl_return) {
     std::vector<audio_format_t> formats;
 
     if (mAudioPolicyManager == NULL) {
@@ -1886,19 +1983,21 @@
     }
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
+    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
-            mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(&formats)));
+            mAudioPolicyManager->getHwOffloadFormatsSupportedForBluetoothMedia(device, &formats)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::audio::common::AudioFormat>>(
+            convertContainer<std::vector<AudioFormatDescription>>(
                     formats,
-                    legacy2aidl_audio_format_t_AudioFormat));
+                    legacy2aidl_audio_format_t_AudioFormatDescription));
     return Status::ok();
 }
 
 Status AudioPolicyService::setSurroundFormatEnabled(
-        media::audio::common::AudioFormat audioFormatAidl, bool enabled) {
+        const AudioFormatDescription& audioFormatAidl, bool enabled) {
     audio_format_t audioFormat = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioFormat_audio_format_t(audioFormatAidl));
+            aidl2legacy_AudioFormatDescription_audio_format_t(audioFormatAidl));
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1960,6 +2059,17 @@
     return Status::ok();
 }
 
+Status AudioPolicyService::isUltrasoundSupported(bool* _aidl_return)
+{
+    if (mAudioPolicyManager == NULL) {
+        return binderStatusFromStatusT(NO_INIT);
+    }
+    Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
+    *_aidl_return = mAudioPolicyManager->isUltrasoundSupported();
+    return Status::ok();
+}
+
 Status AudioPolicyService::listAudioProductStrategies(
         std::vector<media::AudioProductStrategy>* _aidl_return) {
     AudioProductStrategyVector strategies;
@@ -2049,7 +2159,7 @@
 Status AudioPolicyService::setDevicesRoleForStrategy(
         int32_t strategyAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     product_strategy_t strategy = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_product_strategy_t(strategyAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2062,8 +2172,11 @@
         return binderStatusFromStatusT(NO_INIT);
     }
     Mutex::Autolock _l(mLock);
-    return binderStatusFromStatusT(
-            mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices));
+    status_t status = mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices);
+    if (status == NO_ERROR) {
+        onCheckSpatializer_l();
+    }
+    return binderStatusFromStatusT(status);
 }
 
 Status AudioPolicyService::removeDevicesRoleForStrategy(int32_t strategyAidl,
@@ -2076,14 +2189,17 @@
         return binderStatusFromStatusT(NO_INIT);
     }
     Mutex::Autolock _l(mLock);
-    return binderStatusFromStatusT(
-            mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role));
+    status_t status = mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role);
+    if (status == NO_ERROR) {
+        onCheckSpatializer_l();
+    }
+    return binderStatusFromStatusT(status);
 }
 
 Status AudioPolicyService::getDevicesForRoleAndStrategy(
         int32_t strategyAidl,
         media::DeviceRole roleAidl,
-        std::vector<media::AudioDevice>* _aidl_return) {
+        std::vector<AudioDevice>* _aidl_return) {
     product_strategy_t strategy = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_product_strategy_t(strategyAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2097,8 +2213,8 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndStrategy(strategy, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return Status::ok();
 }
 
@@ -2109,11 +2225,11 @@
 }
 
 Status AudioPolicyService::setDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2129,11 +2245,11 @@
 }
 
 Status AudioPolicyService::addDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2149,11 +2265,11 @@
 }
 
 Status AudioPolicyService::removeDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2168,10 +2284,10 @@
             mAudioPolicyManager->removeDevicesRoleForCapturePreset(audioSource, role, devices));
 }
 
-Status AudioPolicyService::clearDevicesRoleForCapturePreset(media::AudioSourceType audioSourceAidl,
+Status AudioPolicyService::clearDevicesRoleForCapturePreset(AudioSource audioSourceAidl,
                                                             media::DeviceRole roleAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
 
@@ -2184,11 +2300,11 @@
 }
 
 Status AudioPolicyService::getDevicesForRoleAndCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        std::vector<media::AudioDevice>* _aidl_return) {
+        std::vector<AudioDevice>* _aidl_return) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices;
@@ -2200,8 +2316,91 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
+    return Status::ok();
+}
+
+Status AudioPolicyService::getSpatializer(
+        const sp<media::INativeSpatializerCallback>& callback,
+        media::GetSpatializerResponse* _aidl_return) {
+    _aidl_return->spatializer = nullptr;
+    if (callback == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    if (mSpatializer != nullptr) {
+        RETURN_IF_BINDER_ERROR(
+                binderStatusFromStatusT(mSpatializer->registerCallback(callback)));
+        _aidl_return->spatializer = mSpatializer;
+    }
+    return Status::ok();
+}
+
+Status AudioPolicyService::canBeSpatialized(
+        const std::optional<media::AudioAttributesInternal>& attrAidl,
+        const std::optional<AudioConfig>& configAidl,
+        const std::vector<AudioDevice>& devicesAidl,
+        bool* _aidl_return) {
+    if (mAudioPolicyManager == nullptr) {
+        return binderStatusFromStatusT(NO_INIT);
+    }
+    audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
+    if (attrAidl.has_value()) {
+        attr = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl.value()));
+    }
+    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+    if (configAidl.has_value()) {
+        config = VALUE_OR_RETURN_BINDER_STATUS(
+                                    aidl2legacy_AudioConfig_audio_config_t(configAidl.value(),
+                                    false /*isInput*/));
+    }
+    AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
+            convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
+                                                        aidl2legacy_AudioDeviceTypeAddress));
+
+    Mutex::Autolock _l(mLock);
+    *_aidl_return = mAudioPolicyManager->canBeSpatialized(&attr, &config, devices);
+    return Status::ok();
+}
+
+Status AudioPolicyService::getDirectPlaybackSupport(const media::AudioAttributesInternal &attrAidl,
+                                                    const AudioConfig &configAidl,
+                                                    media::AudioDirectMode *_aidl_return) {
+    if (mAudioPolicyManager == nullptr) {
+        return binderStatusFromStatusT(NO_INIT);
+    }
+    if (_aidl_return == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    audio_attributes_t attr = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
+    audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
+    Mutex::Autolock _l(mLock);
+    *_aidl_return = static_cast<media::AudioDirectMode>(
+            VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_direct_mode_t_int32_t_mask(
+                    mAudioPolicyManager->getDirectPlaybackSupport(&attr, &config))));
+    return Status::ok();
+}
+
+Status AudioPolicyService::getDirectProfilesForAttributes(
+                                const media::AudioAttributesInternal& attrAidl,
+                                std::vector<media::audio::common::AudioProfile>* _aidl_return) {
+    if (mAudioPolicyManager == nullptr) {
+        return binderStatusFromStatusT(NO_INIT);
+    }
+    audio_attributes_t attr = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
+    AudioProfileVector audioProfiles;
+
+    Mutex::Autolock _l(mLock);
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
+            mAudioPolicyManager->getDirectProfilesForAttributes(&attr, audioProfiles)));
+    *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
+            convertContainer<std::vector<media::audio::common::AudioProfile>>(
+                audioProfiles, legacy2aidl_AudioProfile_common, false /*isInput*/));
+
     return Status::ok();
 }
 
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 4d0e1f1..8add137 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -127,6 +127,7 @@
         loadAudioPolicyManager();
         mAudioPolicyManager = mCreateAudioPolicyManager(mAudioPolicyClient);
     }
+
     // load audio processing modules
     sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects();
     sp<UidPolicy> uidPolicy = new UidPolicy(this);
@@ -139,6 +140,18 @@
     }
     uidPolicy->registerSelf();
     sensorPrivacyPolicy->registerSelf();
+
+    // Create spatializer if supported
+    if (mAudioPolicyManager != nullptr) {
+        Mutex::Autolock _l(mLock);
+        const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
+        AudioDeviceTypeAddrVector devices;
+        bool hasSpatializer = mAudioPolicyManager->canBeSpatialized(&attr, nullptr, devices);
+        if (hasSpatializer) {
+            mSpatializer = Spatializer::create(this);
+        }
+    }
+    AudioSystem::audioPolicyReady();
 }
 
 void AudioPolicyService::unloadAudioPolicyManager()
@@ -353,6 +366,61 @@
     }
 }
 
+void AudioPolicyService::onCheckSpatializer()
+{
+    Mutex::Autolock _l(mLock);
+    onCheckSpatializer_l();
+}
+
+void AudioPolicyService::onCheckSpatializer_l()
+{
+    if (mSpatializer != nullptr) {
+        mOutputCommandThread->checkSpatializerCommand();
+    }
+}
+
+void AudioPolicyService::doOnCheckSpatializer()
+{
+    Mutex::Autolock _l(mLock);
+
+    if (mSpatializer != nullptr) {
+        // Note: mSpatializer != nullptr => mAudioPolicyManager != nullptr
+        if (mSpatializer->getLevel() != media::SpatializationLevel::NONE) {
+            audio_io_handle_t currentOutput = mSpatializer->getOutput();
+            audio_io_handle_t newOutput;
+            const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
+            audio_config_base_t config = mSpatializer->getAudioInConfig();
+            status_t status =
+                    mAudioPolicyManager->getSpatializerOutput(&config, &attr, &newOutput);
+            ALOGV("%s currentOutput %d newOutput %d channel_mask %#x",
+                    __func__, currentOutput, newOutput, config.channel_mask);
+            if (status == NO_ERROR && currentOutput == newOutput) {
+                return;
+            }
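+            // mLock is released around the Spatializer detach/attach calls below and
+            // re-acquired before using mAudioPolicyManager again.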
+            mLock.unlock();
+            // It is OK to call detachOutput() if none is already attached.
+            mSpatializer->detachOutput();
+            if (status != NO_ERROR || newOutput == AUDIO_IO_HANDLE_NONE) {
+                mLock.lock();
+                return;
+            }
+            status = mSpatializer->attachOutput(newOutput);
+            mLock.lock();
+            if (status != NO_ERROR) {
+                mAudioPolicyManager->releaseSpatializerOutput(newOutput);
+            }
+        } else if (mSpatializer->getLevel() == media::SpatializationLevel::NONE
+                               && mSpatializer->getOutput() != AUDIO_IO_HANDLE_NONE) {
+            mLock.unlock();
+            audio_io_handle_t output = mSpatializer->detachOutput();
+            mLock.lock();
+            if (output != AUDIO_IO_HANDLE_NONE) {
+                mAudioPolicyManager->releaseSpatializerOutput(output);
+            }
+        }
+    }
+}
+
 status_t AudioPolicyService::clientCreateAudioPatch(const struct audio_patch *patch,
                                                 audio_patch_handle_t *handle,
                                                 int delayMs)
@@ -409,7 +477,7 @@
     }
 }
 
-void AudioPolicyService::NotificationClient::onAudioVolumeGroupChanged(volume_group_t group, 
+void AudioPolicyService::NotificationClient::onAudioVolumeGroupChanged(volume_group_t group,
                                                                       int flags)
 {
     if (mAudioPolicyServiceClient != 0 && mAudioVolumeGroupCallbacksEnabled) {
@@ -442,22 +510,24 @@
             int32_t eventAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(event));
             media::RecordClientInfo clientInfoAidl = VALUE_OR_RETURN_STATUS(
                     legacy2aidl_record_client_info_t_RecordClientInfo(*clientInfo));
-            media::AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_config_base_t_AudioConfigBase(*clientConfig));
+            AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
+                    legacy2aidl_audio_config_base_t_AudioConfigBase(
+                            *clientConfig, true /*isInput*/));
             std::vector<media::EffectDescriptor> clientEffectsAidl = VALUE_OR_RETURN_STATUS(
                     convertContainer<std::vector<media::EffectDescriptor>>(
                             clientEffects,
                             legacy2aidl_effect_descriptor_t_EffectDescriptor));
-            media::AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_config_base_t_AudioConfigBase(*deviceConfig));
+            AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
+                    legacy2aidl_audio_config_base_t_AudioConfigBase(
+                            *deviceConfig, true /*isInput*/));
             std::vector<media::EffectDescriptor> effectsAidl = VALUE_OR_RETURN_STATUS(
                     convertContainer<std::vector<media::EffectDescriptor>>(
                             effects,
                             legacy2aidl_effect_descriptor_t_EffectDescriptor));
             int32_t patchHandleAidl = VALUE_OR_RETURN_STATUS(
                     legacy2aidl_audio_patch_handle_t_int32_t(patchHandle));
-            media::AudioSourceType sourceAidl = VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_source_t_AudioSourceType(source));
+            media::audio::common::AudioSource sourceAidl = VALUE_OR_RETURN_STATUS(
+                    legacy2aidl_audio_source_t_AudioSource(source));
             return aidl_utils::statusTFromBinderStatus(
                     mAudioPolicyServiceClient->onRecordingConfigurationUpdate(eventAidl,
                                                                               clientInfoAidl,
@@ -660,7 +730,8 @@
         if (current->attributes.source != AUDIO_SOURCE_HOTWORD) {
             onlyHotwordActive = false;
         }
-        if (currentUid == mPhoneStateOwnerUid) {
+        if (currentUid == mPhoneStateOwnerUid &&
+                !isVirtualSource(current->attributes.source)) {
             isPhoneStateOwnerActive = true;
         }
     }
@@ -839,6 +910,7 @@
     switch (source) {
         case AUDIO_SOURCE_FM_TUNER:
         case AUDIO_SOURCE_ECHO_REFERENCE:
+        case AUDIO_SOURCE_REMOTE_SUBMIX:
             return false;
         default:
             break;
@@ -970,7 +1042,7 @@
         case TRANSACTION_removeUidDeviceAffinities:
         case TRANSACTION_setUserIdDeviceAffinities:
         case TRANSACTION_removeUserIdDeviceAffinities:
-        case TRANSACTION_getHwOffloadEncodingFormatsSupportedForA2DP:
+        case TRANSACTION_getHwOffloadFormatsSupportedForBluetoothMedia:
         case TRANSACTION_listAudioVolumeGroups:
         case TRANSACTION_getVolumeGroupFromAudioAttributes:
         case TRANSACTION_acquireSoundTriggerSession:
@@ -990,7 +1062,8 @@
         case TRANSACTION_addDevicesRoleForCapturePreset:
         case TRANSACTION_removeDevicesRoleForCapturePreset:
         case TRANSACTION_clearDevicesRoleForCapturePreset:
-        case TRANSACTION_getDevicesForRoleAndCapturePreset: {
+        case TRANSACTION_getDevicesForRoleAndCapturePreset:
+        case TRANSACTION_getSpatializer: {
             if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1764,6 +1837,17 @@
                     mLock.lock();
                     } break;
 
+                case CHECK_SPATIALIZER: {
+                    ALOGV("AudioCommandThread() processing check spatializer");
+                    svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doOnCheckSpatializer();
+                    mLock.lock();
+                    } break;
+
                 default:
                     ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
                 }
@@ -2075,6 +2159,14 @@
     sendCommand(command);
 }
 
+void AudioPolicyService::AudioCommandThread::checkSpatializerCommand()
+{
+    sp<AudioCommand>command = new AudioCommand();
+    command->mCommand = CHECK_SPATIALIZER;
+    ALOGV("AudioCommandThread() adding check spatializer");
+    sendCommand(command);
+}
+
 status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
 {
     {
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 7ed829c..ac5af6b 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -19,6 +19,7 @@
 #define ANDROID_AUDIOPOLICYSERVICE_H
 
 #include <android/media/BnAudioPolicyService.h>
+#include <android/media/GetSpatializerResponse.h>
 #include <android-base/thread_annotations.h>
 #include <cutils/misc.h>
 #include <cutils/config_utils.h>
@@ -38,6 +39,7 @@
 #include <mediautils/ServiceUtilities.h>
 #include "AudioPolicyEffects.h"
 #include "CaptureStateNotifier.h"
+#include "Spatializer.h"
 #include <AudioPolicyInterface.h>
 #include <android/hardware/BnSensorPrivacyListener.h>
 #include <android/content/AttributionSourceState.h>
@@ -47,13 +49,25 @@
 namespace android {
 
 using content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::Int;
 
 // ----------------------------------------------------------------------------
 
 class AudioPolicyService :
     public BinderService<AudioPolicyService>,
     public media::BnAudioPolicyService,
-    public IBinder::DeathRecipient
+    public IBinder::DeathRecipient,
+    public SpatializerPolicyCallback
 {
     friend class BinderService<AudioPolicyService>;
 
@@ -68,25 +82,25 @@
     //
     binder::Status onNewAudioModulesAvailable() override;
     binder::Status setDeviceConnectionState(
-            const media::AudioDevice& device,
+            const AudioDevice& device,
             media::AudioPolicyDeviceState state,
             const std::string& deviceName,
-            media::audio::common::AudioFormat encodedFormat) override;
-    binder::Status getDeviceConnectionState(const media::AudioDevice& device,
+            const AudioFormatDescription& encodedFormat) override;
+    binder::Status getDeviceConnectionState(const AudioDevice& device,
                                             media::AudioPolicyDeviceState* _aidl_return) override;
     binder::Status handleDeviceConfigChange(
-            const media::AudioDevice& device,
+            const AudioDevice& device,
             const std::string& deviceName,
-            media::audio::common::AudioFormat encodedFormat) override;
-    binder::Status setPhoneState(media::AudioMode state, int32_t uid) override;
+            const AudioFormatDescription& encodedFormat) override;
+    binder::Status setPhoneState(AudioMode state, int32_t uid) override;
     binder::Status setForceUse(media::AudioPolicyForceUse usage,
                                media::AudioPolicyForcedConfig config) override;
     binder::Status getForceUse(media::AudioPolicyForceUse usage,
                                media::AudioPolicyForcedConfig* _aidl_return) override;
-    binder::Status getOutput(media::AudioStreamType stream, int32_t* _aidl_return) override;
+    binder::Status getOutput(AudioStreamType stream, int32_t* _aidl_return) override;
     binder::Status getOutputForAttr(const media::AudioAttributesInternal& attr, int32_t session,
                                     const AttributionSourceState &attributionSource,
-                                    const media::AudioConfig& config,
+                                    const AudioConfig& config,
                                     int32_t flags, int32_t selectedDeviceId,
                                     media::GetOutputForAttrResponse* _aidl_return) override;
     binder::Status startOutput(int32_t portId) override;
@@ -95,32 +109,37 @@
     binder::Status getInputForAttr(const media::AudioAttributesInternal& attr, int32_t input,
                                    int32_t riid, int32_t session,
                                    const AttributionSourceState &attributionSource,
-                                   const media::AudioConfigBase& config, int32_t flags,
+                                   const AudioConfigBase& config, int32_t flags,
                                    int32_t selectedDeviceId,
                                    media::GetInputForAttrResponse* _aidl_return) override;
     binder::Status startInput(int32_t portId) override;
     binder::Status stopInput(int32_t portId) override;
     binder::Status releaseInput(int32_t portId) override;
-    binder::Status initStreamVolume(media::AudioStreamType stream, int32_t indexMin,
+    binder::Status initStreamVolume(AudioStreamType stream, int32_t indexMin,
                                     int32_t indexMax) override;
-    binder::Status setStreamVolumeIndex(media::AudioStreamType stream, int32_t device,
+    binder::Status setStreamVolumeIndex(AudioStreamType stream,
+                                        const AudioDeviceDescription& device,
                                         int32_t index) override;
-    binder::Status getStreamVolumeIndex(media::AudioStreamType stream, int32_t device,
+    binder::Status getStreamVolumeIndex(AudioStreamType stream,
+                                        const AudioDeviceDescription& device,
                                         int32_t* _aidl_return) override;
     binder::Status setVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
-                                               int32_t device, int32_t index) override;
+                                               const AudioDeviceDescription& device,
+                                               int32_t index) override;
     binder::Status getVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
-                                               int32_t device, int32_t* _aidl_return) override;
+                                               const AudioDeviceDescription& device,
+                                               int32_t* _aidl_return) override;
     binder::Status getMaxVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
                                                   int32_t* _aidl_return) override;
     binder::Status getMinVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
                                                   int32_t* _aidl_return) override;
-    binder::Status getStrategyForStream(media::AudioStreamType stream,
+    binder::Status getStrategyForStream(AudioStreamType stream,
                                         int32_t* _aidl_return) override;
-    binder::Status getDevicesForStream(media::AudioStreamType stream,
-                                       int32_t* _aidl_return) override;
+    binder::Status getDevicesForStream(
+            AudioStreamType stream,
+            std::vector<AudioDeviceDescription>* _aidl_return) override;
     binder::Status getDevicesForAttributes(const media::AudioAttributesEx& attr,
-                                           std::vector<media::AudioDevice>* _aidl_return) override;
+                                           std::vector<AudioDevice>* _aidl_return) override;
     binder::Status getOutputForEffect(const media::EffectDescriptor& desc,
                                       int32_t* _aidl_return) override;
     binder::Status registerEffect(const media::EffectDescriptor& desc, int32_t io, int32_t strategy,
@@ -128,42 +147,42 @@
     binder::Status unregisterEffect(int32_t id) override;
     binder::Status setEffectEnabled(int32_t id, bool enabled) override;
     binder::Status moveEffectsToIo(const std::vector<int32_t>& ids, int32_t io) override;
-    binder::Status isStreamActive(media::AudioStreamType stream, int32_t inPastMs,
+    binder::Status isStreamActive(AudioStreamType stream, int32_t inPastMs,
                                   bool* _aidl_return) override;
-    binder::Status isStreamActiveRemotely(media::AudioStreamType stream, int32_t inPastMs,
+    binder::Status isStreamActiveRemotely(AudioStreamType stream, int32_t inPastMs,
                                           bool* _aidl_return) override;
-    binder::Status isSourceActive(media::AudioSourceType source, bool* _aidl_return) override;
+    binder::Status isSourceActive(AudioSource source, bool* _aidl_return) override;
     binder::Status queryDefaultPreProcessing(
-            int32_t audioSession, media::Int* count,
+            int32_t audioSession, Int* count,
             std::vector<media::EffectDescriptor>* _aidl_return) override;
-    binder::Status addSourceDefaultEffect(const media::AudioUuid& type,
+    binder::Status addSourceDefaultEffect(const AudioUuid& type,
                                           const std::string& opPackageName,
-                                          const media::AudioUuid& uuid, int32_t priority,
-                                          media::AudioSourceType source,
+                                          const AudioUuid& uuid, int32_t priority,
+                                          AudioSource source,
                                           int32_t* _aidl_return) override;
-    binder::Status addStreamDefaultEffect(const media::AudioUuid& type,
+    binder::Status addStreamDefaultEffect(const AudioUuid& type,
                                           const std::string& opPackageName,
-                                          const media::AudioUuid& uuid, int32_t priority,
-                                          media::AudioUsage usage, int32_t* _aidl_return) override;
+                                          const AudioUuid& uuid, int32_t priority,
+                                          AudioUsage usage, int32_t* _aidl_return) override;
     binder::Status removeSourceDefaultEffect(int32_t id) override;
     binder::Status removeStreamDefaultEffect(int32_t id) override;
     binder::Status setSupportedSystemUsages(
-            const std::vector<media::AudioUsage>& systemUsages) override;
+            const std::vector<AudioUsage>& systemUsages) override;
     binder::Status setAllowedCapturePolicy(int32_t uid, int32_t capturePolicy) override;
-    binder::Status getOffloadSupport(const media::AudioOffloadInfo& info,
+    binder::Status getOffloadSupport(const media::audio::common::AudioOffloadInfo& info,
                                      media::AudioOffloadMode* _aidl_return) override;
-    binder::Status isDirectOutputSupported(const media::AudioConfigBase& config,
+    binder::Status isDirectOutputSupported(const AudioConfigBase& config,
                                            const media::AudioAttributesInternal& attributes,
                                            bool* _aidl_return) override;
     binder::Status listAudioPorts(media::AudioPortRole role, media::AudioPortType type,
-                                  media::Int* count, std::vector<media::AudioPort>* ports,
+                                  Int* count, std::vector<media::AudioPort>* ports,
                                   int32_t* _aidl_return) override;
     binder::Status getAudioPort(const media::AudioPort& port,
                                 media::AudioPort* _aidl_return) override;
     binder::Status createAudioPatch(const media::AudioPatch& patch, int32_t handle,
                                     int32_t* _aidl_return) override;
     binder::Status releaseAudioPatch(int32_t handle) override;
-    binder::Status listAudioPatches(media::Int* count, std::vector<media::AudioPatch>* patches,
+    binder::Status listAudioPatches(Int* count, std::vector<media::AudioPatch>* patches,
                                     int32_t* _aidl_return) override;
     binder::Status setAudioPortConfig(const media::AudioPortConfig& config) override;
     binder::Status registerClient(const sp<media::IAudioPolicyServiceClient>& client) override;
@@ -171,15 +190,15 @@
     binder::Status setAudioVolumeGroupCallbacksEnabled(bool enabled) override;
     binder::Status acquireSoundTriggerSession(media::SoundTriggerSession* _aidl_return) override;
     binder::Status releaseSoundTriggerSession(int32_t session) override;
-    binder::Status getPhoneState(media::AudioMode* _aidl_return) override;
+    binder::Status getPhoneState(AudioMode* _aidl_return) override;
     binder::Status registerPolicyMixes(const std::vector<media::AudioMix>& mixes,
                                        bool registration) override;
     binder::Status setUidDeviceAffinities(int32_t uid,
-                                          const std::vector<media::AudioDevice>& devices) override;
+                                          const std::vector<AudioDevice>& devices) override;
     binder::Status removeUidDeviceAffinities(int32_t uid) override;
     binder::Status setUserIdDeviceAffinities(
             int32_t userId,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeUserIdDeviceAffinities(int32_t userId) override;
     binder::Status startAudioSource(const media::AudioPortConfig& source,
                                     const media::AudioAttributesInternal& attributes,
@@ -187,22 +206,25 @@
     binder::Status stopAudioSource(int32_t portId) override;
     binder::Status setMasterMono(bool mono) override;
     binder::Status getMasterMono(bool* _aidl_return) override;
-    binder::Status getStreamVolumeDB(media::AudioStreamType stream, int32_t index, int32_t device,
+    binder::Status getStreamVolumeDB(AudioStreamType stream, int32_t index,
+                                     const AudioDeviceDescription& device,
                                      float* _aidl_return) override;
-    binder::Status getSurroundFormats(media::Int* count,
-                                      std::vector<media::audio::common::AudioFormat>* formats,
+    binder::Status getSurroundFormats(Int* count,
+                                      std::vector<AudioFormatDescription>* formats,
                                       std::vector<bool>* formatsEnabled) override;
     binder::Status getReportedSurroundFormats(
-            media::Int* count, std::vector<media::audio::common::AudioFormat>* formats) override;
-    binder::Status getHwOffloadEncodingFormatsSupportedForA2DP(
-            std::vector<media::audio::common::AudioFormat>* _aidl_return) override;
-    binder::Status setSurroundFormatEnabled(media::audio::common::AudioFormat audioFormat,
+            Int* count, std::vector<AudioFormatDescription>* formats) override;
+    binder::Status getHwOffloadFormatsSupportedForBluetoothMedia(
+            const AudioDeviceDescription& device,
+            std::vector<AudioFormatDescription>* _aidl_return) override;
+    binder::Status setSurroundFormatEnabled(const AudioFormatDescription& audioFormat,
                                             bool enabled) override;
     binder::Status setAssistantUid(int32_t uid) override;
     binder::Status setHotwordDetectionServiceUid(int32_t uid) override;
     binder::Status setA11yServicesUids(const std::vector<int32_t>& uids) override;
     binder::Status setCurrentImeUid(int32_t uid) override;
     binder::Status isHapticPlaybackSupported(bool* _aidl_return) override;
+    binder::Status isUltrasoundSupported(bool* _aidl_return) override;
     binder::Status listAudioProductStrategies(
             std::vector<media::AudioProductStrategy>* _aidl_return) override;
     binder::Status getProductStrategyFromAudioAttributes(const media::AudioAttributesEx& aa,
@@ -217,37 +239,48 @@
     binder::Status isCallScreenModeSupported(bool* _aidl_return) override;
     binder::Status setDevicesRoleForStrategy(
             int32_t strategy, media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeDevicesRoleForStrategy(int32_t strategy, media::DeviceRole role) override;
     binder::Status getDevicesForRoleAndStrategy(
             int32_t strategy, media::DeviceRole role,
-            std::vector<media::AudioDevice>* _aidl_return) override;
+            std::vector<AudioDevice>* _aidl_return) override;
     binder::Status setDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status addDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
-    binder::Status clearDevicesRoleForCapturePreset(media::AudioSourceType audioSource,
+            const std::vector<AudioDevice>& devices) override;
+    binder::Status clearDevicesRoleForCapturePreset(AudioSource audioSource,
                                                     media::DeviceRole role) override;
     binder::Status getDevicesForRoleAndCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            std::vector<media::AudioDevice>* _aidl_return) override;
+            std::vector<AudioDevice>* _aidl_return) override;
     binder::Status registerSoundTriggerCaptureStateListener(
             const sp<media::ICaptureStateListener>& listener, bool* _aidl_return) override;
 
-    virtual     status_t    onTransact(
-                                uint32_t code,
-                                const Parcel& data,
-                                Parcel* reply,
-                                uint32_t flags);
+    binder::Status getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+            media::GetSpatializerResponse* _aidl_return) override;
+    binder::Status canBeSpatialized(
+            const std::optional<media::AudioAttributesInternal>& attr,
+            const std::optional<AudioConfig>& config,
+            const std::vector<AudioDevice>& devices,
+            bool* _aidl_return) override;
+
+    binder::Status getDirectPlaybackSupport(const media::AudioAttributesInternal& attr,
+                                            const AudioConfig& config,
+                                            media::AudioDirectMode* _aidl_return) override;
+
+    binder::Status getDirectProfilesForAttributes(const media::AudioAttributesInternal& attr,
+                        std::vector<media::audio::common::AudioProfile>* _aidl_return) override;
+
+    status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
 
     // IBinder::DeathRecipient
     virtual     void        binderDied(const wp<IBinder>& who);
@@ -313,6 +346,16 @@
     void onRoutingUpdated();
     void doOnRoutingUpdated();
 
+    /**
+     * Spatializer SpatializerPolicyCallback implementation.
+     * onCheckSpatializer() sends an event on mOutputCommandThread which executes
+     * doOnCheckSpatializer() to check whether a Spatializer output must be opened or closed
+     * by the audio policy manager, and attaches or detaches the spatializer effect accordingly.
+     */
+    void onCheckSpatializer() override;
+    void onCheckSpatializer_l();
+    void doOnCheckSpatializer();
+
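Illustrative sketch only, not part of the patch: a plausible shape for the corresponding AudioPolicyService.cpp wiring, assuming it mirrors the command-thread pattern visible in the hunks above; the actual implementation is not included in this excerpt.

void AudioPolicyService::onCheckSpatializer() {
    // Defer the check to the output command thread so it runs asynchronously,
    // without holding the caller's locks, like the other commands in this file.
    mOutputCommandThread->checkSpatializerCommand();
}

void AudioPolicyService::doOnCheckSpatializer() {
    // Executed by AudioCommandThread when the CHECK_SPATIALIZER command is processed:
    // ask the audio policy manager whether a spatializer output must be opened or closed,
    // then attach or detach mSpatializer accordingly.
}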
     void setEffectSuspended(int effectId,
                             audio_session_t sessionId,
                             bool suspended);
@@ -350,8 +393,9 @@
     app_state_t apmStatFromAmState(int amState);
 
     bool isSupportedSystemUsage(audio_usage_t usage);
-    status_t validateUsage(audio_usage_t usage);
-    status_t validateUsage(audio_usage_t usage, const AttributionSourceState& attributionSource);
+    status_t validateUsage(const audio_attributes_t& attr);
+    status_t validateUsage(const audio_attributes_t& attr,
+                           const AttributionSourceState& attributionSource);
 
     void updateUidStates();
     void updateUidStates_l() REQUIRES(mLock);
@@ -483,7 +527,8 @@
             SET_EFFECT_SUSPENDED,
             AUDIO_MODULES_UPDATE,
             ROUTING_UPDATED,
-            UPDATE_UID_STATES
+            UPDATE_UID_STATES,
+            CHECK_SPATIALIZER
         };
 
         AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
@@ -532,6 +577,7 @@
                     void        audioModulesUpdateCommand();
                     void        routingChangedCommand();
                     void        updateUidStatesCommand();
+                    void        checkSpatializerCommand();
                     void        insertCommand_l(AudioCommand *command, int delayMs = 0);
     private:
         class AudioCommandData;
@@ -667,7 +713,8 @@
         // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
         virtual status_t openOutput(audio_module_handle_t module,
                                     audio_io_handle_t *output,
-                                    audio_config_t *config,
+                                    audio_config_t *halConfig,
+                                    audio_config_base_t *mixerConfig,
                                     const sp<DeviceDescriptorBase>& device,
                                     uint32_t *latencyMs,
                                     audio_output_flags_t flags);
@@ -985,6 +1032,8 @@
 
     CaptureStateNotifier mCaptureStateNotifier;
 
+    sp<Spatializer> mSpatializer;
+
     void *mLibraryHandle = nullptr;
     CreateAudioPolicyManagerInstance mCreateAudioPolicyManager;
     DestroyAudioPolicyManagerInstance mDestroyAudioPolicyManager;
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
new file mode 100644
index 0000000..54d9094
--- /dev/null
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -0,0 +1,742 @@
+/*
+**
+** Copyright 2021, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#define LOG_TAG "Spatializer"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <limits.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <android/content/AttributionSourceState.h>
+#include <audio_utils/fixedfft.h>
+#include <cutils/bitops.h>
+#include <hardware/sensors.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/ShmemCompat.h>
+#include <mediautils/ServiceUtilities.h>
+#include <utils/Thread.h>
+
+#include "Spatializer.h"
+
+namespace android {
+
+using aidl_utils::statusTFromBinderStatus;
+using aidl_utils::binderStatusFromStatusT;
+using android::content::AttributionSourceState;
+using binder::Status;
+using media::HeadTrackingMode;
+using media::Pose3f;
+using media::SpatializationLevel;
+using media::SpatializationMode;
+using media::SpatializerHeadTrackingMode;
+using media::SensorPoseProvider;
+
+using namespace std::chrono_literals;
+
+#define VALUE_OR_RETURN_BINDER_STATUS(x) \
+    ({ auto _tmp = (x); \
+       if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
+       std::move(_tmp.value()); })
+
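Illustrative only, not part of the patch: how the macro above is typically used to unwrap a ConversionResult, as getOutput() does further down in this file. The wrapper function below is hypothetical.

binder::Status exampleConvertOutputHandle(audio_io_handle_t handle, int32_t* aidlHandle) {
    // On conversion failure the macro returns early with the error wrapped in a binder::Status;
    // on success it yields the converted value.
    *aidlHandle = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_io_handle_t_int32_t(handle));
    return binder::Status::ok();
}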
+// ---------------------------------------------------------------------------
+
+class Spatializer::EngineCallbackHandler : public AHandler {
+public:
+    EngineCallbackHandler(wp<Spatializer> spatializer)
+            : mSpatializer(spatializer) {
+    }
+
+    enum {
+        // Device state callbacks
+        kWhatOnFramesProcessed,    // AudioEffect::EVENT_FRAMES_PROCESSED
+        kWhatOnHeadToStagePose,    // SpatializerPoseController::Listener::onHeadToStagePose
+        kWhatOnActualModeChange,   // SpatializerPoseController::Listener::onActualModeChange
+    };
+    static constexpr const char *kNumFramesKey = "numFrames";
+    static constexpr const char *kModeKey = "mode";
+    static constexpr const char *kTranslation0Key = "translation0";
+    static constexpr const char *kTranslation1Key = "translation1";
+    static constexpr const char *kTranslation2Key = "translation2";
+    static constexpr const char *kRotation0Key = "rotation0";
+    static constexpr const char *kRotation1Key = "rotation1";
+    static constexpr const char *kRotation2Key = "rotation2";
+
+    void onMessageReceived(const sp<AMessage> &msg) override {
+        switch (msg->what()) {
+            case kWhatOnFramesProcessed: {
+                sp<Spatializer> spatializer = mSpatializer.promote();
+                if (spatializer == nullptr) {
+                    ALOGW("%s: Cannot promote spatializer", __func__);
+                    return;
+                }
+                int numFrames;
+                if (!msg->findInt32(kNumFramesKey, &numFrames)) {
+                    ALOGE("%s: Cannot find num frames!", __func__);
+                    return;
+                }
+                if (numFrames > 0) {
+                    spatializer->calculateHeadPose();
+                }
+                } break;
+            case kWhatOnHeadToStagePose: {
+                sp<Spatializer> spatializer = mSpatializer.promote();
+                if (spatializer == nullptr) {
+                    ALOGW("%s: Cannot promote spatializer", __func__);
+                    return;
+                }
+                std::vector<float> headToStage(sHeadPoseKeys.size());
+                for (size_t i = 0 ; i < sHeadPoseKeys.size(); i++) {
+                    if (!msg->findFloat(sHeadPoseKeys[i], &headToStage[i])) {
+                        ALOGE("%s: Cannot find head pose key %s!", __func__, sHeadPoseKeys[i]);
+                        return;
+                    }
+                }
+                spatializer->onHeadToStagePoseMsg(headToStage);
+                } break;
+            case kWhatOnActualModeChange: {
+                sp<Spatializer> spatializer = mSpatializer.promote();
+                if (spatializer == nullptr) {
+                    ALOGW("%s: Cannot promote spatializer", __func__);
+                    return;
+                }
+                int mode;
+                if (!msg->findInt32(EngineCallbackHandler::kModeKey, &mode)) {
+                    ALOGE("%s: Cannot find actualMode!", __func__);
+                    return;
+                }
+                spatializer->onActualModeChangeMsg(static_cast<HeadTrackingMode>(mode));
+                } break;
+            default:
+                LOG_ALWAYS_FATAL("Invalid callback message %d", msg->what());
+        }
+    }
+private:
+    wp<Spatializer> mSpatializer;
+};
+
+const std::vector<const char *> Spatializer::sHeadPoseKeys = {
+    Spatializer::EngineCallbackHandler::kTranslation0Key,
+    Spatializer::EngineCallbackHandler::kTranslation1Key,
+    Spatializer::EngineCallbackHandler::kTranslation2Key,
+    Spatializer::EngineCallbackHandler::kRotation0Key,
+    Spatializer::EngineCallbackHandler::kRotation1Key,
+    Spatializer::EngineCallbackHandler::kRotation2Key,
+};
+
+// ---------------------------------------------------------------------------
+sp<Spatializer> Spatializer::create(SpatializerPolicyCallback *callback) {
+    sp<Spatializer> spatializer;
+
+    sp<EffectsFactoryHalInterface> effectsFactoryHal = EffectsFactoryHalInterface::create();
+    if (effectsFactoryHal == nullptr) {
+        ALOGW("%s failed to create effect factory interface", __func__);
+        return spatializer;
+    }
+
+    std::vector<effect_descriptor_t> descriptors;
+    status_t status =
+            effectsFactoryHal->getDescriptors(FX_IID_SPATIALIZER, &descriptors);
+    if (status != NO_ERROR) {
+        ALOGW("%s failed to get spatializer descriptor, error %d", __func__, status);
+        return spatializer;
+    }
+    ALOG_ASSERT(!descriptors.empty(),
+            "%s getDescriptors() returned no error but empty list", __func__);
+
+    //TODO: get supported spatialization modes from FX engine or descriptor
+
+    sp<EffectHalInterface> effect;
+    status = effectsFactoryHal->createEffect(&descriptors[0].uuid, AUDIO_SESSION_OUTPUT_STAGE,
+            AUDIO_IO_HANDLE_NONE, AUDIO_PORT_HANDLE_NONE, &effect);
+    ALOGI("%s FX create status %d effect %p", __func__, status, effect.get());
+
+    if (status == NO_ERROR && effect != nullptr) {
+        spatializer = new Spatializer(descriptors[0], callback);
+        if (spatializer->loadEngineConfiguration(effect) != NO_ERROR) {
+            spatializer.clear();
+        }
+    }
+
+    return spatializer;
+}
+
+Spatializer::Spatializer(effect_descriptor_t engineDescriptor, SpatializerPolicyCallback* callback)
+    : mEngineDescriptor(engineDescriptor),
+      mPolicyCallback(callback) {
+    ALOGV("%s", __func__);
+}
+
+void Spatializer::onFirstRef() {
+    mLooper = new ALooper;
+    mLooper->setName("Spatializer-looper");
+    mLooper->start(
+            /*runOnCallingThread*/false,
+            /*canCallJava*/       false,
+            PRIORITY_AUDIO);
+
+    mHandler = new EngineCallbackHandler(this);
+    mLooper->registerHandler(mHandler);
+}
+
+Spatializer::~Spatializer() {
+    ALOGV("%s", __func__);
+    if (mLooper != nullptr) {
+        mLooper->stop();
+        mLooper->unregisterHandler(mHandler->id());
+    }
+    mLooper.clear();
+    mHandler.clear();
+}
+
+status_t Spatializer::loadEngineConfiguration(sp<EffectHalInterface> effect) {
+    ALOGV("%s", __func__);
+
+    std::vector<bool> supportsHeadTracking;
+    status_t status = getHalParameter<false>(effect, SPATIALIZER_PARAM_HEADTRACKING_SUPPORTED,
+                                         &supportsHeadTracking);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    mSupportsHeadTracking = supportsHeadTracking[0];
+
+    status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_LEVELS, &mLevels);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_SPATIALIZATION_MODES,
+                                &mSpatializationModes);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    return getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_CHANNEL_MASKS,
+                                 &mChannelMasks);
+}
+
+/** Gets the channel mask, sampling rate and format set for the spatializer input. */
+audio_config_base_t Spatializer::getAudioInConfig() const {
+    std::lock_guard lock(mLock);
+    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+    // For now use highest supported channel count
+    uint32_t maxCount = 0;
+    for (auto mask : mChannelMasks) {
+        const uint32_t count = audio_channel_count_from_out_mask(mask);
+        if (count > maxCount) {
+            maxCount = count;
+            config.channel_mask = mask;
+        }
+    }
+    return config;
+}
+
+status_t Spatializer::registerCallback(
+        const sp<media::INativeSpatializerCallback>& callback) {
+    std::lock_guard lock(mLock);
+    if (callback == nullptr) {
+        return BAD_VALUE;
+    }
+
+    sp<IBinder> binder = IInterface::asBinder(callback);
+    status_t status = binder->linkToDeath(this);
+    if (status == NO_ERROR) {
+        mSpatializerCallback = callback;
+    }
+    ALOGV("%s status %d", __func__, status);
+    return status;
+}
+
+// IBinder::DeathRecipient
+void Spatializer::binderDied(__unused const wp<IBinder> &who) {
+    {
+        std::lock_guard lock(mLock);
+        mLevel = SpatializationLevel::NONE;
+        mSpatializerCallback.clear();
+    }
+    ALOGV("%s", __func__);
+    mPolicyCallback->onCheckSpatializer();
+}
+
+// ISpatializer
+Status Spatializer::getSupportedLevels(std::vector<SpatializationLevel> *levels) {
+    ALOGV("%s", __func__);
+    if (levels == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    levels->push_back(SpatializationLevel::NONE);
+    levels->insert(levels->end(), mLevels.begin(), mLevels.end());
+    return Status::ok();
+}
+
+Status Spatializer::setLevel(SpatializationLevel level) {
+    ALOGV("%s level %d", __func__, (int)level);
+    if (level != SpatializationLevel::NONE
+            && std::find(mLevels.begin(), mLevels.end(), level) == mLevels.end()) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    sp<media::INativeSpatializerCallback> callback;
+    bool levelChanged = false;
+    {
+        std::lock_guard lock(mLock);
+        levelChanged = mLevel != level;
+        mLevel = level;
+        callback = mSpatializerCallback;
+
+        if (levelChanged && mEngine != nullptr) {
+            setEffectParameter_l(SPATIALIZER_PARAM_LEVEL, std::vector<SpatializationLevel>{level});
+        }
+    }
+
+    if (levelChanged) {
+        mPolicyCallback->onCheckSpatializer();
+        if (callback != nullptr) {
+            callback->onLevelChanged(level);
+        }
+    }
+    return Status::ok();
+}
+
+Status Spatializer::getLevel(SpatializationLevel *level) {
+    if (level == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    *level = mLevel;
+    ALOGV("%s level %d", __func__, (int)*level);
+    return Status::ok();
+}
+
+Status Spatializer::isHeadTrackingSupported(bool *supports) {
+    ALOGV("%s mSupportsHeadTracking %d", __func__, mSupportsHeadTracking);
+    if (supports == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    *supports = mSupportsHeadTracking;
+    return Status::ok();
+}
+
+Status Spatializer::getSupportedHeadTrackingModes(
+        std::vector<SpatializerHeadTrackingMode>* modes) {
+    std::lock_guard lock(mLock);
+    ALOGV("%s", __func__);
+    if (modes == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+
+    modes->push_back(SpatializerHeadTrackingMode::DISABLED);
+    if (mSupportsHeadTracking) {
+        if (mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
+            modes->push_back(SpatializerHeadTrackingMode::RELATIVE_WORLD);
+            if (mScreenSensor != SpatializerPoseController::INVALID_SENSOR) {
+                modes->push_back(SpatializerHeadTrackingMode::RELATIVE_SCREEN);
+            }
+        }
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setDesiredHeadTrackingMode(SpatializerHeadTrackingMode mode) {
+    ALOGV("%s mode %d", __func__, (int)mode);
+
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    switch (mode) {
+        case SpatializerHeadTrackingMode::OTHER:
+            return binderStatusFromStatusT(BAD_VALUE);
+        case SpatializerHeadTrackingMode::DISABLED:
+            mDesiredHeadTrackingMode = HeadTrackingMode::STATIC;
+            break;
+        case SpatializerHeadTrackingMode::RELATIVE_WORLD:
+            mDesiredHeadTrackingMode = HeadTrackingMode::WORLD_RELATIVE;
+            break;
+        case SpatializerHeadTrackingMode::RELATIVE_SCREEN:
+            mDesiredHeadTrackingMode = HeadTrackingMode::SCREEN_RELATIVE;
+            break;
+    }
+
+    if (mPoseController != nullptr) {
+        mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+    }
+
+    return Status::ok();
+}
+
+Status Spatializer::getActualHeadTrackingMode(SpatializerHeadTrackingMode *mode) {
+    if (mode == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    *mode = mActualHeadTrackingMode;
+    ALOGV("%s mode %d", __func__, (int)*mode);
+    return Status::ok();
+}
+
+Status Spatializer::recenterHeadTracker() {
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    if (mPoseController != nullptr) {
+        mPoseController->recenter();
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setGlobalTransform(const std::vector<float>& screenToStage) {
+    ALOGV("%s", __func__);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::optional<Pose3f> maybePose = Pose3f::fromVector(screenToStage);
+    if (!maybePose.has_value()) {
+        ALOGW("Invalid screenToStage vector.");
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    if (mPoseController != nullptr) {
+        mPoseController->setScreenToStagePose(maybePose.value());
+    }
+    return Status::ok();
+}
+
+Status Spatializer::release() {
+    ALOGV("%s", __func__);
+    bool levelChanged = false;
+    {
+        std::lock_guard lock(mLock);
+        if (mSpatializerCallback == nullptr) {
+            return binderStatusFromStatusT(INVALID_OPERATION);
+        }
+
+        sp<IBinder> binder = IInterface::asBinder(mSpatializerCallback);
+        binder->unlinkToDeath(this);
+        mSpatializerCallback.clear();
+
+        levelChanged = mLevel != SpatializationLevel::NONE;
+        mLevel = SpatializationLevel::NONE;
+    }
+
+    if (levelChanged) {
+        mPolicyCallback->onCheckSpatializer();
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setHeadSensor(int sensorHandle) {
+    ALOGV("%s sensorHandle %d", __func__, sensorHandle);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    mHeadSensor = sensorHandle;
+    if (mPoseController != nullptr) {
+        mPoseController->setHeadSensor(mHeadSensor);
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setScreenSensor(int sensorHandle) {
+    ALOGV("%s sensorHandle %d", __func__, sensorHandle);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    mScreenSensor = sensorHandle;
+    if (mPoseController != nullptr) {
+        mPoseController->setScreenSensor(mScreenSensor);
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setDisplayOrientation(float physicalToLogicalAngle) {
+    ALOGV("%s physicalToLogicalAngle %f", __func__, physicalToLogicalAngle);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    mDisplayOrientation = physicalToLogicalAngle;
+    if (mPoseController != nullptr) {
+        mPoseController->setDisplayOrientation(mDisplayOrientation);
+    }
+    if (mEngine != nullptr) {
+        setEffectParameter_l(
+            SPATIALIZER_PARAM_DISPLAY_ORIENTATION, std::vector<float>{physicalToLogicalAngle});
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setHingeAngle(float hingeAngle) {
+    std::lock_guard lock(mLock);
+    ALOGV("%s hingeAngle %f", __func__, hingeAngle);
+    if (mEngine != nullptr) {
+        setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, std::vector<float>{hingeAngle});
+    }
+    return Status::ok();
+}
+
+Status Spatializer::getSupportedModes(std::vector<SpatializationMode> *modes) {
+    ALOGV("%s", __func__);
+    if (modes == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    *modes = mSpatializationModes;
+    return Status::ok();
+}
+
+Status Spatializer::registerHeadTrackingCallback(
+        const sp<media::ISpatializerHeadTrackingCallback>& callback) {
+    ALOGV("%s callback %p", __func__, callback.get());
+    std::lock_guard lock(mLock);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    mHeadTrackingCallback = callback;
+    return Status::ok();
+}
+
+Status Spatializer::setParameter(int key, const std::vector<unsigned char>& value) {
+    ALOGV("%s key %d", __func__, key);
+    std::lock_guard lock(mLock);
+    status_t status = INVALID_OPERATION;
+    if (mEngine != nullptr) {
+        status = setEffectParameter_l(key, value);
+    }
+    return binderStatusFromStatusT(status);
+}
+
+Status Spatializer::getParameter(int key, std::vector<unsigned char> *value) {
+    ALOGV("%s key %d value size %d", __func__, key,
+          (value != nullptr ? (int)value->size() : -1));
+    if (value == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    status_t status = INVALID_OPERATION;
+    if (mEngine != nullptr) {
+        ALOGV("%s key %d mEngine %p", __func__, key, mEngine.get());
+        status = getEffectParameter_l(key, value);
+    }
+    return binderStatusFromStatusT(status);
+}
+
+Status Spatializer::getOutput(int *output) {
+    ALOGV("%s", __func__);
+    if (output == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    *output = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_io_handle_t_int32_t(mOutput));
+    ALOGV("%s got output %d", __func__, *output);
+    return Status::ok();
+}
+
+// SpatializerPoseController::Listener
+void Spatializer::onHeadToStagePose(const Pose3f& headToStage) {
+    ALOGV("%s", __func__);
+    LOG_ALWAYS_FATAL_IF(!mSupportsHeadTracking,
+            "onHeadToStagePose() called with no head tracking support!");
+
+    auto vec = headToStage.toVector();
+    LOG_ALWAYS_FATAL_IF(vec.size() != sHeadPoseKeys.size(),
+            "%s invalid head to stage vector size %zu", __func__, vec.size());
+
+    sp<AMessage> msg =
+            new AMessage(EngineCallbackHandler::kWhatOnHeadToStagePose, mHandler);
+    for (size_t i = 0 ; i < sHeadPoseKeys.size(); i++) {
+        msg->setFloat(sHeadPoseKeys[i], vec[i]);
+    }
+    msg->post();
+}
+
+void Spatializer::onHeadToStagePoseMsg(const std::vector<float>& headToStage) {
+    ALOGV("%s", __func__);
+    sp<media::ISpatializerHeadTrackingCallback> callback;
+    {
+        std::lock_guard lock(mLock);
+        callback = mHeadTrackingCallback;
+        if (mEngine != nullptr) {
+            setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
+        }
+    }
+
+    if (callback != nullptr) {
+        callback->onHeadToSoundStagePoseUpdated(headToStage);
+    }
+}
+
+void Spatializer::onActualModeChange(HeadTrackingMode mode) {
+    ALOGV("%s(%d)", __func__, (int)mode);
+    sp<AMessage> msg =
+            new AMessage(EngineCallbackHandler::kWhatOnActualModeChange, mHandler);
+    msg->setInt32(EngineCallbackHandler::kModeKey, static_cast<int>(mode));
+    msg->post();
+}
+
+void Spatializer::onActualModeChangeMsg(HeadTrackingMode mode) {
+    ALOGV("%s(%d)", __func__, (int) mode);
+    sp<media::ISpatializerHeadTrackingCallback> callback;
+    SpatializerHeadTrackingMode spatializerMode;
+    {
+        std::lock_guard lock(mLock);
+        if (!mSupportsHeadTracking) {
+            spatializerMode = SpatializerHeadTrackingMode::DISABLED;
+        } else {
+            switch (mode) {
+                case HeadTrackingMode::STATIC:
+                    spatializerMode = SpatializerHeadTrackingMode::DISABLED;
+                    break;
+                case HeadTrackingMode::WORLD_RELATIVE:
+                    spatializerMode = SpatializerHeadTrackingMode::RELATIVE_WORLD;
+                    break;
+                case HeadTrackingMode::SCREEN_RELATIVE:
+                    spatializerMode = SpatializerHeadTrackingMode::RELATIVE_SCREEN;
+                    break;
+                default:
+                    LOG_ALWAYS_FATAL("Unknown mode: %d", mode);
+            }
+        }
+        mActualHeadTrackingMode = spatializerMode;
+        callback = mHeadTrackingCallback;
+    }
+    if (callback != nullptr) {
+        callback->onHeadTrackingModeChanged(spatializerMode);
+    }
+}
+
+status_t Spatializer::attachOutput(audio_io_handle_t output) {
+    std::shared_ptr<SpatializerPoseController> poseController;
+    bool outputChanged = false;
+    sp<media::INativeSpatializerCallback> callback;
+
+    {
+        std::lock_guard lock(mLock);
+        ALOGV("%s output %d mOutput %d", __func__, (int)output, (int)mOutput);
+        if (mOutput != AUDIO_IO_HANDLE_NONE) {
+            LOG_ALWAYS_FATAL_IF(mEngine == nullptr, "%s output set without FX engine", __func__);
+            // remove FX instance
+            mEngine->setEnabled(false);
+            mEngine.clear();
+        }
+        // create FX instance on output
+        AttributionSourceState attributionSource = AttributionSourceState();
+        mEngine = new AudioEffect(attributionSource);
+        mEngine->set(nullptr, &mEngineDescriptor.uuid, 0, Spatializer::engineCallback /* cbf */,
+                     this /* user */, AUDIO_SESSION_OUTPUT_STAGE, output, {} /* device */,
+                     false /* probe */, true /* notifyFramesProcessed */);
+        status_t status = mEngine->initCheck();
+        ALOGV("%s mEngine create status %d", __func__, (int)status);
+        if (status != NO_ERROR) {
+            return status;
+        }
+
+        setEffectParameter_l(SPATIALIZER_PARAM_LEVEL,
+                             std::vector<SpatializationLevel>{mLevel});
+        setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
+                             std::vector<SpatializerHeadTrackingMode>{mActualHeadTrackingMode});
+
+        mEngine->setEnabled(true);
+        outputChanged = mOutput != output;
+        mOutput = output;
+
+        if (mSupportsHeadTracking) {
+            mPoseController = std::make_shared<SpatializerPoseController>(
+                    static_cast<SpatializerPoseController::Listener*>(this), 10ms, 50ms);
+            LOG_ALWAYS_FATAL_IF(mPoseController == nullptr,
+                                "%s could not allocate pose controller", __func__);
+
+            mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+            mPoseController->setHeadSensor(mHeadSensor);
+            mPoseController->setScreenSensor(mScreenSensor);
+            mPoseController->setDisplayOrientation(mDisplayOrientation);
+            poseController = mPoseController;
+        }
+        callback = mSpatializerCallback;
+    }
+    if (poseController != nullptr) {
+        poseController->waitUntilCalculated();
+    }
+
+    if (outputChanged && callback != nullptr) {
+        callback->onOutputChanged(output);
+    }
+
+    return NO_ERROR;
+}
+
+audio_io_handle_t Spatializer::detachOutput() {
+    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+    sp<media::INativeSpatializerCallback> callback;
+
+    {
+        std::lock_guard lock(mLock);
+        ALOGV("%s mOutput %d", __func__, (int)mOutput);
+        if (mOutput == AUDIO_IO_HANDLE_NONE) {
+            return output;
+        }
+        // remove FX instance
+        mEngine->setEnabled(false);
+        mEngine.clear();
+        output = mOutput;
+        mOutput = AUDIO_IO_HANDLE_NONE;
+        mPoseController.reset();
+
+        callback = mSpatializerCallback;
+    }
+
+    if (callback != nullptr) {
+        callback->onOutputChanged(AUDIO_IO_HANDLE_NONE);
+    }
+    return output;
+}
+
+void Spatializer::calculateHeadPose() {
+    ALOGV("%s", __func__);
+    std::lock_guard lock(mLock);
+    if (mPoseController != nullptr) {
+        mPoseController->calculateAsync();
+    }
+}
+
+void Spatializer::engineCallback(int32_t event, void *user, void *info) {
+    if (user == nullptr) {
+        return;
+    }
+    Spatializer* const me = reinterpret_cast<Spatializer *>(user);
+    switch (event) {
+        case AudioEffect::EVENT_FRAMES_PROCESSED: {
+            int frames = info == nullptr ? 0 : *(int*)info;
+            ALOGD("%s frames processed %d for me %p", __func__, frames, me);
+            me->postFramesProcessedMsg(frames);
+        } break;
+        default:
+            ALOGD("%s event %d", __func__, event);
+            break;
+    }
+}
+
+void Spatializer::postFramesProcessedMsg(int frames) {
+    sp<AMessage> msg =
+            new AMessage(EngineCallbackHandler::kWhatOnFramesProcessed, mHandler);
+    msg->setInt32(EngineCallbackHandler::kNumFramesKey, frames);
+    msg->post();
+}
+
+} // namespace android
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
new file mode 100644
index 0000000..4d77b78
--- /dev/null
+++ b/services/audiopolicy/service/Spatializer.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SPATIALIZER_H
+#define ANDROID_MEDIA_SPATIALIZER_H
+
+#include <android/media/BnEffect.h>
+#include <android/media/BnSpatializer.h>
+#include <android/media/SpatializationLevel.h>
+#include <android/media/SpatializationMode.h>
+#include <android/media/SpatializerHeadTrackingMode.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/AudioEffect.h>
+#include <system/audio_effects/effect_spatializer.h>
+
+#include "SpatializerPoseController.h"
+
+namespace android {
+
+
+// ----------------------------------------------------------------------------
+
+/**
+ * A callback interface from the Spatializer object to its parent AudioPolicyService.
+ * This is implemented by the audio policy service hosting the Spatializer to perform
+ * actions needed when a state change inside the Spatializer requires some audio system
+ * changes that cannot be performed by the Spatializer itself, for instance opening or closing a
+ * spatializer output stream when the spatializer is enabled or disabled.
+ */
+class SpatializerPolicyCallback {
+public:
+    /** Called when a state change occurs that requires the parent audio policy service to take
+     * some action.
+     */
+    virtual void onCheckSpatializer() = 0;
+
+    virtual ~SpatializerPolicyCallback() = default;
+};
+/**
+ * The Spatializer class implements all the functionality controlling the multichannel spatializer
+ * with head tracking implementation in the native audio service: audio policy and audio flinger.
+ * It presents an AIDL interface available to the java audio service to discover the availability
+ * of the feature and options, control its state and register an active head tracking sensor.
+ * It maintains the current state of the platform spatializer and applies the stored parameters
+ * when the spatializer engine is created and enabled.
+ * Based on the requested spatializer level, it will request that the audio policy service create a
+ * specialized output mixer; the service will in turn notify the Spatializer of the output
+ * stream on which a spatializer engine should be created, configured and enabled.
+ * The spatializer also hosts the head tracking management logic. This logic receives the
+ * desired head tracking mode and selected head tracking sensor, registers a sensor event listener
+ * and derives the compounded head pose information that it forwards to the spatializer engine.
+ *
+ * Workflow:
+ * - Initialization: when the audio policy service starts, it checks if a spatializer effect
+ * engine exists and if the audio policy manager reports a dedicated spatializer output profile.
+ * If both conditions are met, a Spatializer object is created
+ * - Capabilities discovery: AudioService will call AudioSystem::canBeSpatialized() and if true,
+ * acquire an ISpatializer interface with AudioSystem::getSpatializer(). This interface
+ * will be used to query the implementation capabilities and configure the spatializer.
+ * - Enabling: when ISpatializer::setLevel() sets a level different from NONE the spatializer
+ * is considered enabled. The audio policy callback onCheckSpatializer() is called. This
+ * triggers a request to audio policy manager to open a spatialization output stream and a
+ * spatializer mixer is created in audio flinger. When an output is returned by audio policy
+ * manager, Spatializer::attachOutput() is called which creates and enables the spatializer
+ * stage engine on the specified output.
+ * - Disabling: when the spatialization level is set to NONE, the spatializer is considered
+ * disabled. The audio policy callback onCheckSpatializer() is called. This triggers a call
+ * to Spatializer::detachOutput() and the spatializer engine is released. Then a request is
+ * made to audio policy manager to release and close the spatializer output stream and the
+ * spatializer mixer thread is destroyed.
+ */
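Illustrative sketch only, not part of the header: the enabling/disabling flow described above, expressed with the methods declared below. The names policyService, clientCallback and spatializerOutput are placeholders and error handling is omitted.

// At audio policy service start-up: create the Spatializer if the effect and output profile exist.
sp<Spatializer> spatializer = Spatializer::create(policyService);

// Client attach: register the client's INativeSpatializerCallback.
spatializer->registerCallback(clientCallback);

// Enabling: setting a level different from NONE marks the spatializer enabled; the level change
// triggers SpatializerPolicyCallback::onCheckSpatializer() on policyService.
std::vector<media::SpatializationLevel> levels;
spatializer->getSupportedLevels(&levels);
spatializer->setLevel(levels.back());

// The policy callback then obtains a spatializer output from the audio policy manager and:
spatializer->attachOutput(spatializerOutput);   // creates and enables the engine on that output

// Disabling: setLevel(NONE) triggers onCheckSpatializer() again, which leads to:
audio_io_handle_t released = spatializer->detachOutput();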
+class Spatializer : public media::BnSpatializer,
+                    public IBinder::DeathRecipient,
+                    private SpatializerPoseController::Listener {
+  public:
+    static sp<Spatializer> create(SpatializerPolicyCallback *callback);
+
+           ~Spatializer() override;
+
+    /** RefBase */
+    void onFirstRef();
+
+    /** ISpatializer, see ISpatializer.aidl */
+    binder::Status release() override;
+    binder::Status getSupportedLevels(std::vector<media::SpatializationLevel>* levels) override;
+    binder::Status setLevel(media::SpatializationLevel level) override;
+    binder::Status getLevel(media::SpatializationLevel *level) override;
+    binder::Status isHeadTrackingSupported(bool *supports);
+    binder::Status getSupportedHeadTrackingModes(
+            std::vector<media::SpatializerHeadTrackingMode>* modes) override;
+    binder::Status setDesiredHeadTrackingMode(
+            media::SpatializerHeadTrackingMode mode) override;
+    binder::Status getActualHeadTrackingMode(
+            media::SpatializerHeadTrackingMode* mode) override;
+    binder::Status recenterHeadTracker() override;
+    binder::Status setGlobalTransform(const std::vector<float>& screenToStage) override;
+    binder::Status setHeadSensor(int sensorHandle) override;
+    binder::Status setScreenSensor(int sensorHandle) override;
+    binder::Status setDisplayOrientation(float physicalToLogicalAngle) override;
+    binder::Status setHingeAngle(float hingeAngle) override;
+    binder::Status getSupportedModes(std::vector<media::SpatializationMode>* modes) override;
+    binder::Status registerHeadTrackingCallback(
+        const sp<media::ISpatializerHeadTrackingCallback>& callback) override;
+    binder::Status setParameter(int key, const std::vector<unsigned char>& value) override;
+    binder::Status getParameter(int key, std::vector<unsigned char> *value) override;
+    binder::Status getOutput(int *output);
+
+    /** IBinder::DeathRecipient. Listen to the death of the INativeSpatializerCallback. */
+    virtual void binderDied(const wp<IBinder>& who);
+
+    /** Registers a INativeSpatializerCallback when a client is attached to this Spatializer
+     * by audio policy service.
+     */
+    status_t registerCallback(const sp<media::INativeSpatializerCallback>& callback);
+
+    status_t loadEngineConfiguration(sp<EffectHalInterface> effect);
+
+    /** Level getter for use by local classes. */
+    media::SpatializationLevel getLevel() const { std::lock_guard lock(mLock); return mLevel; }
+
+    /** Called by audio policy service when the special output mixer dedicated to spatialization
+     * is opened and the spatializer engine must be created.
+     */
+    status_t attachOutput(audio_io_handle_t output);
+    /** Called by audio policy service when the special output mixer dedicated to spatialization
+     * is closed and the spatializer engine must be released.
+     */
+    audio_io_handle_t detachOutput();
+    /** Returns the output stream the spatializer is attached to. */
+    audio_io_handle_t getOutput() const { std::lock_guard lock(mLock); return mOutput; }
+
+    /** Gets the channel mask, sampling rate and format set for the spatializer input. */
+    audio_config_base_t getAudioInConfig() const;
+
+    void calculateHeadPose();
+
+private:
+    Spatializer(effect_descriptor_t engineDescriptor,
+                     SpatializerPolicyCallback *callback);
+
+    static void engineCallback(int32_t event, void* user, void *info);
+
+    // From VirtualizerStageController::Listener
+    void onHeadToStagePose(const media::Pose3f& headToStage) override;
+    void onActualModeChange(media::HeadTrackingMode mode) override;
+
+    void onHeadToStagePoseMsg(const std::vector<float>& headToStage);
+    void onActualModeChangeMsg(media::HeadTrackingMode mode);
+
+    static constexpr int kMaxEffectParamValues = 10;
+    /**
+     * Get a parameter from spatializer engine by calling the effect HAL command method directly.
+     * To be used when the engine instance mEngine is not yet created in the effect framework.
+     * When MULTI_VALUES is false, the expected reply is only one value of type T.
+     * When MULTI_VALUES is true, the expected reply is made of a number (of type T) indicating
+     * how many values are returned, followed by that number of values of type T.
+     */
+    template<bool MULTI_VALUES, typename T>
+    status_t getHalParameter(sp<EffectHalInterface> effect, uint32_t type,
+                                          std::vector<T> *values) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+        uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1];
+        uint32_t reply[sizeof(effect_param_t) / sizeof(uint32_t) + 2 + kMaxEffectParamValues];
+
+        effect_param_t *p = (effect_param_t *)cmd;
+        p->psize = sizeof(uint32_t);
+        if (MULTI_VALUES) {
+            p->vsize = (kMaxEffectParamValues + 1) * sizeof(T);
+        } else {
+            p->vsize = sizeof(T);
+        }
+        *(uint32_t *)p->data = type;
+        uint32_t replySize = sizeof(effect_param_t) + p->psize + p->vsize;
+
+        status_t status = effect->command(EFFECT_CMD_GET_PARAM,
+                                          sizeof(effect_param_t) + sizeof(uint32_t), cmd,
+                                          &replySize, reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        if (p->status != NO_ERROR) {
+            return p->status;
+        }
+        if (replySize <
+                sizeof(effect_param_t) + sizeof(uint32_t) + (MULTI_VALUES ? 2 : 1) * sizeof(T)) {
+            return BAD_VALUE;
+        }
+
+        T *params = (T *)((uint8_t *)reply + sizeof(effect_param_t) + sizeof(uint32_t));
+        int numParams = 1;
+        if (MULTI_VALUES) {
+            numParams = (int)*params++;
+        }
+        if (numParams > kMaxEffectParamValues) {
+            return BAD_VALUE;
+        }
+        (*values).clear();
+        std::copy(&params[0], &params[numParams], back_inserter(*values));
+        return NO_ERROR;
+    }
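+
+    // Editorial usage sketch for getHalParameter() (not part of this change). The key
+    // kSomeSupportedLevelsKey is hypothetical; real keys come from the spatializer effect's
+    // parameter definitions. With MULTI_VALUES == true the reply is parsed as a count
+    // followed by that many values:
+    //
+    //   std::vector<media::SpatializationLevel> levels;
+    //   status_t status = getHalParameter<true>(effect, kSomeSupportedLevelsKey, &levels);
+    //   // on NO_ERROR, 'levels' holds at most kMaxEffectParamValues entries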
+
+    /**
+     * Set a parameter on the spatializer engine by calling setParameter on the mEngine
+     * AudioEffect object. More than one value of type T can be passed, depending on the
+     * parameter type, by sizing the values vector accordingly.
+     */
+    template<typename T>
+    status_t setEffectParameter_l(uint32_t type, const std::vector<T>& values) REQUIRES(mLock) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+        uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values.size()];
+        effect_param_t *p = (effect_param_t *)cmd;
+        p->psize = sizeof(uint32_t);
+        p->vsize = sizeof(T) * values.size();
+        *(uint32_t *)p->data = type;
+        memcpy((uint32_t *)p->data + 1, values.data(), sizeof(T) * values.size());
+
+        status_t status = mEngine->setParameter(p);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        if (p->status != NO_ERROR) {
+            return p->status;
+        }
+        return NO_ERROR;
+    }
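+
+    // Editorial sketch of a setEffectParameter_l() call (not part of this change); the key
+    // kSomeLevelKey is hypothetical and mLock must be held, as the REQUIRES annotation states:
+    //
+    //   std::vector<media::SpatializationLevel> level = { media::SpatializationLevel::NONE };
+    //   status_t status = setEffectParameter_l(kSomeLevelKey, level);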
+
+    /**
+     * Get a parameter from the spatializer engine by calling getParameter on the mEngine
+     * AudioEffect object. More than one value of type T can be read, depending on the
+     * parameter type, by sizing the values vector accordingly.
+     */
+    template<typename T>
+    status_t getEffectParameter_l(uint32_t type, std::vector<T> *values) REQUIRES(mLock) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+        uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values->size()];
+        effect_param_t *p = (effect_param_t *)cmd;
+        p->psize = sizeof(uint32_t);
+        p->vsize = sizeof(T) * values->size();
+        *(uint32_t *)p->data = type;
+
+        status_t status = mEngine->getParameter(p);
+
+        if (status != NO_ERROR) {
+            return status;
+        }
+        if (p->status != NO_ERROR) {
+            return p->status;
+        }
+
+        int numValues = std::min(p->vsize / sizeof(T), values->size());
+        (*values).clear();
+        T *retValues = (T *)((uint8_t *)p->data + sizeof(uint32_t));
+        std::copy(&retValues[0], &retValues[numValues], back_inserter(*values));
+
+        return NO_ERROR;
+    }
+
+    void postFramesProcessedMsg(int frames);
+
+    /** Effect engine descriptor */
+    const effect_descriptor_t mEngineDescriptor;
+    /** Callback interface to parent audio policy service */
+    SpatializerPolicyCallback* mPolicyCallback;
+
+    /** Mutex protecting internal state */
+    mutable std::mutex mLock;
+
+    /** Client AudioEffect for the engine */
+    sp<AudioEffect> mEngine GUARDED_BY(mLock);
+    /** Output stream the spatializer mixer thread is attached to */
+    audio_io_handle_t mOutput GUARDED_BY(mLock) = AUDIO_IO_HANDLE_NONE;
+
+    /** Callback interface to the client (AudioService) controlling this Spatializer */
+    sp<media::INativeSpatializerCallback> mSpatializerCallback GUARDED_BY(mLock);
+
+    /** Callback interface for head tracking */
+    sp<media::ISpatializerHeadTrackingCallback> mHeadTrackingCallback GUARDED_BY(mLock);
+
+    /** Requested spatialization level */
+    media::SpatializationLevel mLevel GUARDED_BY(mLock) = media::SpatializationLevel::NONE;
+
+    /** Control logic for head-tracking, etc. */
+    std::shared_ptr<SpatializerPoseController> mPoseController GUARDED_BY(mLock);
+
+    /** Last requested head tracking mode */
+    media::HeadTrackingMode mDesiredHeadTrackingMode GUARDED_BY(mLock)
+            = media::HeadTrackingMode::STATIC;
+
+    /** Last-reported actual head-tracking mode. */
+    media::SpatializerHeadTrackingMode mActualHeadTrackingMode GUARDED_BY(mLock)
+            = media::SpatializerHeadTrackingMode::DISABLED;
+
+    /** Selected head pose sensor */
+    int32_t mHeadSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+
+    /** Selected screen pose sensor */
+    int32_t mScreenSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+
+    /** Last display orientation received */
+    static constexpr float kDisplayOrientationInvalid = 1000;
+    float mDisplayOrientation GUARDED_BY(mLock) = kDisplayOrientationInvalid;
+
+    std::vector<media::SpatializationLevel> mLevels;
+    std::vector<media::SpatializationMode> mSpatializationModes;
+    std::vector<audio_channel_mask_t> mChannelMasks;
+    bool mSupportsHeadTracking;
+
+    // Looper thread for mEngine callbacks
+    class EngineCallbackHandler;
+
+    sp<ALooper> mLooper;
+    sp<EngineCallbackHandler> mHandler;
+
+    static const std::vector<const char *> sHeadPoseKeys;
+};
+
+
+}; // namespace android
+
+#endif // ANDROID_MEDIA_SPATIALIZER_H
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
new file mode 100644
index 0000000..6a3c9d1
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "SpatializerPoseController.h"
+
+#define LOG_TAG "SpatializerPoseController"
+//#define LOG_NDEBUG 0
+#include <sensor/Sensor.h>
+#include <utils/Log.h>
+#include <utils/SystemClock.h>
+
+namespace android {
+
+using media::createHeadTrackingProcessor;
+using media::HeadTrackingMode;
+using media::HeadTrackingProcessor;
+using media::Pose3f;
+using media::SensorPoseProvider;
+using media::Twist3f;
+
+using namespace std::chrono_literals;
+
+namespace {
+
+// This is how fast, in m/s, we allow position to shift during rate-limiting.
+constexpr float kMaxTranslationalVelocity = 2;
+
+// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
+constexpr float kMaxRotationalVelocity = 8;
+
+// This is how far into the future we predict the head pose, using linear extrapolation based on
+// twist (velocity). It should be set to a value that matches the characteristic durations of moving
+// one's head. The higher we set this, the more latency we are able to reduce, but setting this too
+// high will result in high prediction errors whenever the head accelerates (changes velocity).
+constexpr auto kPredictionDuration = 50ms;
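+// In other words, the predicted pose is roughly the latest pose advanced by
+// twist * kPredictionDuration, so a head rotating at 1 rad/s is extrapolated by
+// about 0.05 rad with the 50 ms setting above.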
+
+// After losing this many consecutive samples from either sensor, we treat the measurement as
+// stale.
+constexpr auto kMaxLostSamples = 4;
+
+// Auto-recenter kicks in after the head has been still for this long.
+constexpr auto kAutoRecenterWindowDuration = 6s;
+
+// Auto-recenter considers head not still if translated by this much (in meters, approx).
+constexpr float kAutoRecenterTranslationThreshold = 0.1f;
+
+// Auto-recenter considers head not still if rotated by this much (in radians, approx).
+constexpr float kAutoRecenterRotationThreshold = 7.0f / 180 * M_PI;
+
+// Screen is considered to be unstable (not still) if it has moved significantly within the last
+// time window of this duration.
+constexpr auto kScreenStillnessWindowDuration = 3s;
+
+// Screen is considered to have moved significantly if translated by this much (in meters, approx).
+constexpr float kScreenStillnessTranslationThreshold = 0.1f;
+
+// Screen is considered to have moved significantly if rotated by this much (in radians, approx).
+constexpr float kScreenStillnessRotationThreshold = 7.0f / 180 * M_PI;
+
+// Time units for system clock ticks. This is what the Sensor Framework timestamps represent and
+// what we use for pose filtering.
+using Ticks = std::chrono::nanoseconds;
+
+// How many ticks in a second.
+constexpr auto kTicksPerSecond = Ticks::period::den;
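+// Note: the velocity limits above are divided by kTicksPerSecond in the constructor,
+// presumably because the HeadTrackingProcessor options are expressed per tick; with
+// nanosecond ticks, 2 m/s becomes 2 / 1e9 = 2e-9 meters per tick.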
+
+}  // namespace
+
+SpatializerPoseController::SpatializerPoseController(Listener* listener,
+                                                     std::chrono::microseconds sensorPeriod,
+                                                     std::chrono::microseconds maxUpdatePeriod)
+    : mListener(listener),
+      mSensorPeriod(sensorPeriod),
+      mProcessor(createHeadTrackingProcessor(HeadTrackingProcessor::Options{
+              .maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
+              .maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
+              .freshnessTimeout = Ticks(sensorPeriod * kMaxLostSamples).count(),
+              .predictionDuration = Ticks(kPredictionDuration).count(),
+              .autoRecenterWindowDuration = Ticks(kAutoRecenterWindowDuration).count(),
+              .autoRecenterTranslationalThreshold = kAutoRecenterTranslationThreshold,
+              .autoRecenterRotationalThreshold = kAutoRecenterRotationThreshold,
+              .screenStillnessWindowDuration = Ticks(kScreenStillnessWindowDuration).count(),
+              .screenStillnessTranslationalThreshold = kScreenStillnessTranslationThreshold,
+              .screenStillnessRotationalThreshold = kScreenStillnessRotationThreshold,
+      })),
+      mPoseProvider(SensorPoseProvider::create("headtracker", this)),
+      mThread([this, maxUpdatePeriod] {
+          while (true) {
+              Pose3f headToStage;
+              std::optional<HeadTrackingMode> modeIfChanged;
+              {
+                  std::unique_lock lock(mMutex);
+                  mCondVar.wait_for(lock, maxUpdatePeriod,
+                                    [this] { return mShouldExit || mShouldCalculate; });
+                  if (mShouldExit) {
+                      ALOGV("Exiting thread");
+                      return;
+                  }
+
+                  // Calculate.
+                  std::tie(headToStage, modeIfChanged) = calculate_l();
+              }
+
+              // Invoke the callbacks outside the lock.
+              mListener->onHeadToStagePose(headToStage);
+              if (modeIfChanged) {
+                  mListener->onActualModeChange(modeIfChanged.value());
+              }
+
+              {
+                  std::lock_guard lock(mMutex);
+                  if (!mCalculated) {
+                      mCalculated = true;
+                      mCondVar.notify_all();
+                  }
+                  mShouldCalculate = false;
+              }
+          }
+      }) {}
+
+SpatializerPoseController::~SpatializerPoseController() {
+    {
+        std::unique_lock lock(mMutex);
+        mShouldExit = true;
+        mCondVar.notify_all();
+    }
+    mThread.join();
+}
+
+void SpatializerPoseController::setHeadSensor(int32_t sensor) {
+    std::lock_guard lock(mMutex);
+    // Stop current sensor, if valid and different from the other sensor.
+    if (mHeadSensor != INVALID_SENSOR && mHeadSensor != mScreenSensor) {
+        mPoseProvider->stopSensor(mHeadSensor);
+    }
+
+    if (sensor != INVALID_SENSOR) {
+        if (sensor != mScreenSensor) {
+            // Start new sensor.
+            mHeadSensor =
+                    mPoseProvider->startSensor(sensor, mSensorPeriod) ? sensor : INVALID_SENSOR;
+        } else {
+            // Sensor is already enabled.
+            mHeadSensor = mScreenSensor;
+        }
+    } else {
+        mHeadSensor = INVALID_SENSOR;
+    }
+
+    mProcessor->recenter(true, false);
+}
+
+void SpatializerPoseController::setScreenSensor(int32_t sensor) {
+    std::lock_guard lock(mMutex);
+    // Stop current sensor, if valid and different from the other sensor.
+    if (mScreenSensor != INVALID_SENSOR && mScreenSensor != mHeadSensor) {
+        mPoseProvider->stopSensor(mScreenSensor);
+    }
+
+    if (sensor != INVALID_SENSOR) {
+        if (sensor != mHeadSensor) {
+            // Start new sensor.
+            mScreenSensor =
+                    mPoseProvider->startSensor(sensor, mSensorPeriod) ? sensor : INVALID_SENSOR;
+        } else {
+            // Sensor is already enabled.
+            mScreenSensor = mHeadSensor;
+        }
+    } else {
+        mScreenSensor = INVALID_SENSOR;
+    }
+
+    mProcessor->recenter(false, true);
+}
+
+void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) {
+    std::lock_guard lock(mMutex);
+    mProcessor->setDesiredMode(mode);
+}
+
+void SpatializerPoseController::setScreenToStagePose(const Pose3f& screenToStage) {
+    std::lock_guard lock(mMutex);
+    mProcessor->setScreenToStagePose(screenToStage);
+}
+
+void SpatializerPoseController::setDisplayOrientation(float physicalToLogicalAngle) {
+    std::lock_guard lock(mMutex);
+    mProcessor->setDisplayOrientation(physicalToLogicalAngle);
+}
+
+void SpatializerPoseController::calculateAsync() {
+    std::lock_guard lock(mMutex);
+    mShouldCalculate = true;
+    mCondVar.notify_all();
+}
+
+void SpatializerPoseController::waitUntilCalculated() {
+    std::unique_lock lock(mMutex);
+    mCondVar.wait(lock, [this] { return mCalculated; });
+}
+
+std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>>
+SpatializerPoseController::calculate_l() {
+    Pose3f headToStage;
+    HeadTrackingMode mode;
+    std::optional<media::HeadTrackingMode> modeIfChanged;
+
+    mProcessor->calculate(elapsedRealtimeNano());
+    headToStage = mProcessor->getHeadToStagePose();
+    mode = mProcessor->getActualMode();
+    if (!mActualMode.has_value() || mActualMode.value() != mode) {
+        mActualMode = mode;
+        modeIfChanged = mode;
+    }
+    return std::make_tuple(headToStage, modeIfChanged);
+}
+
+void SpatializerPoseController::recenter() {
+    std::lock_guard lock(mMutex);
+    mProcessor->recenter();
+}
+
+void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
+                                       const std::optional<Twist3f>& twist, bool isNewReference) {
+    std::lock_guard lock(mMutex);
+    if (sensor == mHeadSensor) {
+        mProcessor->setWorldToHeadPose(timestamp, pose,
+                                       twist.value_or(Twist3f()) / kTicksPerSecond);
+        if (isNewReference) {
+            mProcessor->recenter(true, false);
+        }
+    }
+    if (sensor == mScreenSensor) {
+        mProcessor->setWorldToScreenPose(timestamp, pose);
+        if (isNewReference) {
+            mProcessor->recenter(false, true);
+        }
+    }
+}
+
+}  // namespace android
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
new file mode 100644
index 0000000..2b5c189
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <condition_variable>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include <media/HeadTrackingProcessor.h>
+#include <media/SensorPoseProvider.h>
+
+namespace android {
+
+/**
+ * This class encapsulates the logic for pose processing, intended for driving a spatializer effect.
+ * This includes integration with the Sensor sub-system for retrieving sensor data, doing all the
+ * necessary processing, etc.
+ *
+ * Calculations happen on a dedicated thread and published to the client via the Listener interface.
+ * A calculation may be triggered in one of two ways:
+ * - By calling calculateAsync() - calculation will be kicked off in the background.
+ * - By setting a timeout in the ctor, a calculation will be triggered after the timeout has
+ *   elapsed since the last calculateAsync() call.
+ *
+ * This class is thread-safe.
+ */
+class SpatializerPoseController : private media::SensorPoseProvider::Listener {
+  public:
+    static constexpr int32_t INVALID_SENSOR = media::SensorPoseProvider::INVALID_HANDLE;
+
+    /**
+     * Listener interface for getting pose and mode updates.
+     * Methods will always be invoked from a designated thread.
+     */
+    class Listener {
+      public:
+        virtual ~Listener() = default;
+
+        virtual void onHeadToStagePose(const media::Pose3f&) = 0;
+        virtual void onActualModeChange(media::HeadTrackingMode) = 0;
+    };
+
+    /**
+     * Ctor.
+     * sensorPeriod determines how often to receive updates from the sensors (input rate).
+     * maxUpdatePeriod determines how often to produce an output when calculateAsync() isn't
+     * invoked.
+     */
+    SpatializerPoseController(Listener* listener, std::chrono::microseconds sensorPeriod,
+                               std::chrono::microseconds maxUpdatePeriod);
+
+    /** Dtor. */
+    ~SpatializerPoseController();
+
+    /**
+     * Set the sensor that is to be used for head-tracking.
+     * INVALID_SENSOR can be used to disable head-tracking.
+     */
+    void setHeadSensor(int32_t sensor);
+
+    /**
+     * Set the sensor that is to be used for screen-tracking.
+     * INVALID_SENSOR can be used to disable screen-tracking.
+     */
+    void setScreenSensor(int32_t sensor);
+
+    /** Sets the desired head-tracking mode. */
+    void setDesiredMode(media::HeadTrackingMode mode);
+
+    /**
+     * Set the screen-to-stage pose, used in all modes.
+     */
+    void setScreenToStagePose(const media::Pose3f& screenToStage);
+
+    /**
+     * Sets the display orientation.
+     * Orientation is expressed as the angle of rotation from the physical "up" side of the
+     * screen to the logical "up" side of the content displayed on the screen. Counterclockwise
+     * angles, as viewed while facing the screen, are positive.
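+     * For example, content whose "up" is rotated 90 degrees counterclockwise from the
+     * panel's physical "up" corresponds to an angle of M_PI / 2 (illustrative value).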
+     */
+    void setDisplayOrientation(float physicalToLogicalAngle);
+
+    /**
+     * This causes the current poses for both the head and screen to be considered "center".
+     */
+    void recenter();
+
+    /**
+     * This call triggers the recalculation of the output and the invocation of the relevant
+     * callbacks. This call is async and the callbacks will be triggered shortly after.
+     */
+    void calculateAsync();
+
+    /**
+     * Blocks until calculation and invocation of the respective callbacks has happened at least
+     * once. Do not call from within callbacks.
+     */
+    void waitUntilCalculated();
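+
+    // Editorial usage sketch (not part of this change); 'listener' and 'headSensorHandle'
+    // are hypothetical, and the chrono literals assume std::chrono_literals is in scope:
+    //
+    //   SpatializerPoseController controller(listener, 10ms /*sensorPeriod*/,
+    //                                        50ms /*maxUpdatePeriod*/);
+    //   controller.setHeadSensor(headSensorHandle);
+    //   controller.setDesiredMode(media::HeadTrackingMode::STATIC);
+    //   controller.calculateAsync();       // kick off one calculation
+    //   controller.waitUntilCalculated();  // Listener callbacks have fired at least once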
+
+  private:
+    mutable std::mutex mMutex;
+    Listener* const mListener;
+    const std::chrono::microseconds mSensorPeriod;
+    // Order matters for the following two members to ensure correct destruction.
+    std::unique_ptr<media::HeadTrackingProcessor> mProcessor;
+    std::unique_ptr<media::SensorPoseProvider> mPoseProvider;
+    int32_t mHeadSensor = media::SensorPoseProvider::INVALID_HANDLE;
+    int32_t mScreenSensor = media::SensorPoseProvider::INVALID_HANDLE;
+    std::optional<media::HeadTrackingMode> mActualMode;
+    std::thread mThread;
+    std::condition_variable mCondVar;
+    bool mShouldCalculate = true;
+    bool mShouldExit = false;
+    bool mCalculated = false;
+
+    void onPose(int64_t timestamp, int32_t sensor, const media::Pose3f& pose,
+                const std::optional<media::Twist3f>& twist, bool isNewReference) override;
+
+    /**
+     * Calculates the new outputs and updates internal state. Must be called with the lock held.
+     * Returns values that should be passed to the respective callbacks.
+     */
+    std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>> calculate_l();
+};
+
+}  // namespace android
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index b296fb0..8fbe8b2 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -25,7 +25,7 @@
         "libmedia_helper",
         "libutils",
         "libxml2",
-        "libpermission",
+        "framework-permission-aidl-cpp",
         "libbinder",
     ],
 
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
index f7b0565..84b40d2 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
@@ -37,7 +37,8 @@
 
     status_t openOutput(audio_module_handle_t module,
                         audio_io_handle_t *output,
-                        audio_config_t * /*config*/,
+                        audio_config_t * /*halConfig*/,
+                        audio_config_base_t * /*mixerConfig*/,
                         const sp<DeviceDescriptorBase>& /*device*/,
                         uint32_t * /*latencyMs*/,
                         audio_output_flags_t /*flags*/) override {
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
index a5ad9b1..7343b9b 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
@@ -28,19 +28,26 @@
 class AudioPolicyManagerTestClientForHdmi : public AudioPolicyManagerTestClient {
 public:
     String8 getParameters(audio_io_handle_t /* ioHandle */, const String8&  /* keys*/ ) override {
+        AudioParameter mAudioParameters;
+        std::string formats;
+        for (const auto& f : mSupportedFormats) {
+            if (!formats.empty()) formats += AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
+            formats += audio_format_to_string(f);
+        }
+        mAudioParameters.add(
+                String8(AudioParameter::keyStreamSupportedFormats),
+                String8(formats.c_str()));
+        mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
+        mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
         return mAudioParameters.toString();
     }
 
     void addSupportedFormat(audio_format_t format) override {
-        mAudioParameters.add(
-                String8(AudioParameter::keyStreamSupportedFormats),
-                String8(audio_format_to_string(format)));
-        mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
-        mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
+        mSupportedFormats.insert(format);
     }
 
 private:
-    AudioParameter mAudioParameters;
+    std::set<audio_format_t> mSupportedFormats;
 };
 
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index 1384864..4e0735b 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -30,7 +30,8 @@
     }
     status_t openOutput(audio_module_handle_t /*module*/,
                         audio_io_handle_t* /*output*/,
-                        audio_config_t* /*config*/,
+                        audio_config_t* /*halConfig*/,
+                        audio_config_base_t* /*mixerConfig*/,
                         const sp<DeviceDescriptorBase>& /*device*/,
                         uint32_t* /*latencyMs*/,
                         audio_output_flags_t /*flags*/) override { return NO_INIT; }
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index 7f67940..ff06937 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -33,6 +33,8 @@
     using AudioPolicyManager::releaseMsdOutputPatches;
     using AudioPolicyManager::setMsdOutputPatches;
     using AudioPolicyManager::getAudioPatches;
+    using AudioPolicyManager::getDirectPlaybackSupport;
+    using AudioPolicyManager::getDirectProfilesForAttributes;
     uint32_t getAudioPortGeneration() const { return mAudioPortGeneration; }
 };
 
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index a16ab7d..9c1adc6 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -151,7 +151,7 @@
 void AudioPolicyManagerTest::SetUp() {
     mClient.reset(getClient());
     mManager.reset(new AudioPolicyTestManager(mClient.get()));
-    SetUpManagerConfig();  // Subclasses may want to customize the config.
+    ASSERT_NO_FATAL_FAILURE(SetUpManagerConfig());  // Subclasses may want to customize the config.
     ASSERT_EQ(NO_ERROR, mManager->initialize());
     ASSERT_EQ(NO_ERROR, mManager->initCheck());
 }
@@ -401,7 +401,7 @@
 
 void AudioPolicyManagerTestMsd::SetUpManagerConfig() {
     // TODO: Consider using Serializer to load part of the config from a string.
-    AudioPolicyManagerTest::SetUpManagerConfig();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTest::SetUpManagerConfig());
     AudioPolicyConfig& config = mManager->getConfig();
     mMsdOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_BUS);
     sp<AudioProfile> pcmOutputProfile = new AudioProfile(
@@ -660,6 +660,7 @@
 void AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig() {
     status_t status = deserializeAudioPolicyFile(getConfigFile().c_str(), &mManager->getConfig());
     ASSERT_EQ(NO_ERROR, status);
+    mManager->getConfig().setSource(getConfigFile());
 }
 
 TEST_F(AudioPolicyManagerTestWithConfigurationFile, InitSuccess) {
@@ -803,7 +804,8 @@
 }
 
 class AudioPolicyManagerTestForHdmi
-        : public AudioPolicyManagerTestWithConfigurationFile {
+        : public AudioPolicyManagerTestWithConfigurationFile,
+          public testing::WithParamInterface<audio_format_t> {
 protected:
     void SetUp() override;
     std::string getConfigFile() override { return sTvConfig; }
@@ -824,7 +826,8 @@
         "test_settop_box_surround_configuration.xml";
 
 void AudioPolicyManagerTestForHdmi::SetUp() {
-    AudioPolicyManagerTest::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTest::SetUp());
+    mClient->addSupportedFormat(AUDIO_FORMAT_AC3);
     mClient->addSupportedFormat(AUDIO_FORMAT_E_AC3);
     mManager->setDeviceConnectionState(
             AUDIO_DEVICE_OUT_HDMI, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
@@ -914,76 +917,90 @@
     return formats;
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
+TEST_P(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
     auto surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetSurroundFormatsReturnsManipulatedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
     status_t ret =
-            mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+            mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     auto surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_TRUE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_TRUE(surroundFormats[GetParam()]);
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         ListAudioPortsReturnManipulatedHdmiFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
-    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/));
+    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/));
     auto formats = getFormatsFromPorts();
-    ASSERT_EQ(0, formats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(0, formats.count(GetParam()));
 
-    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/));
+    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/));
     formats = getFormatsFromPorts();
-    ASSERT_EQ(1, formats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, formats.count(GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetReportedSurroundFormatsReturnsHdmiReportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
     auto surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetReportedSurroundFormatsReturnsNonManipulatedHdmiReportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
-    status_t ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    status_t ret = mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     auto surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 }
 
+TEST_P(AudioPolicyManagerTestForHdmi, GetSurroundFormatsIgnoresSupportedFormats) {
+    mManager->setForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER);
+    auto surroundFormats = getSurroundFormatsHelper();
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
+}
+
+INSTANTIATE_TEST_SUITE_P(SurroundFormatSupport, AudioPolicyManagerTestForHdmi,
+        testing::Values(AUDIO_FORMAT_AC3, AUDIO_FORMAT_E_AC3),
+        [](const ::testing::TestParamInfo<AudioPolicyManagerTestForHdmi::ParamType>& info) {
+            return audio_format_to_string(info.param);
+        });
+
 class AudioPolicyManagerTestDPNoRemoteSubmixModule : public AudioPolicyManagerTestDynamicPolicy {
 protected:
     std::string getConfigFile() override { return sPrimaryOnlyConfig; }
@@ -1035,7 +1052,7 @@
 };
 
 void AudioPolicyManagerTestDPPlaybackReRouting::SetUp() {
-    AudioPolicyManagerTestDynamicPolicy::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestDynamicPolicy::SetUp());
 
     mTracker.reset(new RecordingActivityTracker());
 
@@ -1221,7 +1238,7 @@
 };
 
 void AudioPolicyManagerTestDPMixRecordInjection::SetUp() {
-    AudioPolicyManagerTestDynamicPolicy::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestDynamicPolicy::SetUp());
 
     mTracker.reset(new RecordingActivityTracker());
 
@@ -1375,7 +1392,8 @@
     if (type == AUDIO_DEVICE_OUT_HDMI) {
         // Set device connection state failed due to no device descriptor found
         // For HDMI case, it is easier to simulate device descriptor not found error
-        // by using a undeclared encoded format.
+        // by using an encoded format which isn't listed in the 'encodedFormats'
+        // attribute for this devicePort.
         ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
                 type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                 address.c_str(), name.c_str(), AUDIO_FORMAT_MAT_2_1));
@@ -1519,7 +1537,7 @@
 };
 
 void AudioPolicyManagerDynamicHwModulesTest::SetUpManagerConfig() {
-    AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig());
     // Only allow successful opening of "primary" hw module during APM initialization.
     mClient->swapAllowedModuleNames({"primary"});
 }
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
index 87f0ab9..41ed70c 100644
--- a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
+++ b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
@@ -50,7 +50,8 @@
                 </devicePort>
                 <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
                 </devicePort>
-                <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink">
+                <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink"
+                            encodedFormats="AUDIO_FORMAT_AC3">
                 </devicePort>
                 <devicePort tagName="Hdmi-In Mic" type="AUDIO_DEVICE_IN_HDMI" role="source">
                 </devicePort>
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 26562e0..8428881 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -47,8 +47,8 @@
         "common/CameraDeviceBase.cpp",
         "common/CameraOfflineSessionBase.cpp",
         "common/CameraProviderManager.cpp",
-        "common/DepthPhotoProcessor.cpp",
         "common/FrameProcessorBase.cpp",
+        "common/hidl/HidlProviderInfo.cpp",
         "api1/Camera2Client.cpp",
         "api1/client2/Parameters.cpp",
         "api1/client2/FrameProcessor.cpp",
@@ -76,14 +76,14 @@
         "device3/StatusTracker.cpp",
         "device3/Camera3BufferManager.cpp",
         "device3/Camera3StreamSplitter.cpp",
-        "device3/CoordinateMapper.cpp",
-        "device3/DistortionMapper.cpp",
-        "device3/ZoomRatioMapper.cpp",
-        "device3/RotateAndCropMapper.cpp",
         "device3/Camera3OutputStreamInterface.cpp",
         "device3/Camera3OutputUtils.cpp",
         "device3/Camera3DeviceInjectionMethods.cpp",
         "device3/UHRCropAndMeteringRegionMapper.cpp",
+        "device3/PreviewFrameScheduler.cpp",
+        "device3/hidl/HidlCamera3Device.cpp",
+        "device3/hidl/HidlCamera3OfflineSession.cpp",
+        "device3/hidl/HidlCamera3OutputUtils.cpp",
         "gui/RingBufferConsumer.cpp",
         "hidl/AidlCameraDeviceCallbacks.cpp",
         "hidl/AidlCameraServiceListener.cpp",
@@ -94,7 +94,6 @@
         "utils/CameraThreadState.cpp",
         "utils/CameraTraces.cpp",
         "utils/AutoConditionLock.cpp",
-        "utils/ExifUtils.cpp",
         "utils/SessionConfigurationUtils.cpp",
         "utils/SessionStatsBuilder.cpp",
         "utils/TagMonitor.cpp",
@@ -107,6 +106,7 @@
     ],
 
     shared_libs: [
+        "libandroid",
         "libbase",
         "libdl",
         "libexif",
@@ -154,6 +154,7 @@
         "android.hardware.camera.device@3.5",
         "android.hardware.camera.device@3.6",
         "android.hardware.camera.device@3.7",
+        "android.hardware.camera.device@3.8",
         "media_permission-aidl-cpp",
     ],
 
@@ -161,6 +162,7 @@
         "libprocessinfoservice_aidl",
         "libbinderthreadstateutils",
         "media_permission-aidl-cpp",
+        "libcameraservice_device_independent",
     ],
 
     export_shared_lib_headers: [
@@ -190,3 +192,49 @@
     ],
 
 }
+
+cc_library_static {
+    name: "libcameraservice_device_independent",
+    host_supported: true,
+
+    // Camera service source
+
+    srcs: [
+        "common/DepthPhotoProcessor.cpp",
+        "device3/CoordinateMapper.cpp",
+        "device3/DistortionMapper.cpp",
+        "device3/RotateAndCropMapper.cpp",
+        "device3/ZoomRatioMapper.cpp",
+        "utils/ExifUtils.cpp",
+        "utils/SessionConfigurationUtilsHost.cpp",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libbinder",
+        "libcamera_metadata",
+        "libdynamic_depth",
+        "libexif",
+        "libjpeg",
+        "liblog",
+        "libutils",
+        "libxml2",
+    ],
+
+    include_dirs: [
+        "external/dynamic_depth/includes",
+        "external/dynamic_depth/internal",
+        "frameworks/av/camera/include",
+        "frameworks/av/camera/include/camera",
+    ],
+
+    export_include_dirs: ["."],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+        "-Wno-ignored-qualifiers",
+    ],
+
+}
\ No newline at end of file
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
deleted file mode 100644
index 4cfecfd..0000000
--- a/services/camera/libcameraservice/Android.mk
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2010 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-# Build tests
-
-include $(LOCAL_PATH)/tests/Android.mk
-
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index ccdd9e5..ffd38be 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -32,8 +32,6 @@
 
 namespace android {
 
-using hardware::camera::common::V1_0::TorchModeStatus;
-
 /////////////////////////////////////////////////////////////////////
 // CameraFlashlight implementation begins
 // used by camera service to control flashlight.
@@ -119,6 +117,59 @@
     return res;
 }
 
+status_t CameraFlashlight::turnOnTorchWithStrengthLevel(const String8& cameraId,
+            int32_t torchStrength) {
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.",
+               __FUNCTION__);
+        return NO_INIT;
+    }
+
+    ALOGV("%s: set torch strength of camera %s to %d", __FUNCTION__,
+            cameraId.string(), torchStrength);
+    status_t res = OK;
+    Mutex::Autolock l(mLock);
+
+    if (mOpenedCameraIds.indexOf(cameraId) != NAME_NOT_FOUND) {
+        ALOGE("%s: Camera device %s is in use, cannot be turned ON.",
+                __FUNCTION__, cameraId.string());
+        return -EBUSY;
+    }
+
+    if (mFlashControl == NULL) {
+        res = createFlashlightControl(cameraId);
+        if (res) {
+            return res;
+        }
+    }
+
+    res = mFlashControl->turnOnTorchWithStrengthLevel(cameraId, torchStrength);
+    return res;
+}
+
+
+status_t CameraFlashlight::getTorchStrengthLevel(const String8& cameraId,
+            int32_t* torchStrength) {
+    status_t res = OK;
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.",
+            __FUNCTION__);
+        return NO_INIT;
+    }
+
+    Mutex::Autolock l(mLock);
+
+    if (mFlashControl == NULL) {
+        res = createFlashlightControl(cameraId);
+        if (res) {
+            return res;
+        }
+    }
+
+    res = mFlashControl->getTorchStrengthLevel(cameraId, torchStrength);
+    return res;
+}
+
 status_t CameraFlashlight::findFlashUnits() {
     Mutex::Autolock l(mLock);
     status_t res;
@@ -306,6 +357,22 @@
 
     return mProviderManager->setTorchMode(cameraId.string(), enabled);
 }
+
+status_t ProviderFlashControl::turnOnTorchWithStrengthLevel(const String8& cameraId,
+            int32_t torchStrength) {
+    ALOGV("%s: change torch strength level of camera %s to %d", __FUNCTION__,
+            cameraId.string(), torchStrength);
+
+    return mProviderManager->turnOnTorchWithStrengthLevel(cameraId.string(), torchStrength);
+}
+
+status_t ProviderFlashControl::getTorchStrengthLevel(const String8& cameraId,
+            int32_t* torchStrength) {
+    ALOGV("%s: get torch strength level of camera %s", __FUNCTION__,
+            cameraId.string());
+
+    return mProviderManager->getTorchStrengthLevel(cameraId.string(), torchStrength);
+}
 // ProviderFlashControl implementation ends
 
 }
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index b97fa5f..1703ddc 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -44,6 +44,14 @@
         // set the torch mode to on or off.
         virtual status_t setTorchMode(const String8& cameraId,
                     bool enabled) = 0;
+
+        // Change the brightness level of the torch. If the torch is OFF and
+        // torchStrength >= 1, then the torch will also be turned ON.
+        virtual status_t turnOnTorchWithStrengthLevel(const String8& cameraId,
+                    int32_t torchStrength) = 0;
+
+        // Returns the torch strength level.
+        virtual status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength) = 0;
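+
+        // Editorial sketch of how these two calls compose (not part of this change);
+        // 'control' is any FlashControlBase implementation and kStep is a hypothetical
+        // increment:
+        //
+        //   int32_t level = 0;
+        //   if (control->getTorchStrengthLevel(cameraId, &level) == OK) {
+        //       control->turnOnTorchWithStrengthLevel(cameraId, level + kStep);
+        //   }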
 };
 
 /**
@@ -67,6 +75,12 @@
         // set the torch mode to on or off.
         status_t setTorchMode(const String8& cameraId, bool enabled);
 
+        // Change the torch strength level of the flash unit in torch mode.
+        status_t turnOnTorchWithStrengthLevel(const String8& cameraId, int32_t torchStrength);
+
+        // Get the torch strength level
+        status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength);
+
         // Notify CameraFlashlight that camera service is going to open a camera
         // device. CameraFlashlight will free the resources that may cause the
         // camera open to fail. Camera service must call this function before
@@ -115,6 +129,8 @@
         // FlashControlBase
         status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
         status_t setTorchMode(const String8& cameraId, bool enabled);
+        status_t turnOnTorchWithStrengthLevel(const String8& cameraId, int32_t torchStrength);
+        status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength);
 
     private:
         sp<CameraProviderManager> mProviderManager;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index d0d3a9d..5740038 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -28,6 +28,7 @@
 #include <sys/types.h>
 #include <inttypes.h>
 #include <pthread.h>
+#include <poll.h>
 
 #include <android/hardware/ICamera.h>
 #include <android/hardware/ICameraClient.h>
@@ -85,13 +86,11 @@
 
 using base::StringPrintf;
 using binder::Status;
-using camera3::SessionConfigurationUtils;
+using namespace camera3;
 using frameworks::cameraservice::service::V2_0::implementation::HidlCameraService;
 using hardware::ICamera;
 using hardware::ICameraClient;
 using hardware::ICameraServiceListener;
-using hardware::camera::common::V1_0::CameraDeviceStatus;
-using hardware::camera::common::V1_0::TorchModeStatus;
 using hardware::camera2::ICameraInjectionCallback;
 using hardware::camera2::ICameraInjectionSession;
 using hardware::camera2::utils::CameraIdAndSessionConfiguration;
@@ -137,6 +136,7 @@
 static constexpr int32_t kVendorClientState = ActivityManager::PROCESS_STATE_PERSISTENT_UI;
 
 const String8 CameraService::kOfflineDevice("offline-");
+const String16 CameraService::kWatchAllClientsFlag("all");
 
 // Set to keep track of logged service error events.
 static std::set<String8> sServiceErrorEventSet;
@@ -361,18 +361,20 @@
 
 void CameraService::addStates(const String8 id) {
     std::string cameraId(id.c_str());
-    hardware::camera::common::V1_0::CameraResourceCost cost;
+    CameraResourceCost cost;
     status_t res = mCameraProviderManager->getResourceCost(cameraId, &cost);
-    SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
     if (res != OK) {
         ALOGE("Failed to query device resource cost: %s (%d)", strerror(-res), res);
         return;
     }
+    SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
     res = mCameraProviderManager->getSystemCameraKind(cameraId, &deviceKind);
     if (res != OK) {
         ALOGE("Failed to query device kind: %s (%d)", strerror(-res), res);
         return;
     }
+    std::vector<std::string> physicalCameraIds;
+    mCameraProviderManager->isLogicalCamera(cameraId, &physicalCameraIds);
     std::set<String8> conflicting;
     for (size_t i = 0; i < cost.conflictingDevices.size(); i++) {
         conflicting.emplace(String8(cost.conflictingDevices[i].c_str()));
@@ -381,7 +383,7 @@
     {
         Mutex::Autolock lock(mCameraStatesLock);
         mCameraStates.emplace(id, std::make_shared<CameraState>(id, cost.resourceCost,
-                                                                conflicting, deviceKind));
+                conflicting, deviceKind, physicalCameraIds));
     }
 
     if (mFlashlight->hasFlashUnit(id)) {
@@ -560,6 +562,22 @@
     onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
 }
 
+
+void CameraService::onTorchStatusChanged(const String8& cameraId,
+        TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
+    Mutex::Autolock al(mTorchStatusMutex);
+    onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
+}
+
+void CameraService::broadcastTorchStrengthLevel(const String8& cameraId,
+        int32_t newStrengthLevel) {
+    Mutex::Autolock lock(mStatusListenerLock);
+    for (auto& i : mListenerList) {
+        i->getListener()->onTorchStrengthLevelChanged(String16{cameraId},
+                newStrengthLevel);
+    }
+}
+
 void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
         TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
     ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
@@ -613,8 +631,10 @@
     broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
 }
 
-static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
-    return checkPermission(sSystemCameraPermission, callingPid, callingUid) &&
+static bool hasPermissionsForSystemCamera(int callingPid, int callingUid,
+        bool logPermissionFailure = false) {
+    return checkPermission(sSystemCameraPermission, callingPid, callingUid,
+            logPermissionFailure) &&
             checkPermission(sCameraPermission, callingPid, callingUid);
 }
 
@@ -693,8 +713,8 @@
     const std::vector<std::string> *deviceIds = &mNormalDeviceIdsWithoutSystemCamera;
     auto callingPid = CameraThreadState::getCallingPid();
     auto callingUid = CameraThreadState::getCallingUid();
-    if (checkPermission(sSystemCameraPermission, callingPid, callingUid) ||
-            getpid() == callingPid) {
+    if (checkPermission(sSystemCameraPermission, callingPid, callingUid,
+            /*logPermissionFailure*/false) || getpid() == callingPid) {
         deviceIds = &mNormalDeviceIds;
     }
     if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(deviceIds->size())) {
@@ -793,6 +813,31 @@
     return ret;
 }
 
+Status CameraService::getTorchStrengthLevel(const String16& cameraId,
+        int32_t* torchStrength) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mServiceLock);
+    if (!mInitialized) {
+        ALOGE("%s: Camera HAL couldn't be initialized.", __FUNCTION__);
+        return STATUS_ERROR(ERROR_DISCONNECTED, "Camera HAL couldn't be initialized.");
+    }
+
+    if(torchStrength == NULL) {
+        ALOGE("%s: strength level must not be null.", __FUNCTION__);
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Strength level should not be null.");
+    }
+
+    status_t res = mCameraProviderManager->getTorchStrengthLevel(String8(cameraId).string(),
+        torchStrength);
+    if (res != OK) {
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve torch "
+            "strength level for device %s: %s (%d)", String8(cameraId).string(),
+            strerror(-res), res);
+    }
+    ALOGI("%s: Torch strength level is: %d", __FUNCTION__, *torchStrength);
+    return Status::ok();
+}
+
 String8 CameraService::getFormattedCurrentTime() {
     time_t now = time(nullptr);
     char formattedTime[64];
@@ -900,6 +945,7 @@
         case CAMERA_DEVICE_API_VERSION_3_5:
         case CAMERA_DEVICE_API_VERSION_3_6:
         case CAMERA_DEVICE_API_VERSION_3_7:
+        case CAMERA_DEVICE_API_VERSION_3_8:
             if (effectiveApiLevel == API_1) { // Camera1 API route
                 sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
                 *client = new Camera2Client(cameraService, tmp, packageName, featureId,
@@ -1334,7 +1380,7 @@
                 auto clientSp = current->getValue();
                 if (clientSp.get() != nullptr) { // should never be needed
                     if (!clientSp->canCastToApiClient(effectiveApiLevel)) {
-                        ALOGW("CameraService connect called from same client, but with a different"
+                        ALOGW("CameraService connect called with a different"
                                 " API level, evicting prior client...");
                     } else if (clientSp->getRemote() == remoteCallback) {
                         ALOGI("CameraService::connect X (PID %d) (second call from same"
@@ -1597,7 +1643,7 @@
     //     same behavior for system camera devices.
     if (getCurrentServingCall() != BinderCallType::HWBINDER &&
             systemCameraKind == SystemCameraKind::SYSTEM_ONLY_CAMERA &&
-            !hasPermissionsForSystemCamera(cPid, cUid)) {
+            !hasPermissionsForSystemCamera(cPid, cUid, /*logPermissionFailure*/true)) {
         ALOGW("Rejecting access to system only camera %s, inadequete permissions",
                 cameraId.c_str());
         return true;
@@ -1786,7 +1832,8 @@
         LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
                 __FUNCTION__);
 
-        err = client->initialize(mCameraProviderManager, mMonitorTags);
+        String8 monitorTags = isClientWatched(client.get()) ? mMonitorTags : String8("");
+        err = client->initialize(mCameraProviderManager, monitorTags);
         if (err != OK) {
             ALOGE("%s: Could not initialize client from HAL.", __FUNCTION__);
             // Errors could be from the HAL module open call or from AppOpsManager
@@ -1834,9 +1881,11 @@
         // Set rotate-and-crop override behavior
         if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) {
             client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
-        } else if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(clientPackageName,
-                    orientation, facing)) {
-            client->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
+        } else if (effectiveApiLevel == API_2) {
+            client->setRotateAndCropOverride(
+                    CameraServiceProxyWrapper::getRotateAndCropOverride(
+                            clientPackageName, facing, multiuser_get_user_id(clientUid)));
         }
 
         // Set camera muting behavior
@@ -1888,6 +1937,33 @@
     CameraServiceProxyWrapper::logOpen(cameraId, facing, clientPackageName,
             effectiveApiLevel, isNdk, openLatencyMs);
 
+    {
+        Mutex::Autolock lock(mInjectionParametersLock);
+        if (cameraId == mInjectionInternalCamId && mInjectionInitPending) {
+            mInjectionInitPending = false;
+            status_t res = NO_ERROR;
+            auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
+            if (clientDescriptor != nullptr) {
+                sp<BasicClient> clientSp = clientDescriptor->getValue();
+                res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+                if(res != OK) {
+                    return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                            "No camera device with ID \"%s\" currently available",
+                            mInjectionExternalCamId.string());
+                }
+                res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+                if (res != OK) {
+                    mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+                }
+            } else {
+                ALOGE("%s: Internal camera ID = %s 's client does not exist!",
+                        __FUNCTION__, mInjectionInternalCamId.string());
+                res = NO_INIT;
+                mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+            }
+        }
+    }
+
     return ret;
 }
 
@@ -1931,7 +2007,8 @@
             return BAD_VALUE;
         }
 
-        auto err = offlineClient->initialize(mCameraProviderManager, mMonitorTags);
+        String8 monitorTags = isClientWatched(offlineClient.get()) ? mMonitorTags : String8("");
+        auto err = offlineClient->initialize(mCameraProviderManager, monitorTags);
         if (err != OK) {
             ALOGE("%s: Could not initialize offline client.", __FUNCTION__);
             return err;
@@ -1963,6 +2040,132 @@
     return OK;
 }
 
+Status CameraService::turnOnTorchWithStrengthLevel(const String16& cameraId, int32_t torchStrength,
+        const sp<IBinder>& clientBinder) {
+    Mutex::Autolock lock(mServiceLock);
+
+    ATRACE_CALL();
+    if (clientBinder == nullptr) {
+        ALOGE("%s: torch client binder is NULL", __FUNCTION__);
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+                "Torch client binder in null.");
+    }
+
+    String8 id = String8(cameraId.string());
+    int uid = CameraThreadState::getCallingUid();
+
+    if (shouldRejectSystemCameraConnection(id)) {
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Unable to change the strength level"
+                "for system only device %s: ", id.string());
+    }
+
+    // verify id is valid
+    auto state = getCameraState(id);
+    if (state == nullptr) {
+        ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+            "Camera ID \"%s\" is a not valid camera ID", id.string());
+    }
+
+    StatusInternal cameraStatus = state->getStatus();
+    if (cameraStatus != StatusInternal::NOT_AVAILABLE &&
+            cameraStatus != StatusInternal::PRESENT) {
+        ALOGE("%s: camera id is invalid %s, status %d", __FUNCTION__, id.string(),
+            (int)cameraStatus);
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                "Camera ID \"%s\" is a not valid camera ID", id.string());
+    }
+
+    {
+        Mutex::Autolock al(mTorchStatusMutex);
+        TorchModeStatus status;
+        status_t err = getTorchStatusLocked(id, &status);
+        if (err != OK) {
+            if (err == NAME_NOT_FOUND) {
+                return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                        "Camera \"%s\" does not have a flash unit", id.string());
+            }
+            ALOGE("%s: getting current torch status failed for camera %s",
+                    __FUNCTION__, id.string());
+            return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                    "Error changing torch strength level for camera \"%s\": %s (%d)",
+                    id.string(), strerror(-err), err);
+        }
+
+        if (status == TorchModeStatus::NOT_AVAILABLE) {
+            if (cameraStatus == StatusInternal::NOT_AVAILABLE) {
+                ALOGE("%s: torch mode of camera %s is not available because "
+                        "camera is in use.", __FUNCTION__, id.string());
+                return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+                        "Torch for camera \"%s\" is not available due to an existing camera user",
+                        id.string());
+            } else {
+                ALOGE("%s: torch mode of camera %s is not available due to "
+                       "insufficient resources", __FUNCTION__, id.string());
+                return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                        "Torch for camera \"%s\" is not available due to insufficient resources",
+                        id.string());
+            }
+        }
+    }
+
+    {
+        Mutex::Autolock al(mTorchUidMapMutex);
+        updateTorchUidMapLocked(cameraId, uid);
+    }
+    // Check if the current torch strength level is the same as the new one.
+    bool shouldSkipTorchStrengthUpdates = mCameraProviderManager->shouldSkipTorchStrengthUpdate(
+            id.string(), torchStrength);
+
+    status_t err = mFlashlight->turnOnTorchWithStrengthLevel(id, torchStrength);
+
+    if (err != OK) {
+        int32_t errorCode;
+        String8 msg;
+        switch (err) {
+            case -ENOSYS:
+                msg = String8::format("Camera \"%s\" has no flashlight.",
+                    id.string());
+                errorCode = ERROR_ILLEGAL_ARGUMENT;
+                break;
+            case -EBUSY:
+                msg = String8::format("Camera \"%s\" is in use",
+                    id.string());
+                errorCode = ERROR_CAMERA_IN_USE;
+                break;
+            default:
+                msg = String8::format("Changing torch strength level failed.");
+                errorCode = ERROR_INVALID_OPERATION;
+
+        }
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(errorCode, msg.string());
+    }
+
+    {
+        // update the link to client's death
+        // Store the last client that turns on each camera's torch mode.
+        Mutex::Autolock al(mTorchClientMapMutex);
+        ssize_t index = mTorchClientMap.indexOfKey(id);
+        if (index == NAME_NOT_FOUND) {
+            mTorchClientMap.add(id, clientBinder);
+        } else {
+            mTorchClientMap.valueAt(index)->unlinkToDeath(this);
+            mTorchClientMap.replaceValueAt(index, clientBinder);
+        }
+        clientBinder->linkToDeath(this);
+    }
+
+    int clientPid = CameraThreadState::getCallingPid();
+    ALOGI("%s: Torch strength for camera id %s changed to %d for client PID %d",
+            __FUNCTION__, id.string(), torchStrength, clientPid);
+    if (!shouldSkipTorchStrengthUpdates) {
+        broadcastTorchStrengthLevel(id, torchStrength);
+    }
+    return Status::ok();
+}
+
 Status CameraService::setTorchMode(const String16& cameraId, bool enabled,
         const sp<IBinder>& clientBinder) {
     Mutex::Autolock lock(mServiceLock);
@@ -2034,13 +2237,7 @@
         // Update UID map - this is used in the torch status changed callbacks, so must be done
         // before setTorchMode
         Mutex::Autolock al(mTorchUidMapMutex);
-        if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
-            mTorchUidMap[id].first = uid;
-            mTorchUidMap[id].second = uid;
-        } else {
-            // Set the pending UID
-            mTorchUidMap[id].first = uid;
-        }
+        updateTorchUidMapLocked(cameraId, uid);
     }
 
     status_t err = mFlashlight->setTorchMode(id, enabled);
@@ -2054,6 +2251,11 @@
                     id.string());
                 errorCode = ERROR_ILLEGAL_ARGUMENT;
                 break;
+            case -EBUSY:
+                msg = String8::format("Camera \"%s\" is in use",
+                    id.string());
+                errorCode = ERROR_CAMERA_IN_USE;
+                break;
             default:
                 msg = String8::format(
                     "Setting torch mode of camera \"%s\" to %d failed: %s (%d)",
@@ -2090,6 +2292,17 @@
     return Status::ok();
 }
 
+void CameraService::updateTorchUidMapLocked(const String16& cameraId, int uid) {
+    String8 id = String8(cameraId.string());
+    if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
+        mTorchUidMap[id].first = uid;
+        mTorchUidMap[id].second = uid;
+    } else {
+        // Set the pending UID
+        mTorchUidMap[id].first = uid;
+    }
+}
+
 Status CameraService::notifySystemEvent(int32_t eventId,
         const std::vector<int32_t>& args) {
     const int pid = CameraThreadState::getCallingPid();
@@ -2120,6 +2333,13 @@
             doUserSwitch(/*newUserIds*/ args);
             break;
         }
+        case ICameraService::EVENT_USB_DEVICE_ATTACHED:
+        case ICameraService::EVENT_USB_DEVICE_DETACHED: {
+            // Notify CameraProviderManager for lazy HALs
+            mCameraProviderManager->notifyUsbDeviceEvent(eventId,
+                                                        std::to_string(args[0]));
+            break;
+        }
         case ICameraService::EVENT_NONE:
         default: {
             ALOGW("%s: Received invalid system event from system_server: %d", __FUNCTION__,
@@ -2162,24 +2382,7 @@
 
     ATRACE_CALL();
 
-    using hardware::camera::provider::V2_5::DeviceState;
-    hardware::hidl_bitfield<DeviceState> newDeviceState{};
-    if (newState & ICameraService::DEVICE_STATE_BACK_COVERED) {
-        newDeviceState |= DeviceState::BACK_COVERED;
-    }
-    if (newState & ICameraService::DEVICE_STATE_FRONT_COVERED) {
-        newDeviceState |= DeviceState::FRONT_COVERED;
-    }
-    if (newState & ICameraService::DEVICE_STATE_FOLDED) {
-        newDeviceState |= DeviceState::FOLDED;
-    }
-    // Only map vendor bits directly
-    uint64_t vendorBits = static_cast<uint64_t>(newState) & 0xFFFFFFFF00000000l;
-    newDeviceState |= vendorBits;
-
-    ALOGV("%s: New device state 0x%" PRIx64, __FUNCTION__, newDeviceState);
-    Mutex::Autolock l(mServiceLock);
-    mCameraProviderManager->notifyDeviceStateChange(newDeviceState);
+    mCameraProviderManager->notifyDeviceStateChange(newState);
 
     return Status::ok();
 }
@@ -2212,14 +2415,12 @@
     for (auto& current : clients) {
         if (current != nullptr) {
             const auto basicClient = current->getValue();
-            if (basicClient.get() != nullptr) {
-                if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
-                            basicClient->getPackageName(), basicClient->getCameraOrientation(),
-                            basicClient->getCameraFacing())) {
-                    basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
-                } else {
-                    basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE);
-                }
+            if (basicClient.get() != nullptr && basicClient->canCastToApiClient(API_2)) {
+                basicClient->setRotateAndCropOverride(
+                    CameraServiceProxyWrapper::getRotateAndCropOverride(
+                        basicClient->getPackageName(),
+                        basicClient->getCameraFacing(),
+                        multiuser_get_user_id(basicClient->getClientUid())));
             }
         }
     }
@@ -2336,7 +2537,7 @@
     auto clientUid = CameraThreadState::getCallingUid();
     auto clientPid = CameraThreadState::getCallingPid();
     bool openCloseCallbackAllowed = checkPermission(sCameraOpenCloseListenerPermission,
-            clientPid, clientUid);
+            clientPid, clientUid, /*logPermissionFailure*/false);
 
     Mutex::Autolock lock(mServiceLock);
 
@@ -2373,7 +2574,8 @@
         Mutex::Autolock lock(mCameraStatesLock);
         for (auto& i : mCameraStates) {
             cameraStatuses->emplace_back(i.first,
-                    mapToInterface(i.second->getStatus()), i.second->getUnavailablePhysicalIds());
+                    mapToInterface(i.second->getStatus()), i.second->getUnavailablePhysicalIds(),
+                    openCloseCallbackAllowed ? i.second->getClientPackage() : String8::empty());
         }
     }
     // Remove the camera statuses that should be hidden from the client, we do
@@ -2511,6 +2713,7 @@
         case CAMERA_DEVICE_API_VERSION_3_5:
         case CAMERA_DEVICE_API_VERSION_3_6:
         case CAMERA_DEVICE_API_VERSION_3_7:
+        case CAMERA_DEVICE_API_VERSION_3_8:
             ALOGV("%s: Camera id %s uses HAL3.2 or newer, supports api1/api2 directly",
                     __FUNCTION__, id.string());
             *isSupported = true;
@@ -2548,7 +2751,7 @@
         const String16& externalCamId,
         const sp<ICameraInjectionCallback>& callback,
         /*out*/
-        sp<hardware::camera2::ICameraInjectionSession>* cameraInjectionSession) {
+        sp<ICameraInjectionSession>* cameraInjectionSession) {
     ATRACE_CALL();
 
     if (!checkCallingPermission(sCameraInjectExternalCameraPermission)) {
@@ -2565,18 +2768,36 @@
         __FUNCTION__, String8(packageName).string(),
         String8(internalCamId).string(), String8(externalCamId).string());
 
-    binder::Status ret = binder::Status::ok();
-    // TODO: Implement the injection camera function.
-    // ret = internalInjectCamera(...);
-    // if(!ret.isOk()) {
-    //     mInjectionStatusListener->notifyInjectionError(...);
-    //     return ret;
-    // }
+    {
+        Mutex::Autolock lock(mInjectionParametersLock);
+        mInjectionInternalCamId = String8(internalCamId);
+        mInjectionExternalCamId = String8(externalCamId);
+        mInjectionStatusListener->addListener(callback);
+        *cameraInjectionSession = new CameraInjectionSession(this);
+        status_t res = NO_ERROR;
+        auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
+        // If the client already exists, we can directly connect to the camera device through the
+        // client's injectCamera(); otherwise we need to wait until the client is established
+        // (i.e., until connectHelper() runs) before injecting the external camera into the device.
+        if (clientDescriptor != nullptr) {
+            mInjectionInitPending = false;
+            sp<BasicClient> clientSp = clientDescriptor->getValue();
+            res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+            if (res != OK) {
+                return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                        "No camera device with ID \"%s\" currently available",
+                        mInjectionExternalCamId.string());
+            }
+            res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+            if (res != OK) {
+                mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+            }
+        } else {
+            mInjectionInitPending = true;
+        }
+    }
 
-    mInjectionStatusListener->addListener(callback);
-    *cameraInjectionSession = new CameraInjectionSession(this);
-
-    return ret;
+    return binder::Status::ok();
 }
 
 void CameraService::removeByClient(const BasicClient* client) {
@@ -2584,6 +2805,7 @@
     for (auto& i : mActiveClientManager.getAll()) {
         auto clientSp = i->getValue();
         if (clientSp.get() == client) {
+            cacheClientTagDumpIfNeeded(client->mCameraIdStr, clientSp.get());
             mActiveClientManager.remove(i);
         }
     }
@@ -2660,7 +2882,11 @@
         return sp<BasicClient>{nullptr};
     }
 
-    return clientDescriptorPtr->getValue();
+    sp<BasicClient> client = clientDescriptorPtr->getValue();
+    if (client.get() != nullptr) {
+        cacheClientTagDumpIfNeeded(clientDescriptorPtr->getKey(), client.get());
+    }
+    return client;
 }
 
 void CameraService::doUserSwitch(const std::vector<int32_t>& newUserIds) {
@@ -3079,6 +3305,21 @@
     return OK;
 }
 
+status_t CameraService::BasicClient::startWatchingTags(const String8&, int) {
+    // Can't watch tags directly, must go through CameraService::startWatchingTags
+    return OK;
+}
+
+status_t CameraService::BasicClient::stopWatchingTags(int) {
+    // Can't watch tags directly, must go through CameraService::stopWatchingTags
+    return OK;
+}
+
+status_t CameraService::BasicClient::dumpWatchedEventsToVector(std::vector<std::string> &) {
+    // Can't watch tags directly, must go through CameraService::dumpWatchedEventsToVector
+    return OK;
+}
+
 String16 CameraService::BasicClient::getPackageName() const {
     return mClientPackageName;
 }
@@ -3678,9 +3919,10 @@
 // ----------------------------------------------------------------------------
 
 CameraService::CameraState::CameraState(const String8& id, int cost,
-        const std::set<String8>& conflicting, SystemCameraKind systemCameraKind) : mId(id),
+        const std::set<String8>& conflicting, SystemCameraKind systemCameraKind,
+        const std::vector<std::string>& physicalCameras) : mId(id),
         mStatus(StatusInternal::NOT_PRESENT), mCost(cost), mConflicting(conflicting),
-        mSystemCameraKind(systemCameraKind) {}
+        mSystemCameraKind(systemCameraKind), mPhysicalCameras(physicalCameras) {}
 
 CameraService::CameraState::~CameraState() {}
 
@@ -3719,6 +3961,11 @@
     return mSystemCameraKind;
 }
 
+bool CameraService::CameraState::containsPhysicalCamera(const std::string& physicalCameraId) const {
+    return std::find(mPhysicalCameras.begin(), mPhysicalCameras.end(), physicalCameraId)
+            != mPhysicalCameras.end();
+}
+
 bool CameraService::CameraState::addUnavailablePhysicalId(const String8& physicalId) {
     Mutex::Autolock lock(mStatusLock);
     auto result = mUnavailablePhysicalIds.insert(physicalId);
@@ -3731,6 +3978,16 @@
     return count > 0;
 }
 
+void CameraService::CameraState::setClientPackage(const String8& clientPackage) {
+    Mutex::Autolock lock(mStatusLock);
+    mClientPackage = clientPackage;
+}
+
+String8 CameraService::CameraState::getClientPackage() const {
+    Mutex::Autolock lock(mStatusLock);
+    return mClientPackage;
+}
+
 // ----------------------------------------------------------------------------
 //                  ClientEventListener
 // ----------------------------------------------------------------------------
@@ -3865,22 +4122,62 @@
 }
 
 void CameraService::InjectionStatusListener::notifyInjectionError(
-        int errorCode) {
-    Mutex::Autolock lock(mListenerLock);
+        String8 injectedCamId, status_t err) {
     if (mCameraInjectionCallback == nullptr) {
         ALOGW("InjectionStatusListener: mCameraInjectionCallback == nullptr");
         return;
     }
-    mCameraInjectionCallback->onInjectionError(errorCode);
+
+    switch (err) {
+        case -ENODEV:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+            ALOGE("No camera device with ID \"%s\" currently available!",
+                    injectedCamId.string());
+            break;
+        case -EBUSY:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+            ALOGE("Higher-priority client using camera, ID \"%s\" currently unavailable!",
+                    injectedCamId.string());
+            break;
+        case DEAD_OBJECT:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+            ALOGE("Camera ID \"%s\" object is dead!",
+                    injectedCamId.string());
+            break;
+        case INVALID_OPERATION:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+            ALOGE("Camera ID \"%s\" encountered an operating or internal error!",
+                    injectedCamId.string());
+            break;
+        case UNKNOWN_TRANSACTION:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_UNSUPPORTED);
+            ALOGE("Camera ID \"%s\" method doesn't support!",
+                    injectedCamId.string());
+            break;
+        default:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_INVALID_ERROR);
+            ALOGE("Unexpected error %s (%d) opening camera \"%s\"!",
+                    strerror(-err), err, injectedCamId.string());
+    }
 }
 
 void CameraService::InjectionStatusListener::binderDied(
         const wp<IBinder>& /*who*/) {
-    Mutex::Autolock lock(mListenerLock);
     ALOGV("InjectionStatusListener: ICameraInjectionCallback has died");
     auto parent = mParent.promote();
     if (parent != nullptr) {
-        parent->stopInjectionImpl();
+        auto clientDescriptor = parent->mActiveClientManager.get(parent->mInjectionInternalCamId);
+        if (clientDescriptor != nullptr) {
+            BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+            baseClientPtr->stopInjection();
+        }
+        parent->clearInjectionParameters();
     }
 }
 
@@ -3896,7 +4193,20 @@
         return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SERVICE,
                 "Camera service encountered error");
     }
-    parent->stopInjectionImpl();
+
+    status_t res = NO_ERROR;
+    auto clientDescriptor = parent->mActiveClientManager.get(parent->mInjectionInternalCamId);
+    if (clientDescriptor != nullptr) {
+        BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+        res = baseClientPtr->stopInjection();
+        if (res != OK) {
+            ALOGE("CameraInjectionSession: Failed to stop the injection camera!"
+                " ret != NO_ERROR: %d", res);
+            return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SESSION,
+                "Camera session encountered error");
+        }
+    }
+    parent->clearInjectionParameters();
     return binder::Status::ok();
 }
 
@@ -4037,7 +4347,7 @@
 
     // Dump camera traces if there were any
     dprintf(fd, "\n");
-    camera3::CameraTraces::dump(fd, args);
+    camera3::CameraTraces::dump(fd);
 
     // Process dump arguments, if any
     int n = args.size();
@@ -4131,6 +4441,45 @@
     dprintf(fd, "\n");
 }
 
+void CameraService::cacheClientTagDumpIfNeeded(const char *cameraId, BasicClient* client) {
+    Mutex::Autolock lock(mLogLock);
+    if (!isClientWatchedLocked(client)) { return; }
+
+    std::vector<std::string> dumpVector;
+    client->dumpWatchedEventsToVector(dumpVector);
+
+    if (dumpVector.empty()) { return; }
+
+    std::string dumpString;
+
+    String8 currentTime = getFormattedCurrentTime();
+    dumpString += "Cached @ ";
+    dumpString += currentTime.string();
+    dumpString += "\n"; // First line is the timestamp of when client is cached.
+
+    const String16 &packageName = client->getPackageName();
+
+    String8 packageName8 = String8(packageName);
+    const char *printablePackageName = packageName8.lockBuffer(packageName.size());
+
+    size_t i = dumpVector.size();
+
+    // Store the string in reverse order (latest last)
+    while (i > 0) {
+        i--;
+        dumpString += cameraId;
+        dumpString += ":";
+        dumpString += printablePackageName;
+        dumpString += "  ";
+        dumpString += dumpVector[i]; // implicitly ends with '\n'
+    }
+
+    packageName8.unlockBuffer();
+    mWatchedClientsDumpCache[packageName] = dumpString;
+}
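+// The cache entry built above has the following shape (camera id, package name, and event text
+// are hypothetical; the timestamp format is whatever getFormattedCurrentTime() returns):
+//
+//   Cached @ <formatted time>
+//   0:com.example.camera  <oldest watched event>
+//   0:com.example.camera  <newest watched event>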
+
 void CameraService::handleTorchClientBinderDied(const wp<IBinder> &who) {
     Mutex::Autolock al(mTorchClientMapMutex);
     for (size_t i = 0; i < mTorchClientMap.size(); i++) {
@@ -4241,6 +4590,18 @@
 
 void CameraService::updateOpenCloseStatus(const String8& cameraId, bool open,
         const String16& clientPackageName) {
+    auto state = getCameraState(cameraId);
+    if (state == nullptr) {
+        ALOGW("%s: Could not update the status for %s, no such device exists", __FUNCTION__,
+                cameraId.string());
+        return;
+    }
+    if (open) {
+        state->setClientPackage(String8(clientPackageName));
+    } else {
+        state->setClientPackage(String8::empty());
+    }
+
     Mutex::Autolock lock(mStatusListenerLock);
 
     for (const auto& it : mListenerList) {
@@ -4337,18 +4698,9 @@
     std::list<String16> retList;
     Mutex::Autolock lock(mCameraStatesLock);
     for (const auto& state : mCameraStates) {
-        std::vector<std::string> physicalCameraIds;
-        if (!mCameraProviderManager->isLogicalCamera(state.first.c_str(), &physicalCameraIds)) {
-            // This is not a logical multi-camera.
-            continue;
+        if (state.second->containsPhysicalCamera(physicalCameraId.c_str())) {
+            retList.emplace_back(String16(state.first));
         }
-        if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(), physicalCameraId.c_str())
-                == physicalCameraIds.end()) {
-            // cameraId is not a physical camera of this logical multi-camera.
-            continue;
-        }
-
-        retList.emplace_back(String16(state.first));
     }
     return retList;
 }
@@ -4423,9 +4775,11 @@
         return handleGetImageDumpMask(out);
     } else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
         return handleSetCameraMute(args);
+    } else if (args.size() >= 2 && args[0] == String16("watch")) {
+        return handleWatchCommand(args, in, out);
     } else if (args.size() == 1 && args[0] == String16("help")) {
         printHelp(out);
-        return NO_ERROR;
+        return OK;
     }
     printHelp(err);
     return BAD_VALUE;
@@ -4569,6 +4923,348 @@
     return OK;
 }
 
+status_t CameraService::handleWatchCommand(const Vector<String16>& args, int inFd, int outFd) {
+    if (args.size() >= 3 && args[1] == String16("start")) {
+        return startWatchingTags(args, outFd);
+    } else if (args.size() == 2 && args[1] == String16("stop")) {
+        return stopWatchingTags(outFd);
+    } else if (args.size() == 2 && args[1] == String16("dump")) {
+        return printWatchedTags(outFd);
+    } else if (args.size() >= 2 && args[1] == String16("live")) {
+        return printWatchedTagsUntilInterrupt(args, inFd, outFd);
+    } else if (args.size() == 2 && args[1] == String16("clear")) {
+        return clearCachedMonitoredTagDumps(outFd);
+    }
+    dprintf(outFd, "Camera service watch commands:\n"
+                 "  start -m <comma_separated_tag_list> [-c <comma_separated_client_list>]\n"
+                 "        starts watching the provided tags for clients with provided package\n"
+                 "        recognizes tag shorthands like '3a'\n"
+                 "        watches all clients if no client is passed, or if 'all' is listed\n"
+                 "  dump dumps the monitoring information and exits\n"
+                 "  stop stops watching all tags\n"
+                 "  live [-n <refresh_interval_ms>]\n"
+                 "        prints the monitored information in real time\n"
+                 "        Hit return to exit\n"
+                 "  clear clears all buffers storing information for watch command");
+  return BAD_VALUE;
+}
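+// Example invocations of the watch command over adb (the package name is hypothetical, and this
+// assumes CameraService is reachable through `cmd` under its "media.camera" service name):
+//
+//   adb shell cmd media.camera watch start -m 3a -c com.example.camera
+//   adb shell cmd media.camera watch live -n 500
+//   adb shell cmd media.camera watch dump
+//   adb shell cmd media.camera watch stop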
+
+status_t CameraService::startWatchingTags(const Vector<String16> &args, int outFd) {
+    Mutex::Autolock lock(mLogLock);
+    size_t tagsIdx; // index of '-m'
+    String16 tags("");
+    for (tagsIdx = 2; tagsIdx < args.size() && args[tagsIdx] != String16("-m"); tagsIdx++);
+    if (tagsIdx < args.size() - 1) {
+        tags = args[tagsIdx + 1];
+    } else {
+        dprintf(outFd, "No tags provided.\n");
+        return BAD_VALUE;
+    }
+
+    size_t clientsIdx; // index of '-c'
+    String16 clients = kWatchAllClientsFlag; // watch all clients if no clients are provided
+    for (clientsIdx = 2; clientsIdx < args.size() && args[clientsIdx] != String16("-c");
+         clientsIdx++);
+    if (clientsIdx < args.size() - 1) {
+        clients = args[clientsIdx + 1];
+    }
+    parseClientsToWatchLocked(String8(clients));
+
+    // track tags to initialize future clients with the monitoring information
+    mMonitorTags = String8(tags);
+
+    bool serviceLock = tryLock(mServiceLock);
+    int numWatchedClients = 0;
+    auto cameraClients = mActiveClientManager.getAll();
+    for (const auto &clientDescriptor: cameraClients) {
+        if (clientDescriptor == nullptr) { continue; }
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+
+        if (isClientWatchedLocked(client.get())) {
+            client->startWatchingTags(mMonitorTags, outFd);
+            numWatchedClients++;
+        }
+    }
+    dprintf(outFd, "Started watching %d active clients\n", numWatchedClients);
+
+    if (serviceLock) { mServiceLock.unlock(); }
+    return OK;
+}
+
+status_t CameraService::stopWatchingTags(int outFd) {
+    // clear mMonitorTags to prevent new clients from monitoring tags at initialization
+    Mutex::Autolock lock(mLogLock);
+    mMonitorTags = String8::empty();
+
+    mWatchedClientPackages.clear();
+    mWatchedClientsDumpCache.clear();
+
+    bool serviceLock = tryLock(mServiceLock);
+    auto cameraClients = mActiveClientManager.getAll();
+    for (const auto &clientDescriptor : cameraClients) {
+        if (clientDescriptor == nullptr) { continue; }
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+        client->stopWatchingTags(outFd);
+    }
+    dprintf(outFd, "Stopped watching all clients.\n");
+    if (serviceLock) { mServiceLock.unlock(); }
+    return OK;
+}
+
+status_t CameraService::clearCachedMonitoredTagDumps(int outFd) {
+    Mutex::Autolock lock(mLogLock);
+    size_t clearedSize = mWatchedClientsDumpCache.size();
+    mWatchedClientsDumpCache.clear();
+    dprintf(outFd, "Cleared tag information of %zu cached clients.\n", clearedSize);
+    return OK;
+}
+
+status_t CameraService::printWatchedTags(int outFd) {
+    Mutex::Autolock logLock(mLogLock);
+    std::set<String16> connectedMonitoredClients;
+
+    bool printedSomething = false; // tracks if any monitoring information was printed
+                                   // (from either cached or active clients)
+
+    bool serviceLock = tryLock(mServiceLock);
+    // get all watched clients that are currently connected
+    for (const auto &clientDescriptor: mActiveClientManager.getAll()) {
+        if (clientDescriptor == nullptr) { continue; }
+
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+        if (!isClientWatchedLocked(client.get())) { continue; }
+
+        std::vector<std::string> dumpVector;
+        client->dumpWatchedEventsToVector(dumpVector);
+
+        size_t printIdx = dumpVector.size();
+        if (printIdx == 0) {
+            continue;
+        }
+
+        // Print tag dumps for active client
+        const String8 &cameraId = clientDescriptor->getKey();
+        String8 packageName8 = String8(client->getPackageName());
+        const char *printablePackageName = packageName8.lockBuffer(packageName8.size());
+        dprintf(outFd, "Client: %s (active)\n", printablePackageName);
+        while (printIdx > 0) {
+            printIdx--;
+            dprintf(outFd, "%s:%s  %s", cameraId.string(), printablePackageName,
+                    dumpVector[printIdx].c_str());
+        }
+        dprintf(outFd, "\n");
+        packageName8.unlockBuffer();
+        printedSomething = true;
+
+        connectedMonitoredClients.emplace(client->getPackageName());
+    }
+    if (serviceLock) { mServiceLock.unlock(); }
+
+    // Print entries in mWatchedClientsDumpCache for clients that are not connected
+    for (const auto &kv: mWatchedClientsDumpCache) {
+        const String16 &package = kv.first;
+        if (connectedMonitoredClients.find(package) != connectedMonitoredClients.end()) {
+            continue;
+        }
+
+        dprintf(outFd, "Client: %s (cached)\n", String8(package).string());
+        dprintf(outFd, "%s\n", kv.second.c_str());
+        printedSomething = true;
+    }
+
+    if (!printedSomething) {
+        dprintf(outFd, "No monitoring information to print.\n");
+    }
+
+    return OK;
+}
+
+// Print all events in vector `events' that came after lastPrintedEvent
+void printNewWatchedEvents(int outFd,
+                           const char *cameraId,
+                           const String16 &packageName,
+                           const std::vector<std::string> &events,
+                           const std::string &lastPrintedEvent) {
+    if (events.empty()) { return; }
+
+    // index of lastPrintedEvent in events.
+    // lastPrintedIdx = events.size() if lastPrintedEvent is not in events
+    size_t lastPrintedIdx;
+    for (lastPrintedIdx = 0;
+         lastPrintedIdx < events.size() && lastPrintedEvent != events[lastPrintedIdx];
+         lastPrintedIdx++);
+
+    if (lastPrintedIdx == 0) { return; } // early exit if no new event in `events`
+
+    String8 packageName8(packageName);
+    const char *printablePackageName = packageName8.lockBuffer(packageName8.size());
+
+    // print events in chronological order (latest event last)
+    size_t idxToPrint = lastPrintedIdx;
+    do {
+        idxToPrint--;
+        dprintf(outFd, "%s:%s  %s", cameraId, printablePackageName, events[idxToPrint].c_str());
+    } while (idxToPrint != 0);
+
+    packageName8.unlockBuffer();
+}
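+// Worked example (event strings are hypothetical): if `events` holds {"E3\n", "E2\n", "E1\n"}
+// with the newest entry first and lastPrintedEvent == "E1\n", then lastPrintedIdx == 2 and the
+// loop above prints "E2\n" followed by "E3\n", i.e. only the new events, oldest first.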
+
+// Returns true if adb shell cmd watch should be interrupted based on data in inFd. The watch
+// command should be interrupted if the user presses the return key, or if the user loses any way
+// to signal an interrupt.
+// If timeoutMs == 0, this function will always return false
+bool shouldInterruptWatchCommand(int inFd, int outFd, long timeoutMs) {
+    struct timeval startTime;
+    int startTimeError = gettimeofday(&startTime, nullptr);
+    if (startTimeError) {
+        dprintf(outFd, "Failed waiting for interrupt, aborting.\n");
+        return true;
+    }
+
+    const nfds_t numFds = 1;
+    struct pollfd pollFd = { .fd = inFd, .events = POLLIN, .revents = 0 };
+
+    struct timeval currTime;
+    char buffer[2];
+    while (true) {
+        int currTimeError = gettimeofday(&currTime, nullptr);
+        if (currTimeError) {
+            dprintf(outFd, "Failed waiting for interrupt, aborting.\n");
+            return true;
+        }
+
+        long elapsedTimeMs = ((currTime.tv_sec - startTime.tv_sec) * 1000L)
+                + ((currTime.tv_usec - startTime.tv_usec) / 1000L);
+        int remainingTimeMs = (int) (timeoutMs - elapsedTimeMs);
+
+        if (remainingTimeMs <= 0) {
+            // No user interrupt within timeoutMs, don't interrupt watch command
+            return false;
+        }
+
+        int numFdsUpdated = poll(&pollFd, numFds, remainingTimeMs);
+        if (numFdsUpdated < 0) {
+            dprintf(outFd, "Failed while waiting for user input. Exiting.\n");
+            return true;
+        }
+
+        if (numFdsUpdated == 0) {
+            // No user input within timeoutMs, don't interrupt watch command
+            return false;
+        }
+
+        if (!(pollFd.revents & POLLIN)) {
+            dprintf(outFd, "Failed while waiting for user input. Exiting.\n");
+            return true;
+        }
+
+        ssize_t sizeRead = read(inFd, buffer, sizeof(buffer) - 1);
+        if (sizeRead < 0) {
+            dprintf(outFd, "Error reading user input. Exiting.\n");
+            return true;
+        }
+
+        if (sizeRead == 0) {
+            // Reached end of input fd (can happen if input is piped)
+            // User has no way to signal an interrupt, so interrupt here
+            return true;
+        }
+
+        if (buffer[0] == '\n') {
+            // User pressed return, interrupt watch command.
+            return true;
+        }
+    }
+}
+
+status_t CameraService::printWatchedTagsUntilInterrupt(const Vector<String16> &args,
+                                                       int inFd, int outFd) {
+    // Figure out refresh interval, if present in args
+    long refreshTimeoutMs = 1000L; // refresh every 1s by default
+    if (args.size() > 2) {
+        size_t intervalIdx; // index of '-n'
+        for (intervalIdx = 2; intervalIdx < args.size() && String16("-n") != args[intervalIdx];
+             intervalIdx++);
+
+        size_t intervalValIdx = intervalIdx + 1;
+        if (intervalValIdx < args.size()) {
+            errno = 0; // reset errno so the strtol failure check below is meaningful
+            refreshTimeoutMs = strtol(String8(args[intervalValIdx].string()), nullptr, 10);
+            if (errno) { return BAD_VALUE; }
+        }
+    }
+
+    // Set min timeout of 10ms. This prevents edge cases in polling when timeout of 0 is passed.
+    refreshTimeoutMs = refreshTimeoutMs < 10 ? 10 : refreshTimeoutMs;
+
+    dprintf(outFd, "Press return to exit...\n\n");
+    std::map<String16, std::string> packageNameToLastEvent;
+
+    while (true) {
+        bool serviceLock = tryLock(mServiceLock);
+        auto cameraClients = mActiveClientManager.getAll();
+        if (serviceLock) { mServiceLock.unlock(); }
+
+        for (const auto& clientDescriptor : cameraClients) {
+            Mutex::Autolock lock(mLogLock);
+            if (clientDescriptor == nullptr) { continue; }
+
+            sp<BasicClient> client = clientDescriptor->getValue();
+            if (client.get() == nullptr) { continue; }
+            if (!isClientWatchedLocked(client.get())) { continue; }
+
+            const String16 &packageName = client->getPackageName();
+            // This also initializes the map entries with an empty string
+            const std::string& lastPrintedEvent = packageNameToLastEvent[packageName];
+
+            std::vector<std::string> latestEvents;
+            client->dumpWatchedEventsToVector(latestEvents);
+
+            if (!latestEvents.empty()) {
+                String8 cameraId = clientDescriptor->getKey();
+                const char *printableCameraId = cameraId.lockBuffer(cameraId.size());
+                printNewWatchedEvents(outFd,
+                                      printableCameraId,
+                                      packageName,
+                                      latestEvents,
+                                      lastPrintedEvent);
+                packageNameToLastEvent[packageName] = latestEvents[0];
+                cameraId.unlockBuffer();
+            }
+        }
+        if (shouldInterruptWatchCommand(inFd, outFd, refreshTimeoutMs)) {
+            break;
+        }
+    }
+    return OK;
+}
+
+void CameraService::parseClientsToWatchLocked(String8 clients) {
+    mWatchedClientPackages.clear();
+
+    const char *allSentinel = String8(kWatchAllClientsFlag).string();
+
+    char *tokenized = clients.lockBuffer(clients.size());
+    char *savePtr;
+    char *nextClient = strtok_r(tokenized, ",", &savePtr);
+
+    while (nextClient != nullptr) {
+        if (strcmp(nextClient, allSentinel) == 0) {
+            // Don't need to track any other package if 'all' is present
+            mWatchedClientPackages.clear();
+            mWatchedClientPackages.emplace(kWatchAllClientsFlag);
+            break;
+        }
+
+        // track package names
+        mWatchedClientPackages.emplace(nextClient);
+        nextClient = strtok_r(nullptr, ",", &savePtr);
+    }
+    clients.unlockBuffer();
+}
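+// Example (package names are hypothetical):
+//   parseClientsToWatchLocked(String8("com.example.a,com.example.b")) tracks exactly those two
+//   packages, while any list containing the 'all' sentinel (kWatchAllClientsFlag), such as
+//   "com.example.a,all", collapses to watching every client.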
+
 status_t CameraService::printHelp(int out) {
     return dprintf(out, "Camera service commands:\n"
         "  get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
@@ -4581,9 +5277,20 @@
         "      Valid values 0=OFF, 1=ON for JPEG\n"
         "  get-image-dump-mask returns the current image-dump-mask value\n"
         "  set-camera-mute <0/1> enable or disable camera muting\n"
+        "  watch <start|stop|dump|print|clear> manages tag monitoring in connected clients\n"
         "  help print this message\n");
 }
 
+bool CameraService::isClientWatched(const BasicClient *client) {
+    Mutex::Autolock lock(mLogLock);
+    return isClientWatchedLocked(client);
+}
+
+bool CameraService::isClientWatchedLocked(const BasicClient *client) {
+    return mWatchedClientPackages.find(kWatchAllClientsFlag) != mWatchedClientPackages.end() ||
+           mWatchedClientPackages.find(client->getPackageName()) != mWatchedClientPackages.end();
+}
+
 int32_t CameraService::updateAudioRestriction() {
     Mutex::Autolock lock(mServiceLock);
     return updateAudioRestrictionLocked();
@@ -4605,10 +5312,43 @@
     return mode;
 }
 
-void CameraService::stopInjectionImpl() {
-    mInjectionStatusListener->removeListener();
+status_t CameraService::checkIfInjectionCameraIsPresent(const String8& externalCamId,
+        sp<BasicClient> clientSp) {
+    std::unique_ptr<AutoConditionLock> lock =
+            AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
+    status_t res = NO_ERROR;
+    if ((res = checkIfDeviceIsUsable(externalCamId)) != NO_ERROR) {
+        ALOGW("Device %s is not usable!", externalCamId.string());
+        mInjectionStatusListener->notifyInjectionError(
+                externalCamId, UNKNOWN_TRANSACTION);
+        clientSp->notifyError(
+                hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                CaptureResultExtras());
 
-    // TODO: Implement the stop injection function.
+        // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+        // other clients from connecting in mServiceLockWrapper if held
+        mServiceLock.unlock();
+
+        // Clear caller identity temporarily so client disconnect PID checks work correctly
+        int64_t token = CameraThreadState::clearCallingIdentity();
+        clientSp->disconnect();
+        CameraThreadState::restoreCallingIdentity(token);
+
+        // Reacquire mServiceLock
+        mServiceLock.lock();
+    }
+
+    return res;
+}
+
+void CameraService::clearInjectionParameters() {
+    {
+        Mutex::Autolock lock(mInjectionParametersLock);
+        mInjectionInitPending = false;
+        mInjectionInternalCamId = "";
+    }
+    mInjectionExternalCamId = "";
+    mInjectionStatusListener->removeListener();
 }
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index bc2e347..c73d28a 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -106,12 +106,20 @@
     // HAL Callbacks - implements CameraProviderManager::StatusListener
 
     virtual void        onDeviceStatusChanged(const String8 &cameraId,
-            hardware::camera::common::V1_0::CameraDeviceStatus newHalStatus) override;
+            CameraDeviceStatus newHalStatus) override;
     virtual void        onDeviceStatusChanged(const String8 &cameraId,
             const String8 &physicalCameraId,
-            hardware::camera::common::V1_0::CameraDeviceStatus newHalStatus) override;
+            CameraDeviceStatus newHalStatus) override;
+    // This method may hold CameraProviderManager::mInterfaceMutex as a part
+    // of calling getSystemCameraKind() internally. Care should be taken not to
+    // directly / indirectly call this from callers who also hold
+    // mInterfaceMutex.
     virtual void        onTorchStatusChanged(const String8& cameraId,
-            hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
+            TorchModeStatus newStatus) override;
+    // Does not hold CameraProviderManager::mInterfaceMutex.
+    virtual void        onTorchStatusChanged(const String8& cameraId,
+            TorchModeStatus newStatus,
+            SystemCameraKind kind) override;
     virtual void        onNewProviderRegistered() override;
 
     /////////////////////////////////////////////////////////////////////
@@ -164,6 +172,12 @@
     virtual binder::Status    setTorchMode(const String16& cameraId, bool enabled,
             const sp<IBinder>& clientBinder);
 
+    virtual binder::Status    turnOnTorchWithStrengthLevel(const String16& cameraId,
+            int32_t torchStrength, const sp<IBinder>& clientBinder);
+
+    virtual binder::Status    getTorchStrengthLevel(const String16& cameraId,
+            int32_t* torchStrength);
+
     virtual binder::Status    notifySystemEvent(int32_t eventId,
             const std::vector<int32_t>& args);
 
@@ -272,6 +286,10 @@
         // Internal dump method to be called by CameraService
         virtual status_t dumpClient(int fd, const Vector<String16>& args) = 0;
 
+        virtual status_t startWatchingTags(const String8 &tags, int outFd);
+        virtual status_t stopWatchingTags(int outFd);
+        virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out);
+
         // Return the package name for this client
         virtual String16 getPackageName() const;
 
@@ -320,6 +338,14 @@
         // Set/reset camera mute
         virtual status_t setCameraMute(bool enabled) = 0;
 
+        // The injection camera session to replace the internal camera
+        // session.
+        virtual status_t injectCamera(const String8& injectedCamId,
+                sp<CameraProviderManager> manager) = 0;
+
+        // Stop the injection camera and restore to internal camera session.
+        virtual status_t stopInjection() = 0;
+
     protected:
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
@@ -532,8 +558,6 @@
 
 private:
 
-    typedef hardware::camera::common::V1_0::CameraDeviceStatus CameraDeviceStatus;
-
     /**
      * Typesafe version of device status, containing both the HAL-layer and the service interface-
      * layer values.
@@ -563,7 +587,7 @@
          * returned in the HAL's camera_info struct for each device.
          */
         CameraState(const String8& id, int cost, const std::set<String8>& conflicting,
-                SystemCameraKind deviceKind);
+                SystemCameraKind deviceKind, const std::vector<std::string>& physicalCameras);
         virtual ~CameraState();
 
         /**
@@ -621,12 +645,24 @@
         SystemCameraKind getSystemCameraKind() const;
 
         /**
+         * Return whether this camera is a logical multi-camera and has a
+         * particular physical sub-camera.
+         */
+        bool containsPhysicalCamera(const std::string& physicalCameraId) const;
+
+        /**
          * Add/Remove the unavailable physical camera ID.
          */
         bool addUnavailablePhysicalId(const String8& physicalId);
         bool removeUnavailablePhysicalId(const String8& physicalId);
 
         /**
+         * Set and get client package name.
+         */
+        void setClientPackage(const String8& clientPackage);
+        String8 getClientPackage() const;
+
+        /**
          * Return the unavailable physical ids for this device.
          *
          * This method acquires mStatusLock.
@@ -638,9 +674,11 @@
         const int mCost;
         std::set<String8> mConflicting;
         std::set<String8> mUnavailablePhysicalIds;
+        String8 mClientPackage;
         mutable Mutex mStatusLock;
         CameraParameters mShimParams;
         const SystemCameraKind mSystemCameraKind;
+        const std::vector<std::string> mPhysicalCameras; // Empty if not a logical multi-camera
     }; // class CameraState
 
     // Observer for UID lifecycle enforcing that UIDs in idle
@@ -810,6 +848,14 @@
     RingBuffer<String8> mEventLog;
     Mutex mLogLock;
 
+    // Set of client package names to watch. If this set contains 'all', then all clients will
+    // be watched. Access should be guarded by mLogLock
+    std::set<String16> mWatchedClientPackages;
+    // cache of last monitored tags dump immediately before the client disconnects. If a client
+    // re-connects, its entry is not updated until it disconnects again. Access should be guarded
+    // by mLogLock
+    std::map<String16, std::string> mWatchedClientsDumpCache;
+
     // The last monitored tags set by client
     String8 mMonitorTags;
 
@@ -942,6 +988,8 @@
      */
     void dumpEventLog(int fd);
 
+    void cacheClientTagDumpIfNeeded(const char *cameraId, BasicClient *client);
+
     /**
      * This method will acquire mServiceLock
      */
@@ -1052,7 +1100,7 @@
     // guard mTorchUidMap
     Mutex                mTorchUidMapMutex;
     // camera id -> torch status
-    KeyedVector<String8, hardware::camera::common::V1_0::TorchModeStatus>
+    KeyedVector<String8, TorchModeStatus>
             mTorchStatusMap;
     // camera id -> torch client binder
     // only store the last client that turns on each camera's torch mode
@@ -1066,16 +1114,16 @@
     // handle torch mode status change and invoke callbacks. mTorchStatusMutex
     // should be locked.
     void onTorchStatusChangedLocked(const String8& cameraId,
-            hardware::camera::common::V1_0::TorchModeStatus newStatus,
+            TorchModeStatus newStatus,
             SystemCameraKind systemCameraKind);
 
     // get a camera's torch status. mTorchStatusMutex should be locked.
     status_t getTorchStatusLocked(const String8 &cameraId,
-             hardware::camera::common::V1_0::TorchModeStatus *status) const;
+             TorchModeStatus *status) const;
 
     // set a camera's torch status. mTorchStatusMutex should be locked.
     status_t setTorchStatusLocked(const String8 &cameraId,
-            hardware::camera::common::V1_0::TorchModeStatus status);
+            TorchModeStatus status);
 
     // notify physical camera status when the physical camera is public.
     // Expects mStatusListenerLock to be locked.
@@ -1134,9 +1182,43 @@
     // Set the camera mute state
     status_t handleSetCameraMute(const Vector<String16>& args);
 
+    // Handle 'watch' command as passed through 'cmd'
+    status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
+
+    // Enable tag monitoring of the given tags in provided clients
+    status_t startWatchingTags(const Vector<String16> &args, int outFd);
+
+    // Disable tag monitoring
+    status_t stopWatchingTags(int outFd);
+
+    // Clears mWatchedClientsDumpCache
+    status_t clearCachedMonitoredTagDumps(int outFd);
+
+    // Print events of monitored tags in all cached and attached clients
+    status_t printWatchedTags(int outFd);
+
+    // Print events of monitored tags in all attached clients as they are captured. New events are
+    // fetched every `refreshMillis` ms
+    // NOTE: This function does not terminate until user passes '\n' to inFd.
+    status_t printWatchedTagsUntilInterrupt(const Vector<String16> &args, int inFd, int outFd);
+
+    // Parses comma separated clients list and adds them to mWatchedClientPackages.
+    // Does not acquire mLogLock before modifying mWatchedClientPackages. It is the caller's
+    // responsibility to acquire mLogLock before calling this function.
+    void parseClientsToWatchLocked(String8 clients);
+
     // Prints the shell command help
     status_t printHelp(int out);
 
+    // Returns true if client should monitor tags based on the contents of mWatchedClientPackages.
+    // Acquires mLogLock before querying mWatchedClientPackages.
+    bool isClientWatched(const BasicClient *client);
+
+    // Returns true if client should monitor tags based on the contents of mWatchedClientPackages.
+    // Does not acquire mLogLock before querying mWatchedClientPackages. It is the caller's
+    // responsibility to acquire mLogLock before calling this function.
+    bool isClientWatchedLocked(const BasicClient *client);
+
     /**
      * Get the current system time as a formatted string.
      */
@@ -1152,14 +1234,15 @@
     status_t checkCameraAccess(const String16& opPackageName);
 
     static String8 toString(std::set<userid_t> intSet);
-    static int32_t mapToInterface(hardware::camera::common::V1_0::TorchModeStatus status);
-    static StatusInternal mapToInternal(hardware::camera::common::V1_0::CameraDeviceStatus status);
+    static int32_t mapToInterface(TorchModeStatus status);
+    static StatusInternal mapToInternal(CameraDeviceStatus status);
     static int32_t mapToInterface(StatusInternal status);
 
 
     void broadcastTorchModeStatus(const String8& cameraId,
-            hardware::camera::common::V1_0::TorchModeStatus status,
-            SystemCameraKind systemCameraKind);
+            TorchModeStatus status, SystemCameraKind systemCameraKind);
+
+    void broadcastTorchStrengthLevel(const String8& cameraId, int32_t newTorchStrengthLevel);
 
     void disconnectClient(const String8& id, sp<BasicClient> clientToDisconnect);
 
@@ -1167,6 +1250,10 @@
     // Use separate keys for offline devices.
     static const String8 kOfflineDevice;
 
+    // Sentinel value to be stored in `mWatchedClientPackages` to indicate that all clients should
+    // be watched.
+    static const String16 kWatchAllClientsFlag;
+
     // TODO: right now each BasicClient holds one AppOpsManager instance.
     // We can refactor the code so all of clients share this instance
     AppOpsManager mAppOps;
@@ -1194,7 +1281,7 @@
 
             void addListener(const sp<hardware::camera2::ICameraInjectionCallback>& callback);
             void removeListener();
-            void notifyInjectionError(int errorCode);
+            void notifyInjectionError(String8 injectedCamId, status_t err);
 
             // IBinder::DeathRecipient implementation
             virtual void binderDied(const wp<IBinder>& who);
@@ -1221,7 +1308,22 @@
             wp<CameraService> mParent;
     };
 
-    void stopInjectionImpl();
+    // When injecting a camera, check whether the camera to be injected is unavailable. If it is,
+    // the disconnect function will be called to prevent camera access on the device.
+    status_t checkIfInjectionCameraIsPresent(const String8& externalCamId,
+            sp<BasicClient> clientSp);
+
+    void clearInjectionParameters();
+
+    // This is the existing camera id being replaced.
+    String8 mInjectionInternalCamId;
+    // This is the external camera Id replacing the internalId.
+    String8 mInjectionExternalCamId;
+    bool mInjectionInitPending = false;
+    // Guard mInjectionInternalCamId and mInjectionInitPending.
+    Mutex mInjectionParametersLock;
+
+    void updateTorchUidMapLocked(const String16& cameraId, int uid);
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 944b8ab..8c72bd7 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -62,7 +62,7 @@
         bool overrideForPerfClass):
         Camera2ClientBase(cameraService, cameraClient, clientPackageName, clientFeatureId,
                 cameraDeviceId, api1CameraId, cameraFacing, sensorOrientation,
-                clientPid, clientUid, servicePid, overrideForPerfClass),
+                clientPid, clientUid, servicePid, overrideForPerfClass, /*legacyClient*/ true),
         mParameters(api1CameraId, cameraFacing)
 {
     ATRACE_CALL();
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index eed2654..a38d7ae 100755
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -78,7 +78,8 @@
     }
 
     // Find out buffer size for JPEG
-    ssize_t maxJpegSize = device->getJpegBufferSize(params.pictureWidth, params.pictureHeight);
+    ssize_t maxJpegSize = device->getJpegBufferSize(device->infoPhysical(String8("")),
+            params.pictureWidth, params.pictureHeight);
     if (maxJpegSize <= 0) {
         ALOGE("%s: Camera %d: Jpeg buffer size (%zu) is invalid ",
                 __FUNCTION__, mId, maxJpegSize);
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 80508e4..9a7ada2 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -75,23 +75,43 @@
     // Treat the H.264 max size as the max supported video size.
     MediaProfiles *videoEncoderProfiles = MediaProfiles::getInstance();
     Vector<video_encoder> encoders = videoEncoderProfiles->getVideoEncoders();
+    int32_t minVideoWidth = MAX_PREVIEW_WIDTH;
+    int32_t minVideoHeight = MAX_PREVIEW_HEIGHT;
     int32_t maxVideoWidth = 0;
     int32_t maxVideoHeight = 0;
     for (size_t i = 0; i < encoders.size(); i++) {
-        int width = videoEncoderProfiles->getVideoEncoderParamByName(
+        int w0 = videoEncoderProfiles->getVideoEncoderParamByName(
+                "enc.vid.width.min", encoders[i]);
+        int h0 = videoEncoderProfiles->getVideoEncoderParamByName(
+                "enc.vid.height.min", encoders[i]);
+        int w1 = videoEncoderProfiles->getVideoEncoderParamByName(
                 "enc.vid.width.max", encoders[i]);
-        int height = videoEncoderProfiles->getVideoEncoderParamByName(
+        int h1 = videoEncoderProfiles->getVideoEncoderParamByName(
                 "enc.vid.height.max", encoders[i]);
-        // Treat width/height separately here to handle the case where different
-        // profile might report max size of different aspect ratio
-        if (width > maxVideoWidth) {
-            maxVideoWidth = width;
+        // Assume the min size is 0 if it's not reported by encoder
+        if (w0 == -1) {
+            w0 = 0;
         }
-        if (height > maxVideoHeight) {
-            maxVideoHeight = height;
+        if (h0 == -1) {
+            h0 = 0;
+        }
+        // Treat width/height separately here to handle the case where different
+        // profile might report min/max size of different aspect ratio
+        if (w0 < minVideoWidth) {
+            minVideoWidth = w0;
+        }
+        if (h0 < minVideoHeight) {
+            minVideoHeight = h0;
+        }
+        if (w1 > maxVideoWidth) {
+            maxVideoWidth = w1;
+        }
+        if (h1 > maxVideoHeight) {
+            maxVideoHeight = h1;
         }
     }
-    // This is just an upper bound and may not be an actually valid video size
+    // These are upper/lower bounds and may not be actually valid video sizes
+    const Size VIDEO_SIZE_LOWER_BOUND = {minVideoWidth, minVideoHeight};
     Size videoSizeUpperBound = {maxVideoWidth, maxVideoHeight};
 
     if (fastInfo.supportsPreferredConfigs) {
@@ -99,9 +119,10 @@
         videoSizeUpperBound = getMaxSize(getPreferredVideoSizes());
     }
 
-    res = getFilteredSizes(maxPreviewSize, &availablePreviewSizes);
+    res = getFilteredSizes(Size{0, 0}, maxPreviewSize, &availablePreviewSizes);
     if (res != OK) return res;
-    res = getFilteredSizes(videoSizeUpperBound, &availableVideoSizes);
+    res = getFilteredSizes(
+        VIDEO_SIZE_LOWER_BOUND, videoSizeUpperBound, &availableVideoSizes);
     if (res != OK) return res;
 
     // Select initial preview and video size that's under the initial bound and
@@ -864,7 +885,6 @@
 
     if (fabs(maxDigitalZoom.data.f[0] - 1.f) > 0.00001f) {
         params.set(CameraParameters::KEY_ZOOM, zoom);
-        params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
 
         {
             String8 zoomRatios;
@@ -872,18 +892,34 @@
             float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
                     (NUM_ZOOM_STEPS-1);
             bool addComma = false;
-            for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
+            int previousZoom = -1;
+            size_t zoomSteps = 0;
+            for (size_t i = 0; i < NUM_ZOOM_STEPS; i++) {
+                int currentZoom = static_cast<int>(zoom * 100);
+                if (previousZoom == currentZoom) {
+                    zoom += zoomIncrement;
+                    continue;
+                }
                 if (addComma) zoomRatios += ",";
                 addComma = true;
-                zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
+                zoomRatios += String8::format("%d", currentZoom);
                 zoom += zoomIncrement;
+                previousZoom = currentZoom;
+                zoomSteps++;
             }
-            params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+
+            if (zoomSteps > 0) {
+                params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+                params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                        CameraParameters::TRUE);
+                params.set(CameraParameters::KEY_MAX_ZOOM, zoomSteps - 1);
+                zoomAvailable = true;
+            } else {
+                params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                        CameraParameters::FALSE);
+            }
         }
 
-        params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
-                CameraParameters::TRUE);
-        zoomAvailable = true;
     } else {
         params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
                 CameraParameters::FALSE);
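
The hunk above deduplicates the advertised zoom ratios: the per-step float increment is truncated to an integer percentage, so consecutive steps can collapse to the same value, and KEY_MAX_ZOOM must then reflect the number of distinct steps actually published. A minimal standalone sketch of that logic (hypothetical helper name, std::string in place of String8):

    #include <cstddef>
    #include <string>

    // Build the comma-separated list of zoom ratios (in percent) while skipping
    // consecutive duplicates; returns the number of distinct steps emitted.
    size_t buildZoomRatios(float maxZoom, size_t numSteps, std::string* outRatios) {
        if (numSteps < 2 || outRatios == nullptr) return 0;
        float zoom = 1.0f;
        const float increment = (maxZoom - zoom) / (numSteps - 1);
        int previous = -1;
        size_t emitted = 0;
        for (size_t i = 0; i < numSteps; i++) {
            const int current = static_cast<int>(zoom * 100);
            zoom += increment;
            if (current == previous) continue;  // same value after truncation, skip it
            if (emitted > 0) *outRatios += ",";
            *outRatios += std::to_string(current);
            previous = current;
            emitted++;
        }
        return emitted;
    }
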
@@ -1040,7 +1076,8 @@
     if (fastInfo.supportsPreferredConfigs) {
         previewSizeBound = getMaxSize(getPreferredPreviewSizes());
     }
-    status_t res = getFilteredSizes(previewSizeBound, &supportedPreviewSizes);
+    status_t res = getFilteredSizes(
+        Size{0, 0}, previewSizeBound, &supportedPreviewSizes);
     if (res != OK) return res;
     for (size_t i=0; i < availableFpsRanges.count; i += 2) {
         if (!isFpsSupported(supportedPreviewSizes,
@@ -2983,7 +3020,8 @@
     }
 }
 
-status_t Parameters::getFilteredSizes(Size limit, Vector<Size> *sizes) {
+status_t Parameters::getFilteredSizes(const Size &lower, const Size &upper,
+        Vector<Size> *sizes) {
     if (info == NULL) {
         ALOGE("%s: Static metadata is not initialized", __FUNCTION__);
         return NO_INIT;
@@ -2999,7 +3037,8 @@
         const StreamConfiguration &sc = scs[i];
         if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
                 sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
-                ((sc.width * sc.height) <= (limit.width * limit.height))) {
+                ((sc.width * sc.height) >= (lower.width * lower.height)) &&
+                ((sc.width * sc.height) <= (upper.width * upper.height))) {
             int64_t minFrameDuration = getMinFrameDurationNs(
                     {sc.width, sc.height}, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
             if (minFrameDuration > MAX_PREVIEW_RECORD_DURATION_NS) {
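
getFilteredSizes() now takes both a lower and an upper bound and keeps only stream configurations whose pixel area lies between the two. A self-contained sketch of that predicate, using plain standard-library types instead of the camera metadata structures:

    #include <cstdint>
    #include <vector>

    struct Size {
        int32_t width;
        int32_t height;
    };

    // Keep only the candidate sizes whose pixel area lies between the lower and
    // upper bounds (inclusive), matching the new area comparison above.
    std::vector<Size> filterByArea(const std::vector<Size>& candidates,
                                   const Size& lower, const Size& upper) {
        const int64_t lo = static_cast<int64_t>(lower.width) * lower.height;
        const int64_t hi = static_cast<int64_t>(upper.width) * upper.height;
        std::vector<Size> out;
        for (const Size& s : candidates) {
            const int64_t area = static_cast<int64_t>(s.width) * s.height;
            if (area >= lo && area <= hi) {
                out.push_back(s);
            }
        }
        return out;
    }
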
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index e2f8d011..263025e 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -205,7 +205,7 @@
     static const int MAX_INITIAL_PREVIEW_WIDTH = 1920;
     static const int MAX_INITIAL_PREVIEW_HEIGHT = 1080;
     // Aspect ratio tolerance
-    static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.001;
+    static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.01;
     // Threshold for slow jpeg mode
     static const int64_t kSlowJpegModeThreshold = 33400000LL; // 33.4 ms
     // Margin for checking FPS
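
The relaxed ASPECT_RATIO_TOLERANCE above (0.01 instead of 0.001) loosens the comparison used when deciding whether two sizes share an aspect ratio. A tiny illustrative helper (name and usage are assumptions):

    #include <cmath>

    // Two sizes are treated as having the same aspect ratio when their ratios
    // differ by less than the tolerance (0.01 after this change).
    bool sameAspectRatio(int w1, int h1, int w2, int h2, float tolerance = 0.01f) {
        if (h1 <= 0 || h2 <= 0) return false;
        const float r1 = static_cast<float>(w1) / static_cast<float>(h1);
        const float r2 = static_cast<float>(w2) / static_cast<float>(h2);
        return std::fabs(r1 - r2) < tolerance;
    }
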
@@ -396,9 +396,10 @@
 
     Vector<Size> availablePreviewSizes;
     Vector<Size> availableVideoSizes;
-    // Get size list (that are no larger than limit) from static metadata.
+    // Get size list (that fall within lower/upper bounds) from static metadata.
     // This method filtered size with minFrameDuration < MAX_PREVIEW_RECORD_DURATION_NS
-    status_t getFilteredSizes(Size limit, Vector<Size> *sizes);
+    status_t getFilteredSizes(const Size &lower, const Size &upper,
+            Vector<Size> *sizes);
     // Get max size (from the size array) that matches the given aspect ratio.
     Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 1f3d478..9b0d0e7 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -51,8 +51,8 @@
 
 namespace android {
 using namespace camera2;
+using namespace camera3;
 using camera3::camera_stream_rotation_t::CAMERA_STREAM_ROTATION_0;
-using camera3::SessionConfigurationUtils;
 
 CameraDeviceClientBase::CameraDeviceClientBase(
         const sp<CameraService>& cameraService,
@@ -140,6 +140,40 @@
                 physicalKeysEntry.data.i32 + physicalKeysEntry.count);
     }
 
+    auto entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    mDynamicProfileMap.emplace(
+            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
+    if (entry.count > 0) {
+        const auto it = std::find(entry.data.i32, entry.data.i32 + entry.count,
+                ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT);
+        if (it != entry.data.i32 + entry.count) {
+            entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP);
+            if (entry.count > 0 || ((entry.count % 2) != 0)) {
+                int standardBitmap = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+                for (size_t i = 0; i < entry.count; i += 2) {
+                    if (entry.data.i32[i] !=
+                            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+                        mDynamicProfileMap.emplace(entry.data.i32[i], entry.data.i32[i+1]);
+                        if ((entry.data.i32[i+1] == 0) || (entry.data.i32[i+1] &
+                                ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD)) {
+                            standardBitmap |= entry.data.i32[i];
+                        }
+                    } else {
+                        ALOGE("%s: Device %s includes unexpected profile entry: 0x%x!",
+                                __FUNCTION__, mCameraIdStr.c_str(), entry.data.i32[i]);
+                    }
+                }
+                mDynamicProfileMap.emplace(
+                        ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+                        standardBitmap);
+            } else {
+                ALOGE("%s: Device %s supports 10-bit output but doesn't include a dynamic range"
+                        " profile map!", __FUNCTION__, mCameraIdStr.c_str());
+            }
+        }
+    }
+
     mProviderManager = providerPtr;
     // Cache physical camera ids corresponding to this device and also the high
     // resolution sensors in this device + physical camera ids
@@ -172,6 +206,7 @@
     int compositeIdx;
     int idx = mStreamMap.indexOfKey(IInterface::asBinder(gbp));
 
+    Mutex::Autolock l(mCompositeLock);
     // Trying to submit request with surface that wasn't created
     if (idx == NAME_NOT_FOUND) {
         ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
@@ -297,6 +332,7 @@
         SurfaceMap surfaceMap;
         Vector<int32_t> outputStreamIds;
         std::vector<std::string> requestedPhysicalIds;
+        int dynamicProfileBitmap = 0;
         if (request.mSurfaceList.size() > 0) {
             for (const sp<Surface>& surface : request.mSurfaceList) {
                 if (surface == 0) continue;
@@ -313,6 +349,8 @@
                     String8 requestedPhysicalId(
                             mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
                     requestedPhysicalIds.push_back(requestedPhysicalId.string());
+                    dynamicProfileBitmap |=
+                            mConfiguredOutputs.valueAt(index).getDynamicRangeProfile();
                 } else {
                     ALOGW("%s: Output stream Id not found among configured outputs!", __FUNCTION__);
                 }
@@ -348,6 +386,41 @@
                 String8 requestedPhysicalId(
                         mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
                 requestedPhysicalIds.push_back(requestedPhysicalId.string());
+                dynamicProfileBitmap |=
+                        mConfiguredOutputs.valueAt(index).getDynamicRangeProfile();
+            }
+        }
+
+        if (dynamicProfileBitmap !=
+                    ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+            for (int i = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+                    i < ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_MAX; i <<= 1) {
+                if ((dynamicProfileBitmap & i) == 0) {
+                    continue;
+                }
+
+                const auto& it = mDynamicProfileMap.find(i);
+                if (it != mDynamicProfileMap.end()) {
+                    if ((it->second == 0) ||
+                            ((it->second & dynamicProfileBitmap) == dynamicProfileBitmap)) {
+                        continue;
+                    } else {
+                        ALOGE("%s: Camera %s: Tried to submit a request with a surfaces that"
+                                " reference an unsupported dynamic range profile combination"
+                                " 0x%x!", __FUNCTION__, mCameraIdStr.string(),
+                                dynamicProfileBitmap);
+                        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                                "Request targets an unsupported dynamic range profile"
+                                " combination");
+                    }
+                } else {
+                    ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
+                            " references unsupported dynamic range profile 0x%x!",
+                            __FUNCTION__, mCameraIdStr.string(), i);
+                    return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                            "Request targets 10-bit Surface with unsupported dynamic range"
+                            " profile");
+                }
             }
         }
 
@@ -379,6 +452,12 @@
             }
 
             String8 physicalId(it.id.c_str());
+            bool hasTestPatternModePhysicalKey = std::find(mSupportedPhysicalRequestKeys.begin(),
+                    mSupportedPhysicalRequestKeys.end(), ANDROID_SENSOR_TEST_PATTERN_MODE) !=
+                    mSupportedPhysicalRequestKeys.end();
+            bool hasTestPatternDataPhysicalKey = std::find(mSupportedPhysicalRequestKeys.begin(),
+                    mSupportedPhysicalRequestKeys.end(), ANDROID_SENSOR_TEST_PATTERN_DATA) !=
+                    mSupportedPhysicalRequestKeys.end();
             if (physicalId != mDevice->getId()) {
                 auto found = std::find(requestedPhysicalIds.begin(), requestedPhysicalIds.end(),
                         it.id);
@@ -404,7 +483,8 @@
                         }
                     }
 
-                    physicalSettingsList.push_back({it.id, filteredParams});
+                    physicalSettingsList.push_back({it.id, filteredParams,
+                            hasTestPatternModePhysicalKey, hasTestPatternDataPhysicalKey});
                 }
             } else {
                 physicalSettingsList.push_back({it.id, it.settings});
@@ -561,6 +641,7 @@
         offlineStreamIds->clear();
         mDevice->getOfflineStreamIds(offlineStreamIds);
 
+        Mutex::Autolock l(mCompositeLock);
         for (size_t i = 0; i < mCompositeStreamMap.size(); ++i) {
             err = mCompositeStreamMap.valueAt(i)->configureStream();
             if (err != OK) {
@@ -631,27 +712,9 @@
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
-    hardware::camera::device::V3_7::StreamConfiguration streamConfiguration;
-    bool earlyExit = false;
-    camera3::metadataGetter getMetadata = [this](const String8 &id, bool /*overrideForPerfClass*/) {
-          return mDevice->infoPhysical(id);};
-    std::vector<std::string> physicalCameraIds;
-    mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
-    res = SessionConfigurationUtils::convertToHALStreamCombination(sessionConfiguration,
-            mCameraIdStr, mDevice->info(), getMetadata, physicalCameraIds, streamConfiguration,
-            mOverrideForPerfClass, &earlyExit);
-    if (!res.isOk()) {
-        return res;
-    }
-
-    if (earlyExit) {
-        *status = false;
-        return binder::Status::ok();
-    }
-
     *status = false;
     ret = mProviderManager->isSessionConfigurationSupported(mCameraIdStr.string(),
-            streamConfiguration, status);
+            sessionConfiguration, mOverrideForPerfClass, status);
     switch (ret) {
         case OK:
             // Expected, do nothing.
@@ -713,6 +776,7 @@
             }
         }
 
+        Mutex::Autolock l(mCompositeLock);
         for (size_t i = 0; i < mCompositeStreamMap.size(); ++i) {
             if (streamId == mCompositeStreamMap.valueAt(i)->getStreamId()) {
                 compositeIndex = i;
@@ -751,6 +815,7 @@
             }
 
             if (compositeIndex != NAME_NOT_FOUND) {
+                Mutex::Autolock l(mCompositeLock);
                 status_t ret;
                 if ((ret = mCompositeStreamMap.valueAt(compositeIndex)->deleteStream())
                         != OK) {
@@ -794,6 +859,7 @@
     String8 physicalCameraId = String8(outputConfiguration.getPhysicalCameraId());
     bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
     bool isMultiResolution = outputConfiguration.isMultiResolution();
+    int dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
 
     res = SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
             outputConfiguration.getSurfaceType());
@@ -837,7 +903,7 @@
         sp<Surface> surface;
         res = SessionConfigurationUtils::createSurfaceFromGbp(streamInfo,
                 isStreamInfoValid, surface, bufferProducer, mCameraIdStr,
-                mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed);
+                mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed, dynamicRangeProfile);
 
         if (!res.isOk())
             return res;
@@ -873,6 +939,7 @@
                 &streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
                 outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution);
         if (err == OK) {
+            Mutex::Autolock l(mCompositeLock);
             mCompositeStreamMap.add(IInterface::asBinder(surfaces[0]->getIGraphicBufferProducer()),
                     compositeStream);
         }
@@ -881,7 +948,8 @@
                 streamInfo.height, streamInfo.format, streamInfo.dataSpace,
                 static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
                 &streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
-                outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution);
+                outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution,
+                streamInfo.dynamicRangeProfile);
     }
 
     if (err != OK) {
@@ -975,7 +1043,8 @@
             overriddenSensorPixelModesUsed,
             &surfaceIds,
             outputConfiguration.getSurfaceSetID(), isShared,
-            outputConfiguration.isMultiResolution(), consumerUsage);
+            outputConfiguration.isMultiResolution(), consumerUsage,
+            outputConfiguration.getDynamicRangeProfile());
 
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -988,7 +1057,8 @@
         mDeferredStreams.push_back(streamId);
         mStreamInfoMap.emplace(std::piecewise_construct, std::forward_as_tuple(streamId),
                 std::forward_as_tuple(width, height, format, dataSpace, consumerUsage,
-                        overriddenSensorPixelModesUsed));
+                        overriddenSensorPixelModesUsed,
+                        outputConfiguration.getDynamicRangeProfile()));
 
         ALOGV("%s: Camera %s: Successfully created a new stream ID %d for a deferred surface"
                 " (%d x %d) stream with format 0x%x.",
@@ -1177,12 +1247,14 @@
     const std::vector<int32_t> &sensorPixelModesUsed =
             outputConfiguration.getSensorPixelModesUsed();
 
+    int dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
+
     for (size_t i = 0; i < newOutputsMap.size(); i++) {
         OutputStreamInfo outInfo;
         sp<Surface> surface;
         res = SessionConfigurationUtils::createSurfaceFromGbp(outInfo,
                 /*isStreamInfoValid*/ false, surface, newOutputsMap.valueAt(i), mCameraIdStr,
-                mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed);
+                mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed, dynamicRangeProfile);
         if (!res.isOk())
             return res;
 
@@ -1539,6 +1611,7 @@
     std::vector<sp<Surface>> consumerSurfaces;
     const std::vector<int32_t> &sensorPixelModesUsed =
             outputConfiguration.getSensorPixelModesUsed();
+    int dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
     for (auto& bufferProducer : bufferProducers) {
         // Don't create multiple streams for the same target surface
         ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
@@ -1551,7 +1624,7 @@
         sp<Surface> surface;
         res = SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
                 true /*isStreamInfoValid*/, surface, bufferProducer, mCameraIdStr,
-                mDevice->infoPhysical(physicalId), sensorPixelModesUsed);
+                mDevice->infoPhysical(physicalId), sensorPixelModesUsed, dynamicRangeProfile);
 
         if (!res.isOk())
             return res;
@@ -1686,8 +1759,9 @@
             return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
         }
 
+        Mutex::Autolock l(mCompositeLock);
         bool isCompositeStream = false;
-        for (const auto& gbp : mConfiguredOutputs[streamId].getGraphicBufferProducers()) {
+        for (const auto& gbp : mConfiguredOutputs.valueAt(index).getGraphicBufferProducers()) {
             sp<Surface> s = new Surface(gbp, false /*controlledByApp*/);
             isCompositeStream = camera3::DepthCompositeStream::isDepthCompositeStream(s) |
                 camera3::HeicCompositeStream::isHeicCompositeStream(s);
@@ -1736,6 +1810,7 @@
         mConfiguredOutputs.clear();
         mDeferredStreams.clear();
         mStreamInfoMap.clear();
+        Mutex::Autolock l(mCompositeLock);
         mCompositeStreamMap.clear();
         mInputStream = {false, 0, 0, 0, 0};
     } else {
@@ -1797,16 +1872,50 @@
     return dumpDevice(fd, args);
 }
 
+status_t CameraDeviceClient::startWatchingTags(const String8 &tags, int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached.");
+        return OK;
+    }
+    device->startWatchingTags(tags);
+    return OK;
+}
+
+status_t CameraDeviceClient::stopWatchingTags(int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached.");
+        return OK;
+    }
+    device->stopWatchingTags();
+    return OK;
+}
+
+status_t CameraDeviceClient::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        return OK;
+    }
+    device->dumpWatchedEventsToVector(out);
+    return OK;
+}
+
 void CameraDeviceClient::notifyError(int32_t errorCode,
                                      const CaptureResultExtras& resultExtras) {
     // Thread safe. Don't bother locking.
     sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
 
-    // Composites can have multiple internal streams. Error notifications coming from such internal
-    // streams may need to remain within camera service.
     bool skipClientNotification = false;
-    for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
-        skipClientNotification |= mCompositeStreamMap.valueAt(i)->onError(errorCode, resultExtras);
+    {
+        // Access to the composite stream map must be synchronized
+        Mutex::Autolock l(mCompositeLock);
+        // Composites can have multiple internal streams. Error notifications coming from such
+        // internal streams may need to remain within camera service.
+        for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+            skipClientNotification |= mCompositeStreamMap.valueAt(i)->onError(errorCode,
+                    resultExtras);
+        }
     }
 
     if ((remoteCb != 0) && (!skipClientNotification)) {
@@ -1846,6 +1955,8 @@
     }
     Camera2ClientBase::notifyShutter(resultExtras, timestamp);
 
+    // Access to the composite stream map must be synchronized
+    Mutex::Autolock l(mCompositeLock);
     for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
         mCompositeStreamMap.valueAt(i)->onShutter(resultExtras, timestamp);
     }
@@ -1895,14 +2006,17 @@
         }
     }
 
-    for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
-        auto ret = mCompositeStreamMap.valueAt(i)->deleteInternalStreams();
-        if (ret != OK) {
-            ALOGE("%s: Failed removing composite stream  %s (%d)", __FUNCTION__,
-                    strerror(-ret), ret);
+    {
+        Mutex::Autolock l(mCompositeLock);
+        for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+            auto ret = mCompositeStreamMap.valueAt(i)->deleteInternalStreams();
+            if (ret != OK) {
+                ALOGE("%s: Failed removing composite stream  %s (%d)", __FUNCTION__,
+                        strerror(-ret), ret);
+            }
         }
+        mCompositeStreamMap.clear();
     }
-    mCompositeStreamMap.clear();
 
     Camera2ClientBase::detachDevice();
 
@@ -1922,6 +2036,8 @@
                 result.mPhysicalMetadatas);
     }
 
+    // Access to the composite stream map must be synchronized
+    Mutex::Autolock l(mCompositeLock);
     for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
         mCompositeStreamMap.valueAt(i)->onResultAvailable(result);
     }
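
Most of the CameraDeviceClient.cpp changes above wrap every access to mCompositeStreamMap in a Mutex::Autolock on mCompositeLock, so device callbacks cannot race against stream creation, deletion, or detach. A simplified standalone sketch of that pattern, using std::mutex and std::shared_ptr in place of the Android types:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <mutex>

    // Simplified stand-in for a composite stream; only the callback matters here.
    struct CompositeStream {
        void onShutter(int64_t /*timestampNs*/) {}
    };

    // Every reader (device callbacks) and writer (create/delete/detach) takes the
    // same lock, so a stream cannot be torn down while a callback iterates the map.
    class CompositeRegistry {
      public:
        void add(int streamId, std::shared_ptr<CompositeStream> stream) {
            std::lock_guard<std::mutex> lock(mLock);
            mStreams[streamId] = std::move(stream);
        }
        void removeAll() {
            std::lock_guard<std::mutex> lock(mLock);
            mStreams.clear();
        }
        void notifyShutter(int64_t timestampNs) {
            std::lock_guard<std::mutex> lock(mLock);   // same lock as the writers
            for (auto& entry : mStreams) {
                entry.second->onShutter(timestampNs);
            }
        }
      private:
        std::mutex mLock;
        std::map<int, std::shared_ptr<CompositeStream>> mStreams;
    };
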
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 76b3f53..1b0c61a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -22,6 +22,7 @@
 #include <camera/camera2/OutputConfiguration.h>
 #include <camera/camera2/SessionConfiguration.h>
 #include <camera/camera2/SubmitInfo.h>
+#include <unordered_map>
 
 #include "CameraOfflineSessionClient.h"
 #include "CameraService.h"
@@ -199,6 +200,10 @@
 
     virtual status_t      dumpClient(int fd, const Vector<String16>& args);
 
+    virtual status_t      startWatchingTags(const String8 &tags, int out);
+    virtual status_t      stopWatchingTags(int out);
+    virtual status_t      dumpWatchedEventsToVector(std::vector<std::string> &out);
+
     /**
      * Device listener interface
      */
@@ -299,6 +304,10 @@
     // Stream ID -> OutputConfiguration. Used for looking up Surface by stream/surface index
     KeyedVector<int32_t, hardware::camera2::params::OutputConfiguration> mConfiguredOutputs;
 
+    // Dynamic range profile id -> Supported dynamic profiles bitmap within a single capture
+    // request
+    std::unordered_map<int, int> mDynamicProfileMap;
+
     struct InputStreamConfiguration {
         bool configured;
         int32_t width;
@@ -330,6 +339,8 @@
     // set of high resolution camera id (logical / physical)
     std::unordered_set<std::string> mHighResolutionSensors;
 
+    // Synchronize access to 'mCompositeStreamMap'
+    Mutex mCompositeLock;
     KeyedVector<sp<IBinder>, sp<CompositeStream>> mCompositeStreamMap;
 
     sp<CameraProviderManager> mProviderManager;
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index ef15f2d..10fa33f 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -110,6 +110,18 @@
     return OK;
 }
 
+status_t CameraOfflineSessionClient::startWatchingTags(const String8 &tags, int outFd) {
+    return BasicClient::startWatchingTags(tags, outFd);
+}
+
+status_t CameraOfflineSessionClient::stopWatchingTags(int outFd) {
+    return BasicClient::stopWatchingTags(outFd);
+}
+
+status_t CameraOfflineSessionClient::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    return BasicClient::dumpWatchedEventsToVector(out);
+}
+
 binder::Status CameraOfflineSessionClient::disconnect() {
     Mutex::Autolock icl(mBinderSerializationLock);
 
@@ -330,5 +342,19 @@
                 CaptureResultExtras());
 }
 
+status_t CameraOfflineSessionClient::injectCamera(const String8& injectedCamId,
+            sp<CameraProviderManager> manager) {
+    ALOGV("%s: This client doesn't support the injection camera. injectedCamId: %s providerPtr: %p",
+            __FUNCTION__, injectedCamId.string(), manager.get());
+
+    return OK;
+}
+
+status_t CameraOfflineSessionClient::stopInjection() {
+    ALOGV("%s: This client doesn't support the injection camera.", __FUNCTION__);
+
+    return OK;
+}
+
 // ----------------------------------------------------------------------------
 }; // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index b219a4c..920a176 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -71,6 +71,10 @@
 
     status_t dumpClient(int /*fd*/, const Vector<String16>& /*args*/) override;
 
+    status_t startWatchingTags(const String8 &tags, int outFd) override;
+    status_t stopWatchingTags(int outFd) override;
+    status_t dumpWatchedEventsToVector(std::vector<std::string> &out) override;
+
     status_t initialize(sp<CameraProviderManager> /*manager*/,
             const String8& /*monitorTags*/) override;
 
@@ -98,6 +102,9 @@
     void notifyPrepared(int streamId) override;
     void notifyRequestQueueEmpty() override;
     void notifyRepeatingRequestError(long lastFrameNumber) override;
+    status_t injectCamera(const String8& injectedCamId,
+            sp<CameraProviderManager> manager) override;
+    status_t stopInjection() override;
 
 private:
     mutable Mutex mBinderSerializationLock;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 13d044a..55c7579 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -34,6 +34,7 @@
 #include "api2/CameraDeviceClient.h"
 
 #include "device3/Camera3Device.h"
+#include "device3/hidl/HidlCamera3Device.h"
 #include "utils/CameraThreadState.h"
 #include "utils/CameraServiceProxyWrapper.h"
 
@@ -55,20 +56,21 @@
         int clientPid,
         uid_t clientUid,
         int servicePid,
-        bool overrideForPerfClass):
+        bool overrideForPerfClass,
+        bool legacyClient):
         TClientBase(cameraService, remoteCallback, clientPackageName, clientFeatureId,
                 cameraId, api1CameraId, cameraFacing, sensorOrientation, clientPid, clientUid,
                 servicePid),
         mSharedCameraCallbacks(remoteCallback),
         mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
-        mDevice(new Camera3Device(cameraId, overrideForPerfClass)),
         mDeviceActive(false), mApi1CameraId(api1CameraId)
 {
     ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
             String8(clientPackageName).string(), clientPid, clientUid);
 
     mInitialClientPid = clientPid;
-    LOG_ALWAYS_FATAL_IF(mDevice == 0, "Device should never be NULL here.");
+    mOverrideForPerfClass = overrideForPerfClass;
+    mLegacyClient = legacyClient;
 }
 
 template <typename TClientBase>
@@ -103,7 +105,26 @@
     if (res != OK) {
         return res;
     }
-
+    IPCTransport providerTransport = IPCTransport::INVALID;
+    res = providerPtr->getCameraIdIPCTransport(TClientBase::mCameraIdStr.string(),
+            &providerTransport);
+    if (res != OK) {
+        return res;
+    }
+    switch (providerTransport) {
+        case IPCTransport::HIDL:
+            mDevice =
+                    new HidlCamera3Device(TClientBase::mCameraIdStr, mOverrideForPerfClass,
+                            mLegacyClient);
+            break;
+        case IPCTransport::AIDL:
+            ALOGE("%s: AIDL camera3Devices not available yet", __FUNCTION__);
+            return NO_INIT;
+        default:
+            ALOGE("%s Invalid transport for camera id %s", __FUNCTION__,
+                    TClientBase::mCameraIdStr.string());
+            return NO_INIT;
+    }
     if (mDevice == NULL) {
         ALOGE("%s: Camera %s: No device connected",
                 __FUNCTION__, TClientBase::mCameraIdStr.string());
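
initializeImpl() now creates the concrete device object based on the provider's IPC transport instead of unconditionally constructing a Camera3Device; only the HIDL path is wired up in this change. A minimal sketch of such a transport-keyed factory (simplified stand-in types, not the actual class hierarchy):

    #include <memory>

    enum class IPCTransport { INVALID, HIDL, AIDL };

    struct Device {
        virtual ~Device() = default;
    };
    struct HidlDevice : Device {};

    // Returns nullptr when no device can be created, which the caller maps to an
    // initialization error (NO_INIT in the code above).
    std::unique_ptr<Device> makeDevice(IPCTransport transport) {
        switch (transport) {
            case IPCTransport::HIDL:
                return std::make_unique<HidlDevice>();
            case IPCTransport::AIDL:
                // Not supported yet in this change.
                return nullptr;
            default:
                return nullptr;
        }
    }
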
@@ -155,6 +176,38 @@
 }
 
 template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::startWatchingTags(const String8 &tags, int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached");
+        return OK;
+    }
+
+    return device->startWatchingTags(tags);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::stopWatchingTags(int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached");
+        return OK;
+    }
+
+    return device->stopWatchingTags();
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        // Nothing to dump if the device is detached
+        return OK;
+    }
+    return device->dumpWatchedEventsToVector(out);
+}
+
+template <typename TClientBase>
 status_t Camera2ClientBase<TClientBase>::dumpDevice(
                                                 int fd,
                                                 const Vector<String16>& args) {
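
The watch-tag helpers above first copy mDevice into a local sp<> and bail out gracefully if the device has already been detached, so they never dereference a cleared pointer. A standalone sketch of that pattern with std::shared_ptr standing in for android::sp (names illustrative):

    #include <cstdio>
    #include <memory>
    #include <string>

    // Simplified stand-in for CameraDeviceBase.
    struct Device {
        void startWatchingTags(const std::string& /*tags*/) {}
    };

    // Take a local strong reference before use; if the device was already detached
    // there is nothing to watch, so report that and return success.
    int startWatchingTags(const std::shared_ptr<Device>& currentDevice,
                          const std::string& tags, std::FILE* out) {
        std::shared_ptr<Device> device = currentDevice;  // local ref keeps it alive
        if (!device) {
            std::fprintf(out, "  Device is detached\n");
            return 0;  // treated as OK
        }
        device->startWatchingTags(tags);
        return 0;
    }
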
@@ -413,6 +466,17 @@
     mRemoteCallback.clear();
 }
 
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::injectCamera(const String8& injectedCamId,
+        sp<CameraProviderManager> manager) {
+    return mDevice->injectCamera(injectedCamId, manager);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::stopInjection() {
+    return mDevice->stopInjection();
+}
+
 template class Camera2ClientBase<CameraService::Client>;
 template class Camera2ClientBase<CameraDeviceClientBase>;
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 6246f7b..296ef43 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -56,11 +56,15 @@
                       int clientPid,
                       uid_t clientUid,
                       int servicePid,
-                      bool overrideForPerfClass);
+                      bool overrideForPerfClass,
+                      bool legacyClient = false);
     virtual ~Camera2ClientBase();
 
     virtual status_t      initialize(sp<CameraProviderManager> manager, const String8& monitorTags);
     virtual status_t      dumpClient(int fd, const Vector<String16>& args);
+    virtual status_t      startWatchingTags(const String8 &tags, int out);
+    virtual status_t      stopWatchingTags(int out);
+    virtual status_t      dumpWatchedEventsToVector(std::vector<std::string> &out);
 
     /**
      * NotificationListener implementation
@@ -114,10 +118,16 @@
         mutable Mutex mRemoteCallbackLock;
     } mSharedCameraCallbacks;
 
+    status_t      injectCamera(const String8& injectedCamId,
+                               sp<CameraProviderManager> manager) override;
+    status_t      stopInjection() override;
+
 protected:
 
     // The PID provided in the constructor call
     pid_t mInitialClientPid;
+    bool mOverrideForPerfClass = false;
+    bool mLegacyClient = false;
 
     virtual sp<IBinder> asBinderWrapper() {
         return IInterface::asBinder(this);
@@ -137,9 +147,12 @@
 
     const int mDeviceVersion;
 
-    // Set to const to avoid mDevice being updated (update of sp<> is racy) during
-    // dumpDevice (which is important to be lock free for debugging purpose)
-    const sp<CameraDeviceBase>  mDevice;
+    // Note: This was previously const to prevent racy updates of mDevice (sp<> updates
+    // are racy - b/112639939) during dumpDevice, which needs to stay lock-free for
+    // debugging purposes. The const was removed because the device now needs to be set
+    // during initializeImpl(). It must not be set or cleared anywhere else.
+    sp<CameraDeviceBase>  mDevice;
 
     /** Utility members */
 
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 85b0cc2..e936cb6 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -97,15 +97,29 @@
     virtual status_t disconnect() = 0;
 
     virtual status_t dump(int fd, const Vector<String16> &args) = 0;
+    virtual status_t startWatchingTags(const String8 &tags) = 0;
+    virtual status_t stopWatchingTags() = 0;
+    virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out) = 0;
 
     /**
-     * The physical camera device's static characteristics metadata buffer
+     * The physical camera device's static characteristics metadata buffer, or
+     * the logical camera's static characteristics if physical id is empty.
      */
     virtual const CameraMetadata& infoPhysical(const String8& physicalId) const = 0;
 
     struct PhysicalCameraSettings {
         std::string cameraId;
         CameraMetadata metadata;
+
+        // Whether the physical camera supports testPatternMode/testPatternData
+        bool mHasTestPatternModeTag = true;
+        bool mHasTestPatternDataTag = true;
+
+        // Original value of TEST_PATTERN_MODE and DATA so that they can be
+        // restored when sensor muting is turned off
+        int32_t mOriginalTestPatternMode = 0;
+        int32_t mOriginalTestPatternData[4] = {};
+
     };
     typedef List<PhysicalCameraSettings> PhysicalCameraSettingsList;
 
@@ -168,7 +182,8 @@
             std::vector<int> *surfaceIds = nullptr,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             bool isShared = false, bool isMultiResolution = false,
-            uint64_t consumerUsage = 0) = 0;
+            uint64_t consumerUsage = 0,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) = 0;
 
     /**
      * Create an output stream of the requested size, format, rotation and
@@ -185,7 +200,8 @@
             std::vector<int> *surfaceIds = nullptr,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             bool isShared = false, bool isMultiResolution = false,
-            uint64_t consumerUsage = 0) = 0;
+            uint64_t consumerUsage = 0,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) = 0;
 
     /**
      * Create an input stream of width, height, and format.
@@ -206,10 +222,12 @@
         android_dataspace dataSpace;
         bool dataSpaceOverridden;
         android_dataspace originalDataSpace;
+        uint32_t dynamicRangeProfile;
 
         StreamInfo() : width(0), height(0), format(0), formatOverridden(false), originalFormat(0),
                 dataSpace(HAL_DATASPACE_UNKNOWN), dataSpaceOverridden(false),
-                originalDataSpace(HAL_DATASPACE_UNKNOWN) {}
+                originalDataSpace(HAL_DATASPACE_UNKNOWN),
+                dynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD){}
         /**
          * Check whether the format matches the current or the original one in case
          * it got overridden.
@@ -294,7 +312,8 @@
      * Get Jpeg buffer size for a given jpeg resolution.
      * Negative values are error codes.
      */
-    virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const = 0;
+    virtual ssize_t getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+            uint32_t height) const = 0;
 
     /**
      * Connect HAL notifications to a listener. Overwrites previous
@@ -427,6 +446,18 @@
      */
     void setImageDumpMask(int mask) { mImageDumpMask = mask; }
 
+    /**
+     * Set up an injection camera session that replaces the internal camera
+     * session.
+     */
+    virtual status_t injectCamera(const String8& injectedCamId,
+            sp<CameraProviderManager> manager) = 0;
+
+    /**
+     * Stop the injection camera and restore to internal camera session.
+     */
+    virtual status_t stopInjection() = 0;
+
 protected:
     bool mImageDumpMask = 0;
 };
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 4f2b878..4227d28 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -20,11 +20,12 @@
 
 #include "CameraProviderManager.h"
 
-#include <android/hardware/camera/device/3.7/ICameraDevice.h>
+#include <android/hardware/camera/device/3.8/ICameraDevice.h>
 
 #include <algorithm>
 #include <chrono>
 #include "common/DepthPhotoProcessor.h"
+#include "hidl/HidlProviderInfo.h"
 #include <dlfcn.h>
 #include <future>
 #include <inttypes.h>
@@ -45,26 +46,27 @@
 namespace android {
 
 using namespace ::android::hardware::camera;
-using namespace ::android::hardware::camera::common::V1_0;
-using camera3::SessionConfigurationUtils;
+using namespace ::android::camera3;
+using android::hardware::camera::common::V1_0::Status;
+using namespace camera3::SessionConfigurationUtils;
 using std::literals::chrono_literals::operator""s;
 using hardware::camera2::utils::CameraIdAndSessionConfiguration;
-using hardware::camera::provider::V2_7::CameraIdAndStreamCombination;
 
 namespace {
 const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
+const std::string kExternalProviderName = "external/0";
 } // anonymous namespace
 
 const float CameraProviderManager::kDepthARTolerance = .1f;
 
-CameraProviderManager::HardwareServiceInteractionProxy
-CameraProviderManager::sHardwareServiceInteractionProxy{};
+CameraProviderManager::HidlServiceInteractionProxyImpl
+CameraProviderManager::sHidlServiceInteractionProxy{};
 
 CameraProviderManager::~CameraProviderManager() {
 }
 
 hardware::hidl_vec<hardware::hidl_string>
-CameraProviderManager::HardwareServiceInteractionProxy::listServices() {
+CameraProviderManager::HidlServiceInteractionProxyImpl::listServices() {
     hardware::hidl_vec<hardware::hidl_string> ret;
     auto manager = hardware::defaultServiceManager1_2();
     if (manager != nullptr) {
@@ -77,19 +79,18 @@
 }
 
 status_t CameraProviderManager::initialize(wp<CameraProviderManager::StatusListener> listener,
-        ServiceInteractionProxy* proxy) {
+        HidlServiceInteractionProxy* hidlProxy) {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
-    if (proxy == nullptr) {
+    if (hidlProxy == nullptr) {
         ALOGE("%s: No valid service interaction proxy provided", __FUNCTION__);
         return BAD_VALUE;
     }
     mListener = listener;
-    mServiceProxy = proxy;
-    mDeviceState = static_cast<hardware::hidl_bitfield<provider::V2_5::DeviceState>>(
-        provider::V2_5::DeviceState::NORMAL);
+    mHidlServiceProxy = hidlProxy;
+    mDeviceState = 0;
 
     // Registering will trigger notifications for all already-known providers
-    bool success = mServiceProxy->registerForNotifications(
+    bool success = mHidlServiceProxy->registerForNotifications(
         /* instance name, empty means no filter */ "",
         this);
     if (!success) {
@@ -98,9 +99,8 @@
         return INVALID_OPERATION;
     }
 
-
-    for (const auto& instance : mServiceProxy->listServices()) {
-        this->addProviderLocked(instance);
+    for (const auto& instance : mHidlServiceProxy->listServices()) {
+        this->addHidlProviderLocked(instance);
     }
 
     IPCThreadState::self()->flushCommands();
@@ -267,7 +267,7 @@
 }
 
 status_t CameraProviderManager::isSessionConfigurationSupported(const std::string& id,
-        const hardware::camera::device::V3_7::StreamConfiguration &configuration,
+        const SessionConfiguration &configuration, bool overrideForPerfClass,
         bool *status /*out*/) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     auto deviceInfo = findDeviceInfoLocked(id);
@@ -275,7 +275,22 @@
         return NAME_NOT_FOUND;
     }
 
-    return deviceInfo->isSessionConfigurationSupported(configuration, status);
+    return deviceInfo->isSessionConfigurationSupported(configuration, overrideForPerfClass, status);
+}
+
+status_t CameraProviderManager::getCameraIdIPCTransport(const std::string &id,
+        IPCTransport *providerTransport) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) {
+        return NAME_NOT_FOUND;
+    }
+    sp<ProviderInfo> parentProvider = deviceInfo->mParentProvider.promote();
+    if (parentProvider == nullptr) {
+        return DEAD_OBJECT;
+    }
+    *providerTransport = parentProvider->getIPCTransport();
+    return OK;
 }
 
 status_t CameraProviderManager::getCameraCharacteristics(const std::string &id,
@@ -284,6 +299,9 @@
     return getCameraCharacteristicsLocked(id, overrideForPerfClass, characteristics);
 }
 
+// Until HIDL is removed from the Android source tree, we use this for AIDL as
+// well. We artificially give AIDL camera device version 1 a major version of 3
+// and a minor version of 8.
 status_t CameraProviderManager::getHighestSupportedVersion(const std::string &id,
         hardware::hidl_version *v) {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -307,6 +325,50 @@
     return OK;
 }
 
+status_t CameraProviderManager::getTorchStrengthLevel(const std::string &id,
+        int32_t* torchStrength /*out*/) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->getTorchStrengthLevel(torchStrength);
+}
+
+status_t CameraProviderManager::turnOnTorchWithStrengthLevel(const std::string &id,
+        int32_t torchStrength) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->turnOnTorchWithStrengthLevel(torchStrength);
+}
+
+bool CameraProviderManager::shouldSkipTorchStrengthUpdate(const std::string &id,
+        int32_t torchStrength) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    if (deviceInfo->mTorchStrengthLevel == torchStrength) {
+        ALOGV("%s: Skipping torch strength level updates prev_level: %d, new_level: %d",
+                __FUNCTION__, deviceInfo->mTorchStrengthLevel, torchStrength);
+        return true;
+    }
+    return false;
+}
+
+int32_t CameraProviderManager::getTorchDefaultStrengthLevel(const std::string &id) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->mTorchDefaultStrengthLevel;
+}
+
 bool CameraProviderManager::supportSetTorchMode(const std::string &id) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
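
The torch-strength additions above look up the device, skip updates that would not change the current level, and expose the default level. A small standalone sketch of the skip check with a plain map in place of the provider/device bookkeeping (names hypothetical):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    // Simplified per-camera torch bookkeeping (illustrative names).
    struct TorchInfo {
        int32_t currentLevel = 1;
        int32_t defaultLevel = 1;
    };

    // Skip an update when the camera is unknown or the level would not change,
    // so no HAL call is issued for a no-op request.
    bool shouldSkipTorchStrengthUpdate(
            const std::unordered_map<std::string, TorchInfo>& devices,
            const std::string& cameraId, int32_t newLevel) {
        const auto it = devices.find(cameraId);
        if (it == devices.end()) return true;
        return it->second.currentLevel == newLevel;
    }
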
     for (auto& provider : mProviders) {
@@ -330,11 +392,25 @@
     if (parentProvider == nullptr) {
         return DEAD_OBJECT;
     }
-    const sp<provider::V2_4::ICameraProvider> interface = parentProvider->startProviderInterface();
-    if (interface == nullptr) {
-        return DEAD_OBJECT;
+    std::shared_ptr<HalCameraProvider> halCameraProvider = nullptr;
+    IPCTransport providerTransport = parentProvider->getIPCTransport();
+    if (providerTransport == IPCTransport::HIDL) {
+        HidlProviderInfo * hidlProviderInfo = static_cast<HidlProviderInfo *>(parentProvider.get());
+        const sp<provider::V2_4::ICameraProvider> hidlInterface =
+                hidlProviderInfo->startProviderInterface();
+        if (hidlInterface == nullptr) {
+            return DEAD_OBJECT;
+        }
+        halCameraProvider =
+                std::make_shared<HidlHalCameraProvider>(hidlInterface, hidlInterface->descriptor);
+    } else if (providerTransport == IPCTransport::AIDL) {
+        ALOGE("%s AIDL hal providers not supported yet", __FUNCTION__);
+        return INVALID_OPERATION;
+    } else {
+        ALOGE("%s Invalid provider transport", __FUNCTION__);
+        return INVALID_OPERATION;
     }
-    saveRef(DeviceMode::TORCH, deviceInfo->mId, interface);
+    saveRef(DeviceMode::TORCH, deviceInfo->mId, halCameraProvider);
 
     return deviceInfo->setTorchMode(enabled);
 }
@@ -351,15 +427,85 @@
     return OK;
 }
 
-status_t CameraProviderManager::notifyDeviceStateChange(
-        hardware::hidl_bitfield<provider::V2_5::DeviceState> newState) {
+sp<CameraProviderManager::ProviderInfo> CameraProviderManager::startExternalLazyProvider() const {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    for (const auto& providerInfo : mProviders) {
+        if (providerInfo->isExternalLazyHAL()) {
+            if (!providerInfo->successfullyStartedProviderInterface()) {
+                return nullptr;
+            } else {
+                return providerInfo;
+            }
+        }
+    }
+    return nullptr;
+}
+
+status_t CameraProviderManager::notifyUsbDeviceEvent(int32_t eventId,
+                                                     const std::string& usbDeviceId) {
+    if (!kEnableLazyHal) {
+        return OK;
+    }
+
+    ALOGV("notifySystemEvent: %d usbDeviceId : %s", eventId, usbDeviceId.c_str());
+
+    if (eventId == android::hardware::ICameraService::EVENT_USB_DEVICE_ATTACHED) {
+        sp<ProviderInfo> externalProvider = startExternalLazyProvider();
+        if (externalProvider != nullptr) {
+            auto usbDevices = mExternalUsbDevicesForProvider.first;
+            usbDevices.push_back(usbDeviceId);
+            mExternalUsbDevicesForProvider = {usbDevices, externalProvider};
+        }
+    } else if (eventId
+          == android::hardware::ICameraService::EVENT_USB_DEVICE_DETACHED) {
+        usbDeviceDetached(usbDeviceId);
+    }
+
+    return OK;
+}
+
+status_t CameraProviderManager::usbDeviceDetached(const std::string &usbDeviceId) {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
+    std::lock_guard<std::mutex> interfaceLock(mInterfaceMutex);
+
+    auto usbDevices = mExternalUsbDevicesForProvider.first;
+    auto foundId = std::find(usbDevices.begin(), usbDevices.end(), usbDeviceId);
+    if (foundId != usbDevices.end()) {
+        sp<ProviderInfo> providerInfo = mExternalUsbDevicesForProvider.second;
+        if (providerInfo == nullptr) {
+            ALOGE("%s No valid external provider for USB device: %s",
+                    __FUNCTION__, usbDeviceId.c_str());
+            mExternalUsbDevicesForProvider = {std::vector<std::string>(), nullptr};
+            return DEAD_OBJECT;
+        } else {
+            mInterfaceMutex.unlock();
+            providerInfo->removeAllDevices();
+            mInterfaceMutex.lock();
+            mExternalUsbDevicesForProvider = {std::vector<std::string>(), nullptr};
+        }
+    } else {
+        return DEAD_OBJECT;
+    }
+    return OK;
+}
+
+status_t CameraProviderManager::notifyDeviceStateChange(int64_t newState) {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     mDeviceState = newState;
     status_t res = OK;
     for (auto& provider : mProviders) {
         ALOGV("%s: Notifying %s for new state 0x%" PRIx64,
                 __FUNCTION__, provider->mProviderName.c_str(), newState);
+        // b/199240726 Camera providers can for example try to add/remove
+        // camera devices as part of the state change notification. Holding
+        // 'mInterfaceMutex' while calling 'notifyDeviceStateChange' can
+        // result in a recursive deadlock.
+        mInterfaceMutex.unlock();
         status_t singleRes = provider->notifyDeviceStateChange(mDeviceState);
+        mInterfaceMutex.lock();
         if (singleRes != OK) {
             ALOGE("%s: Unable to notify provider %s about device state change",
                     __FUNCTION__,
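
notifyDeviceStateChange() above drops mInterfaceMutex around each provider callback because the callback may re-enter the manager and try to take the same lock (see b/199240726). The standalone sketch below shows one way to express that idea; it snapshots the callbacks under the lock and invokes them afterwards, which avoids the deadlock without unlocking mid-iteration, so it is a variation on, not a copy of, the code above:

    #include <cstdint>
    #include <functional>
    #include <mutex>
    #include <vector>

    // The callbacks are snapshotted under the lock and notified afterwards, so a
    // callback that re-enters and takes the lock again cannot deadlock.
    class DeviceStateNotifier {
      public:
        void addCallback(std::function<void(int64_t)> cb) {
            std::lock_guard<std::mutex> lock(mLock);
            mCallbacks.push_back(std::move(cb));
        }
        void notifyAll(int64_t newState) {
            std::vector<std::function<void(int64_t)>> snapshot;
            {
                std::lock_guard<std::mutex> lock(mLock);
                mState = newState;
                snapshot = mCallbacks;  // copy so the lock is not held during the calls
            }
            for (auto& cb : snapshot) {
                cb(newState);           // may call back into this object safely
            }
        }
      private:
        std::mutex mLock;
        int64_t mState = 0;
        std::vector<std::function<void(int64_t)>> mCallbacks;
    };
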
@@ -367,11 +513,12 @@
             res = singleRes;
             // continue to do the rest of the providers instead of returning now
         }
+        provider->notifyDeviceInfoStateChangeLocked(mDeviceState);
     }
     return res;
 }
 
-status_t CameraProviderManager::openSession(const std::string &id,
+status_t CameraProviderManager::openHidlSession(const std::string &id,
         const sp<device::V3_2::ICameraDeviceCallback>& callback,
         /*out*/
         sp<device::V3_2::ICameraDeviceSession> *session) {
@@ -382,21 +529,23 @@
             /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
     if (deviceInfo == nullptr) return NAME_NOT_FOUND;
 
-    auto *deviceInfo3 = static_cast<ProviderInfo::DeviceInfo3*>(deviceInfo);
+    auto *hidlDeviceInfo3 = static_cast<HidlProviderInfo::HidlDeviceInfo3*>(deviceInfo);
     sp<ProviderInfo> parentProvider = deviceInfo->mParentProvider.promote();
     if (parentProvider == nullptr) {
         return DEAD_OBJECT;
     }
-    const sp<provider::V2_4::ICameraProvider> provider = parentProvider->startProviderInterface();
+    const sp<provider::V2_4::ICameraProvider> provider =
+            static_cast<HidlProviderInfo *>(parentProvider.get())->startProviderInterface();
     if (provider == nullptr) {
         return DEAD_OBJECT;
     }
-    saveRef(DeviceMode::CAMERA, id, provider);
+    std::shared_ptr<HalCameraProvider> halCameraProvider =
+            std::make_shared<HidlHalCameraProvider>(provider, provider->descriptor);
+    saveRef(DeviceMode::CAMERA, id, halCameraProvider);
 
     Status status;
     hardware::Return<void> ret;
-    auto interface = deviceInfo3->startDeviceInterface<
-            CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+    auto interface = hidlDeviceInfo3->startDeviceInterface();
     if (interface == nullptr) {
         return DEAD_OBJECT;
     }
@@ -414,17 +563,18 @@
                 __FUNCTION__, id.c_str(), ret.description().c_str());
         return DEAD_OBJECT;
     }
-    return mapToStatusT(status);
+    return HidlProviderInfo::mapToStatusT(status);
 }
 
 void CameraProviderManager::saveRef(DeviceMode usageType, const std::string &cameraId,
-        sp<provider::V2_4::ICameraProvider> provider) {
+        std::shared_ptr<HalCameraProvider> provider) {
     if (!kEnableLazyHal) {
         return;
     }
-    ALOGV("Saving camera provider %s for camera device %s", provider->descriptor, cameraId.c_str());
+    ALOGV("Saving camera provider %s for camera device %s", provider->mDescriptor.c_str(),
+              cameraId.c_str());
     std::lock_guard<std::mutex> lock(mProviderInterfaceMapLock);
-    std::unordered_map<std::string, sp<provider::V2_4::ICameraProvider>> *primaryMap, *alternateMap;
+    std::unordered_map<std::string, std::shared_ptr<HalCameraProvider>> *primaryMap, *alternateMap;
     if (usageType == DeviceMode::TORCH) {
         primaryMap = &mTorchProviderByCameraId;
         alternateMap = &mCameraProviderByCameraId;
@@ -447,7 +597,7 @@
         return;
     }
     ALOGV("Removing camera device %s", cameraId.c_str());
-    std::unordered_map<std::string, sp<provider::V2_4::ICameraProvider>> *providerMap;
+    std::unordered_map<std::string, std::shared_ptr<HalCameraProvider>> *providerMap;
     if (usageType == DeviceMode::TORCH) {
         providerMap = &mTorchProviderByCameraId;
     } else {
@@ -481,7 +631,7 @@
     {
         std::lock_guard<std::mutex> lock(mInterfaceMutex);
 
-        res = addProviderLocked(name, preexisting);
+        res = addHidlProviderLocked(name, preexisting);
     }
 
     sp<StatusListener> listener = getStatusListener();
@@ -1185,10 +1335,12 @@
 }
 
 bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
     return isHiddenPhysicalCameraInternal(cameraId).first;
 }
 
 status_t CameraProviderManager::filterSmallJpegSizes(const std::string& cameraId) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
     for (auto& provider : mProviders) {
         for (auto& deviceInfo : provider->mDevices) {
             if (deviceInfo->mId == cameraId) {
@@ -1235,10 +1387,10 @@
     return falseRet;
 }
 
-status_t CameraProviderManager::tryToInitializeProviderLocked(
+status_t CameraProviderManager::tryToInitializeHidlProviderLocked(
         const std::string& providerName, const sp<ProviderInfo>& providerInfo) {
     sp<provider::V2_4::ICameraProvider> interface;
-    interface = mServiceProxy->tryGetService(providerName);
+    interface = mHidlServiceProxy->tryGetService(providerName);
 
     if (interface == nullptr) {
         // The interface may not be started yet. In that case, this is not a
@@ -1248,10 +1400,11 @@
         return BAD_VALUE;
     }
 
-    return providerInfo->initialize(interface, mDeviceState);
+    HidlProviderInfo *hidlProviderInfo = static_cast<HidlProviderInfo *>(providerInfo.get());
+    return hidlProviderInfo->initializeHidlProvider(interface, mDeviceState);
 }
 
-status_t CameraProviderManager::addProviderLocked(const std::string& newProvider,
+status_t CameraProviderManager::addHidlProviderLocked(const std::string& newProvider,
         bool preexisting) {
     // Several camera provider instances can be temporarily present.
     // Defer initialization of a new instance until the older instance is properly removed.
@@ -1261,9 +1414,10 @@
         if (providerInfo->mProviderName == newProvider) {
             ALOGW("%s: Camera provider HAL with name '%s' already registered",
                     __FUNCTION__, newProvider.c_str());
-            if (preexisting) {
+            // Do not add new instances for the external lazy HAL provider
+            if (preexisting || providerInfo->isExternalLazyHAL()) {
                 return ALREADY_EXISTS;
-            } else{
+            } else {
                 ALOGW("%s: The new provider instance will get initialized immediately after the"
                         " currently present instance is removed!", __FUNCTION__);
                 providerPresent = true;
@@ -1272,9 +1426,9 @@
         }
     }
 
-    sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, providerInstance, this);
+    sp<HidlProviderInfo> providerInfo = new HidlProviderInfo(newProvider, providerInstance, this);
     if (!providerPresent) {
-        status_t res = tryToInitializeProviderLocked(newProvider, providerInfo);
+        status_t res = tryToInitializeHidlProviderLocked(newProvider, providerInfo);
         if (res != OK) {
             return res;
         }
@@ -1312,7 +1466,13 @@
         // initialize.
         for (const auto& providerInfo : mProviders) {
             if (providerInfo->mProviderName == removedProviderName) {
-                return tryToInitializeProviderLocked(removedProviderName, providerInfo);
+                IPCTransport providerTransport = providerInfo->getIPCTransport();
+                switch(providerTransport) {
+                    case IPCTransport::HIDL:
+                        return tryToInitializeHidlProviderLocked(removedProviderName, providerInfo);
+                    default:
+                        ALOGE("%s Unsupported Transport %d", __FUNCTION__, providerTransport);
+                }
             }
         }
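
The retry path above now switches on the provider's IPC transport and only re-initializes
HIDL providers here, presumably leaving other transports (such as AIDL) to be handled
elsewhere. A simplified, self-contained sketch of that dispatch with a hypothetical
Transport enum and initializer; unlike the patch, which logs and falls through, this
version returns an explicit error for unsupported transports:

    #include <cstdio>

    enum class Transport { HIDL, AIDL };

    // Hypothetical per-transport initializer standing in for tryToInitializeHidlProviderLocked().
    static int initializeHidlProvider() { return 0; }

    static int reinitializeProvider(Transport transport) {
        switch (transport) {
            case Transport::HIDL:
                return initializeHidlProvider();
            default:
                std::fprintf(stderr, "unsupported transport %d\n", static_cast<int>(transport));
                return -1;
        }
    }
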
 
@@ -1334,7 +1494,6 @@
 sp<CameraProviderManager::StatusListener> CameraProviderManager::getStatusListener() const {
     return mListener.promote();
 }
-
 /**** Methods for ProviderInfo ****/
 
 
@@ -1350,321 +1509,60 @@
     (void) mManager;
 }
 
-status_t CameraProviderManager::ProviderInfo::initialize(
-        sp<provider::V2_4::ICameraProvider>& interface,
-        hardware::hidl_bitfield<provider::V2_5::DeviceState> currentDeviceState) {
-    status_t res = parseProviderName(mProviderName, &mType, &mId);
-    if (res != OK) {
-        ALOGE("%s: Invalid provider name, ignoring", __FUNCTION__);
-        return BAD_VALUE;
-    }
-    ALOGI("Connecting to new camera provider: %s, isRemote? %d",
-            mProviderName.c_str(), interface->isRemote());
-
-    // Determine minor version
-    mMinorVersion = 4;
-    auto cast2_6 = provider::V2_6::ICameraProvider::castFrom(interface);
-    sp<provider::V2_6::ICameraProvider> interface2_6 = nullptr;
-    if (cast2_6.isOk()) {
-        interface2_6 = cast2_6;
-        if (interface2_6 != nullptr) {
-            mMinorVersion = 6;
-        }
-    }
-    // We need to check again since cast2_6.isOk() succeeds even if the provider
-    // version isn't actually 2.6.
-    if (interface2_6 == nullptr){
-        auto cast2_5 =
-                provider::V2_5::ICameraProvider::castFrom(interface);
-        sp<provider::V2_5::ICameraProvider> interface2_5 = nullptr;
-        if (cast2_5.isOk()) {
-            interface2_5 = cast2_5;
-            if (interface != nullptr) {
-                mMinorVersion = 5;
-            }
-        }
-    } else {
-        auto cast2_7 = provider::V2_7::ICameraProvider::castFrom(interface);
-        if (cast2_7.isOk()) {
-            sp<provider::V2_7::ICameraProvider> interface2_7 = cast2_7;
-            if (interface2_7 != nullptr) {
-                mMinorVersion = 7;
-            }
-        }
-    }
-
-    // cameraDeviceStatusChange callbacks may be called (and causing new devices added)
-    // before setCallback returns
-    hardware::Return<Status> status = interface->setCallback(this);
-    if (!status.isOk()) {
-        ALOGE("%s: Transaction error setting up callbacks with camera provider '%s': %s",
-                __FUNCTION__, mProviderName.c_str(), status.description().c_str());
-        return DEAD_OBJECT;
-    }
-    if (status != Status::OK) {
-        ALOGE("%s: Unable to register callbacks with camera provider '%s'",
-                __FUNCTION__, mProviderName.c_str());
-        return mapToStatusT(status);
-    }
-
-    hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
-    if (!linked.isOk()) {
-        ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
-                __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
-        return DEAD_OBJECT;
-    } else if (!linked) {
-        ALOGW("%s: Unable to link to provider '%s' death notifications",
-                __FUNCTION__, mProviderName.c_str());
-    }
-
-    if (!kEnableLazyHal) {
-        // Save HAL reference indefinitely
-        mSavedInterface = interface;
-    } else {
-        mActiveInterface = interface;
-    }
-
-    ALOGV("%s: Setting device state for %s: 0x%" PRIx64,
-            __FUNCTION__, mProviderName.c_str(), mDeviceState);
-    notifyDeviceStateChange(currentDeviceState);
-
-    res = setUpVendorTags();
-    if (res != OK) {
-        ALOGE("%s: Unable to set up vendor tags from provider '%s'",
-                __FUNCTION__, mProviderName.c_str());
-        return res;
-    }
-
-    // Get initial list of camera devices, if any
-    std::vector<std::string> devices;
-    hardware::Return<void> ret = interface->getCameraIdList([&status, this, &devices](
-            Status idStatus,
-            const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames) {
-        status = idStatus;
-        if (status == Status::OK) {
-            for (auto& name : cameraDeviceNames) {
-                uint16_t major, minor;
-                std::string type, id;
-                status_t res = parseDeviceName(name, &major, &minor, &type, &id);
-                if (res != OK) {
-                    ALOGE("%s: Error parsing deviceName: %s: %d", __FUNCTION__, name.c_str(), res);
-                    status = Status::INTERNAL_ERROR;
-                } else {
-                    devices.push_back(name);
-                    mProviderPublicCameraIds.push_back(id);
-                }
-            }
-        } });
-    if (!ret.isOk()) {
-        ALOGE("%s: Transaction error in getting camera ID list from provider '%s': %s",
-                __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
-        return DEAD_OBJECT;
-    }
-    if (status != Status::OK) {
-        ALOGE("%s: Unable to query for camera devices from provider '%s'",
-                __FUNCTION__, mProviderName.c_str());
-        return mapToStatusT(status);
-    }
-
-    // Get list of concurrent streaming camera device combinations
-    if (mMinorVersion >= 6) {
-        res = getConcurrentCameraIdsInternalLocked(interface2_6);
-        if (res != OK) {
-            return res;
-        }
-    }
-
-    ret = interface->isSetTorchModeSupported(
-        [this](auto status, bool supported) {
-            if (status == Status::OK) {
-                mSetTorchModeSupported = supported;
-            }
-        });
-    if (!ret.isOk()) {
-        ALOGE("%s: Transaction error checking torch mode support '%s': %s",
-                __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
-        return DEAD_OBJECT;
-    }
-
-    mIsRemote = interface->isRemote();
-
-    sp<StatusListener> listener = mManager->getStatusListener();
-    for (auto& device : devices) {
-        std::string id;
-        status_t res = addDevice(device, common::V1_0::CameraDeviceStatus::PRESENT, &id);
-        if (res != OK) {
-            ALOGE("%s: Unable to enumerate camera device '%s': %s (%d)",
-                    __FUNCTION__, device.c_str(), strerror(-res), res);
-            continue;
-        }
-    }
-
-    ALOGI("Camera provider %s ready with %zu camera devices",
-            mProviderName.c_str(), mDevices.size());
-
-    // Process cached status callbacks
-    std::unique_ptr<std::vector<CameraStatusInfoT>> cachedStatus =
-            std::make_unique<std::vector<CameraStatusInfoT>>();
-    {
-        std::lock_guard<std::mutex> lock(mInitLock);
-
-        for (auto& statusInfo : mCachedStatus) {
-            std::string id, physicalId;
-            status_t res = OK;
-            if (statusInfo.isPhysicalCameraStatus) {
-                res = physicalCameraDeviceStatusChangeLocked(&id, &physicalId,
-                    statusInfo.cameraId, statusInfo.physicalCameraId, statusInfo.status);
-            } else {
-                res = cameraDeviceStatusChangeLocked(&id, statusInfo.cameraId, statusInfo.status);
-            }
-            if (res == OK) {
-                cachedStatus->emplace_back(statusInfo.isPhysicalCameraStatus,
-                        id.c_str(), physicalId.c_str(), statusInfo.status);
-            }
-        }
-        mCachedStatus.clear();
-
-        mInitialized = true;
-    }
-
-    // The cached status change callbacks cannot be fired directly from this
-    // function, due to same-thread deadlock trying to acquire mInterfaceMutex
-    // twice.
-    if (listener != nullptr) {
-        mInitialStatusCallbackFuture = std::async(std::launch::async,
-                &CameraProviderManager::ProviderInfo::notifyInitialStatusChange, this,
-                listener, std::move(cachedStatus));
-    }
-
-    return OK;
-}
-
-const sp<provider::V2_4::ICameraProvider>
-CameraProviderManager::ProviderInfo::startProviderInterface() {
-    ATRACE_CALL();
-    ALOGV("Request to start camera provider: %s", mProviderName.c_str());
-    if (mSavedInterface != nullptr) {
-        return mSavedInterface;
-    }
-    if (!kEnableLazyHal) {
-        ALOGE("Bad provider state! Should not be here on a non-lazy HAL!");
-        return nullptr;
-    }
-
-    auto interface = mActiveInterface.promote();
-    if (interface == nullptr) {
-        ALOGI("Camera HAL provider needs restart, calling getService(%s)", mProviderName.c_str());
-        interface = mManager->mServiceProxy->getService(mProviderName);
-        interface->setCallback(this);
-        hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
-        if (!linked.isOk()) {
-            ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
-                    __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
-            mManager->removeProvider(mProviderName);
-            return nullptr;
-        } else if (!linked) {
-            ALOGW("%s: Unable to link to provider '%s' death notifications",
-                    __FUNCTION__, mProviderName.c_str());
-        }
-        // Send current device state
-        if (mMinorVersion >= 5) {
-            auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
-            if (castResult.isOk()) {
-                sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
-                if (interface_2_5 != nullptr) {
-                    ALOGV("%s: Initial device state for %s: 0x %" PRIx64,
-                            __FUNCTION__, mProviderName.c_str(), mDeviceState);
-                    interface_2_5->notifyDeviceStateChange(mDeviceState);
-                }
-            }
-        }
-
-        mActiveInterface = interface;
-    } else {
-        ALOGV("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
-    }
-    return interface;
-}
-
 const std::string& CameraProviderManager::ProviderInfo::getType() const {
     return mType;
 }
 
-status_t CameraProviderManager::ProviderInfo::addDevice(const std::string& name,
-        CameraDeviceStatus initialStatus, /*out*/ std::string* parsedId) {
-
-    ALOGI("Enumerating new camera device: %s", name.c_str());
-
-    uint16_t major, minor;
-    std::string type, id;
-
-    status_t res = parseDeviceName(name, &major, &minor, &type, &id);
-    if (res != OK) {
-        return res;
-    }
-    if (type != mType) {
-        ALOGE("%s: Device type %s does not match provider type %s", __FUNCTION__,
-                type.c_str(), mType.c_str());
-        return BAD_VALUE;
-    }
-    if (mManager->isValidDeviceLocked(id, major)) {
-        ALOGE("%s: Device %s: ID %s is already in use for device major version %d", __FUNCTION__,
-                name.c_str(), id.c_str(), major);
-        return BAD_VALUE;
-    }
-
-    std::unique_ptr<DeviceInfo> deviceInfo;
-    switch (major) {
-        case 1:
-            ALOGE("%s: Device %s: Unsupported HIDL device HAL major version %d:", __FUNCTION__,
-                    name.c_str(), major);
-            return BAD_VALUE;
-        case 3:
-            deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, mProviderTagid,
-                    id, minor);
-            break;
-        default:
-            ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
-                    name.c_str(), major);
-            return BAD_VALUE;
-    }
-    if (deviceInfo == nullptr) return BAD_VALUE;
-    deviceInfo->mStatus = initialStatus;
-    bool isAPI1Compatible = deviceInfo->isAPI1Compatible();
-
-    mDevices.push_back(std::move(deviceInfo));
-
-    mUniqueCameraIds.insert(id);
-    if (isAPI1Compatible) {
-        // addDevice can be called more than once for the same camera id if HAL
-        // supports openLegacy.
-        if (std::find(mUniqueAPI1CompatibleCameraIds.begin(), mUniqueAPI1CompatibleCameraIds.end(),
-                id) == mUniqueAPI1CompatibleCameraIds.end()) {
-            mUniqueAPI1CompatibleCameraIds.push_back(id);
-        }
-    }
-
-    if (parsedId != nullptr) {
-        *parsedId = id;
-    }
-    return OK;
-}
-
 void CameraProviderManager::ProviderInfo::removeDevice(std::string id) {
     for (auto it = mDevices.begin(); it != mDevices.end(); it++) {
         if ((*it)->mId == id) {
             mUniqueCameraIds.erase(id);
             if ((*it)->isAPI1Compatible()) {
                 mUniqueAPI1CompatibleCameraIds.erase(std::remove(
-                        mUniqueAPI1CompatibleCameraIds.begin(),
-                        mUniqueAPI1CompatibleCameraIds.end(), id));
+                    mUniqueAPI1CompatibleCameraIds.begin(),
+                    mUniqueAPI1CompatibleCameraIds.end(), id));
             }
+
+            // Remove the references to the camera provider to avoid leaking them
+            // when an external camera is unplugged while in use with lazy HALs
+            mManager->removeRef(DeviceMode::CAMERA, id);
+            mManager->removeRef(DeviceMode::TORCH, id);
+
             mDevices.erase(it);
             break;
         }
     }
 }
 
+void CameraProviderManager::ProviderInfo::removeAllDevices() {
+    std::lock_guard<std::mutex> lock(mLock);
+
+    auto itDevices = mDevices.begin();
+    while (itDevices != mDevices.end()) {
+        std::string id = (*itDevices)->mId;
+        std::string deviceName = (*itDevices)->mName;
+        removeDevice(id);
+        // The device was removed; reset the iterator
+        itDevices = mDevices.begin();
+
+        // Notify CameraService of the status change
+        sp<StatusListener> listener = mManager->getStatusListener();
+        if (listener != nullptr) {
+            mLock.unlock();
+            ALOGV("%s: notify device not_present: %s",
+                  __FUNCTION__,
+                  deviceName.c_str());
+            listener->onDeviceStatusChanged(String8(id.c_str()),
+                                            CameraDeviceStatus::NOT_PRESENT);
+            mLock.lock();
+        }
+    }
+}
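
The new removeAllDevices() empties mDevices while notifying CameraService about each
removal; because the status callback can re-enter the provider manager, the code drops
mLock around the callback and re-fetches the iterator after every erase. A condensed
sketch of the same loop with stand-in Device and Listener types, using std::unique_lock
for the temporary unlock that the patch performs manually on top of a lock_guard:

    #include <mutex>
    #include <string>
    #include <vector>

    struct Device { std::string id; };
    struct Listener { void onNotPresent(const std::string& /*id*/) {} };

    class Provider {
      public:
        void removeAllDevices(Listener* listener) {
            std::unique_lock<std::mutex> lock(mLock);
            while (!mDevices.empty()) {
                std::string id = mDevices.front().id;
                removeDeviceLocked(id);              // invalidates iterators into mDevices
                if (listener != nullptr) {
                    lock.unlock();                   // callback may re-enter this object
                    listener->onNotPresent(id);
                    lock.lock();
                }
            }
        }
      private:
        void removeDeviceLocked(const std::string& id) {
            for (auto it = mDevices.begin(); it != mDevices.end(); ++it) {
                if (it->id == id) { mDevices.erase(it); break; }
            }
        }
        std::mutex mLock;
        std::vector<Device> mDevices;
    };
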
+
+bool CameraProviderManager::ProviderInfo::isExternalLazyHAL() const {
+    return kEnableLazyHal && (mProviderName == kExternalProviderName);
+}
+
 status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
     dprintf(fd, "== Camera Provider HAL %s (v2.%d, %s) static info: %zu devices: ==\n",
             mProviderInstance.c_str(),
@@ -1739,395 +1637,18 @@
     return OK;
 }
 
-status_t CameraProviderManager::ProviderInfo::getConcurrentCameraIdsInternalLocked(
-        sp<provider::V2_6::ICameraProvider> &interface2_6) {
-    if (interface2_6 == nullptr) {
-        ALOGE("%s: null interface provided", __FUNCTION__);
-        return BAD_VALUE;
-    }
-    Status status = Status::OK;
-    hardware::Return<void> ret =
-            interface2_6->getConcurrentStreamingCameraIds([&status, this](
-            Status concurrentIdStatus, // TODO: Move all instances of hidl_string to 'using'
-            const hardware::hidl_vec<hardware::hidl_vec<hardware::hidl_string>>&
-                        cameraDeviceIdCombinations) {
-            status = concurrentIdStatus;
-            if (status == Status::OK) {
-                mConcurrentCameraIdCombinations.clear();
-                for (auto& combination : cameraDeviceIdCombinations) {
-                    std::unordered_set<std::string> deviceIds;
-                    for (auto &cameraDeviceId : combination) {
-                        deviceIds.insert(cameraDeviceId.c_str());
-                    }
-                    mConcurrentCameraIdCombinations.push_back(std::move(deviceIds));
-                }
-            } });
-    if (!ret.isOk()) {
-        ALOGE("%s: Transaction error in getting concurrent camera ID list from provider '%s'",
-                __FUNCTION__, mProviderName.c_str());
-            return DEAD_OBJECT;
-    }
-    if (status != Status::OK) {
-        ALOGE("%s: Unable to query for camera devices from provider '%s'",
-                    __FUNCTION__, mProviderName.c_str());
-        return mapToStatusT(status);
-    }
-    return OK;
-}
-
-status_t CameraProviderManager::ProviderInfo::reCacheConcurrentStreamingCameraIdsLocked() {
-    if (mMinorVersion < 6) {
-      // Unsupported operation, nothing to do here
-      return OK;
-    }
-    // Check if the provider is currently active - not going to start it up for this notification
-    auto interface = mSavedInterface != nullptr ? mSavedInterface : mActiveInterface.promote();
-    if (interface == nullptr) {
-        ALOGE("%s: camera provider interface for %s is not valid", __FUNCTION__,
-                mProviderName.c_str());
-        return INVALID_OPERATION;
-    }
-    auto castResult = provider::V2_6::ICameraProvider::castFrom(interface);
-
-    if (castResult.isOk()) {
-        sp<provider::V2_6::ICameraProvider> interface2_6 = castResult;
-        if (interface2_6 != nullptr) {
-            return getConcurrentCameraIdsInternalLocked(interface2_6);
-        } else {
-            // This should not happen since mMinorVersion >= 6
-            ALOGE("%s: mMinorVersion was >= 6, but interface2_6 was nullptr", __FUNCTION__);
-            return UNKNOWN_ERROR;
-        }
-    }
-    return OK;
-}
-
 std::vector<std::unordered_set<std::string>>
 CameraProviderManager::ProviderInfo::getConcurrentCameraIdCombinations() {
     std::lock_guard<std::mutex> lock(mLock);
     return mConcurrentCameraIdCombinations;
 }
 
-hardware::Return<void> CameraProviderManager::ProviderInfo::cameraDeviceStatusChange(
-        const hardware::hidl_string& cameraDeviceName,
-        CameraDeviceStatus newStatus) {
-    sp<StatusListener> listener;
-    std::string id;
-    std::lock_guard<std::mutex> lock(mInitLock);
-
-    if (!mInitialized) {
-        mCachedStatus.emplace_back(false /*isPhysicalCameraStatus*/,
-                cameraDeviceName.c_str(), std::string().c_str(), newStatus);
-        return hardware::Void();
+void CameraProviderManager::ProviderInfo::notifyDeviceInfoStateChangeLocked(
+        int64_t newDeviceState) {
+    std::lock_guard<std::mutex> lock(mLock);
+    for (auto it = mDevices.begin(); it != mDevices.end(); it++) {
+        (*it)->notifyDeviceStateChange(newDeviceState);
     }
-
-    {
-        std::lock_guard<std::mutex> lock(mLock);
-        if (OK != cameraDeviceStatusChangeLocked(&id, cameraDeviceName, newStatus)) {
-            return hardware::Void();
-        }
-        listener = mManager->getStatusListener();
-    }
-
-    // Call without lock held to allow reentrancy into provider manager
-    if (listener != nullptr) {
-        listener->onDeviceStatusChanged(String8(id.c_str()), newStatus);
-    }
-
-    return hardware::Void();
-}
-
-status_t CameraProviderManager::ProviderInfo::cameraDeviceStatusChangeLocked(
-        std::string* id, const hardware::hidl_string& cameraDeviceName,
-        CameraDeviceStatus newStatus) {
-    bool known = false;
-    std::string cameraId;
-    for (auto& deviceInfo : mDevices) {
-        if (deviceInfo->mName == cameraDeviceName) {
-            ALOGI("Camera device %s status is now %s, was %s", cameraDeviceName.c_str(),
-                    deviceStatusToString(newStatus), deviceStatusToString(deviceInfo->mStatus));
-            deviceInfo->mStatus = newStatus;
-            // TODO: Handle device removal (NOT_PRESENT)
-            cameraId = deviceInfo->mId;
-            known = true;
-            break;
-        }
-    }
-    // Previously unseen device; status must not be NOT_PRESENT
-    if (!known) {
-        if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
-            ALOGW("Camera provider %s says an unknown camera device %s is not present. Curious.",
-                mProviderName.c_str(), cameraDeviceName.c_str());
-            return BAD_VALUE;
-        }
-        addDevice(cameraDeviceName, newStatus, &cameraId);
-    } else if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
-        removeDevice(cameraId);
-    }
-    if (reCacheConcurrentStreamingCameraIdsLocked() != OK) {
-        ALOGE("%s: CameraProvider %s could not re-cache concurrent streaming camera id list ",
-                  __FUNCTION__, mProviderName.c_str());
-    }
-    *id = cameraId;
-    return OK;
-}
-
-hardware::Return<void> CameraProviderManager::ProviderInfo::physicalCameraDeviceStatusChange(
-        const hardware::hidl_string& cameraDeviceName,
-        const hardware::hidl_string& physicalCameraDeviceName,
-        CameraDeviceStatus newStatus) {
-    sp<StatusListener> listener;
-    std::string id;
-    std::string physicalId;
-    std::lock_guard<std::mutex> lock(mInitLock);
-
-    if (!mInitialized) {
-        mCachedStatus.emplace_back(true /*isPhysicalCameraStatus*/, cameraDeviceName,
-                physicalCameraDeviceName, newStatus);
-        return hardware::Void();
-    }
-
-    {
-        std::lock_guard<std::mutex> lock(mLock);
-
-        if (OK != physicalCameraDeviceStatusChangeLocked(&id, &physicalId, cameraDeviceName,
-                physicalCameraDeviceName, newStatus)) {
-            return hardware::Void();
-        }
-
-        listener = mManager->getStatusListener();
-    }
-    // Call without lock held to allow reentrancy into provider manager
-    if (listener != nullptr) {
-        listener->onDeviceStatusChanged(String8(id.c_str()),
-                String8(physicalId.c_str()), newStatus);
-    }
-    return hardware::Void();
-}
-
-status_t CameraProviderManager::ProviderInfo::physicalCameraDeviceStatusChangeLocked(
-            std::string* id, std::string* physicalId,
-            const hardware::hidl_string& cameraDeviceName,
-            const hardware::hidl_string& physicalCameraDeviceName,
-            CameraDeviceStatus newStatus) {
-    bool known = false;
-    std::string cameraId;
-    for (auto& deviceInfo : mDevices) {
-        if (deviceInfo->mName == cameraDeviceName) {
-            cameraId = deviceInfo->mId;
-            if (!deviceInfo->mIsLogicalCamera) {
-                ALOGE("%s: Invalid combination of camera id %s, physical id %s",
-                        __FUNCTION__, cameraId.c_str(), physicalCameraDeviceName.c_str());
-                return BAD_VALUE;
-            }
-            if (std::find(deviceInfo->mPhysicalIds.begin(), deviceInfo->mPhysicalIds.end(),
-                    physicalCameraDeviceName) == deviceInfo->mPhysicalIds.end()) {
-                ALOGE("%s: Invalid combination of camera id %s, physical id %s",
-                        __FUNCTION__, cameraId.c_str(), physicalCameraDeviceName.c_str());
-                return BAD_VALUE;
-            }
-            ALOGI("Camera device %s physical device %s status is now %s",
-                    cameraDeviceName.c_str(), physicalCameraDeviceName.c_str(),
-                    deviceStatusToString(newStatus));
-            known = true;
-            break;
-        }
-    }
-    // Previously unseen device; status must not be NOT_PRESENT
-    if (!known) {
-        ALOGW("Camera provider %s says an unknown camera device %s-%s is not present. Curious.",
-                mProviderName.c_str(), cameraDeviceName.c_str(),
-                physicalCameraDeviceName.c_str());
-        return BAD_VALUE;
-    }
-
-    *id = cameraId;
-    *physicalId = physicalCameraDeviceName.c_str();
-    return OK;
-}
-
-hardware::Return<void> CameraProviderManager::ProviderInfo::torchModeStatusChange(
-        const hardware::hidl_string& cameraDeviceName,
-        TorchModeStatus newStatus) {
-    sp<StatusListener> listener;
-    std::string id;
-    {
-        std::lock_guard<std::mutex> lock(mManager->mStatusListenerMutex);
-        bool known = false;
-        for (auto& deviceInfo : mDevices) {
-            if (deviceInfo->mName == cameraDeviceName) {
-                ALOGI("Camera device %s torch status is now %s", cameraDeviceName.c_str(),
-                        torchStatusToString(newStatus));
-                id = deviceInfo->mId;
-                known = true;
-                if (TorchModeStatus::AVAILABLE_ON != newStatus) {
-                    mManager->removeRef(DeviceMode::TORCH, id);
-                }
-                break;
-            }
-        }
-        if (!known) {
-            ALOGW("Camera provider %s says an unknown camera %s now has torch status %d. Curious.",
-                    mProviderName.c_str(), cameraDeviceName.c_str(), newStatus);
-            return hardware::Void();
-        }
-        listener = mManager->getStatusListener();
-    }
-    // Call without lock held to allow reentrancy into provider manager
-    if (listener != nullptr) {
-        listener->onTorchStatusChanged(String8(id.c_str()), newStatus);
-    }
-    return hardware::Void();
-}
-
-void CameraProviderManager::ProviderInfo::serviceDied(uint64_t cookie,
-        const wp<hidl::base::V1_0::IBase>& who) {
-    (void) who;
-    ALOGI("Camera provider '%s' has died; removing it", mProviderInstance.c_str());
-    if (cookie != mId) {
-        ALOGW("%s: Unexpected serviceDied cookie %" PRIu64 ", expected %" PRIu32,
-                __FUNCTION__, cookie, mId);
-    }
-    mManager->removeProvider(mProviderInstance);
-}
-
-status_t CameraProviderManager::ProviderInfo::setUpVendorTags() {
-    if (mVendorTagDescriptor != nullptr)
-        return OK;
-
-    hardware::hidl_vec<VendorTagSection> vts;
-    Status status;
-    hardware::Return<void> ret;
-    const sp<provider::V2_4::ICameraProvider> interface = startProviderInterface();
-    if (interface == nullptr) {
-        return DEAD_OBJECT;
-    }
-    ret = interface->getVendorTags(
-        [&](auto s, const auto& vendorTagSecs) {
-            status = s;
-            if (s == Status::OK) {
-                vts = vendorTagSecs;
-            }
-    });
-    if (!ret.isOk()) {
-        ALOGE("%s: Transaction error getting vendor tags from provider '%s': %s",
-                __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
-        return DEAD_OBJECT;
-    }
-    if (status != Status::OK) {
-        return mapToStatusT(status);
-    }
-
-    // Read all vendor tag definitions into a descriptor
-    status_t res;
-    if ((res = HidlVendorTagDescriptor::createDescriptorFromHidl(vts, /*out*/mVendorTagDescriptor))
-            != OK) {
-        ALOGE("%s: Could not generate descriptor from vendor tag operations,"
-                "received error %s (%d). Camera clients will not be able to use"
-                "vendor tags", __FUNCTION__, strerror(res), res);
-        return res;
-    }
-
-    return OK;
-}
-
-status_t CameraProviderManager::ProviderInfo::notifyDeviceStateChange(
-        hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
-    mDeviceState = newDeviceState;
-    if (mMinorVersion >= 5) {
-        // Check if the provider is currently active - not going to start it up for this notification
-        auto interface = mSavedInterface != nullptr ? mSavedInterface : mActiveInterface.promote();
-        if (interface != nullptr) {
-            // Send current device state
-            auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
-            if (castResult.isOk()) {
-                sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
-                if (interface_2_5 != nullptr) {
-                    interface_2_5->notifyDeviceStateChange(mDeviceState);
-                }
-            }
-        }
-    }
-    return OK;
-}
-
-status_t CameraProviderManager::ProviderInfo::isConcurrentSessionConfigurationSupported(
-        const hardware::hidl_vec<CameraIdAndStreamCombination> &halCameraIdsAndStreamCombinations,
-        bool *isSupported) {
-    status_t res = OK;
-    if (mMinorVersion >= 6) {
-        // Check if the provider is currently active - not going to start it up for this notification
-        auto interface = mSavedInterface != nullptr ? mSavedInterface : mActiveInterface.promote();
-        if (interface == nullptr) {
-            // TODO: This might be some other problem
-            return INVALID_OPERATION;
-        }
-        auto castResult2_6 = provider::V2_6::ICameraProvider::castFrom(interface);
-        auto castResult2_7 = provider::V2_7::ICameraProvider::castFrom(interface);
-        Status callStatus;
-        auto cb =
-                [&isSupported, &callStatus](Status s, bool supported) {
-                      callStatus = s;
-                      *isSupported = supported; };
-
-        ::android::hardware::Return<void> ret;
-        sp<provider::V2_7::ICameraProvider> interface_2_7;
-        sp<provider::V2_6::ICameraProvider> interface_2_6;
-        if (mMinorVersion >= 7 && castResult2_7.isOk()) {
-            interface_2_7 = castResult2_7;
-            if (interface_2_7 != nullptr) {
-                ret = interface_2_7->isConcurrentStreamCombinationSupported_2_7(
-                        halCameraIdsAndStreamCombinations, cb);
-            }
-        } else if (mMinorVersion == 6 && castResult2_6.isOk()) {
-            interface_2_6 = castResult2_6;
-            if (interface_2_6 != nullptr) {
-                hardware::hidl_vec<provider::V2_6::CameraIdAndStreamCombination>
-                        halCameraIdsAndStreamCombinations_2_6;
-                size_t numStreams = halCameraIdsAndStreamCombinations.size();
-                halCameraIdsAndStreamCombinations_2_6.resize(numStreams);
-                for (size_t i = 0; i < numStreams; i++) {
-                    using namespace camera3;
-                    auto const& combination = halCameraIdsAndStreamCombinations[i];
-                    halCameraIdsAndStreamCombinations_2_6[i].cameraId = combination.cameraId;
-                    bool success =
-                            SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
-                                    halCameraIdsAndStreamCombinations_2_6[i].streamConfiguration,
-                                    combination.streamConfiguration);
-                    if (!success) {
-                        *isSupported = false;
-                        return OK;
-                    }
-                }
-                ret = interface_2_6->isConcurrentStreamCombinationSupported(
-                        halCameraIdsAndStreamCombinations_2_6, cb);
-            }
-        }
-
-        if (interface_2_7 != nullptr || interface_2_6 != nullptr) {
-            if (ret.isOk()) {
-                switch (callStatus) {
-                    case Status::OK:
-                        // Expected case, do nothing.
-                        res = OK;
-                        break;
-                    case Status::METHOD_NOT_SUPPORTED:
-                        res = INVALID_OPERATION;
-                        break;
-                    default:
-                        ALOGE("%s: Session configuration query failed: %d", __FUNCTION__,
-                                  callStatus);
-                        res = UNKNOWN_ERROR;
-                }
-            } else {
-                ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
-                res = UNKNOWN_ERROR;
-            }
-            return res;
-        }
-    }
-    // unsupported operation
-    return INVALID_OPERATION;
 }
 
 void CameraProviderManager::ProviderInfo::notifyInitialStatusChange(
@@ -2144,277 +1665,21 @@
     }
 }
 
-template<class DeviceInfoT>
-std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
-    CameraProviderManager::ProviderInfo::initializeDeviceInfo(
-        const std::string &name, const metadata_vendor_id_t tagId,
-        const std::string &id, uint16_t minorVersion) {
-    Status status;
-
-    auto cameraInterface =
-            startDeviceInterface<typename DeviceInfoT::InterfaceT>(name);
-    if (cameraInterface == nullptr) return nullptr;
-
-    CameraResourceCost resourceCost;
-    cameraInterface->getResourceCost([&status, &resourceCost](
-        Status s, CameraResourceCost cost) {
-                status = s;
-                resourceCost = cost;
-            });
-    if (status != Status::OK) {
-        ALOGE("%s: Unable to obtain resource costs for camera device %s: %s", __FUNCTION__,
-                name.c_str(), statusToString(status));
-        return nullptr;
-    }
-
-    for (auto& conflictName : resourceCost.conflictingDevices) {
-        uint16_t major, minor;
-        std::string type, id;
-        status_t res = parseDeviceName(conflictName, &major, &minor, &type, &id);
-        if (res != OK) {
-            ALOGE("%s: Failed to parse conflicting device %s", __FUNCTION__, conflictName.c_str());
-            return nullptr;
-        }
-        conflictName = id;
-    }
-
-    return std::unique_ptr<DeviceInfo>(
-        new DeviceInfoT(name, tagId, id, minorVersion, resourceCost, this,
-                mProviderPublicCameraIds, cameraInterface));
-}
-
-template<class InterfaceT>
-sp<InterfaceT>
-CameraProviderManager::ProviderInfo::startDeviceInterface(const std::string &name) {
-    ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
-            name.c_str(), InterfaceT::version.get_major());
-    return nullptr;
-}
-
-template<>
-sp<device::V3_2::ICameraDevice>
-CameraProviderManager::ProviderInfo::startDeviceInterface
-        <device::V3_2::ICameraDevice>(const std::string &name) {
-    Status status;
-    sp<device::V3_2::ICameraDevice> cameraInterface;
-    hardware::Return<void> ret;
-    const sp<provider::V2_4::ICameraProvider> interface = startProviderInterface();
-    if (interface == nullptr) {
-        return nullptr;
-    }
-    ret = interface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
-        Status s, sp<device::V3_2::ICameraDevice> interface) {
-                status = s;
-                cameraInterface = interface;
-            });
-    if (!ret.isOk()) {
-        ALOGE("%s: Transaction error trying to obtain interface for camera device %s: %s",
-                __FUNCTION__, name.c_str(), ret.description().c_str());
-        return nullptr;
-    }
-    if (status != Status::OK) {
-        ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
-                name.c_str(), statusToString(status));
-        return nullptr;
-    }
-    return cameraInterface;
-}
-
-CameraProviderManager::ProviderInfo::DeviceInfo::~DeviceInfo() {}
-
-template<class InterfaceT>
-sp<InterfaceT> CameraProviderManager::ProviderInfo::DeviceInfo::startDeviceInterface() {
-    sp<InterfaceT> device;
-    ATRACE_CALL();
-    if (mSavedInterface == nullptr) {
-        sp<ProviderInfo> parentProvider = mParentProvider.promote();
-        if (parentProvider != nullptr) {
-            device = parentProvider->startDeviceInterface<InterfaceT>(mName);
-        }
-    } else {
-        device = (InterfaceT *) mSavedInterface.get();
-    }
-    return device;
-}
-
-template<class InterfaceT>
-status_t CameraProviderManager::ProviderInfo::DeviceInfo::setTorchMode(InterfaceT& interface,
-        bool enabled) {
-    Status s = interface->setTorchMode(enabled ? TorchMode::ON : TorchMode::OFF);
-    return mapToStatusT(s);
-}
-
 CameraProviderManager::ProviderInfo::DeviceInfo3::DeviceInfo3(const std::string& name,
         const metadata_vendor_id_t tagId, const std::string &id,
         uint16_t minorVersion,
         const CameraResourceCost& resourceCost,
         sp<ProviderInfo> parentProvider,
-        const std::vector<std::string>& publicCameraIds,
-        sp<InterfaceT> interface) :
+        const std::vector<std::string>& publicCameraIds) :
         DeviceInfo(name, tagId, id, hardware::hidl_version{3, minorVersion},
-                   publicCameraIds, resourceCost, parentProvider) {
-    // Get camera characteristics and initialize flash unit availability
-    Status status;
-    hardware::Return<void> ret;
-    ret = interface->getCameraCharacteristics([&status, this](Status s,
-                    device::V3_2::CameraMetadata metadata) {
-                status = s;
-                if (s == Status::OK) {
-                    camera_metadata_t *buffer =
-                            reinterpret_cast<camera_metadata_t*>(metadata.data());
-                    size_t expectedSize = metadata.size();
-                    int res = validate_camera_metadata_structure(buffer, &expectedSize);
-                    if (res == OK || res == CAMERA_METADATA_VALIDATION_SHIFTED) {
-                        set_camera_metadata_vendor_id(buffer, mProviderTagid);
-                        mCameraCharacteristics = buffer;
-                    } else {
-                        ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
-                        status = Status::INTERNAL_ERROR;
-                    }
-                }
-            });
-    if (!ret.isOk()) {
-        ALOGE("%s: Transaction error getting camera characteristics for device %s"
-                " to check for a flash unit: %s", __FUNCTION__, id.c_str(),
-                ret.description().c_str());
-        return;
+                   publicCameraIds, resourceCost, parentProvider) { }
+
+void CameraProviderManager::ProviderInfo::DeviceInfo3::notifyDeviceStateChange(int64_t newState) {
+    if (!mDeviceStateOrientationMap.empty() &&
+            (mDeviceStateOrientationMap.find(newState) != mDeviceStateOrientationMap.end())) {
+        mCameraCharacteristics.update(ANDROID_SENSOR_ORIENTATION,
+                &mDeviceStateOrientationMap[newState], 1);
     }
-    if (status != Status::OK) {
-        ALOGE("%s: Unable to get camera characteristics for device %s: %s (%d)",
-                __FUNCTION__, id.c_str(), CameraProviderManager::statusToString(status), status);
-        return;
-    }
-
-    mSystemCameraKind = getSystemCameraKind();
-
-    status_t res = fixupMonochromeTags();
-    if (OK != res) {
-        ALOGE("%s: Unable to fix up monochrome tags based for older HAL version: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return;
-    }
-    auto stat = addDynamicDepthTags();
-    if (OK != stat) {
-        ALOGE("%s: Failed appending dynamic depth tags: %s (%d)", __FUNCTION__, strerror(-stat),
-                stat);
-    }
-    res = deriveHeicTags();
-    if (OK != res) {
-        ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-    }
-
-    if (SessionConfigurationUtils::isUltraHighResolutionSensor(mCameraCharacteristics)) {
-        status_t status = addDynamicDepthTags(/*maxResolution*/true);
-        if (OK != status) {
-            ALOGE("%s: Failed appending dynamic depth tags for maximum resolution mode: %s (%d)",
-                    __FUNCTION__, strerror(-status), status);
-        }
-
-        status = deriveHeicTags(/*maxResolution*/true);
-        if (OK != status) {
-            ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities for"
-                    "maximum resolution mode: %s (%d)", __FUNCTION__, strerror(-status), status);
-        }
-    }
-
-    res = addRotateCropTags();
-    if (OK != res) {
-        ALOGE("%s: Unable to add default SCALER_ROTATE_AND_CROP tags: %s (%d)", __FUNCTION__,
-                strerror(-res), res);
-    }
-    res = addPreCorrectionActiveArraySize();
-    if (OK != res) {
-        ALOGE("%s: Unable to add PRE_CORRECTION_ACTIVE_ARRAY_SIZE: %s (%d)", __FUNCTION__,
-                strerror(-res), res);
-    }
-    res = camera3::ZoomRatioMapper::overrideZoomRatioTags(
-            &mCameraCharacteristics, &mSupportNativeZoomRatio);
-    if (OK != res) {
-        ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-    }
-
-    camera_metadata_entry flashAvailable =
-            mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
-    if (flashAvailable.count == 1 &&
-            flashAvailable.data.u8[0] == ANDROID_FLASH_INFO_AVAILABLE_TRUE) {
-        mHasFlashUnit = true;
-    } else {
-        mHasFlashUnit = false;
-    }
-
-    queryPhysicalCameraIds();
-
-    // Get physical camera characteristics if applicable
-    auto castResult = device::V3_5::ICameraDevice::castFrom(interface);
-    if (!castResult.isOk()) {
-        ALOGV("%s: Unable to convert ICameraDevice instance to version 3.5", __FUNCTION__);
-        return;
-    }
-    sp<device::V3_5::ICameraDevice> interface_3_5 = castResult;
-    if (interface_3_5 == nullptr) {
-        ALOGE("%s: Converted ICameraDevice instance to nullptr", __FUNCTION__);
-        return;
-    }
-
-    if (mIsLogicalCamera) {
-        for (auto& id : mPhysicalIds) {
-            if (std::find(mPublicCameraIds.begin(), mPublicCameraIds.end(), id) !=
-                    mPublicCameraIds.end()) {
-                continue;
-            }
-
-            hardware::hidl_string hidlId(id);
-            ret = interface_3_5->getPhysicalCameraCharacteristics(hidlId,
-                    [&status, &id, this](Status s, device::V3_2::CameraMetadata metadata) {
-                status = s;
-                if (s == Status::OK) {
-                    camera_metadata_t *buffer =
-                            reinterpret_cast<camera_metadata_t*>(metadata.data());
-                    size_t expectedSize = metadata.size();
-                    int res = validate_camera_metadata_structure(buffer, &expectedSize);
-                    if (res == OK || res == CAMERA_METADATA_VALIDATION_SHIFTED) {
-                        set_camera_metadata_vendor_id(buffer, mProviderTagid);
-                        mPhysicalCameraCharacteristics[id] = buffer;
-                    } else {
-                        ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
-                        status = Status::INTERNAL_ERROR;
-                    }
-                }
-            });
-
-            if (!ret.isOk()) {
-                ALOGE("%s: Transaction error getting physical camera %s characteristics for %s: %s",
-                        __FUNCTION__, id.c_str(), id.c_str(), ret.description().c_str());
-                return;
-            }
-            if (status != Status::OK) {
-                ALOGE("%s: Unable to get physical camera %s characteristics for device %s: %s (%d)",
-                        __FUNCTION__, id.c_str(), mId.c_str(),
-                        CameraProviderManager::statusToString(status), status);
-                return;
-            }
-
-            res = camera3::ZoomRatioMapper::overrideZoomRatioTags(
-                    &mPhysicalCameraCharacteristics[id], &mSupportNativeZoomRatio);
-            if (OK != res) {
-                ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
-                        __FUNCTION__, strerror(-res), res);
-            }
-        }
-    }
-
-    if (!kEnableLazyHal) {
-        // Save HAL reference indefinitely
-        mSavedInterface = interface;
-    }
-}
-
-CameraProviderManager::ProviderInfo::DeviceInfo3::~DeviceInfo3() {}
-
-status_t CameraProviderManager::ProviderInfo::DeviceInfo3::setTorchMode(bool enabled) {
-    return setTorchModeForDevice<InterfaceT>(enabled);
 }
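
The new DeviceInfo3::notifyDeviceStateChange() looks the incoming device state up in
mDeviceStateOrientationMap and, when an entry exists, rewrites ANDROID_SENSOR_ORIENTATION
in the cached characteristics, presumably so the advertised orientation can follow states
such as folded versus unfolded postures (an inference from the map's name; how the map is
populated is not shown in this hunk). A stripped-down sketch of the lookup-and-update step,
with a stand-in Metadata type in place of the real CameraMetadata:

    #include <cstdint>
    #include <map>

    // Stand-in for the real CameraMetadata update path.
    struct Metadata {
        int32_t sensorOrientation = 0;
        void update(int32_t value) { sensorOrientation = value; }
    };

    struct DeviceInfoSketch {
        std::map<int64_t, int32_t> stateToOrientation;  // device state -> orientation in degrees
        Metadata characteristics;

        void notifyDeviceStateChange(int64_t newState) {
            auto it = stateToOrientation.find(newState);
            if (it != stateToOrientation.end()) {
                characteristics.update(it->second);
            }
        }
    };
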
 
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraInfo(
@@ -2472,21 +1737,6 @@
     return isBackwardCompatible;
 }
 
-status_t CameraProviderManager::ProviderInfo::DeviceInfo3::dumpState(int fd) {
-    native_handle_t* handle = native_handle_create(1,0);
-    handle->data[0] = fd;
-    const sp<InterfaceT> interface = startDeviceInterface<InterfaceT>();
-    if (interface == nullptr) {
-        return DEAD_OBJECT;
-    }
-    auto ret = interface->dumpState(handle);
-    native_handle_delete(handle);
-    if (!ret.isOk()) {
-        return INVALID_OPERATION;
-    }
-    return OK;
-}
-
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraCharacteristics(
         bool overrideForPerfClass, CameraMetadata *characteristics) const {
     if (characteristics == nullptr) return BAD_VALUE;
@@ -2512,63 +1762,6 @@
     return OK;
 }
 
-status_t CameraProviderManager::ProviderInfo::DeviceInfo3::isSessionConfigurationSupported(
-        const hardware::camera::device::V3_7::StreamConfiguration &configuration,
-        bool *status /*out*/) {
-
-    const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
-            this->startDeviceInterface<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
-    if (interface == nullptr) {
-        return DEAD_OBJECT;
-    }
-    auto castResult_3_5 = device::V3_5::ICameraDevice::castFrom(interface);
-    sp<hardware::camera::device::V3_5::ICameraDevice> interface_3_5 = castResult_3_5;
-    auto castResult_3_7 = device::V3_7::ICameraDevice::castFrom(interface);
-    sp<hardware::camera::device::V3_7::ICameraDevice> interface_3_7 = castResult_3_7;
-
-    status_t res;
-    Status callStatus;
-    ::android::hardware::Return<void> ret;
-    auto halCb =
-            [&callStatus, &status] (Status s, bool combStatus) {
-                callStatus = s;
-                *status = combStatus;
-            };
-    if (interface_3_7 != nullptr) {
-        ret = interface_3_7->isStreamCombinationSupported_3_7(configuration, halCb);
-    } else if (interface_3_5 != nullptr) {
-        hardware::camera::device::V3_4::StreamConfiguration configuration_3_4;
-        bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
-                configuration_3_4, configuration);
-        if (!success) {
-            *status = false;
-            return OK;
-        }
-        ret = interface_3_5->isStreamCombinationSupported(configuration_3_4, halCb);
-    } else {
-        return INVALID_OPERATION;
-    }
-    if (ret.isOk()) {
-        switch (callStatus) {
-            case Status::OK:
-                // Expected case, do nothing.
-                res = OK;
-                break;
-            case Status::METHOD_NOT_SUPPORTED:
-                res = INVALID_OPERATION;
-                break;
-            default:
-                ALOGE("%s: Session configuration query failed: %d", __FUNCTION__, callStatus);
-                res = UNKNOWN_ERROR;
-        }
-    } else {
-        ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
-        res = UNKNOWN_ERROR;
-    }
-
-    return res;
-}
-
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::filterSmallJpegSizes() {
     int32_t thresholdW = SessionConfigurationUtils::PERF_CLASS_JPEG_THRESH_W;
     int32_t thresholdH = SessionConfigurationUtils::PERF_CLASS_JPEG_THRESH_H;
@@ -2820,8 +2013,6 @@
     return OK;
 }
 
-
-
 CameraProviderManager::ProviderInfo::~ProviderInfo() {
     if (mInitialStatusCallbackFuture.valid()) {
         mInitialStatusCallbackFuture.wait();
@@ -2830,167 +2021,6 @@
     // CameraProvider interface dies, so do not unregister callbacks.
 }
 
-status_t CameraProviderManager::mapToStatusT(const Status& s)  {
-    switch(s) {
-        case Status::OK:
-            return OK;
-        case Status::ILLEGAL_ARGUMENT:
-            return BAD_VALUE;
-        case Status::CAMERA_IN_USE:
-            return -EBUSY;
-        case Status::MAX_CAMERAS_IN_USE:
-            return -EUSERS;
-        case Status::METHOD_NOT_SUPPORTED:
-            return UNKNOWN_TRANSACTION;
-        case Status::OPERATION_NOT_SUPPORTED:
-            return INVALID_OPERATION;
-        case Status::CAMERA_DISCONNECTED:
-            return DEAD_OBJECT;
-        case Status::INTERNAL_ERROR:
-            return INVALID_OPERATION;
-    }
-    ALOGW("Unexpected HAL status code %d", s);
-    return INVALID_OPERATION;
-}
-
-const char* CameraProviderManager::statusToString(const Status& s) {
-    switch(s) {
-        case Status::OK:
-            return "OK";
-        case Status::ILLEGAL_ARGUMENT:
-            return "ILLEGAL_ARGUMENT";
-        case Status::CAMERA_IN_USE:
-            return "CAMERA_IN_USE";
-        case Status::MAX_CAMERAS_IN_USE:
-            return "MAX_CAMERAS_IN_USE";
-        case Status::METHOD_NOT_SUPPORTED:
-            return "METHOD_NOT_SUPPORTED";
-        case Status::OPERATION_NOT_SUPPORTED:
-            return "OPERATION_NOT_SUPPORTED";
-        case Status::CAMERA_DISCONNECTED:
-            return "CAMERA_DISCONNECTED";
-        case Status::INTERNAL_ERROR:
-            return "INTERNAL_ERROR";
-    }
-    ALOGW("Unexpected HAL status code %d", s);
-    return "UNKNOWN_ERROR";
-}
-
-const char* CameraProviderManager::deviceStatusToString(const CameraDeviceStatus& s) {
-    switch(s) {
-        case CameraDeviceStatus::NOT_PRESENT:
-            return "NOT_PRESENT";
-        case CameraDeviceStatus::PRESENT:
-            return "PRESENT";
-        case CameraDeviceStatus::ENUMERATING:
-            return "ENUMERATING";
-    }
-    ALOGW("Unexpected HAL device status code %d", s);
-    return "UNKNOWN_STATUS";
-}
-
-const char* CameraProviderManager::torchStatusToString(const TorchModeStatus& s) {
-    switch(s) {
-        case TorchModeStatus::NOT_AVAILABLE:
-            return "NOT_AVAILABLE";
-        case TorchModeStatus::AVAILABLE_OFF:
-            return "AVAILABLE_OFF";
-        case TorchModeStatus::AVAILABLE_ON:
-            return "AVAILABLE_ON";
-    }
-    ALOGW("Unexpected HAL torch mode status code %d", s);
-    return "UNKNOWN_STATUS";
-}
-
-
-status_t HidlVendorTagDescriptor::createDescriptorFromHidl(
-        const hardware::hidl_vec<common::V1_0::VendorTagSection>& vts,
-        /*out*/
-        sp<VendorTagDescriptor>& descriptor) {
-
-    int tagCount = 0;
-
-    for (size_t s = 0; s < vts.size(); s++) {
-        tagCount += vts[s].tags.size();
-    }
-
-    if (tagCount < 0 || tagCount > INT32_MAX) {
-        ALOGE("%s: tag count %d from vendor tag sections is invalid.", __FUNCTION__, tagCount);
-        return BAD_VALUE;
-    }
-
-    Vector<uint32_t> tagArray;
-    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
-            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
-
-
-    sp<HidlVendorTagDescriptor> desc = new HidlVendorTagDescriptor();
-    desc->mTagCount = tagCount;
-
-    SortedVector<String8> sections;
-    KeyedVector<uint32_t, String8> tagToSectionMap;
-
-    int idx = 0;
-    for (size_t s = 0; s < vts.size(); s++) {
-        const common::V1_0::VendorTagSection& section = vts[s];
-        const char *sectionName = section.sectionName.c_str();
-        if (sectionName == NULL) {
-            ALOGE("%s: no section name defined for vendor tag section %zu.", __FUNCTION__, s);
-            return BAD_VALUE;
-        }
-        String8 sectionString(sectionName);
-        sections.add(sectionString);
-
-        for (size_t j = 0; j < section.tags.size(); j++) {
-            uint32_t tag = section.tags[j].tagId;
-            if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
-                ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
-                return BAD_VALUE;
-            }
-
-            tagArray.editItemAt(idx++) = section.tags[j].tagId;
-
-            const char *tagName = section.tags[j].tagName.c_str();
-            if (tagName == NULL) {
-                ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
-                return BAD_VALUE;
-            }
-            desc->mTagToNameMap.add(tag, String8(tagName));
-            tagToSectionMap.add(tag, sectionString);
-
-            int tagType = (int) section.tags[j].tagType;
-            if (tagType < 0 || tagType >= NUM_TYPES) {
-                ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
-                return BAD_VALUE;
-            }
-            desc->mTagToTypeMap.add(tag, tagType);
-        }
-    }
-
-    desc->mSections = sections;
-
-    for (size_t i = 0; i < tagArray.size(); ++i) {
-        uint32_t tag = tagArray[i];
-        String8 sectionString = tagToSectionMap.valueFor(tag);
-
-        // Set up tag to section index map
-        ssize_t index = sections.indexOf(sectionString);
-        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
-        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
-
-        // Set up reverse mapping
-        ssize_t reverseIndex = -1;
-        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
-            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
-            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
-        }
-        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
-    }
-
-    descriptor = std::move(desc);
-    return OK;
-}
-
 // Expects to have mInterfaceMutex locked
 std::vector<std::unordered_set<std::string>>
 CameraProviderManager::getConcurrentCameraIds() const {
@@ -3004,59 +2034,6 @@
     return deviceIdCombinations;
 }
 
-status_t CameraProviderManager::convertToHALStreamCombinationAndCameraIdsLocked(
-        const std::vector<CameraIdAndSessionConfiguration> &cameraIdsAndSessionConfigs,
-        const std::set<std::string>& perfClassPrimaryCameraIds,
-        int targetSdkVersion,
-        hardware::hidl_vec<CameraIdAndStreamCombination> *halCameraIdsAndStreamCombinations,
-        bool *earlyExit) {
-    binder::Status bStatus = binder::Status::ok();
-    std::vector<CameraIdAndStreamCombination> halCameraIdsAndStreamsV;
-    bool shouldExit = false;
-    status_t res = OK;
-    for (auto &cameraIdAndSessionConfig : cameraIdsAndSessionConfigs) {
-        const std::string& cameraId = cameraIdAndSessionConfig.mCameraId;
-        hardware::camera::device::V3_7::StreamConfiguration streamConfiguration;
-        CameraMetadata deviceInfo;
-        bool overrideForPerfClass =
-                SessionConfigurationUtils::targetPerfClassPrimaryCamera(
-                        perfClassPrimaryCameraIds, cameraId, targetSdkVersion);
-        res = getCameraCharacteristicsLocked(cameraId, overrideForPerfClass, &deviceInfo);
-        if (res != OK) {
-            return res;
-        }
-        camera3::metadataGetter getMetadata =
-                [this](const String8 &id, bool overrideForPerfClass) {
-                    CameraMetadata physicalDeviceInfo;
-                    getCameraCharacteristicsLocked(id.string(), overrideForPerfClass,
-                                                   &physicalDeviceInfo);
-                    return physicalDeviceInfo;
-                };
-        std::vector<std::string> physicalCameraIds;
-        isLogicalCameraLocked(cameraId, &physicalCameraIds);
-        bStatus =
-            SessionConfigurationUtils::convertToHALStreamCombination(
-                    cameraIdAndSessionConfig.mSessionConfiguration,
-                    String8(cameraId.c_str()), deviceInfo, getMetadata,
-                    physicalCameraIds, streamConfiguration,
-                    overrideForPerfClass, &shouldExit);
-        if (!bStatus.isOk()) {
-            ALOGE("%s: convertToHALStreamCombination failed", __FUNCTION__);
-            return INVALID_OPERATION;
-        }
-        if (shouldExit) {
-            *earlyExit = true;
-            return OK;
-        }
-        CameraIdAndStreamCombination halCameraIdAndStream;
-        halCameraIdAndStream.cameraId = cameraId;
-        halCameraIdAndStream.streamConfiguration = streamConfiguration;
-        halCameraIdsAndStreamsV.push_back(halCameraIdAndStream);
-    }
-    *halCameraIdsAndStreamCombinations = halCameraIdsAndStreamsV;
-    return OK;
-}
-
 // Checks if the containing vector of sets has any set that contains all of the
 // camera ids in cameraIdsAndSessionConfigs.
 static bool checkIfSetContainsAll(
@@ -3091,27 +2068,9 @@
     for (auto &provider : mProviders) {
         if (checkIfSetContainsAll(cameraIdsAndSessionConfigs,
                 provider->getConcurrentCameraIdCombinations())) {
-            // For each camera device in cameraIdsAndSessionConfigs collect
-            // the streamConfigs and create the HAL
-            // CameraIdAndStreamCombination, exit early if needed
-            hardware::hidl_vec<CameraIdAndStreamCombination> halCameraIdsAndStreamCombinations;
-            bool knowUnsupported = false;
-            status_t res = convertToHALStreamCombinationAndCameraIdsLocked(
-                    cameraIdsAndSessionConfigs, perfClassPrimaryCameraIds,
-                    targetSdkVersion, &halCameraIdsAndStreamCombinations, &knowUnsupported);
-            if (res != OK) {
-                ALOGE("%s unable to convert session configurations provided to HAL stream"
-                      "combinations", __FUNCTION__);
-                return res;
-            }
-            if (knowUnsupported) {
-                // We got to know the streams aren't valid before doing the HAL
-                // call itself.
-                *isSupported = false;
-                return OK;
-            }
             return provider->isConcurrentSessionConfigurationSupported(
-                    halCameraIdsAndStreamCombinations, isSupported);
+                    cameraIdsAndSessionConfigs, perfClassPrimaryCameraIds, targetSdkVersion,
+                    isSupported);
         }
     }
     *isSupported = false;
@@ -3121,7 +2080,7 @@
 
 status_t CameraProviderManager::getCameraCharacteristicsLocked(const std::string &id,
         bool overrideForPerfClass, CameraMetadata* characteristics) const {
-    auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3,0}, /*maxVersion*/ {5,0});
+    auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3, 0}, /*maxVersion*/ {5, 0});
     if (deviceInfo != nullptr) {
         return deviceInfo->getCameraCharacteristics(overrideForPerfClass, characteristics);
     }
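
The hunk above removes the manager-side HIDL stream-combination conversion and instead calls straight into the provider: ProviderInfo::isConcurrentSessionConfigurationSupported becomes a pure virtual (see the CameraProviderManager.h diff below), and each transport supplies its own conversion plus HAL query. A minimal sketch of that contract follows; every name except the virtual's signature ('ExampleProviderInfo', 'convertToTransportCombination', 'queryHalForSupport') is a hypothetical placeholder, not part of this change.

// Illustrative sketch only; helper names are assumptions, not from this patch.
status_t ExampleProviderInfo::isConcurrentSessionConfigurationSupported(
        const std::vector<CameraIdAndSessionConfiguration>& configs,
        const std::set<std::string>& perfClassPrimaryCameraIds,
        int targetSdkVersion, bool* isSupported) {
    bool knownUnsupported = false;
    auto halCombinations = convertToTransportCombination(
            configs, perfClassPrimaryCameraIds, targetSdkVersion, &knownUnsupported);
    if (knownUnsupported) {
        // Mirrors the early exit removed from the manager above: the streams are
        // known to be invalid before making the HAL call itself.
        *isSupported = false;
        return OK;
    }
    return queryHalForSupport(halCombinations, isSupported);
}
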
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 1bdbb44..64f5abf 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -29,35 +29,40 @@
 #include <camera/CameraParameters2.h>
 #include <camera/CameraMetadata.h>
 #include <camera/CameraBase.h>
+#include <utils/Condition.h>
 #include <utils/Errors.h>
+#include <android/hardware/ICameraService.h>
+#include <utils/IPCTransport.h>
 #include <android/hardware/camera/common/1.0/types.h>
 #include <android/hardware/camera/provider/2.5/ICameraProvider.h>
 #include <android/hardware/camera/provider/2.6/ICameraProviderCallback.h>
 #include <android/hardware/camera/provider/2.6/ICameraProvider.h>
 #include <android/hardware/camera/provider/2.7/ICameraProvider.h>
 #include <android/hardware/camera/device/3.7/types.h>
+#include <android/hardware/camera/device/3.8/types.h>
 #include <android/hidl/manager/1.0/IServiceNotification.h>
+#include <binder/IServiceManager.h>
 #include <camera/VendorTagDescriptor.h>
 
 namespace android {
 
-/**
- * The vendor tag descriptor class that takes HIDL vendor tag information as
- * input. Not part of VendorTagDescriptor class because that class is used
- * in AIDL generated sources which don't have access to HIDL headers.
- */
-class HidlVendorTagDescriptor : public VendorTagDescriptor {
-public:
-    /**
-     * Create a VendorTagDescriptor object from the HIDL VendorTagSection
-     * vector.
-     *
-     * Returns OK on success, or a negative error code.
-     */
-    static status_t createDescriptorFromHidl(
-            const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
-            /*out*/
-            sp<VendorTagDescriptor>& descriptor);
+using hardware::camera2::utils::CameraIdAndSessionConfiguration;
+
+enum class CameraDeviceStatus : uint32_t {
+    NOT_PRESENT = 0,
+    PRESENT = 1,
+    ENUMERATING = 2
+};
+
+enum class TorchModeStatus : uint32_t {
+    NOT_AVAILABLE = 0,
+    AVAILABLE_OFF = 1,
+    AVAILABLE_ON = 2
+};
+
+struct CameraResourceCost {
+    uint32_t resourceCost;
+    std::vector<std::string> conflictingDevices;
 };
 
 enum SystemCameraKind {
@@ -89,6 +94,7 @@
 #define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5)
 #define CAMERA_DEVICE_API_VERSION_3_6 HARDWARE_DEVICE_API_VERSION(3, 6)
 #define CAMERA_DEVICE_API_VERSION_3_7 HARDWARE_DEVICE_API_VERSION(3, 7)
+#define CAMERA_DEVICE_API_VERSION_3_8 HARDWARE_DEVICE_API_VERSION(3, 8)
 
 /**
  * A manager for all camera providers available on an Android device.
@@ -102,12 +108,14 @@
  */
 class CameraProviderManager : virtual public hidl::manager::V1_0::IServiceNotification {
 public:
-
+    // HidlProviderInfo needs to be a friend struct since it inherits from
+    // CameraProviderManager::ProviderInfo, which isn't a public member.
+    friend struct HidlProviderInfo;
     ~CameraProviderManager();
 
     // Tiny proxy for the static methods in a HIDL interface that communicate with the hardware
     // service manager, to be replacable in unit tests with a fake.
-    struct ServiceInteractionProxy {
+    struct HidlServiceInteractionProxy {
         virtual bool registerForNotifications(
                 const std::string &serviceName,
                 const sp<hidl::manager::V1_0::IServiceNotification>
@@ -119,12 +127,12 @@
         virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
                 const std::string &serviceName) = 0;
         virtual hardware::hidl_vec<hardware::hidl_string> listServices() = 0;
-        virtual ~ServiceInteractionProxy() {}
+        virtual ~HidlServiceInteractionProxy() {}
     };
 
     // Standard use case - call into the normal generated static methods which invoke
     // the real hardware service manager
-    struct HardwareServiceInteractionProxy : public ServiceInteractionProxy {
+    struct HidlServiceInteractionProxyImpl : public HidlServiceInteractionProxy {
         virtual bool registerForNotifications(
                 const std::string &serviceName,
                 const sp<hidl::manager::V1_0::IServiceNotification>
@@ -151,12 +159,15 @@
         ~StatusListener() {}
 
         virtual void onDeviceStatusChanged(const String8 &cameraId,
-                hardware::camera::common::V1_0::CameraDeviceStatus newStatus) = 0;
+                CameraDeviceStatus newStatus) = 0;
         virtual void onDeviceStatusChanged(const String8 &cameraId,
                 const String8 &physicalCameraId,
-                hardware::camera::common::V1_0::CameraDeviceStatus newStatus) = 0;
+                CameraDeviceStatus newStatus) = 0;
         virtual void onTorchStatusChanged(const String8 &cameraId,
-                hardware::camera::common::V1_0::TorchModeStatus newStatus) = 0;
+                TorchModeStatus newStatus,
+                SystemCameraKind kind) = 0;
+        virtual void onTorchStatusChanged(const String8 &cameraId,
+                TorchModeStatus newStatus) = 0;
         virtual void onNewProviderRegistered() = 0;
     };
 
@@ -176,7 +187,10 @@
      * used for testing. The lifetime of the proxy must exceed the lifetime of the manager.
      */
     status_t initialize(wp<StatusListener> listener,
-            ServiceInteractionProxy *proxy = &sHardwareServiceInteractionProxy);
+            HidlServiceInteractionProxy *hidlProxy = &sHidlServiceInteractionProxy);
+
+    status_t getCameraIdIPCTransport(const std::string &id,
+            IPCTransport *providerTransport) const;
 
     /**
      * Retrieve the total number of available cameras.
@@ -214,7 +228,7 @@
      * Return the resource cost of this camera device
      */
     status_t getResourceCost(const std::string &id,
-            hardware::camera::common::V1_0::CameraResourceCost* cost) const;
+            CameraResourceCost* cost) const;
 
     /**
      * Return the old camera API camera info
@@ -240,7 +254,8 @@
      * Check for device support of specific stream combination.
      */
     status_t isSessionConfigurationSupported(const std::string& id,
-            const hardware::camera::device::V3_7::StreamConfiguration &configuration,
+            const SessionConfiguration &configuration,
+            bool overrideForPerfClass,
             bool *status /*out*/) const;
 
     /**
@@ -255,6 +270,17 @@
     bool supportSetTorchMode(const std::string &id) const;
 
     /**
+     * Check if torch strength update should be skipped or not.
+     */
+    bool shouldSkipTorchStrengthUpdate(const std::string &id, int32_t torchStrength) const;
+
+    /**
+     * Return the default torch strength level if the torch strength control
+     * feature is supported.
+     */
+    int32_t getTorchDefaultStrengthLevel(const std::string &id) const;
+
+    /**
      * Turn on or off the flashlight on a given camera device.
      * May fail if the device does not support this API, is in active use, or if the device
      * doesn't exist, etc.
@@ -262,6 +288,24 @@
     status_t setTorchMode(const std::string &id, bool enabled);
 
     /**
+     * Change the brightness level of the flash unit associated with the cameraId and
+     * set it to the value in torchStrength.
+     * If the torch is OFF and torchStrength > 0, the torch will be turned ON with the
+     * specified strength level. If the torch is ON, only the brightness level will be
+     * changed.
+     *
+     * This operation will fail if the device does not have a flash unit, has a flash
+     * unit but does not support this API, if torchStrength is invalid, or if the
+     * device doesn't exist.
+     */
+    status_t turnOnTorchWithStrengthLevel(const std::string &id, int32_t torchStrength);
+
+    /**
+     * Return the torch strength level of this camera device.
+     */
+    status_t getTorchStrengthLevel(const std::string &id, int32_t* torchStrength);
+
+    /**
      * Setup vendor tags for all registered providers
      */
     status_t setUpVendorTags();
@@ -269,8 +313,7 @@
     /**
      * Inform registered providers about a device state change, such as folding or unfolding
      */
-    status_t notifyDeviceStateChange(
-        android::hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> newState);
+    status_t notifyDeviceStateChange(int64_t newState);
 
     /**
      * Open an active session to a camera device.
@@ -278,18 +321,12 @@
      * This fully powers on the camera device hardware, and returns a handle to a
      * session to be used for hardware configuration and operation.
      */
-    status_t openSession(const std::string &id,
+    status_t openHidlSession(const std::string &id,
             const sp<hardware::camera::device::V3_2::ICameraDeviceCallback>& callback,
             /*out*/
             sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session);
 
     /**
-     * Save the ICameraProvider while it is being used by a camera or torch client
-     */
-    void saveRef(DeviceMode usageType, const std::string &cameraId,
-            sp<hardware::camera::provider::V2_4::ICameraProvider> provider);
-
-    /**
      * Notify that the camera or torch is no longer being used by a camera client
      */
     void removeRef(DeviceMode usageType, const std::string &cameraId);
@@ -331,43 +368,56 @@
 
     status_t filterSmallJpegSizes(const std::string& cameraId);
 
+    status_t notifyUsbDeviceEvent(int32_t eventId, const std::string &usbDeviceId);
+
     static const float kDepthARTolerance;
 private:
     // All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
     mutable std::mutex mInterfaceMutex;
 
-    // the status listener update callbacks will lock mStatusMutex
-    mutable std::mutex mStatusListenerMutex;
     wp<StatusListener> mListener;
-    ServiceInteractionProxy* mServiceProxy;
+    HidlServiceInteractionProxy* mHidlServiceProxy;
 
     // Current overall Android device physical status
-    android::hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> mDeviceState;
+    int64_t mDeviceState;
 
     // mProviderLifecycleLock is locked during onRegistration and removeProvider
     mutable std::mutex mProviderLifecycleLock;
 
-    static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
+    static HidlServiceInteractionProxyImpl sHidlServiceInteractionProxy;
+
+    struct HalCameraProvider {
+        // Empty parent struct for storing either an AIDL or HIDL camera provider reference
+        HalCameraProvider(const char *descriptor) : mDescriptor(descriptor) {}
+        virtual ~HalCameraProvider() {}
+        std::string mDescriptor;
+    };
+
+    struct HidlHalCameraProvider : public HalCameraProvider {
+        HidlHalCameraProvider(
+                const sp<hardware::camera::provider::V2_4::ICameraProvider> &provider,
+                const char *descriptor) :
+                HalCameraProvider(descriptor), mCameraProvider(provider) { };
+     private:
+        sp<hardware::camera::provider::V2_4::ICameraProvider> mCameraProvider;
+    };
 
     // Mapping from CameraDevice IDs to CameraProviders. This map is used to keep the
     // ICameraProvider alive while it is in use by the camera with the given ID for camera
     // capabilities
-    std::unordered_map<std::string, sp<hardware::camera::provider::V2_4::ICameraProvider>>
+    std::unordered_map<std::string, std::shared_ptr<HalCameraProvider>>
             mCameraProviderByCameraId;
 
     // Mapping from CameraDevice IDs to CameraProviders. This map is used to keep the
     // ICameraProvider alive while it is in use by the camera with the given ID for torch
     // capabilities
-    std::unordered_map<std::string, sp<hardware::camera::provider::V2_4::ICameraProvider>>
+    std::unordered_map<std::string, std::shared_ptr<HalCameraProvider>>
             mTorchProviderByCameraId;
 
     // Lock for accessing mCameraProviderByCameraId and mTorchProviderByCameraId
     std::mutex mProviderInterfaceMapLock;
-
-    struct ProviderInfo :
-            virtual public hardware::camera::provider::V2_6::ICameraProviderCallback,
-            virtual public hardware::hidl_death_recipient
-    {
+    struct ProviderInfo : public virtual RefBase {
+        friend struct HidlProviderInfo;
         const std::string mProviderName;
         const std::string mProviderInstance;
         const metadata_vendor_id_t mProviderTagid;
@@ -376,101 +426,96 @@
         bool mSetTorchModeSupported;
         bool mIsRemote;
 
-        // Current overall Android device physical status
-        hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> mDeviceState;
-
-        // This pointer is used to keep a reference to the ICameraProvider that was last accessed.
-        wp<hardware::camera::provider::V2_4::ICameraProvider> mActiveInterface;
-
-        sp<hardware::camera::provider::V2_4::ICameraProvider> mSavedInterface;
-
         ProviderInfo(const std::string &providerName, const std::string &providerInstance,
                 CameraProviderManager *manager);
         ~ProviderInfo();
 
-        status_t initialize(sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
-                hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
-                    currentDeviceState);
-
-        const sp<hardware::camera::provider::V2_4::ICameraProvider> startProviderInterface();
+        virtual IPCTransport getIPCTransport() = 0;
 
         const std::string& getType() const;
 
-        status_t addDevice(const std::string& name,
-                hardware::camera::common::V1_0::CameraDeviceStatus initialStatus =
-                hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT,
-                /*out*/ std::string *parsedId = nullptr);
-
         status_t dump(int fd, const Vector<String16>& args) const;
 
-        // ICameraProviderCallbacks interface - these lock the parent mInterfaceMutex
-        hardware::Return<void> cameraDeviceStatusChange(
-                const hardware::hidl_string& cameraDeviceName,
-                hardware::camera::common::V1_0::CameraDeviceStatus newStatus) override;
-        hardware::Return<void> torchModeStatusChange(
-                const hardware::hidl_string& cameraDeviceName,
-                hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
-        hardware::Return<void> physicalCameraDeviceStatusChange(
-                const hardware::hidl_string& cameraDeviceName,
-                const hardware::hidl_string& physicalCameraDeviceName,
-                hardware::camera::common::V1_0::CameraDeviceStatus newStatus) override;
-
-        status_t cameraDeviceStatusChangeLocked(
-                std::string* id, const hardware::hidl_string& cameraDeviceName,
-                hardware::camera::common::V1_0::CameraDeviceStatus newStatus);
-        status_t physicalCameraDeviceStatusChangeLocked(
-                std::string* id, std::string* physicalId,
-                const hardware::hidl_string& cameraDeviceName,
-                const hardware::hidl_string& physicalCameraDeviceName,
-                hardware::camera::common::V1_0::CameraDeviceStatus newStatus);
-
-        // hidl_death_recipient interface - this locks the parent mInterfaceMutex
-        virtual void serviceDied(uint64_t cookie, const wp<hidl::base::V1_0::IBase>& who) override;
-
         /**
          * Setup vendor tags for this provider
          */
-        status_t setUpVendorTags();
+        virtual status_t setUpVendorTags() = 0;
 
         /**
          * Notify provider about top-level device physical state changes
+         *
+         * Note that 'mInterfaceMutex' should not be held when calling this method.
+         * It is possible for camera providers to add/remove devices and try to
+         * acquire it.
          */
-        status_t notifyDeviceStateChange(
-                hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
-                    newDeviceState);
+        virtual status_t notifyDeviceStateChange(int64_t newDeviceState) = 0;
+
+        virtual bool successfullyStartedProviderInterface() = 0;
 
         std::vector<std::unordered_set<std::string>> getConcurrentCameraIdCombinations();
 
         /**
+         * Notify 'DeviceInfo' instances about top-level device physical state changes
+         *
+         * Note that 'mInterfaceMutex' should be held when calling this method.
+         */
+        void notifyDeviceInfoStateChangeLocked(int64_t newDeviceState);
+
+        /**
          * Query the camera provider for concurrent stream configuration support
          */
-        status_t isConcurrentSessionConfigurationSupported(
-                const hardware::hidl_vec<
-                        hardware::camera::provider::V2_7::CameraIdAndStreamCombination>
-                                &halCameraIdsAndStreamCombinations,
-                bool *isSupported);
+        virtual status_t isConcurrentSessionConfigurationSupported(
+                    const std::vector<CameraIdAndSessionConfiguration> &cameraIdsAndSessionConfigs,
+                    const std::set<std::string>& perfClassPrimaryCameraIds,
+                    int targetSdkVersion, bool *isSupported) = 0;
+
+
+        /**
+         * Remove all devices associated with this provider and notify listeners
+         * with NOT_PRESENT state.
+         */
+        void removeAllDevices();
+
+        /**
+         * Provider is an external lazy HAL
+         */
+        bool isExternalLazyHAL() const;
 
         // Basic device information, common to all camera devices
         struct DeviceInfo {
             const std::string mName;  // Full instance name
             const std::string mId;    // ID section of full name
+            // Used by both HIDL and AIDL DeviceInfos; AIDL DeviceInfos start
+            // off with version {3, 8}.
             const hardware::hidl_version mVersion;
             const metadata_vendor_id_t mProviderTagid;
             bool mIsLogicalCamera;
             std::vector<std::string> mPhysicalIds;
             hardware::CameraInfo mInfo;
-            sp<IBase> mSavedInterface;
             SystemCameraKind mSystemCameraKind = SystemCameraKind::PUBLIC;
 
-            const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
+            const CameraResourceCost mResourceCost;
 
-            hardware::camera::common::V1_0::CameraDeviceStatus mStatus;
+            CameraDeviceStatus mStatus;
 
             wp<ProviderInfo> mParentProvider;
+            // Current, maximum, and default torch strength levels if the torch
+            // strength control feature is supported.
+            int32_t mTorchStrengthLevel;
+            int32_t mTorchMaximumStrengthLevel;
+            int32_t mTorchDefaultStrengthLevel;
+
+            // Wait for lazy HALs to confirm device availability
+            static const nsecs_t kDeviceAvailableTimeout = 2000e6; // 2000 ms
+            Mutex     mDeviceAvailableLock;
+            Condition mDeviceAvailableSignal;
+            bool mIsDeviceAvailable = true;
 
             bool hasFlashUnit() const { return mHasFlashUnit; }
             bool supportNativeZoomRatio() const { return mSupportNativeZoomRatio; }
             virtual status_t setTorchMode(bool enabled) = 0;
+            virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) = 0;
+            virtual status_t getTorchStrengthLevel(int32_t *torchStrength) = 0;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
             virtual bool isAPI1Compatible() const = 0;
             virtual status_t dumpState(int fd) = 0;
@@ -488,41 +533,32 @@
             }
 
             virtual status_t isSessionConfigurationSupported(
-                    const hardware::camera::device::V3_7::StreamConfiguration &/*configuration*/,
+                    const SessionConfiguration &/*configuration*/,
+                    bool /*overrideForPerfClass*/,
                     bool * /*status*/) {
                 return INVALID_OPERATION;
             }
             virtual status_t filterSmallJpegSizes() = 0;
-
-            template<class InterfaceT>
-            sp<InterfaceT> startDeviceInterface();
+            virtual void notifyDeviceStateChange(int64_t /*newState*/) {}
 
             DeviceInfo(const std::string& name, const metadata_vendor_id_t tagId,
                     const std::string &id, const hardware::hidl_version& version,
                     const std::vector<std::string>& publicCameraIds,
-                    const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+                    const CameraResourceCost& resourceCost,
                     sp<ProviderInfo> parentProvider) :
                     mName(name), mId(id), mVersion(version), mProviderTagid(tagId),
                     mIsLogicalCamera(false), mResourceCost(resourceCost),
-                    mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
-                    mParentProvider(parentProvider), mHasFlashUnit(false),
-                    mSupportNativeZoomRatio(false), mPublicCameraIds(publicCameraIds) {}
-            virtual ~DeviceInfo();
+                    mStatus(CameraDeviceStatus::PRESENT),
+                    mParentProvider(parentProvider), mTorchStrengthLevel(0),
+                    mTorchMaximumStrengthLevel(0), mTorchDefaultStrengthLevel(0),
+                    mHasFlashUnit(false), mSupportNativeZoomRatio(false),
+                    mPublicCameraIds(publicCameraIds) {}
+            virtual ~DeviceInfo() {}
         protected:
+
             bool mHasFlashUnit; // const after constructor
             bool mSupportNativeZoomRatio; // const after constructor
             const std::vector<std::string>& mPublicCameraIds;
-
-            template<class InterfaceT>
-            static status_t setTorchMode(InterfaceT& interface, bool enabled);
-
-            template<class InterfaceT>
-            status_t setTorchModeForDevice(bool enabled) {
-                // Don't save the ICameraProvider interface here because we assume that this was
-                // called from CameraProviderManager::setTorchMode(), which does save it.
-                const sp<InterfaceT> interface = startDeviceInterface<InterfaceT>();
-                return DeviceInfo::setTorchMode(interface, enabled);
-            }
         };
         std::vector<std::unique_ptr<DeviceInfo>> mDevices;
         std::unordered_set<std::string> mUniqueCameraIds;
@@ -537,31 +573,36 @@
 
         // HALv3-specific camera fields, including the actual device interface
         struct DeviceInfo3 : public DeviceInfo {
-            typedef hardware::camera::device::V3_2::ICameraDevice InterfaceT;
 
-            virtual status_t setTorchMode(bool enabled) override;
+            virtual status_t setTorchMode(bool enabled) = 0;
+            virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) = 0;
+            virtual status_t getTorchStrengthLevel(int32_t *torchStrength) = 0;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
             virtual bool isAPI1Compatible() const override;
-            virtual status_t dumpState(int fd) override;
+            virtual status_t dumpState(int fd) = 0;
             virtual status_t getCameraCharacteristics(
                     bool overrideForPerfClass,
                     CameraMetadata *characteristics) const override;
             virtual status_t getPhysicalCameraCharacteristics(const std::string& physicalCameraId,
                     CameraMetadata *characteristics) const override;
             virtual status_t isSessionConfigurationSupported(
-                    const hardware::camera::device::V3_7::StreamConfiguration &configuration,
-                    bool *status /*out*/)
-                    override;
+                    const SessionConfiguration &configuration, bool /*overrideForPerfClass*/,
+                    bool *status /*out*/) = 0;
             virtual status_t filterSmallJpegSizes() override;
+            virtual void notifyDeviceStateChange(
+                        int64_t newState) override;
 
             DeviceInfo3(const std::string& name, const metadata_vendor_id_t tagId,
                     const std::string &id, uint16_t minorVersion,
-                    const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+                    const CameraResourceCost& resourceCost,
                     sp<ProviderInfo> parentProvider,
-                    const std::vector<std::string>& publicCameraIds, sp<InterfaceT> interface);
-            virtual ~DeviceInfo3();
-        private:
+                    const std::vector<std::string>& publicCameraIds);
+            virtual ~DeviceInfo3() {};
+        protected:
+            // Modified by derived transport specific (hidl / aidl) class
             CameraMetadata mCameraCharacteristics;
+            // Map device states to sensor orientations
+            std::unordered_map<int64_t, int32_t> mDeviceStateOrientationMap;
             // A copy of mCameraCharacteristics without performance class
             // override
             std::unique_ptr<CameraMetadata> mCameraCharNoPCOverride;
@@ -598,8 +639,7 @@
                     const camera_metadata_entry& halStreamConfigs,
                     const camera_metadata_entry& halStreamDurations);
         };
-
-    private:
+    protected:
         std::string mType;
         uint32_t mId;
 
@@ -609,12 +649,12 @@
 
         struct CameraStatusInfoT {
             bool isPhysicalCameraStatus = false;
-            hardware::hidl_string cameraId;
-            hardware::hidl_string physicalCameraId;
-            hardware::camera::common::V1_0::CameraDeviceStatus status;
-            CameraStatusInfoT(bool isForPhysicalCamera, const hardware::hidl_string& id,
-                    const hardware::hidl_string& physicalId,
-                    hardware::camera::common::V1_0::CameraDeviceStatus s) :
+            std::string cameraId;
+            std::string physicalCameraId;
+            CameraDeviceStatus status;
+            CameraStatusInfoT(bool isForPhysicalCamera, const std::string& id,
+                    const std::string& physicalId,
+                    CameraDeviceStatus s) :
                     isPhysicalCameraStatus(isForPhysicalCamera), cameraId(id),
                     physicalCameraId(physicalId), status(s) {}
         };
@@ -631,17 +671,6 @@
 
         std::vector<std::unordered_set<std::string>> mConcurrentCameraIdCombinations;
 
-        // Templated method to instantiate the right kind of DeviceInfo and call the
-        // right CameraProvider getCameraDeviceInterface_* method.
-        template<class DeviceInfoT>
-        std::unique_ptr<DeviceInfo> initializeDeviceInfo(const std::string &name,
-                const metadata_vendor_id_t tagId, const std::string &id,
-                uint16_t minorVersion);
-
-        // Helper for initializeDeviceInfo to use the right CameraProvider get method.
-        template<class InterfaceT>
-        sp<InterfaceT> startDeviceInterface(const std::string &name);
-
         // Parse provider instance name for type and id
         static status_t parseProviderName(const std::string& name,
                 std::string *type, uint32_t *id);
@@ -655,13 +684,14 @@
 
         void removeDevice(std::string id);
 
-        // Expects to have mLock locked
-        status_t reCacheConcurrentStreamingCameraIdsLocked();
-        // Expects to have mLock locked
-        status_t getConcurrentCameraIdsInternalLocked(
-                sp<hardware::camera::provider::V2_6::ICameraProvider> &interface2_6);
     };
 
+    /**
+     * Save the ICameraProvider while it is being used by a camera or torch client
+     */
+    void saveRef(DeviceMode usageType, const std::string &cameraId,
+            std::shared_ptr<HalCameraProvider> provider);
+
     // Utility to find a DeviceInfo by ID; pointer is only valid while mInterfaceMutex is held
     // and the calling code doesn't mutate the list of providers or their lists of devices.
     // Finds the first device of the given ID that falls within the requested version range
@@ -671,9 +701,15 @@
             hardware::hidl_version minVersion = hardware::hidl_version{0,0},
             hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
 
-    status_t addProviderLocked(const std::string& newProvider, bool preexisting = false);
+    // Map external providers to USB devices in order to handle USB hotplug
+    // events for lazy HALs
+    std::pair<std::vector<std::string>, sp<ProviderInfo>>
+        mExternalUsbDevicesForProvider;
+    sp<ProviderInfo> startExternalLazyProvider() const;
 
-    status_t tryToInitializeProviderLocked(const std::string& providerName,
+    status_t addHidlProviderLocked(const std::string& newProvider, bool preexisting = false);
+
+    status_t tryToInitializeHidlProviderLocked(const std::string& providerName,
             const sp<ProviderInfo>& providerInfo);
 
     bool isLogicalCameraLocked(const std::string& id, std::vector<std::string>* physicalCameraIds);
@@ -686,14 +722,6 @@
     size_t mProviderInstanceId = 0;
     std::vector<sp<ProviderInfo>> mProviders;
 
-    void addProviderToMap(
-            const std::string &cameraId,
-            sp<hardware::camera::provider::V2_4::ICameraProvider> provider,
-            bool isTorchUsage);
-    void removeCameraIdFromMap(
-        std::unordered_map<std::string, sp<hardware::camera::provider::V2_4::ICameraProvider>> &map,
-        const std::string &cameraId);
-
     static const char* deviceStatusToString(
         const hardware::camera::common::V1_0::CameraDeviceStatus&);
     static const char* torchStatusToString(
@@ -704,20 +732,14 @@
     void filterLogicalCameraIdsLocked(std::vector<std::string>& deviceIds) const;
 
     status_t getSystemCameraKindLocked(const std::string& id, SystemCameraKind *kind) const;
-    std::pair<bool, ProviderInfo::DeviceInfo *> isHiddenPhysicalCameraInternal(const std::string& cameraId) const;
+    std::pair<bool, ProviderInfo::DeviceInfo *> isHiddenPhysicalCameraInternal(
+            const std::string& cameraId) const;
 
     void collectDeviceIdsLocked(const std::vector<std::string> deviceIds,
             std::vector<std::string>& normalDeviceIds,
             std::vector<std::string>& systemCameraDeviceIds) const;
 
-    status_t convertToHALStreamCombinationAndCameraIdsLocked(
-              const std::vector<hardware::camera2::utils::CameraIdAndSessionConfiguration>
-                      &cameraIdsAndSessionConfigs,
-              const std::set<std::string>& perfClassPrimaryCameraIds,
-              int targetSdkVersion,
-              hardware::hidl_vec<hardware::camera::provider::V2_7::CameraIdAndStreamCombination>
-                      *halCameraIdsAndStreamCombinations,
-              bool *earlyExit);
+    status_t usbDeviceDetached(const std::string &usbDeviceId);
 };
 
 } // namespace android
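
For orientation, a minimal usage sketch of the torch strength entry points declared in the header above (getTorchDefaultStrengthLevel, shouldSkipTorchStrengthUpdate, turnOnTorchWithStrengthLevel, getTorchStrengthLevel). It is illustrative only: the wrapper function, and the assumption that 'manager' is an already-initialized CameraProviderManager inside namespace android with the usual log macros available, are not part of this change.

// Illustrative sketch; assumes 'manager' is initialized and 'id' names a camera
// device with a flash unit.
void setTorchToDefaultStrength(CameraProviderManager& manager, const std::string& id) {
    int32_t defaultLevel = manager.getTorchDefaultStrengthLevel(id);
    // shouldSkipTorchStrengthUpdate() reports whether the update can be skipped
    // (see the declaration above).
    if (manager.shouldSkipTorchStrengthUpdate(id, defaultLevel)) {
        return;
    }
    // Turns the torch on at 'defaultLevel' if it is off; otherwise only the
    // brightness level changes, per the documentation above.
    status_t res = manager.turnOnTorchWithStrengthLevel(id, defaultLevel);
    if (res != OK) {
        ALOGW("turnOnTorchWithStrengthLevel failed: %s (%d)", strerror(-res), res);
        return;
    }
    int32_t currentLevel = 0;
    if (manager.getTorchStrengthLevel(id, &currentLevel) == OK) {
        ALOGV("Torch strength level is now %d", currentLevel);
    }
}
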
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index c995670..719ff2c 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -42,6 +42,10 @@
 #include <xmpmeta/xmp_data.h>
 #include <xmpmeta/xmp_writer.h>
 
+#ifndef __unused
+#define __unused __attribute__((__unused__))
+#endif
+
 using dynamic_depth::Camera;
 using dynamic_depth::Cameras;
 using dynamic_depth::CameraParams;
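
The fallback added above simply defines __unused as the GCC/Clang 'unused' attribute when the toolchain does not already provide the macro. As a quick, hypothetical illustration (not taken from this file), it is typically attached to parameters that some build configurations leave unreferenced:

// Hypothetical example: 'orientation' may go unreferenced when the dynamic-depth
// path is compiled out, so __unused keeps -Wunused-parameter quiet.
static void logDepthPhotoInput(size_t bufferSize, int orientation __unused) {
    ALOGV("Depth photo input buffer: %zu bytes", bufferSize);
}
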
diff --git a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
new file mode 100644
index 0000000..e8432a6
--- /dev/null
+++ b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
@@ -0,0 +1,1564 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "HidlProviderInfo.h"
+
+#include <cutils/properties.h>
+
+#include <android/hardware/ICameraService.h>
+#include <camera_metadata_hidden.h>
+
+#include "device3/ZoomRatioMapper.h"
+#include <utils/SessionConfigurationUtils.h>
+#include <utils/Trace.h>
+
+#include <android/hardware/camera/device/3.7/ICameraDevice.h>
+#include <android/hardware/camera/device/3.8/ICameraDevice.h>
+
+namespace {
+const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
+} // anonymous namespace
+
+namespace android {
+
+using namespace android::camera3;
+using namespace hardware::camera;
+using hardware::camera::common::V1_0::VendorTagSection;
+using hardware::camera::common::V1_0::Status;
+using hardware::camera::provider::V2_7::CameraIdAndStreamCombination;
+using hardware::camera2::utils::CameraIdAndSessionConfiguration;
+
+
+using StatusListener = CameraProviderManager::StatusListener;
+
+using hardware::camera::provider::V2_5::DeviceState;
+using hardware::ICameraService;
+
+status_t HidlProviderInfo::mapToStatusT(const Status& s)  {
+    switch(s) {
+        case Status::OK:
+            return OK;
+        case Status::ILLEGAL_ARGUMENT:
+            return BAD_VALUE;
+        case Status::CAMERA_IN_USE:
+            return -EBUSY;
+        case Status::MAX_CAMERAS_IN_USE:
+            return -EUSERS;
+        case Status::METHOD_NOT_SUPPORTED:
+            return UNKNOWN_TRANSACTION;
+        case Status::OPERATION_NOT_SUPPORTED:
+            return INVALID_OPERATION;
+        case Status::CAMERA_DISCONNECTED:
+            return DEAD_OBJECT;
+        case Status::INTERNAL_ERROR:
+            return INVALID_OPERATION;
+    }
+    ALOGW("Unexpected HAL status code %d", s);
+    return INVALID_OPERATION;
+}
+
+static hardware::hidl_bitfield<DeviceState> mapToHidlDeviceState(int64_t newState) {
+    hardware::hidl_bitfield<DeviceState> newDeviceState{};
+    if (newState & ICameraService::DEVICE_STATE_BACK_COVERED) {
+        newDeviceState |= DeviceState::BACK_COVERED;
+    }
+    if (newState & ICameraService::DEVICE_STATE_FRONT_COVERED) {
+        newDeviceState |= DeviceState::FRONT_COVERED;
+    }
+    if (newState & ICameraService::DEVICE_STATE_FOLDED) {
+        newDeviceState |= DeviceState::FOLDED;
+    }
+    // Only map vendor bits directly
+    uint64_t vendorBits = static_cast<uint64_t>(newState) & 0xFFFFFFFF00000000l;
+    newDeviceState |= vendorBits;
+
+    ALOGV("%s: New device state 0x%" PRIx64, __FUNCTION__, newDeviceState);
+    return newDeviceState;
+}
+
+const char* statusToString(const Status& s) {
+    switch(s) {
+        case Status::OK:
+            return "OK";
+        case Status::ILLEGAL_ARGUMENT:
+            return "ILLEGAL_ARGUMENT";
+        case Status::CAMERA_IN_USE:
+            return "CAMERA_IN_USE";
+        case Status::MAX_CAMERAS_IN_USE:
+            return "MAX_CAMERAS_IN_USE";
+        case Status::METHOD_NOT_SUPPORTED:
+            return "METHOD_NOT_SUPPORTED";
+        case Status::OPERATION_NOT_SUPPORTED:
+            return "OPERATION_NOT_SUPPORTED";
+        case Status::CAMERA_DISCONNECTED:
+            return "CAMERA_DISCONNECTED";
+        case Status::INTERNAL_ERROR:
+            return "INTERNAL_ERROR";
+    }
+    ALOGW("Unexpected HAL status code %d", s);
+    return "UNKNOWN_ERROR";
+}
+
+static common::V1_0::CameraDeviceStatus mapToHidlCameraDeviceStatus(const CameraDeviceStatus& s)  {
+    switch(s) {
+        case CameraDeviceStatus::PRESENT:
+            return common::V1_0::CameraDeviceStatus::PRESENT;
+        case CameraDeviceStatus::NOT_PRESENT:
+            return common::V1_0::CameraDeviceStatus::NOT_PRESENT;
+        case CameraDeviceStatus::ENUMERATING:
+            return common::V1_0::CameraDeviceStatus::ENUMERATING;
+    }
+    ALOGW("Unexpectedcamera device status code %d", s);
+    return common::V1_0::CameraDeviceStatus::NOT_PRESENT;
+}
+
+static CameraDeviceStatus hidlToInternalCameraDeviceStatus(
+        const common::V1_0::CameraDeviceStatus& s)  {
+    switch(s) {
+        case common::V1_0::CameraDeviceStatus::PRESENT:
+            return CameraDeviceStatus::PRESENT;
+        case common::V1_0::CameraDeviceStatus::NOT_PRESENT:
+            return CameraDeviceStatus::NOT_PRESENT;
+        case common::V1_0::CameraDeviceStatus::ENUMERATING:
+            return CameraDeviceStatus::ENUMERATING;
+    }
+    ALOGW("Unexpectedcamera device status code %d", s);
+    return CameraDeviceStatus::NOT_PRESENT;
+}
+
+static TorchModeStatus hidlToInternalTorchModeStatus(
+        const common::V1_0::TorchModeStatus& s)  {
+    switch(s) {
+        case common::V1_0::TorchModeStatus::NOT_AVAILABLE:
+            return TorchModeStatus::NOT_AVAILABLE;
+        case common::V1_0::TorchModeStatus::AVAILABLE_OFF:
+            return TorchModeStatus::AVAILABLE_OFF;
+        case common::V1_0::TorchModeStatus::AVAILABLE_ON:
+            return TorchModeStatus::AVAILABLE_ON;
+    }
+    ALOGW("Unexpectedcamera torch mode status code %d", s);
+    return TorchModeStatus::NOT_AVAILABLE;
+}
+
+static CameraResourceCost hidlToInternalResourceCost(
+        const common::V1_0::CameraResourceCost& s)  {
+    CameraResourceCost internalResourceCost;
+    internalResourceCost.resourceCost = s.resourceCost;
+    for (const auto& device : s.conflictingDevices) {
+        internalResourceCost.conflictingDevices.emplace_back(device.c_str());
+    }
+    return internalResourceCost;
+}
+
+static const char* deviceStatusToString(const common::V1_0::CameraDeviceStatus& s) {
+    switch(s) {
+        case common::V1_0::CameraDeviceStatus::NOT_PRESENT:
+            return "NOT_PRESENT";
+        case common::V1_0::CameraDeviceStatus::PRESENT:
+            return "PRESENT";
+        case common::V1_0::CameraDeviceStatus::ENUMERATING:
+            return "ENUMERATING";
+    }
+    ALOGW("Unexpected HAL device status code %d", s);
+    return "UNKNOWN_STATUS";
+}
+
+static const char* torchStatusToString(const common::V1_0::TorchModeStatus& s) {
+    switch(s) {
+        case common::V1_0::TorchModeStatus::NOT_AVAILABLE:
+            return "NOT_AVAILABLE";
+        case common::V1_0::TorchModeStatus::AVAILABLE_OFF:
+            return "AVAILABLE_OFF";
+        case common::V1_0::TorchModeStatus::AVAILABLE_ON:
+            return "AVAILABLE_ON";
+    }
+    ALOGW("Unexpected HAL torch mode status code %d", s);
+    return "UNKNOWN_STATUS";
+}
+
+status_t HidlProviderInfo::initializeHidlProvider(
+        sp<provider::V2_4::ICameraProvider>& interface,
+        int64_t currentDeviceState) {
+    status_t res = parseProviderName(mProviderName, &mType, &mId);
+    if (res != OK) {
+        ALOGE("%s: Invalid provider name, ignoring", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    ALOGI("Connecting to new camera provider: %s, isRemote? %d",
+            mProviderName.c_str(), interface->isRemote());
+
+    // Determine minor version
+    mMinorVersion = 4;
+    auto cast2_6 = provider::V2_6::ICameraProvider::castFrom(interface);
+    sp<provider::V2_6::ICameraProvider> interface2_6 = nullptr;
+    if (cast2_6.isOk()) {
+        interface2_6 = cast2_6;
+        if (interface2_6 != nullptr) {
+            mMinorVersion = 6;
+        }
+    }
+    // We need to check again since cast2_6.isOk() succeeds even if the provider
+    // version isn't actually 2.6.
+    if (interface2_6 == nullptr){
+        auto cast2_5 =
+                provider::V2_5::ICameraProvider::castFrom(interface);
+        sp<provider::V2_5::ICameraProvider> interface2_5 = nullptr;
+        if (cast2_5.isOk()) {
+            interface2_5 = cast2_5;
+            if (interface2_5 != nullptr) {
+                mMinorVersion = 5;
+            }
+        }
+    } else {
+        auto cast2_7 = provider::V2_7::ICameraProvider::castFrom(interface);
+        if (cast2_7.isOk()) {
+            sp<provider::V2_7::ICameraProvider> interface2_7 = cast2_7;
+            if (interface2_7 != nullptr) {
+                mMinorVersion = 7;
+            }
+        }
+    }
+
+    // cameraDeviceStatusChange callbacks may be called (and causing new devices added)
+    // before setCallback returns
+    hardware::Return<Status> status = interface->setCallback(this);
+    if (!status.isOk()) {
+        ALOGE("%s: Transaction error setting up callbacks with camera provider '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), status.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to register callbacks with camera provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return mapToStatusT(status);
+    }
+
+    hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
+    if (!linked.isOk()) {
+        ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
+                __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
+        return DEAD_OBJECT;
+    } else if (!linked) {
+        ALOGW("%s: Unable to link to provider '%s' death notifications",
+                __FUNCTION__, mProviderName.c_str());
+    }
+
+    if (!kEnableLazyHal) {
+        // Save HAL reference indefinitely
+        mSavedInterface = interface;
+    } else {
+        mActiveInterface = interface;
+    }
+
+    ALOGV("%s: Setting device state for %s: 0x%" PRIx64,
+            __FUNCTION__, mProviderName.c_str(), mDeviceState);
+    notifyDeviceStateChange(currentDeviceState);
+
+    res = setUpVendorTags();
+    if (res != OK) {
+        ALOGE("%s: Unable to set up vendor tags from provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return res;
+    }
+
+    // Get initial list of camera devices, if any
+    std::vector<std::string> devices;
+    hardware::Return<void> ret = interface->getCameraIdList([&status, this, &devices](
+            Status idStatus,
+            const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames) {
+        status = idStatus;
+        if (status == Status::OK) {
+            for (auto& name : cameraDeviceNames) {
+                uint16_t major, minor;
+                std::string type, id;
+                status_t res = parseDeviceName(name, &major, &minor, &type, &id);
+                if (res != OK) {
+                    ALOGE("%s: Error parsing deviceName: %s: %d", __FUNCTION__, name.c_str(), res);
+                    status = Status::INTERNAL_ERROR;
+                } else {
+                    devices.push_back(name);
+                    mProviderPublicCameraIds.push_back(id);
+                }
+            }
+        } });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error in getting camera ID list from provider '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to query for camera devices from provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return mapToStatusT(status);
+    }
+
+    // Get list of concurrent streaming camera device combinations
+    if (mMinorVersion >= 6) {
+        res = getConcurrentCameraIdsInternalLocked(interface2_6);
+        if (res != OK) {
+            return res;
+        }
+    }
+
+    ret = interface->isSetTorchModeSupported(
+        [this](auto status, bool supported) {
+            if (status == Status::OK) {
+                mSetTorchModeSupported = supported;
+            }
+        });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error checking torch mode support '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
+        return DEAD_OBJECT;
+    }
+
+    mIsRemote = interface->isRemote();
+
+    sp<StatusListener> listener = mManager->getStatusListener();
+    for (auto& device : devices) {
+        std::string id;
+        status_t res = addDevice(device, common::V1_0::CameraDeviceStatus::PRESENT, &id);
+        if (res != OK) {
+            ALOGE("%s: Unable to enumerate camera device '%s': %s (%d)",
+                    __FUNCTION__, device.c_str(), strerror(-res), res);
+            continue;
+        }
+    }
+
+    ALOGI("Camera provider %s ready with %zu camera devices",
+            mProviderName.c_str(), mDevices.size());
+
+    // Process cached status callbacks
+    std::unique_ptr<std::vector<CameraStatusInfoT>> cachedStatus =
+            std::make_unique<std::vector<CameraStatusInfoT>>();
+    {
+        std::lock_guard<std::mutex> lock(mInitLock);
+
+        for (auto& statusInfo : mCachedStatus) {
+            std::string id, physicalId;
+            status_t res = OK;
+            if (statusInfo.isPhysicalCameraStatus) {
+                res = physicalCameraDeviceStatusChangeLocked(&id, &physicalId,
+                    statusInfo.cameraId, statusInfo.physicalCameraId,
+                    mapToHidlCameraDeviceStatus(statusInfo.status));
+            } else {
+                res = cameraDeviceStatusChangeLocked(&id, statusInfo.cameraId,
+                        mapToHidlCameraDeviceStatus(statusInfo.status));
+            }
+            if (res == OK) {
+                cachedStatus->emplace_back(statusInfo.isPhysicalCameraStatus,
+                        id.c_str(), physicalId.c_str(), statusInfo.status);
+            }
+        }
+        mCachedStatus.clear();
+
+        mInitialized = true;
+    }
+
+    // The cached status change callbacks cannot be fired directly from this
+    // function, due to same-thread deadlock trying to acquire mInterfaceMutex
+    // twice.
+    if (listener != nullptr) {
+        mInitialStatusCallbackFuture = std::async(std::launch::async,
+                &CameraProviderManager::ProviderInfo::notifyInitialStatusChange, this,
+                listener, std::move(cachedStatus));
+    }
+
+    return OK;
+}
+
+status_t HidlProviderInfo::setUpVendorTags() {
+    if (mVendorTagDescriptor != nullptr)
+        return OK;
+
+    hardware::hidl_vec<VendorTagSection> vts;
+    Status status;
+    hardware::Return<void> ret;
+    const sp<hardware::camera::provider::V2_4::ICameraProvider> interface =
+            startProviderInterface();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    ret = interface->getVendorTags(
+        [&](auto s, const auto& vendorTagSecs) {
+            status = s;
+            if (s == Status::OK) {
+                vts = vendorTagSecs;
+            }
+    });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error getting vendor tags from provider '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        return mapToStatusT(status);
+    }
+
+    // Read all vendor tag definitions into a descriptor
+    status_t res;
+    if ((res = HidlVendorTagDescriptor::createDescriptorFromHidl(vts, /*out*/mVendorTagDescriptor))
+            != OK) {
+        ALOGE("%s: Could not generate descriptor from vendor tag operations,"
+                "received error %s (%d). Camera clients will not be able to use"
+                "vendor tags", __FUNCTION__, strerror(res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t HidlProviderInfo::notifyDeviceStateChange(int64_t newDeviceState) {
+    mDeviceState = mapToHidlDeviceState(newDeviceState);
+    if (mMinorVersion >= 5) {
+        // Check if the provider is currently active - not going to start it for this notification
+        auto interface = mSavedInterface != nullptr ? mSavedInterface : mActiveInterface.promote();
+        if (interface != nullptr) {
+            // Send current device state
+            auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
+            if (castResult.isOk()) {
+                sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
+                if (interface_2_5 != nullptr) {
+                    interface_2_5->notifyDeviceStateChange(mDeviceState);
+                }
+            }
+        }
+    }
+    return OK;
+}
+
+sp<device::V3_2::ICameraDevice>
+HidlProviderInfo::startDeviceInterface(const std::string &name) {
+    Status status;
+    sp<device::V3_2::ICameraDevice> cameraInterface;
+    hardware::Return<void> ret;
+    const sp<provider::V2_4::ICameraProvider> interface = startProviderInterface();
+    if (interface == nullptr) {
+        return nullptr;
+    }
+    ret = interface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
+        Status s, sp<device::V3_2::ICameraDevice> interface) {
+                status = s;
+                cameraInterface = interface;
+            });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error trying to obtain interface for camera device %s: %s",
+                __FUNCTION__, name.c_str(), ret.description().c_str());
+        return nullptr;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
+                name.c_str(), statusToString(status));
+        return nullptr;
+    }
+    return cameraInterface;
+}
+
+bool HidlProviderInfo::successfullyStartedProviderInterface() {
+    return startProviderInterface() != nullptr;
+}
+
+const sp<provider::V2_4::ICameraProvider>
+HidlProviderInfo::startProviderInterface() {
+    ATRACE_CALL();
+    ALOGV("Request to start camera provider: %s", mProviderName.c_str());
+    if (mSavedInterface != nullptr) {
+        return mSavedInterface;
+    }
+    if (!kEnableLazyHal) {
+        ALOGE("Bad provider state! Should not be here on a non-lazy HAL!");
+        return nullptr;
+    }
+
+    auto interface = mActiveInterface.promote();
+    if (interface == nullptr) {
+        // Try to get service without starting
+        interface = mManager->mHidlServiceProxy->tryGetService(mProviderName);
+        if (interface == nullptr) {
+            ALOGV("Camera provider actually needs restart, calling getService(%s)",
+                  mProviderName.c_str());
+            interface = mManager->mHidlServiceProxy->getService(mProviderName);
+
+            // Set all devices as ENUMERATING, provider should update status
+            // to PRESENT after initializing.
+            // This avoids failing getCameraDeviceInterface_V3_x before devices
+            // are ready.
+            for (auto& device : mDevices) {
+              device->mIsDeviceAvailable = false;
+            }
+
+            interface->setCallback(this);
+            hardware::Return<bool>
+                linked = interface->linkToDeath(this, /*cookie*/ mId);
+            if (!linked.isOk()) {
+              ALOGE(
+                  "%s: Transaction error in linking to camera provider '%s' death: %s",
+                  __FUNCTION__,
+                  mProviderName.c_str(),
+                  linked.description().c_str());
+              mManager->removeProvider(mProviderName);
+              return nullptr;
+            } else if (!linked) {
+              ALOGW("%s: Unable to link to provider '%s' death notifications",
+                    __FUNCTION__, mProviderName.c_str());
+            }
+            // Send current device state
+            if (mMinorVersion >= 5) {
+              auto castResult =
+                  provider::V2_5::ICameraProvider::castFrom(interface);
+              if (castResult.isOk()) {
+                sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
+                if (interface_2_5 != nullptr) {
+                  ALOGV("%s: Initial device state for %s: 0x %" PRIx64,
+                        __FUNCTION__, mProviderName.c_str(), mDeviceState);
+                  interface_2_5->notifyDeviceStateChange(mDeviceState);
+                }
+              }
+            }
+        }
+        mActiveInterface = interface;
+    } else {
+        ALOGV("Camera provider (%s) already in use. Re-using instance.",
+              mProviderName.c_str());
+    }
+
+    return interface;
+}
+
+hardware::Return<void> HidlProviderInfo::cameraDeviceStatusChange(
+        const hardware::hidl_string& cameraDeviceName,
+        hardware::camera::common::V1_0::CameraDeviceStatus newStatus) {
+    sp<StatusListener> listener;
+    std::string id;
+    std::lock_guard<std::mutex> lock(mInitLock);
+    CameraDeviceStatus internalNewStatus = hidlToInternalCameraDeviceStatus(newStatus);
+    if (!mInitialized) {
+        mCachedStatus.emplace_back(false /*isPhysicalCameraStatus*/,
+                cameraDeviceName.c_str(), std::string().c_str(),
+                internalNewStatus);
+        return hardware::Void();
+    }
+
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (OK != cameraDeviceStatusChangeLocked(&id, cameraDeviceName, newStatus)) {
+            return hardware::Void();
+        }
+        listener = mManager->getStatusListener();
+    }
+
+    // Call without lock held to allow reentrancy into provider manager
+    if (listener != nullptr) {
+        listener->onDeviceStatusChanged(String8(id.c_str()), internalNewStatus);
+    }
+
+    return hardware::Void();
+}
+
+status_t HidlProviderInfo::addDevice(const std::string& name,
+        common::V1_0::CameraDeviceStatus initialStatus, /*out*/ std::string* parsedId) {
+
+    ALOGI("Enumerating new camera device: %s", name.c_str());
+
+    uint16_t major, minor;
+    std::string type, id;
+
+    status_t res = parseDeviceName(name, &major, &minor, &type, &id);
+    if (res != OK) {
+        return res;
+    }
+    if (type != mType) {
+        ALOGE("%s: Device type %s does not match provider type %s", __FUNCTION__,
+                type.c_str(), mType.c_str());
+        return BAD_VALUE;
+    }
+    if (mManager->isValidDeviceLocked(id, major)) {
+        ALOGE("%s: Device %s: ID %s is already in use for device major version %d", __FUNCTION__,
+                name.c_str(), id.c_str(), major);
+        return BAD_VALUE;
+    }
+
+    std::unique_ptr<DeviceInfo> deviceInfo;
+    switch (major) {
+        case 1:
+            ALOGE("%s: Device %s: Unsupported HIDL device HAL major version %d:", __FUNCTION__,
+                    name.c_str(), major);
+            return BAD_VALUE;
+        case 3:
+            deviceInfo = initializeDeviceInfo(name, mProviderTagid, id, minor);
+            break;
+        default:
+            ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
+                    name.c_str(), major);
+            return BAD_VALUE;
+    }
+    if (deviceInfo == nullptr) return BAD_VALUE;
+    deviceInfo->notifyDeviceStateChange(mDeviceState);
+    deviceInfo->mStatus = hidlToInternalCameraDeviceStatus(initialStatus);
+    bool isAPI1Compatible = deviceInfo->isAPI1Compatible();
+
+    mDevices.push_back(std::move(deviceInfo));
+
+    mUniqueCameraIds.insert(id);
+    if (isAPI1Compatible) {
+        // addDevice can be called more than once for the same camera id if HAL
+        // supports openLegacy.
+        if (std::find(mUniqueAPI1CompatibleCameraIds.begin(), mUniqueAPI1CompatibleCameraIds.end(),
+                id) == mUniqueAPI1CompatibleCameraIds.end()) {
+            mUniqueAPI1CompatibleCameraIds.push_back(id);
+        }
+    }
+
+    if (parsedId != nullptr) {
+        *parsedId = id;
+    }
+    return OK;
+}
+
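+// Must be called with mLock held. Updates the status of a known device, adds a
+// previously unseen one, or removes a device reported as NOT_PRESENT, then
+// refreshes the cached concurrent streaming camera id combinations.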
+status_t HidlProviderInfo::cameraDeviceStatusChangeLocked(
+        std::string* id, const hardware::hidl_string& cameraDeviceName,
+        hardware::camera::common::V1_0::CameraDeviceStatus newStatus) {
+    bool known = false;
+    std::string cameraId;
+    for (auto& deviceInfo : mDevices) {
+        if (deviceInfo->mName == cameraDeviceName) {
+            Mutex::Autolock l(deviceInfo->mDeviceAvailableLock);
+            ALOGI("Camera device %s status is now %s, was %s", cameraDeviceName.c_str(),
+                    deviceStatusToString(newStatus),
+                    deviceStatusToString(mapToHidlCameraDeviceStatus(deviceInfo->mStatus)));
+            deviceInfo->mStatus = hidlToInternalCameraDeviceStatus(newStatus);
+            // TODO: Handle device removal (NOT_PRESENT)
+            cameraId = deviceInfo->mId;
+            known = true;
+            deviceInfo->mIsDeviceAvailable =
+                (newStatus == hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT);
+            deviceInfo->mDeviceAvailableSignal.signal();
+            break;
+        }
+    }
+    // Previously unseen device; status must not be NOT_PRESENT
+    if (!known) {
+        if (newStatus == hardware::camera::common::V1_0::CameraDeviceStatus::NOT_PRESENT) {
+            ALOGW("Camera provider %s says an unknown camera device %s is not present. Curious.",
+                mProviderName.c_str(), cameraDeviceName.c_str());
+            return BAD_VALUE;
+        }
+        addDevice(cameraDeviceName, newStatus, &cameraId);
+    } else if (newStatus == hardware::camera::common::V1_0::CameraDeviceStatus::NOT_PRESENT) {
+        removeDevice(cameraId);
+    } else if (isExternalLazyHAL()) {
+        // Do not notify CameraService for PRESENT->PRESENT (lazy HAL restart):
+        // NOT_AVAILABLE is set on CameraService::connect and a PRESENT
+        // notification would overwrite it.
+        return BAD_VALUE;
+    }
+    if (reCacheConcurrentStreamingCameraIdsLocked() != OK) {
+        ALOGE("%s: CameraProvider %s could not re-cache concurrent streaming camera id list ",
+                  __FUNCTION__, mProviderName.c_str());
+    }
+    *id = cameraId;
+    return OK;
+}
+
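+// HIDL provider callback for physical camera status changes on logical
+// multi-camera devices; mirrors cameraDeviceStatusChange, including caching of
+// updates that arrive before initialization completes.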
+hardware::Return<void> HidlProviderInfo::physicalCameraDeviceStatusChange(
+        const hardware::hidl_string& cameraDeviceName,
+        const hardware::hidl_string& physicalCameraDeviceName,
+        hardware::camera::common::V1_0::CameraDeviceStatus newStatus) {
+    sp<StatusListener> listener;
+    std::string id;
+    std::string physicalId;
+    std::lock_guard<std::mutex> lock(mInitLock);
+    CameraDeviceStatus newInternalStatus = hidlToInternalCameraDeviceStatus(newStatus);
+    if (!mInitialized) {
+        mCachedStatus.emplace_back(true /*isPhysicalCameraStatus*/, cameraDeviceName,
+                physicalCameraDeviceName, newInternalStatus);
+        return hardware::Void();
+    }
+
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+
+        if (OK != physicalCameraDeviceStatusChangeLocked(&id, &physicalId, cameraDeviceName,
+                physicalCameraDeviceName, newStatus)) {
+            return hardware::Void();
+        }
+
+        listener = mManager->getStatusListener();
+    }
+    // Call without lock held to allow reentrancy into provider manager
+    if (listener != nullptr) {
+        listener->onDeviceStatusChanged(String8(id.c_str()),
+                String8(physicalId.c_str()), newInternalStatus);
+    }
+    return hardware::Void();
+}
+
+status_t HidlProviderInfo::physicalCameraDeviceStatusChangeLocked(
+            std::string* id, std::string* physicalId,
+            const hardware::hidl_string& cameraDeviceName,
+            const hardware::hidl_string& physicalCameraDeviceName,
+            hardware::camera::common::V1_0::CameraDeviceStatus newStatus) {
+    bool known = false;
+    std::string cameraId;
+    for (auto& deviceInfo : mDevices) {
+        if (deviceInfo->mName == cameraDeviceName) {
+            cameraId = deviceInfo->mId;
+            if (!deviceInfo->mIsLogicalCamera) {
+                ALOGE("%s: Invalid combination of camera id %s, physical id %s",
+                        __FUNCTION__, cameraId.c_str(), physicalCameraDeviceName.c_str());
+                return BAD_VALUE;
+            }
+            if (std::find(deviceInfo->mPhysicalIds.begin(), deviceInfo->mPhysicalIds.end(),
+                    physicalCameraDeviceName) == deviceInfo->mPhysicalIds.end()) {
+                ALOGE("%s: Invalid combination of camera id %s, physical id %s",
+                        __FUNCTION__, cameraId.c_str(), physicalCameraDeviceName.c_str());
+                return BAD_VALUE;
+            }
+            ALOGI("Camera device %s physical device %s status is now %s",
+                    cameraDeviceName.c_str(), physicalCameraDeviceName.c_str(),
+                    deviceStatusToString(newStatus));
+            known = true;
+            break;
+        }
+    }
+    // Previously unseen device; status must not be NOT_PRESENT
+    if (!known) {
+        ALOGW("Camera provider %s says an unknown camera device %s-%s is not present. Curious.",
+                mProviderName.c_str(), cameraDeviceName.c_str(),
+                physicalCameraDeviceName.c_str());
+        return BAD_VALUE;
+    }
+
+    *id = cameraId;
+    *physicalId = physicalCameraDeviceName.c_str();
+    return OK;
+}
+
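+// HIDL provider callback for torch status changes. Looks up the device under
+// mLock, drops the torch resource reference when the torch is no longer
+// available, and forwards the update to the StatusListener without the lock held.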
+hardware::Return<void> HidlProviderInfo::torchModeStatusChange(
+        const hardware::hidl_string& cameraDeviceName,
+        hardware::camera::common::V1_0::TorchModeStatus newStatus) {
+    sp<StatusListener> listener;
+    SystemCameraKind systemCameraKind = SystemCameraKind::PUBLIC;
+    std::string id;
+    bool known = false;
+    {
+        // Hold mLock for accessing mDevices
+        std::lock_guard<std::mutex> lock(mLock);
+        for (auto& deviceInfo : mDevices) {
+            if (deviceInfo->mName == cameraDeviceName) {
+                ALOGI("Camera device %s torch status is now %s", cameraDeviceName.c_str(),
+                        torchStatusToString(newStatus));
+                id = deviceInfo->mId;
+                known = true;
+                systemCameraKind = deviceInfo->mSystemCameraKind;
+                if (hardware::camera::common::V1_0::TorchModeStatus::AVAILABLE_ON != newStatus) {
+                    mManager->removeRef(CameraProviderManager::DeviceMode::TORCH, id);
+                }
+                break;
+            }
+        }
+        if (!known) {
+            ALOGW("Camera provider %s says an unknown camera %s now has torch status %d. Curious.",
+                    mProviderName.c_str(), cameraDeviceName.c_str(), newStatus);
+            return hardware::Void();
+        }
+        // no lock needed since listener is set up only once during
+        // CameraProviderManager initialization and then never changed till it is
+        // destructed.
+        listener = mManager->getStatusListener();
+    }
+    // Call without lock held to allow reentrancy into provider manager
+    // The problem with holding mLock here is that we
+    // might be limiting re-entrancy : CameraService::onTorchStatusChanged calls
+    // back into CameraProviderManager which might try to hold mLock again (eg:
+    // findDeviceInfo, which should be holding mLock while iterating through
+    // each provider's devices).
+    if (listener != nullptr) {
+        listener->onTorchStatusChanged(String8(id.c_str()),
+                hidlToInternalTorchModeStatus(newStatus), systemCameraKind);
+    }
+    return hardware::Void();
+}
+
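+// Death notification for the provider HAL process: logs a mismatched cookie and
+// removes the provider instance from the manager.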
+void HidlProviderInfo::serviceDied(uint64_t cookie,
+        const wp<hidl::base::V1_0::IBase>& who) {
+    (void) who;
+    ALOGI("Camera provider '%s' has died; removing it", mProviderInstance.c_str());
+    if (cookie != mId) {
+        ALOGW("%s: Unexpected serviceDied cookie %" PRIu64 ", expected %" PRIu32,
+                __FUNCTION__, cookie, mId);
+    }
+    mManager->removeProvider(mProviderInstance);
+}
+
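+// Starts the HIDL camera device interface, queries its resource cost, rewrites
+// conflicting device names down to bare camera ids, and wraps everything in a
+// HidlDeviceInfo3 instance.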
+std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
+    HidlProviderInfo::initializeDeviceInfo(
+        const std::string &name, const metadata_vendor_id_t tagId,
+        const std::string &id, uint16_t minorVersion) {
+    Status status = Status::INTERNAL_ERROR;
+
+    auto cameraInterface = startDeviceInterface(name);
+    if (cameraInterface == nullptr) return nullptr;
+
+    common::V1_0::CameraResourceCost resourceCost;
+    cameraInterface->getResourceCost([&status, &resourceCost](
+        Status s, common::V1_0::CameraResourceCost cost) {
+                status = s;
+                resourceCost = cost;
+            });
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to obtain resource costs for camera device %s: %s", __FUNCTION__,
+                name.c_str(), statusToString(status));
+        return nullptr;
+    }
+
+    for (auto& conflictName : resourceCost.conflictingDevices) {
+        uint16_t major, minor;
+        std::string type, id;
+        status_t res = parseDeviceName(conflictName, &major, &minor, &type, &id);
+        if (res != OK) {
+            ALOGE("%s: Failed to parse conflicting device %s", __FUNCTION__, conflictName.c_str());
+            return nullptr;
+        }
+        conflictName = id;
+    }
+
+    return std::unique_ptr<DeviceInfo3>(
+        new HidlDeviceInfo3(name, tagId, id, minorVersion, hidlToInternalResourceCost(resourceCost),
+                this, mProviderPublicCameraIds, cameraInterface));
+}
+
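+// Refreshes mConcurrentCameraIdCombinations from the provider. A no-op for
+// providers older than @2.6; the provider is not started just for this refresh,
+// so an inactive interface results in INVALID_OPERATION.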
+status_t HidlProviderInfo::reCacheConcurrentStreamingCameraIdsLocked() {
+    if (mMinorVersion < 6) {
+      // Unsupported operation, nothing to do here
+      return OK;
+    }
+    // Check if the provider is currently active - not going to start it up for this notification
+    auto interface = mSavedInterface != nullptr ? mSavedInterface : mActiveInterface.promote();
+    if (interface == nullptr) {
+        ALOGE("%s: camera provider interface for %s is not valid", __FUNCTION__,
+                mProviderName.c_str());
+        return INVALID_OPERATION;
+    }
+    auto castResult = provider::V2_6::ICameraProvider::castFrom(interface);
+
+    if (castResult.isOk()) {
+        sp<provider::V2_6::ICameraProvider> interface2_6 = castResult;
+        if (interface2_6 != nullptr) {
+            return getConcurrentCameraIdsInternalLocked(interface2_6);
+        } else {
+            // This should not happen since mMinorVersion >= 6
+            ALOGE("%s: mMinorVersion was >= 6, but interface2_6 was nullptr", __FUNCTION__);
+            return UNKNOWN_ERROR;
+        }
+    }
+    return OK;
+}
+
+status_t HidlProviderInfo::getConcurrentCameraIdsInternalLocked(
+        sp<provider::V2_6::ICameraProvider> &interface2_6) {
+    if (interface2_6 == nullptr) {
+        ALOGE("%s: null interface provided", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    Status status = Status::OK;
+    hardware::Return<void> ret =
+            interface2_6->getConcurrentStreamingCameraIds([&status, this](
+            Status concurrentIdStatus, // TODO: Move all instances of hidl_string to 'using'
+            const hardware::hidl_vec<hardware::hidl_vec<hardware::hidl_string>>&
+                        cameraDeviceIdCombinations) {
+            status = concurrentIdStatus;
+            if (status == Status::OK) {
+                mConcurrentCameraIdCombinations.clear();
+                for (auto& combination : cameraDeviceIdCombinations) {
+                    std::unordered_set<std::string> deviceIds;
+                    for (auto &cameraDeviceId : combination) {
+                        deviceIds.insert(cameraDeviceId.c_str());
+                    }
+                    mConcurrentCameraIdCombinations.push_back(std::move(deviceIds));
+                }
+            } });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error in getting concurrent camera ID list from provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to query for camera devices from provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return mapToStatusT(status);
+    }
+    return OK;
+}
+
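+// Builds the per-device characteristics cache: fetches and validates the static
+// metadata, derives additional tags (dynamic depth, HEIC, rotate-and-crop, zoom
+// ratio), records flash and torch capabilities, and, for logical cameras on
+// device@3.5 or newer, also fetches characteristics of hidden physical cameras.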
+HidlProviderInfo::HidlDeviceInfo3::HidlDeviceInfo3(
+        const std::string& name,
+        const metadata_vendor_id_t tagId,
+        const std::string &id, uint16_t minorVersion,
+        const CameraResourceCost& resourceCost,
+        sp<CameraProviderManager::ProviderInfo> parentProvider,
+        const std::vector<std::string>& publicCameraIds,
+        sp<hardware::camera::device::V3_2::ICameraDevice> interface) :
+        DeviceInfo3(name, tagId, id, minorVersion, resourceCost, parentProvider, publicCameraIds) {
+
+    // Get camera characteristics and initialize flash unit availability
+    Status status;
+    hardware::Return<void> ret;
+    ret = interface->getCameraCharacteristics([&status, this](Status s,
+                    device::V3_2::CameraMetadata metadata) {
+                status = s;
+                if (s == Status::OK) {
+                    camera_metadata_t *buffer =
+                            reinterpret_cast<camera_metadata_t*>(metadata.data());
+                    size_t expectedSize = metadata.size();
+                    int res = validate_camera_metadata_structure(buffer, &expectedSize);
+                    if (res == OK || res == CAMERA_METADATA_VALIDATION_SHIFTED) {
+                        set_camera_metadata_vendor_id(buffer, mProviderTagid);
+                        mCameraCharacteristics = buffer;
+                    } else {
+                        ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+                        status = Status::INTERNAL_ERROR;
+                    }
+                }
+            });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error getting camera characteristics for device %s"
+                " to check for a flash unit: %s", __FUNCTION__, id.c_str(),
+                ret.description().c_str());
+        return;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to get camera characteristics for device %s: %s (%d)",
+                __FUNCTION__, id.c_str(), statusToString(status), status);
+        return;
+    }
+
+    if (mCameraCharacteristics.exists(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS)) {
+        const auto &stateMap = mCameraCharacteristics.find(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS);
+        if ((stateMap.count > 0) && ((stateMap.count % 2) == 0)) {
+            for (size_t i = 0; i < stateMap.count; i += 2) {
+                mDeviceStateOrientationMap.emplace(stateMap.data.i64[i], stateMap.data.i64[i+1]);
+            }
+        } else {
+            ALOGW("%s: Invalid ANDROID_INFO_DEVICE_STATE_ORIENTATIONS map size: %zu", __FUNCTION__,
+                    stateMap.count);
+        }
+    }
+
+    mSystemCameraKind = getSystemCameraKind();
+
+    status_t res = fixupMonochromeTags();
+    if (OK != res) {
+        ALOGE("%s: Unable to fix up monochrome tags based for older HAL version: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return;
+    }
+    auto stat = addDynamicDepthTags();
+    if (OK != stat) {
+        ALOGE("%s: Failed appending dynamic depth tags: %s (%d)", __FUNCTION__, strerror(-stat),
+                stat);
+    }
+    res = deriveHeicTags();
+    if (OK != res) {
+        ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+    }
+
+    if (SessionConfigurationUtils::isUltraHighResolutionSensor(mCameraCharacteristics)) {
+        status_t status = addDynamicDepthTags(/*maxResolution*/true);
+        if (OK != status) {
+            ALOGE("%s: Failed appending dynamic depth tags for maximum resolution mode: %s (%d)",
+                    __FUNCTION__, strerror(-status), status);
+        }
+
+        status = deriveHeicTags(/*maxResolution*/true);
+        if (OK != status) {
+            ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities for"
+                    "maximum resolution mode: %s (%d)", __FUNCTION__, strerror(-status), status);
+        }
+    }
+
+    res = addRotateCropTags();
+    if (OK != res) {
+        ALOGE("%s: Unable to add default SCALER_ROTATE_AND_CROP tags: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+    }
+    res = addPreCorrectionActiveArraySize();
+    if (OK != res) {
+        ALOGE("%s: Unable to add PRE_CORRECTION_ACTIVE_ARRAY_SIZE: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+    }
+    res = camera3::ZoomRatioMapper::overrideZoomRatioTags(
+            &mCameraCharacteristics, &mSupportNativeZoomRatio);
+    if (OK != res) {
+        ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+    }
+
+    camera_metadata_entry flashAvailable =
+            mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
+    if (flashAvailable.count == 1 &&
+            flashAvailable.data.u8[0] == ANDROID_FLASH_INFO_AVAILABLE_TRUE) {
+        mHasFlashUnit = true;
+    } else {
+        mHasFlashUnit = false;
+    }
+
+    camera_metadata_entry entry =
+            mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_DEFAULT_LEVEL);
+    if (entry.count == 1) {
+        mTorchDefaultStrengthLevel = entry.data.i32[0];
+    } else {
+        mTorchDefaultStrengthLevel = 0;
+    }
+    entry = mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL);
+    if (entry.count == 1) {
+        mTorchMaximumStrengthLevel = entry.data.i32[0];
+    } else {
+        mTorchMaximumStrengthLevel = 0;
+    }
+
+    mTorchStrengthLevel = 0;
+
+    queryPhysicalCameraIds();
+
+    // Get physical camera characteristics if applicable
+    auto castResult = device::V3_5::ICameraDevice::castFrom(interface);
+    if (!castResult.isOk()) {
+        ALOGV("%s: Unable to convert ICameraDevice instance to version 3.5", __FUNCTION__);
+        return;
+    }
+    sp<device::V3_5::ICameraDevice> interface_3_5 = castResult;
+    if (interface_3_5 == nullptr) {
+        ALOGE("%s: Converted ICameraDevice instance to nullptr", __FUNCTION__);
+        return;
+    }
+
+    if (mIsLogicalCamera) {
+        for (auto& id : mPhysicalIds) {
+            if (std::find(mPublicCameraIds.begin(), mPublicCameraIds.end(), id) !=
+                    mPublicCameraIds.end()) {
+                continue;
+            }
+
+            hardware::hidl_string hidlId(id);
+            ret = interface_3_5->getPhysicalCameraCharacteristics(hidlId,
+                    [&status, &id, this](Status s, device::V3_2::CameraMetadata metadata) {
+                status = s;
+                if (s == Status::OK) {
+                    camera_metadata_t *buffer =
+                            reinterpret_cast<camera_metadata_t*>(metadata.data());
+                    size_t expectedSize = metadata.size();
+                    int res = validate_camera_metadata_structure(buffer, &expectedSize);
+                    if (res == OK || res == CAMERA_METADATA_VALIDATION_SHIFTED) {
+                        set_camera_metadata_vendor_id(buffer, mProviderTagid);
+                        mPhysicalCameraCharacteristics[id] = buffer;
+                    } else {
+                        ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+                        status = Status::INTERNAL_ERROR;
+                    }
+                }
+            });
+
+            if (!ret.isOk()) {
+                ALOGE("%s: Transaction error getting physical camera %s characteristics for %s: %s",
+                        __FUNCTION__, id.c_str(), id.c_str(), ret.description().c_str());
+                return;
+            }
+            if (status != Status::OK) {
+                ALOGE("%s: Unable to get physical camera %s characteristics for device %s: %s (%d)",
+                        __FUNCTION__, id.c_str(), mId.c_str(),
+                        statusToString(status), status);
+                return;
+            }
+
+            res = camera3::ZoomRatioMapper::overrideZoomRatioTags(
+                    &mPhysicalCameraCharacteristics[id], &mSupportNativeZoomRatio);
+            if (OK != res) {
+                ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
+                        __FUNCTION__, strerror(-res), res);
+            }
+        }
+    }
+
+    if (!kEnableLazyHal) {
+        // Save HAL reference indefinitely
+        mSavedInterface = interface;
+    }
+
+}
+
+status_t HidlProviderInfo::HidlDeviceInfo3::setTorchMode(bool enabled) {
+    using hardware::camera::common::V1_0::TorchMode;
+    const sp<hardware::camera::device::V3_2::ICameraDevice> interface = startDeviceInterface();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    Status s = interface->setTorchMode(enabled ? TorchMode::ON : TorchMode::OFF);
+    return mapToStatusT(s);
+}
+
+status_t HidlProviderInfo::HidlDeviceInfo3::turnOnTorchWithStrengthLevel(
+        int32_t torchStrength) {
+    const sp<hardware::camera::device::V3_2::ICameraDevice> interface = startDeviceInterface();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
+    auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+    if (castResult_3_8.isOk()) {
+        interface_3_8 = castResult_3_8;
+    }
+
+    if (interface_3_8 == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    Status s = interface_3_8->turnOnTorchWithStrengthLevel(torchStrength);
+    if (s == Status::OK) {
+        mTorchStrengthLevel = torchStrength;
+    }
+    return mapToStatusT(s);
+}
+
+status_t HidlProviderInfo::HidlDeviceInfo3::getTorchStrengthLevel(int32_t *torchStrength) {
+    if (torchStrength == nullptr) {
+        return BAD_VALUE;
+    }
+    const sp<hardware::camera::device::V3_2::ICameraDevice> interface = startDeviceInterface();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+    sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
+    if (castResult_3_8.isOk()) {
+        interface_3_8 = castResult_3_8;
+    }
+
+    if (interface_3_8 == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    Status callStatus;
+    status_t res;
+    hardware::Return<void> ret = interface_3_8->getTorchStrengthLevel([&callStatus, &torchStrength]
+        (Status status, const int32_t& torchStrengthLevel) {
+        callStatus = status;
+        if (status == Status::OK) {
+             *torchStrength = torchStrengthLevel;
+        } });
+
+    if (ret.isOk()) {
+        switch (callStatus) {
+            case Status::OK:
+                // Expected case, do nothing.
+                res = OK;
+                break;
+            case Status::METHOD_NOT_SUPPORTED:
+                res = INVALID_OPERATION;
+                break;
+            default:
+                ALOGE("%s: Get torch strength level failed: %d", __FUNCTION__, callStatus);
+                res = UNKNOWN_ERROR;
+        }
+    } else {
+        ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
+        res = UNKNOWN_ERROR;
+    }
+
+    return res;
+}
+
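+// Returns the saved ICameraDevice when lazy HALs are disabled; otherwise asks
+// the parent provider to (re)start the device interface, waiting up to
+// kDeviceAvailableTimeout for an external lazy HAL to report the device present.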
+sp<hardware::camera::device::V3_2::ICameraDevice>
+HidlProviderInfo::HidlDeviceInfo3::startDeviceInterface() {
+    Mutex::Autolock l(mDeviceAvailableLock);
+    sp<hardware::camera::device::V3_2::ICameraDevice> device;
+    ATRACE_CALL();
+    if (mSavedInterface == nullptr) {
+        sp<HidlProviderInfo> parentProvider =
+                static_cast<HidlProviderInfo *>(mParentProvider.promote().get());
+        if (parentProvider != nullptr) {
+            // Wait for lazy HALs to confirm device availability
+            if (parentProvider->isExternalLazyHAL() && !mIsDeviceAvailable) {
+                ALOGV("%s: Wait for external device to become available %s",
+                      __FUNCTION__,
+                      mId.c_str());
+
+                auto res = mDeviceAvailableSignal.waitRelative(mDeviceAvailableLock,
+                                                         kDeviceAvailableTimeout);
+                if (res != OK) {
+                    ALOGE("%s: Failed waiting for device to become available",
+                          __FUNCTION__);
+                    return nullptr;
+                }
+            }
+
+            device = parentProvider->startDeviceInterface(mName);
+        }
+    } else {
+        device = (hardware::camera::device::V3_2::ICameraDevice *) mSavedInterface.get();
+    }
+    return device;
+}
+
+status_t HidlProviderInfo::HidlDeviceInfo3::dumpState(int fd) {
+    const sp<hardware::camera::device::V3_2::ICameraDevice> interface =
+            startDeviceInterface();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    native_handle_t* handle = native_handle_create(1, 0);
+    if (handle == nullptr) {
+        return NO_MEMORY;
+    }
+    handle->data[0] = fd;
+    auto ret = interface->dumpState(handle);
+    native_handle_delete(handle);
+    if (!ret.isOk()) {
+        return INVALID_OPERATION;
+    }
+    return OK;
+}
+
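+// Converts the session configuration into a HAL stream combination and queries
+// the newest isStreamCombinationSupported variant the device implements (3.8,
+// 3.7, or 3.5), downgrading the stream configuration for older interfaces.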
+status_t HidlProviderInfo::HidlDeviceInfo3::isSessionConfigurationSupported(
+        const SessionConfiguration &configuration, bool overrideForPerfClass, bool *status) {
+
+    hardware::camera::device::V3_8::StreamConfiguration streamConfiguration;
+    bool earlyExit = false;
+    camera3::metadataGetter getMetadata = [this](const String8 &id, bool /*overrideForPerfClass*/) {
+          CameraMetadata physicalChars;
+          getPhysicalCameraCharacteristics(id.c_str(), &physicalChars);
+          return physicalChars;
+    };
+    auto bRes = SessionConfigurationUtils::convertToHALStreamCombination(configuration,
+            String8(mId.c_str()), mCameraCharacteristics, getMetadata, mPhysicalIds,
+            streamConfiguration, overrideForPerfClass, &earlyExit);
+
+    if (!bRes.isOk()) {
+        return UNKNOWN_ERROR;
+    }
+
+    if (earlyExit) {
+        *status = false;
+        return OK;
+    }
+
+    const sp<hardware::camera::device::V3_2::ICameraDevice> interface =
+            startDeviceInterface();
+
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+
+    auto castResult_3_5 = device::V3_5::ICameraDevice::castFrom(interface);
+    sp<hardware::camera::device::V3_5::ICameraDevice> interface_3_5 = castResult_3_5;
+    auto castResult_3_7 = device::V3_7::ICameraDevice::castFrom(interface);
+    sp<hardware::camera::device::V3_7::ICameraDevice> interface_3_7 = castResult_3_7;
+    auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+    sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = castResult_3_8;
+
+    status_t res;
+    Status callStatus;
+    ::android::hardware::Return<void> ret;
+    auto halCb =
+            [&callStatus, &status] (Status s, bool combStatus) {
+                callStatus = s;
+                *status = combStatus;
+            };
+    if (interface_3_8 != nullptr) {
+        ret = interface_3_8->isStreamCombinationSupported_3_8(streamConfiguration, halCb);
+    } else if (interface_3_7 != nullptr) {
+        hardware::camera::device::V3_7::StreamConfiguration configuration_3_7;
+        bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
+                configuration_3_7, streamConfiguration);
+        if (!success) {
+            *status = false;
+            return OK;
+        }
+        ret = interface_3_7->isStreamCombinationSupported_3_7(configuration_3_7, halCb);
+    } else if (interface_3_5 != nullptr) {
+        hardware::camera::device::V3_7::StreamConfiguration configuration_3_7;
+        bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
+                configuration_3_7, streamConfiguration);
+        if (!success) {
+            *status = false;
+            return OK;
+        }
+        hardware::camera::device::V3_4::StreamConfiguration configuration_3_4;
+        success = SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
+                configuration_3_4, configuration_3_7);
+        if (!success) {
+            *status = false;
+            return OK;
+        }
+        ret = interface_3_5->isStreamCombinationSupported(configuration_3_4, halCb);
+    } else {
+        return INVALID_OPERATION;
+    }
+    if (ret.isOk()) {
+        switch (callStatus) {
+            case Status::OK:
+                // Expected case, do nothing.
+                res = OK;
+                break;
+            case Status::METHOD_NOT_SUPPORTED:
+                res = INVALID_OPERATION;
+                break;
+            default:
+                ALOGE("%s: Session configuration query failed: %d", __FUNCTION__, callStatus);
+                res = UNKNOWN_ERROR;
+        }
+    } else {
+        ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
+        res = UNKNOWN_ERROR;
+    }
+
+    return res;
+}
+
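+// Expects mManager->mInterfaceMutex to be held. Converts each camera id and
+// session configuration pair into a CameraIdAndStreamCombination, setting
+// *earlyExit when a configuration is already known to be unsupported.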
+status_t HidlProviderInfo::convertToHALStreamCombinationAndCameraIdsLocked(
+        const std::vector<CameraIdAndSessionConfiguration> &cameraIdsAndSessionConfigs,
+        const std::set<std::string>& perfClassPrimaryCameraIds,
+        int targetSdkVersion,
+        hardware::hidl_vec<CameraIdAndStreamCombination> *halCameraIdsAndStreamCombinations,
+        bool *earlyExit) {
+    binder::Status bStatus = binder::Status::ok();
+    std::vector<CameraIdAndStreamCombination> halCameraIdsAndStreamsV;
+    bool shouldExit = false;
+    status_t res = OK;
+    for (auto &cameraIdAndSessionConfig : cameraIdsAndSessionConfigs) {
+        const std::string& cameraId = cameraIdAndSessionConfig.mCameraId;
+        hardware::camera::device::V3_8::StreamConfiguration streamConfiguration;
+        CameraMetadata deviceInfo;
+        bool overrideForPerfClass =
+                SessionConfigurationUtils::targetPerfClassPrimaryCamera(
+                        perfClassPrimaryCameraIds, cameraId, targetSdkVersion);
+        res = mManager->getCameraCharacteristicsLocked(cameraId, overrideForPerfClass, &deviceInfo);
+        if (res != OK) {
+            return res;
+        }
+        camera3::metadataGetter getMetadata =
+                [this](const String8 &id, bool overrideForPerfClass) {
+                    CameraMetadata physicalDeviceInfo;
+                    mManager->getCameraCharacteristicsLocked(id.string(), overrideForPerfClass,
+                                                   &physicalDeviceInfo);
+                    return physicalDeviceInfo;
+                };
+        std::vector<std::string> physicalCameraIds;
+        mManager->isLogicalCameraLocked(cameraId, &physicalCameraIds);
+        bStatus =
+            SessionConfigurationUtils::convertToHALStreamCombination(
+                    cameraIdAndSessionConfig.mSessionConfiguration,
+                    String8(cameraId.c_str()), deviceInfo, getMetadata,
+                    physicalCameraIds, streamConfiguration,
+                    overrideForPerfClass, &shouldExit);
+        if (!bStatus.isOk()) {
+            ALOGE("%s: convertToHALStreamCombination failed", __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+        if (shouldExit) {
+            *earlyExit = true;
+            return OK;
+        }
+        CameraIdAndStreamCombination halCameraIdAndStream;
+        halCameraIdAndStream.cameraId = cameraId;
+        SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
+                halCameraIdAndStream.streamConfiguration, streamConfiguration);
+        halCameraIdsAndStreamsV.push_back(halCameraIdAndStream);
+    }
+    *halCameraIdsAndStreamCombinations = halCameraIdsAndStreamsV;
+    return OK;
+}
+
+status_t HidlProviderInfo::isConcurrentSessionConfigurationSupported(
+        const std::vector<CameraIdAndSessionConfiguration> &cameraIdsAndSessionConfigs,
+        const std::set<std::string>& perfClassPrimaryCameraIds,
+        int targetSdkVersion, bool *isSupported) {
+
+    hardware::hidl_vec<CameraIdAndStreamCombination> halCameraIdsAndStreamCombinations;
+    bool knowUnsupported = false;
+    status_t res = convertToHALStreamCombinationAndCameraIdsLocked(
+            cameraIdsAndSessionConfigs, perfClassPrimaryCameraIds,
+            targetSdkVersion, &halCameraIdsAndStreamCombinations, &knowUnsupported);
+    if (res != OK) {
+        ALOGE("%s: Unable to convert session configurations provided to HAL stream "
+                "combinations", __FUNCTION__);
+        return res;
+    }
+    if (knowUnsupported) {
+        // The streams are already known to be invalid before making the HAL call.
+        *isSupported = false;
+        return OK;
+    }
+
+    if (mMinorVersion >= 6) {
+        // Check if the provider is currently active - not going to start it for this notification
+        auto interface = mSavedInterface != nullptr ? mSavedInterface : mActiveInterface.promote();
+        if (interface == nullptr) {
+            // TODO: This might be some other problem
+            return INVALID_OPERATION;
+        }
+        auto castResult2_6 = provider::V2_6::ICameraProvider::castFrom(interface);
+        auto castResult2_7 = provider::V2_7::ICameraProvider::castFrom(interface);
+        Status callStatus;
+        auto cb =
+                [&isSupported, &callStatus](Status s, bool supported) {
+                      callStatus = s;
+                      *isSupported = supported; };
+
+        ::android::hardware::Return<void> ret;
+        sp<provider::V2_7::ICameraProvider> interface_2_7;
+        sp<provider::V2_6::ICameraProvider> interface_2_6;
+        if (mMinorVersion >= 7 && castResult2_7.isOk()) {
+            interface_2_7 = castResult2_7;
+            if (interface_2_7 != nullptr) {
+                ret = interface_2_7->isConcurrentStreamCombinationSupported_2_7(
+                        halCameraIdsAndStreamCombinations, cb);
+            }
+        } else if (mMinorVersion == 6 && castResult2_6.isOk()) {
+            interface_2_6 = castResult2_6;
+            if (interface_2_6 != nullptr) {
+                hardware::hidl_vec<provider::V2_6::CameraIdAndStreamCombination>
+                        halCameraIdsAndStreamCombinations_2_6;
+                size_t numStreams = halCameraIdsAndStreamCombinations.size();
+                halCameraIdsAndStreamCombinations_2_6.resize(numStreams);
+                for (size_t i = 0; i < numStreams; i++) {
+                    using namespace camera3;
+                    auto const& combination = halCameraIdsAndStreamCombinations[i];
+                    halCameraIdsAndStreamCombinations_2_6[i].cameraId = combination.cameraId;
+                    bool success =
+                            SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
+                                    halCameraIdsAndStreamCombinations_2_6[i].streamConfiguration,
+                                    combination.streamConfiguration);
+                    if (!success) {
+                        *isSupported = false;
+                        return OK;
+                    }
+                }
+                ret = interface_2_6->isConcurrentStreamCombinationSupported(
+                        halCameraIdsAndStreamCombinations_2_6, cb);
+            }
+        }
+
+        if (interface_2_7 != nullptr || interface_2_6 != nullptr) {
+            if (ret.isOk()) {
+                switch (callStatus) {
+                    case Status::OK:
+                        // Expected case, do nothing.
+                        res = OK;
+                        break;
+                    case Status::METHOD_NOT_SUPPORTED:
+                        res = INVALID_OPERATION;
+                        break;
+                    default:
+                        ALOGE("%s: Session configuration query failed: %d", __FUNCTION__,
+                                  callStatus);
+                        res = UNKNOWN_ERROR;
+                }
+            } else {
+                ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
+                res = UNKNOWN_ERROR;
+            }
+            return res;
+        }
+    }
+    // unsupported operation
+    return INVALID_OPERATION;
+}
+
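+// Flattens the HIDL vendor tag sections into a HidlVendorTagDescriptor,
+// validating that every tag is in the vendor range and has a name and a known
+// type, and building the section and reverse-lookup maps.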
+status_t HidlVendorTagDescriptor::createDescriptorFromHidl(
+        const hardware::hidl_vec<common::V1_0::VendorTagSection>& vts,
+        sp<VendorTagDescriptor>& descriptor) {
+
+    int tagCount = 0;
+
+    for (size_t s = 0; s < vts.size(); s++) {
+        tagCount += vts[s].tags.size();
+    }
+
+    if (tagCount < 0 || tagCount > INT32_MAX) {
+        ALOGE("%s: tag count %d from vendor tag sections is invalid.", __FUNCTION__, tagCount);
+        return BAD_VALUE;
+    }
+
+    Vector<uint32_t> tagArray;
+    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
+            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
+
+
+    sp<HidlVendorTagDescriptor> desc = new HidlVendorTagDescriptor();
+    desc->mTagCount = tagCount;
+
+    SortedVector<String8> sections;
+    KeyedVector<uint32_t, String8> tagToSectionMap;
+
+    int idx = 0;
+    for (size_t s = 0; s < vts.size(); s++) {
+        const common::V1_0::VendorTagSection& section = vts[s];
+        const char *sectionName = section.sectionName.c_str();
+        if (sectionName == NULL) {
+            ALOGE("%s: no section name defined for vendor tag section %zu.", __FUNCTION__, s);
+            return BAD_VALUE;
+        }
+        String8 sectionString(sectionName);
+        sections.add(sectionString);
+
+        for (size_t j = 0; j < section.tags.size(); j++) {
+            uint32_t tag = section.tags[j].tagId;
+            if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+                ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+                return BAD_VALUE;
+            }
+
+            tagArray.editItemAt(idx++) = section.tags[j].tagId;
+
+            const char *tagName = section.tags[j].tagName.c_str();
+            if (tagName == NULL) {
+                ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
+                return BAD_VALUE;
+            }
+            desc->mTagToNameMap.add(tag, String8(tagName));
+            tagToSectionMap.add(tag, sectionString);
+
+            int tagType = (int) section.tags[j].tagType;
+            if (tagType < 0 || tagType >= NUM_TYPES) {
+                ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+                return BAD_VALUE;
+            }
+            desc->mTagToTypeMap.add(tag, tagType);
+        }
+    }
+
+    desc->mSections = sections;
+
+    for (size_t i = 0; i < tagArray.size(); ++i) {
+        uint32_t tag = tagArray[i];
+        String8 sectionString = tagToSectionMap.valueFor(tag);
+
+        // Set up tag to section index map
+        ssize_t index = sections.indexOf(sectionString);
+        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
+        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
+
+        // Set up reverse mapping
+        ssize_t reverseIndex = -1;
+        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+        }
+        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+    }
+
+    descriptor = std::move(desc);
+    return OK;
+}
+
+} //namespace android
diff --git a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.h b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.h
new file mode 100644
index 0000000..0ba2aff
--- /dev/null
+++ b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERAPROVIDER_HIDLPROVIDERINFOH
+#define ANDROID_SERVERS_CAMERA_CAMERAPROVIDER_HIDLPROVIDERINFOH
+
+#include "common/CameraProviderManager.h"
+
+namespace android {
+
+/**
+ * The vendor tag descriptor class that takes HIDL vendor tag information as
+ * input. Not part of VendorTagDescriptor class because that class is used
+ * in AIDL generated sources which don't have access to HIDL headers.
+ */
+class HidlVendorTagDescriptor : public VendorTagDescriptor {
+public:
+    /**
+     * Create a VendorTagDescriptor object from the HIDL VendorTagSection
+     * vector.
+     *
+     * Returns OK on success, or a negative error code.
+     */
+    static status_t createDescriptorFromHidl(
+            const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
+            /*out*/
+            sp<VendorTagDescriptor>& descriptor);
+};
+
+struct HidlProviderInfo : public CameraProviderManager::ProviderInfo,
+            virtual public hardware::camera::provider::V2_6::ICameraProviderCallback,
+            virtual public hardware::hidl_death_recipient {
+    // Current overall Android device physical status
+    hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> mDeviceState;
+
+    // This pointer is used to keep a reference to the ICameraProvider that was last accessed.
+    wp<hardware::camera::provider::V2_4::ICameraProvider> mActiveInterface;
+
+    sp<hardware::camera::provider::V2_4::ICameraProvider> mSavedInterface;
+    HidlProviderInfo(
+            const std::string &providerName,
+            const std::string &providerInstance,
+            CameraProviderManager *manager) :
+            CameraProviderManager::ProviderInfo(providerName, providerInstance, manager) {}
+
+    virtual ~HidlProviderInfo() {}
+
+    static status_t mapToStatusT(const hardware::camera::common::V1_0::Status &status);
+
+    status_t initializeHidlProvider(
+            sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
+            int64_t currentDeviceState);
+
+    IPCTransport getIPCTransport() override {return IPCTransport::HIDL;}
+
+    const sp<hardware::camera::provider::V2_4::ICameraProvider> startProviderInterface();
+
+    virtual bool successfullyStartedProviderInterface() override;
+
+    virtual status_t setUpVendorTags() override;
+    virtual status_t notifyDeviceStateChange(int64_t) override;
+
+    /**
+     * Query the camera provider for concurrent stream configuration support
+     */
+    virtual status_t isConcurrentSessionConfigurationSupported(
+        const std::vector<CameraIdAndSessionConfiguration> &cameraIdsAndSessionConfigs,
+        const std::set<std::string>& perfClassPrimaryCameraIds,
+        int targetSdkVersion, bool *isSupported) override;
+
+    // Helper for initializeDeviceInfo to use the right CameraProvider get method.
+    sp<hardware::camera::device::V3_2::ICameraDevice>
+            startDeviceInterface(const std::string &deviceName);
+
+    // ICameraProviderCallback interface - these lock the parent mInterfaceMutex
+    hardware::Return<void> cameraDeviceStatusChange(
+            const hardware::hidl_string& ,
+            hardware::camera::common::V1_0::CameraDeviceStatus ) override;
+    hardware::Return<void> torchModeStatusChange(
+            const hardware::hidl_string& ,
+            hardware::camera::common::V1_0::TorchModeStatus ) override;
+    hardware::Return<void> physicalCameraDeviceStatusChange(
+            const hardware::hidl_string& ,
+            const hardware::hidl_string& ,
+            hardware::camera::common::V1_0::CameraDeviceStatus ) override;
+
+    // hidl_death_recipient interface - this locks the parent mInterfaceMutex
+    virtual void serviceDied(uint64_t , const wp<hidl::base::V1_0::IBase>& ) override;
+
+    struct HidlDeviceInfo3 : public CameraProviderManager::ProviderInfo::DeviceInfo3 {
+
+        const hardware::hidl_version mVersion = hardware::hidl_version{3, 2};
+        sp<IBase> mSavedInterface = nullptr;
+
+        HidlDeviceInfo3(const std::string& , const metadata_vendor_id_t ,
+                const std::string &, uint16_t ,
+                const CameraResourceCost& ,
+                sp<ProviderInfo> ,
+                const std::vector<std::string>& ,
+                sp<hardware::camera::device::V3_2::ICameraDevice>);
+
+        ~HidlDeviceInfo3() {}
+
+        virtual status_t setTorchMode(bool enabled) override;
+        virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) override;
+        virtual status_t getTorchStrengthLevel(int32_t *torchStrength) override;
+
+        virtual status_t dumpState(int fd) override;
+
+        virtual status_t isSessionConfigurationSupported(
+                const SessionConfiguration &/*configuration*/,
+                bool overrideForPerfClass,
+                bool *status);
+        sp<hardware::camera::device::V3_2::ICameraDevice> startDeviceInterface();
+    };
+
+ private:
+
+    status_t cameraDeviceStatusChangeLocked(
+                std::string* , const hardware::hidl_string& ,
+                hardware::camera::common::V1_0::CameraDeviceStatus );
+
+    status_t physicalCameraDeviceStatusChangeLocked(
+                std::string* , std::string* ,
+                const hardware::hidl_string& ,
+                const hardware::hidl_string& ,
+                hardware::camera::common::V1_0::CameraDeviceStatus );
+
+    status_t addDevice(const std::string& ,
+            hardware::camera::common::V1_0::CameraDeviceStatus ,
+            /*out*/ std::string *);
+
+    std::unique_ptr<DeviceInfo> initializeDeviceInfo(const std::string &,
+            const metadata_vendor_id_t , const std::string &,
+            uint16_t );
+    status_t reCacheConcurrentStreamingCameraIdsLocked();
+
+    // Expects to have mLock locked
+    status_t getConcurrentCameraIdsInternalLocked(
+            sp<hardware::camera::provider::V2_6::ICameraProvider> &);
+
+    // Expects to have mManager->mInterfaceMutex locked
+    status_t convertToHALStreamCombinationAndCameraIdsLocked(
+        const std::vector<hardware::camera2::utils::CameraIdAndSessionConfiguration>&
+                cameraIdsAndSessionConfigs,
+        const std::set<std::string>& perfClassPrimaryCameraIds,
+        int targetSdkVersion,
+        hardware::hidl_vec<hardware::camera::provider::V2_7::CameraIdAndStreamCombination>*
+                halCameraIdsAndStreamCombinations,
+        bool *earlyExit);
+}; // HidlProviderInfo
+
+} // namespace android
+#endif
diff --git a/services/camera/libcameraservice/device3/BufferUtils.cpp b/services/camera/libcameraservice/device3/BufferUtils.cpp
index f3adf20..c0d47d5 100644
--- a/services/camera/libcameraservice/device3/BufferUtils.cpp
+++ b/services/camera/libcameraservice/device3/BufferUtils.cpp
@@ -28,16 +28,6 @@
 namespace android {
 namespace camera3 {
 
-camera_buffer_status_t mapHidlBufferStatus(hardware::camera::device::V3_2::BufferStatus status) {
-    using hardware::camera::device::V3_2::BufferStatus;
-
-    switch (status) {
-        case BufferStatus::OK: return CAMERA_BUFFER_STATUS_OK;
-        case BufferStatus::ERROR: return CAMERA_BUFFER_STATUS_ERROR;
-    }
-    return CAMERA_BUFFER_STATUS_ERROR;
-}
-
 void BufferRecords::takeInflightBufferMap(BufferRecords& other) {
     std::lock_guard<std::mutex> oLock(other.mInflightLock);
     std::lock_guard<std::mutex> lock(mInflightLock);
diff --git a/services/camera/libcameraservice/device3/BufferUtils.h b/services/camera/libcameraservice/device3/BufferUtils.h
index 1e1cd60..96fc111 100644
--- a/services/camera/libcameraservice/device3/BufferUtils.h
+++ b/services/camera/libcameraservice/device3/BufferUtils.h
@@ -104,7 +104,7 @@
 
         // Return the removed buffer ID if input cache is found.
         // Otherwise return BUFFER_ID_NO_BUFFER
-        uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle);
+        uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) override;
 
         // Clear all caches for input stream, but do not remove the stream
         // Removed buffers' ID are returned
@@ -154,9 +154,6 @@
     }; // class BufferRecords
 
     static const uint64_t BUFFER_ID_NO_BUFFER = 0;
-
-    camera_buffer_status_t mapHidlBufferStatus(
-            hardware::camera::device::V3_2::BufferStatus status);
 } // namespace camera3
 
 } // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index fd645c7..992027a 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -65,6 +65,8 @@
 #include "utils/TraceHFR.h"
 #include "utils/CameraServiceProxyWrapper.h"
 
+#include "../common/hidl/HidlProviderInfo.h"
+
 #include <algorithm>
 #include <tuple>
 
@@ -75,8 +77,9 @@
 
 namespace android {
 
-Camera3Device::Camera3Device(const String8 &id, bool overrideForPerfClass):
+Camera3Device::Camera3Device(const String8 &id, bool overrideForPerfClass, bool legacyClient):
         mId(id),
+        mLegacyClient(legacyClient),
         mOperatingMode(NO_MODE),
         mIsConstrainedHighSpeedConfiguration(false),
         mStatus(STATUS_UNINITIALIZED),
@@ -111,164 +114,6 @@
     return mId;
 }
 
-status_t Camera3Device::initialize(sp<CameraProviderManager> manager, const String8& monitorTags) {
-    ATRACE_CALL();
-    Mutex::Autolock il(mInterfaceLock);
-    Mutex::Autolock l(mLock);
-
-    ALOGV("%s: Initializing HIDL device for camera %s", __FUNCTION__, mId.string());
-    if (mStatus != STATUS_UNINITIALIZED) {
-        CLOGE("Already initialized!");
-        return INVALID_OPERATION;
-    }
-    if (manager == nullptr) return INVALID_OPERATION;
-
-    sp<ICameraDeviceSession> session;
-    ATRACE_BEGIN("CameraHal::openSession");
-    status_t res = manager->openSession(mId.string(), this,
-            /*out*/ &session);
-    ATRACE_END();
-    if (res != OK) {
-        SET_ERR_L("Could not open camera session: %s (%d)", strerror(-res), res);
-        return res;
-    }
-
-    res = manager->getCameraCharacteristics(mId.string(), mOverrideForPerfClass, &mDeviceInfo);
-    if (res != OK) {
-        SET_ERR_L("Could not retrieve camera characteristics: %s (%d)", strerror(-res), res);
-        session->close();
-        return res;
-    }
-    mSupportNativeZoomRatio = manager->supportNativeZoomRatio(mId.string());
-
-    std::vector<std::string> physicalCameraIds;
-    bool isLogical = manager->isLogicalCamera(mId.string(), &physicalCameraIds);
-    if (isLogical) {
-        for (auto& physicalId : physicalCameraIds) {
-            // Do not override characteristics for physical cameras
-            res = manager->getCameraCharacteristics(
-                    physicalId, /*overrideForPerfClass*/false, &mPhysicalDeviceInfoMap[physicalId]);
-            if (res != OK) {
-                SET_ERR_L("Could not retrieve camera %s characteristics: %s (%d)",
-                        physicalId.c_str(), strerror(-res), res);
-                session->close();
-                return res;
-            }
-
-            bool usePrecorrectArray =
-                    DistortionMapper::isDistortionSupported(mPhysicalDeviceInfoMap[physicalId]);
-            if (usePrecorrectArray) {
-                res = mDistortionMappers[physicalId].setupStaticInfo(
-                        mPhysicalDeviceInfoMap[physicalId]);
-                if (res != OK) {
-                    SET_ERR_L("Unable to read camera %s's calibration fields for distortion "
-                            "correction", physicalId.c_str());
-                    session->close();
-                    return res;
-                }
-            }
-
-            mZoomRatioMappers[physicalId] = ZoomRatioMapper(
-                    &mPhysicalDeviceInfoMap[physicalId],
-                    mSupportNativeZoomRatio, usePrecorrectArray);
-
-            if (SessionConfigurationUtils::isUltraHighResolutionSensor(
-                    mPhysicalDeviceInfoMap[physicalId])) {
-                mUHRCropAndMeteringRegionMappers[physicalId] =
-                        UHRCropAndMeteringRegionMapper(mPhysicalDeviceInfoMap[physicalId],
-                                usePrecorrectArray);
-            }
-        }
-    }
-
-    std::shared_ptr<RequestMetadataQueue> queue;
-    auto requestQueueRet = session->getCaptureRequestMetadataQueue(
-        [&queue](const auto& descriptor) {
-            queue = std::make_shared<RequestMetadataQueue>(descriptor);
-            if (!queue->isValid() || queue->availableToWrite() <= 0) {
-                ALOGE("HAL returns empty request metadata fmq, not use it");
-                queue = nullptr;
-                // don't use the queue onwards.
-            }
-        });
-    if (!requestQueueRet.isOk()) {
-        ALOGE("Transaction error when getting request metadata fmq: %s, not use it",
-                requestQueueRet.description().c_str());
-        return DEAD_OBJECT;
-    }
-
-    std::unique_ptr<ResultMetadataQueue>& resQueue = mResultMetadataQueue;
-    auto resultQueueRet = session->getCaptureResultMetadataQueue(
-        [&resQueue](const auto& descriptor) {
-            resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
-            if (!resQueue->isValid() || resQueue->availableToWrite() <= 0) {
-                ALOGE("HAL returns empty result metadata fmq, not use it");
-                resQueue = nullptr;
-                // Don't use the resQueue onwards.
-            }
-        });
-    if (!resultQueueRet.isOk()) {
-        ALOGE("Transaction error when getting result metadata queue from camera session: %s",
-                resultQueueRet.description().c_str());
-        return DEAD_OBJECT;
-    }
-    IF_ALOGV() {
-        session->interfaceChain([](
-            ::android::hardware::hidl_vec<::android::hardware::hidl_string> interfaceChain) {
-                ALOGV("Session interface chain:");
-                for (const auto& iface : interfaceChain) {
-                    ALOGV("  %s", iface.c_str());
-                }
-            });
-    }
-
-    camera_metadata_entry bufMgrMode =
-            mDeviceInfo.find(ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION);
-    if (bufMgrMode.count > 0) {
-         mUseHalBufManager = (bufMgrMode.data.u8[0] ==
-            ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION_HIDL_DEVICE_3_5);
-    }
-
-    camera_metadata_entry_t capabilities = mDeviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
-    for (size_t i = 0; i < capabilities.count; i++) {
-        uint8_t capability = capabilities.data.u8[i];
-        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_OFFLINE_PROCESSING) {
-            mSupportOfflineProcessing = true;
-        }
-    }
-
-    mInterface = new HalInterface(session, queue, mUseHalBufManager, mSupportOfflineProcessing);
-    std::string providerType;
-    mVendorTagId = manager->getProviderTagIdLocked(mId.string());
-    mTagMonitor.initialize(mVendorTagId);
-    if (!monitorTags.isEmpty()) {
-        mTagMonitor.parseTagsToMonitor(String8(monitorTags));
-    }
-
-    // Metadata tags needs fixup for monochrome camera device version less
-    // than 3.5.
-    hardware::hidl_version maxVersion{0,0};
-    res = manager->getHighestSupportedVersion(mId.string(), &maxVersion);
-    if (res != OK) {
-        ALOGE("%s: Error in getting camera device version id: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
-    int deviceVersion = HARDWARE_DEVICE_API_VERSION(
-            maxVersion.get_major(), maxVersion.get_minor());
-
-    bool isMonochrome = false;
-    for (size_t i = 0; i < capabilities.count; i++) {
-        uint8_t capability = capabilities.data.u8[i];
-        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME) {
-            isMonochrome = true;
-        }
-    }
-    mNeedFixupMonochromeTags = (isMonochrome && deviceVersion < CAMERA_DEVICE_API_VERSION_3_5);
-
-    return initializeCommonLocked();
-}
-
 status_t Camera3Device::initializeCommonLocked() {
 
     /** Start up status tracker thread */
@@ -322,7 +167,7 @@
     }
 
     /** Start up request queue thread */
-    mRequestThread = new RequestThread(
+    mRequestThread = createNewRequestThread(
             this, mStatusTracker, mInterface, sessionParamKeys,
             mUseHalBufManager, mSupportCameraMute);
     res = mRequestThread->run(String8::format("C3Dev-%s-ReqQueue", mId.string()).string());
@@ -380,7 +225,8 @@
         mRotateAndCropMappers.emplace(mId.c_str(), &mDeviceInfo);
     }
 
-    mInjectionMethods = new Camera3DeviceInjectionMethods(this);
+    // Hidl/AidlCamera3DeviceInjectionMethods
+    mInjectionMethods = createCamera3DeviceInjectionMethods(this);
 
     return OK;
 }
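
The two hunks above swap direct `new RequestThread(...)` and `new Camera3DeviceInjectionMethods(this)` expressions for virtual `createNewRequestThread()` / `createCamera3DeviceInjectionMethods()` calls, so a transport-specific subclass can choose the concrete type. Below is a minimal, self-contained sketch of that factory-method shape; every class name is a hypothetical stand-in, not a real camera service type.

    #include <memory>
    #include <string>

    // Hypothetical stand-ins used only to illustrate the virtual-factory pattern.
    struct RequestThreadSketch {
        virtual ~RequestThreadSketch() = default;
        virtual std::string transport() const = 0;
    };

    struct HidlRequestThreadSketch : RequestThreadSketch {
        std::string transport() const override { return "HIDL"; }
    };

    class DeviceSketch {
      public:
        virtual ~DeviceSketch() = default;
        // Shared initialization path: the base class drives the sequence,
        // the subclass decides which request-thread type gets built.
        std::string initializeCommon() { return createNewRequestThread()->transport(); }
      protected:
        virtual std::unique_ptr<RequestThreadSketch> createNewRequestThread() = 0;
    };

    class HidlDeviceSketch : public DeviceSketch {
      protected:
        std::unique_ptr<RequestThreadSketch> createNewRequestThread() override {
            return std::make_unique<HidlRequestThreadSketch>();
        }
    };

The shared logic stays in the base class while each transport-specific subclass only overrides the creation hooks.
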
@@ -530,86 +376,16 @@
     return measured;
 }
 
-hardware::graphics::common::V1_0::PixelFormat Camera3Device::mapToPixelFormat(
-        int frameworkFormat) {
-    return (hardware::graphics::common::V1_0::PixelFormat) frameworkFormat;
-}
-
-DataspaceFlags Camera3Device::mapToHidlDataspace(
-        android_dataspace dataSpace) {
-    return dataSpace;
-}
-
-BufferUsageFlags Camera3Device::mapToConsumerUsage(
-        uint64_t usage) {
-    return usage;
-}
-
-StreamRotation Camera3Device::mapToStreamRotation(camera_stream_rotation_t rotation) {
-    switch (rotation) {
-        case CAMERA_STREAM_ROTATION_0:
-            return StreamRotation::ROTATION_0;
-        case CAMERA_STREAM_ROTATION_90:
-            return StreamRotation::ROTATION_90;
-        case CAMERA_STREAM_ROTATION_180:
-            return StreamRotation::ROTATION_180;
-        case CAMERA_STREAM_ROTATION_270:
-            return StreamRotation::ROTATION_270;
-    }
-    ALOGE("%s: Unknown stream rotation %d", __FUNCTION__, rotation);
-    return StreamRotation::ROTATION_0;
-}
-
-status_t Camera3Device::mapToStreamConfigurationMode(
-        camera_stream_configuration_mode_t operationMode, StreamConfigurationMode *mode) {
-    if (mode == nullptr) return BAD_VALUE;
-    if (operationMode < CAMERA_VENDOR_STREAM_CONFIGURATION_MODE_START) {
-        switch(operationMode) {
-            case CAMERA_STREAM_CONFIGURATION_NORMAL_MODE:
-                *mode = StreamConfigurationMode::NORMAL_MODE;
-                break;
-            case CAMERA_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE:
-                *mode = StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE;
-                break;
-            default:
-                ALOGE("%s: Unknown stream configuration mode %d", __FUNCTION__, operationMode);
-                return BAD_VALUE;
-        }
-    } else {
-        *mode = static_cast<StreamConfigurationMode>(operationMode);
-    }
-    return OK;
-}
-
-int Camera3Device::mapToFrameworkFormat(
-        hardware::graphics::common::V1_0::PixelFormat pixelFormat) {
-    return static_cast<uint32_t>(pixelFormat);
-}
-
-android_dataspace Camera3Device::mapToFrameworkDataspace(
-        DataspaceFlags dataSpace) {
-    return static_cast<android_dataspace>(dataSpace);
-}
-
-uint64_t Camera3Device::mapConsumerToFrameworkUsage(
-        BufferUsageFlags usage) {
-    return usage;
-}
-
-uint64_t Camera3Device::mapProducerToFrameworkUsage(
-        BufferUsageFlags usage) {
-    return usage;
-}
-
-ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
+ssize_t Camera3Device::getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+        uint32_t height) const {
     // Get max jpeg size (area-wise) for default sensor pixel mode
     camera3::Size maxDefaultJpegResolution =
-            SessionConfigurationUtils::getMaxJpegResolution(mDeviceInfo,
+            SessionConfigurationUtils::getMaxJpegResolution(info,
                     /*isUltraHighResolutionSensor*/false);
     // Get max jpeg size (area-wise) for max resolution sensor pixel mode / 0 if
     // not ultra high res sensor
     camera3::Size uhrMaxJpegResolution =
-            SessionConfigurationUtils::getMaxJpegResolution(mDeviceInfo,
+            SessionConfigurationUtils::getMaxJpegResolution(info,
                     /*isUltraHighResolution*/true);
     if (maxDefaultJpegResolution.width == 0) {
         ALOGE("%s: Camera %s: Can't find valid available jpeg sizes in static metadata!",
@@ -625,7 +401,7 @@
 
     // Get max jpeg buffer size
     ssize_t maxJpegBufferSize = 0;
-    camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
+    camera_metadata_ro_entry jpegBufMaxSize = info.find(ANDROID_JPEG_MAX_SIZE);
     if (jpegBufMaxSize.count == 0) {
         ALOGE("%s: Camera %s: Can't find maximum JPEG size in static metadata!", __FUNCTION__,
                 mId.string());
@@ -655,9 +431,9 @@
     return jpegBufferSize;
 }
 
-ssize_t Camera3Device::getPointCloudBufferSize() const {
+ssize_t Camera3Device::getPointCloudBufferSize(const CameraMetadata &info) const {
     const int FLOATS_PER_POINT=4;
-    camera_metadata_ro_entry maxPointCount = mDeviceInfo.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
+    camera_metadata_ro_entry maxPointCount = info.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
     if (maxPointCount.count == 0) {
         ALOGE("%s: Camera %s: Can't find maximum depth point cloud size in static metadata!",
                 __FUNCTION__, mId.string());
@@ -668,14 +444,14 @@
     return maxBytesForPointCloud;
 }
 
-ssize_t Camera3Device::getRawOpaqueBufferSize(int32_t width, int32_t height,
-        bool maxResolution) const {
+ssize_t Camera3Device::getRawOpaqueBufferSize(const CameraMetadata &info, int32_t width,
+        int32_t height, bool maxResolution) const {
     const int PER_CONFIGURATION_SIZE = 3;
     const int WIDTH_OFFSET = 0;
     const int HEIGHT_OFFSET = 1;
     const int SIZE_OFFSET = 2;
     camera_metadata_ro_entry rawOpaqueSizes =
-        mDeviceInfo.find(
+        info.find(
             camera3::SessionConfigurationUtils::getAppropriateModeTag(
                     ANDROID_SENSOR_OPAQUE_RAW_SIZE,
                     maxResolution));
@@ -848,6 +624,21 @@
     return OK;
 }
 
+status_t Camera3Device::startWatchingTags(const String8 &tags) {
+    mTagMonitor.parseTagsToMonitor(tags);
+    return OK;
+}
+
+status_t Camera3Device::stopWatchingTags() {
+    mTagMonitor.disableMonitoring();
+    return OK;
+}
+
+status_t Camera3Device::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    mTagMonitor.getLatestMonitoredTagEvents(out);
+    return OK;
+}
+
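
The three small methods added above expose start/stop/dump control over tag monitoring. The sketch below is a hypothetical, self-contained mock that mirrors only the call order (start watching, record while watching, dump, stop); it is not the real TagMonitor, and all names are illustrative.

    #include <string>
    #include <vector>

    class TagWatcherSketch {
      public:
        void startWatchingTags(const std::string& tags) { mTags = tags; mEvents.clear(); }
        void stopWatchingTags() { mTags.clear(); }
        // Events are only captured while a tag list is being watched.
        void recordEvent(const std::string& event) {
            if (!mTags.empty()) mEvents.push_back(event);
        }
        void dumpWatchedEventsToVector(std::vector<std::string>& out) const { out = mEvents; }
      private:
        std::string mTags;
        std::vector<std::string> mEvents;
    };

    int main() {
        TagWatcherSketch watcher;
        watcher.startWatchingTags("android.control.aeMode");
        watcher.recordEvent("android.control.aeMode -> ON");
        std::vector<std::string> events;
        watcher.dumpWatchedEventsToVector(events);
        watcher.stopWatchingTags();
        return events.size() == 1 ? 0 : 1;
    }
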
 const CameraMetadata& Camera3Device::infoPhysical(const String8& physicalId) const {
     ALOGVV("%s: E", __FUNCTION__);
     if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
@@ -1027,176 +818,6 @@
     return res;
 }
 
-hardware::Return<void> Camera3Device::requestStreamBuffers(
-        const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
-        requestStreamBuffers_cb _hidl_cb) {
-    RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
-        *this, *mInterface, *this};
-    camera3::requestStreamBuffers(states, bufReqs, _hidl_cb);
-    return hardware::Void();
-}
-
-hardware::Return<void> Camera3Device::returnStreamBuffers(
-        const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
-    ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder, *mInterface};
-    camera3::returnStreamBuffers(states, buffers);
-    return hardware::Void();
-}
-
-hardware::Return<void> Camera3Device::processCaptureResult_3_4(
-        const hardware::hidl_vec<
-                hardware::camera::device::V3_4::CaptureResult>& results) {
-    // Ideally we should grab mLock, but that can lead to deadlock, and
-    // it's not super important to get up to date value of mStatus for this
-    // warning print, hence skipping the lock here
-    if (mStatus == STATUS_ERROR) {
-        // Per API contract, HAL should act as closed after device error
-        // But mStatus can be set to error by framework as well, so just log
-        // a warning here.
-        ALOGW("%s: received capture result in error state.", __FUNCTION__);
-    }
-
-    sp<NotificationListener> listener;
-    {
-        std::lock_guard<std::mutex> l(mOutputLock);
-        listener = mListener.promote();
-    }
-
-    if (mProcessCaptureResultLock.tryLock() != OK) {
-        // This should never happen; it indicates a wrong client implementation
-        // that doesn't follow the contract. But, we can be tolerant here.
-        ALOGE("%s: callback overlapped! waiting 1s...",
-                __FUNCTION__);
-        if (mProcessCaptureResultLock.timedLock(1000000000 /* 1s */) != OK) {
-            ALOGE("%s: cannot acquire lock in 1s, dropping results",
-                    __FUNCTION__);
-            // really don't know what to do, so bail out.
-            return hardware::Void();
-        }
-    }
-    CaptureOutputStates states {
-        mId,
-        mInFlightLock, mLastCompletedRegularFrameNumber,
-        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
-        mNextShutterFrameNumber,
-        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-        mNextResultFrameNumber,
-        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
-        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
-        mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
-        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        *mInterface
-    };
-
-    for (const auto& result : results) {
-        processOneCaptureResultLocked(states, result.v3_2, result.physicalCameraMetadata);
-    }
-    mProcessCaptureResultLock.unlock();
-    return hardware::Void();
-}
-
-// Only one processCaptureResult should be called at a time, so
-// the locks won't block. The locks are present here simply to enforce this.
-hardware::Return<void> Camera3Device::processCaptureResult(
-        const hardware::hidl_vec<
-                hardware::camera::device::V3_2::CaptureResult>& results) {
-    hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata;
-
-    // Ideally we should grab mLock, but that can lead to deadlock, and
-    // it's not super important to get up to date value of mStatus for this
-    // warning print, hence skipping the lock here
-    if (mStatus == STATUS_ERROR) {
-        // Per API contract, HAL should act as closed after device error
-        // But mStatus can be set to error by framework as well, so just log
-        // a warning here.
-        ALOGW("%s: received capture result in error state.", __FUNCTION__);
-    }
-
-    sp<NotificationListener> listener;
-    {
-        std::lock_guard<std::mutex> l(mOutputLock);
-        listener = mListener.promote();
-    }
-
-    if (mProcessCaptureResultLock.tryLock() != OK) {
-        // This should never happen; it indicates a wrong client implementation
-        // that doesn't follow the contract. But, we can be tolerant here.
-        ALOGE("%s: callback overlapped! waiting 1s...",
-                __FUNCTION__);
-        if (mProcessCaptureResultLock.timedLock(1000000000 /* 1s */) != OK) {
-            ALOGE("%s: cannot acquire lock in 1s, dropping results",
-                    __FUNCTION__);
-            // really don't know what to do, so bail out.
-            return hardware::Void();
-        }
-    }
-
-    CaptureOutputStates states {
-        mId,
-        mInFlightLock, mLastCompletedRegularFrameNumber,
-        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
-        mNextShutterFrameNumber,
-        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-        mNextResultFrameNumber,
-        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
-        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
-        mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
-        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        *mInterface
-    };
-
-    for (const auto& result : results) {
-        processOneCaptureResultLocked(states, result, noPhysMetadata);
-    }
-    mProcessCaptureResultLock.unlock();
-    return hardware::Void();
-}
-
-hardware::Return<void> Camera3Device::notify(
-        const hardware::hidl_vec<hardware::camera::device::V3_2::NotifyMsg>& msgs) {
-    // Ideally we should grab mLock, but that can lead to deadlock, and
-    // it's not super important to get up to date value of mStatus for this
-    // warning print, hence skipping the lock here
-    if (mStatus == STATUS_ERROR) {
-        // Per API contract, HAL should act as closed after device error
-        // But mStatus can be set to error by framework as well, so just log
-        // a warning here.
-        ALOGW("%s: received notify message in error state.", __FUNCTION__);
-    }
-
-    sp<NotificationListener> listener;
-    {
-        std::lock_guard<std::mutex> l(mOutputLock);
-        listener = mListener.promote();
-    }
-
-    CaptureOutputStates states {
-        mId,
-        mInFlightLock, mLastCompletedRegularFrameNumber,
-        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
-        mNextShutterFrameNumber,
-        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-        mNextResultFrameNumber,
-        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
-        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
-        mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
-        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        *mInterface
-    };
-    for (const auto& msg : msgs) {
-        camera3::notify(states, msg);
-    }
-    return hardware::Void();
-}
-
 status_t Camera3Device::captureList(const List<const PhysicalCameraSettingsList> &requestsList,
                                     const std::list<const SurfaceMap> &surfaceMaps,
                                     int64_t *lastFrameNumber) {
@@ -1357,7 +978,7 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
-            uint64_t consumerUsage) {
+            uint64_t consumerUsage, int dynamicRangeProfile) {
     ATRACE_CALL();
 
     if (consumer == nullptr) {
@@ -1370,7 +991,7 @@
 
     return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
             format, dataSpace, rotation, id, physicalCameraId, sensorPixelModesUsed, surfaceIds,
-            streamSetId, isShared, isMultiResolution, consumerUsage);
+            streamSetId, isShared, isMultiResolution, consumerUsage, dynamicRangeProfile);
 }
 
 static bool isRawFormat(int format) {
@@ -1390,7 +1011,7 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
         const String8& physicalCameraId, const std::unordered_set<int32_t> &sensorPixelModesUsed,
         std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
-        uint64_t consumerUsage) {
+        uint64_t consumerUsage, int dynamicRangeProfile) {
     ATRACE_CALL();
 
     Mutex::Autolock il(mInterfaceLock);
@@ -1451,7 +1072,7 @@
     if (format == HAL_PIXEL_FORMAT_BLOB) {
         ssize_t blobBufferSize;
         if (dataSpace == HAL_DATASPACE_DEPTH) {
-            blobBufferSize = getPointCloudBufferSize();
+            blobBufferSize = getPointCloudBufferSize(infoPhysical(physicalCameraId));
             if (blobBufferSize <= 0) {
                 SET_ERR_L("Invalid point cloud buffer size %zd", blobBufferSize);
                 return BAD_VALUE;
@@ -1459,7 +1080,7 @@
         } else if (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
             blobBufferSize = width * height;
         } else {
-            blobBufferSize = getJpegBufferSize(width, height);
+            blobBufferSize = getJpegBufferSize(infoPhysical(physicalCameraId), width, height);
             if (blobBufferSize <= 0) {
                 SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
                 return BAD_VALUE;
@@ -1468,12 +1089,13 @@
         newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, blobBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                isMultiResolution);
+                isMultiResolution, dynamicRangeProfile);
     } else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
         bool maxResolution =
                 sensorPixelModesUsed.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
                         sensorPixelModesUsed.end();
-        ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height, maxResolution);
+        ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(infoPhysical(physicalCameraId), width,
+                height, maxResolution);
         if (rawOpaqueBufferSize <= 0) {
             SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
             return BAD_VALUE;
@@ -1481,22 +1103,22 @@
         newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                isMultiResolution);
+                isMultiResolution, dynamicRangeProfile);
     } else if (isShared) {
         newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                mUseHalBufManager);
+                mUseHalBufManager, dynamicRangeProfile);
     } else if (consumers.size() == 0 && hasDeferredConsumer) {
         newStream = new Camera3OutputStream(mNextStreamId,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                isMultiResolution);
+                isMultiResolution, dynamicRangeProfile);
     } else {
         newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                isMultiResolution);
+                isMultiResolution, dynamicRangeProfile);
     }
 
     size_t consumerCount = consumers.size();
@@ -1583,6 +1205,7 @@
     streamInfo->originalFormat = stream->getOriginalFormat();
     streamInfo->dataSpaceOverridden = stream->isDataSpaceOverridden();
     streamInfo->originalDataSpace = stream->getOriginalDataSpace();
+    streamInfo->dynamicRangeProfile = stream->getDynamicRangeProfile();
     return OK;
 }
 
@@ -2195,7 +1818,8 @@
                 streamStats.emplace_back(stream->getWidth(), stream->getHeight(),
                     stream->getFormat(), stream->getDataSpace(), usage,
                     stream->getMaxHalBuffers(),
-                    stream->getMaxTotalBuffers() - stream->getMaxHalBuffers());
+                    stream->getMaxTotalBuffers() - stream->getMaxHalBuffers(),
+                    stream->getDynamicRangeProfile());
             }
         }
     }
@@ -2455,22 +2079,24 @@
     }
 
     if (mSupportCameraMute) {
-        auto testPatternModeEntry =
-                newRequest->mSettingsList.begin()->metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
-        newRequest->mOriginalTestPatternMode = testPatternModeEntry.count > 0 ?
-                testPatternModeEntry.data.i32[0] :
-                ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+        for (auto& settings : newRequest->mSettingsList) {
+            auto testPatternModeEntry =
+                    settings.metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
+            settings.mOriginalTestPatternMode = testPatternModeEntry.count > 0 ?
+                    testPatternModeEntry.data.i32[0] :
+                    ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
 
-        auto testPatternDataEntry =
-                newRequest->mSettingsList.begin()->metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
-        if (testPatternDataEntry.count >= 4) {
-            memcpy(newRequest->mOriginalTestPatternData, testPatternDataEntry.data.i32,
-                    sizeof(CaptureRequest::mOriginalTestPatternData));
-        } else {
-            newRequest->mOriginalTestPatternData[0] = 0;
-            newRequest->mOriginalTestPatternData[1] = 0;
-            newRequest->mOriginalTestPatternData[2] = 0;
-            newRequest->mOriginalTestPatternData[3] = 0;
+            auto testPatternDataEntry =
+                    settings.metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
+            if (testPatternDataEntry.count >= 4) {
+                memcpy(settings.mOriginalTestPatternData, testPatternDataEntry.data.i32,
+                        sizeof(PhysicalCameraSettings::mOriginalTestPatternData));
+            } else {
+                settings.mOriginalTestPatternData[0] = 0;
+                settings.mOriginalTestPatternData[1] = 0;
+                settings.mOriginalTestPatternData[2] = 0;
+                settings.mOriginalTestPatternData[3] = 0;
+            }
         }
     }
 
@@ -2677,6 +2303,7 @@
     }
 
     mGroupIdPhysicalCameraMap.clear();
+    bool composerSurfacePresent = false;
     for (size_t i = 0; i < mOutputStreams.size(); i++) {
 
         // Don't configure bidi streams twice, nor add them twice to the list
@@ -2701,7 +2328,8 @@
                                                                 // always occupy the initial entry.
             if (outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
                 bufferSizes[k] = static_cast<uint32_t>(
-                        getJpegBufferSize(outputStream->width, outputStream->height));
+                        getJpegBufferSize(infoPhysical(String8(outputStream->physical_camera_id)),
+                                outputStream->width, outputStream->height));
             } else if (outputStream->data_space ==
                     static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
                 bufferSizes[k] = outputStream->width * outputStream->height;
@@ -2716,6 +2344,10 @@
             const String8& physicalCameraId = mOutputStreams[i]->getPhysicalCameraId();
             mGroupIdPhysicalCameraMap[streamGroupId].insert(physicalCameraId);
         }
+
+        if (outputStream->usage & GraphicBuffer::USAGE_HW_COMPOSER) {
+            composerSurfacePresent = true;
+        }
     }
 
     config.streams = streams.editArray();
@@ -2783,6 +2415,8 @@
         }
     }
 
+    mRequestThread->setComposerSurface(composerSurfacePresent);
+
     // Request thread needs to know to avoid using repeat-last-settings protocol
     // across configure_streams() calls
     if (notifyRequestThread) {
@@ -2836,17 +2470,28 @@
         mRequestBufferSM.onStreamsConfigured();
     }
 
+    // Case: injectCamera() was called first; configureStreamsLocked() runs afterwards.
     // Since the streams configuration of the injection camera is based on the internal camera, we
-    // must wait until the internal camera configure streams before calling injectCamera() to
+    // must wait until the internal camera configures its streams before running the injection job to
     // configure the injection streams.
     if (mInjectionMethods->isInjecting()) {
-        ALOGV("%s: Injection camera %s: Start to configure streams.",
+        ALOGD("%s: Injection camera %s: Start to configure streams.",
               __FUNCTION__, mInjectionMethods->getInjectedCamId().string());
         res = mInjectionMethods->injectCamera(config, bufferSizes);
         if (res != OK) {
             ALOGE("Can't finish inject camera process!");
             return res;
         }
+    } else {
+        // Case: configureStreamsLocked() ran first; injectCamera() may be called later.
+        // If the stream configuration has been completed and the camera device is active, but the
+        // injection camera has not been injected yet, we need to store the stream configuration of
+        // the internal camera (because the stream configuration of the injection camera is based
+        // on the internal camera). When injection occurs later, this configuration can be used by
+        // the injection camera.
+        ALOGV("%s: The stream configuration is complete and the camera device is active, but the"
+              " injection camera has not been injected yet.", __FUNCTION__);
+        mInjectionMethods->storeInjectionConfig(config, bufferSizes);
     }
 
     return OK;
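
The new else-branch stores the internal camera's stream configuration so that a later injection can reuse it. The sketch below is a minimal, hypothetical illustration of that store-or-apply decision; the struct and method names are illustrative and do not reflect the real Camera3DeviceInjectionMethods internals.

    #include <optional>
    #include <vector>

    struct StreamConfigSketch {
        std::vector<int> streamIds;
    };

    class InjectionMethodsSketch {
      public:
        bool isInjecting() const { return mInjecting; }
        // Configure the injection streams immediately from the internal camera's config.
        void injectCamera(const StreamConfigSketch& config) { mActiveConfig = config; }
        // No injection yet: remember the config so a later injection can replay it.
        void storeInjectionConfig(const StreamConfigSketch& config) { mStoredConfig = config; }
        void onInjectionStarted() {
            mInjecting = true;
            if (mStoredConfig) {
                injectCamera(*mStoredConfig);
            }
        }
      private:
        bool mInjecting = false;
        std::optional<StreamConfigSketch> mStoredConfig;
        std::optional<StreamConfigSketch> mActiveConfig;
    };

Either ordering ends with the injection session seeing the same stream configuration as the internal camera.
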
@@ -3072,712 +2717,18 @@
 
 void Camera3Device::monitorMetadata(TagMonitor::eventSource source,
         int64_t frameNumber, nsecs_t timestamp, const CameraMetadata& metadata,
-        const std::unordered_map<std::string, CameraMetadata>& physicalMetadata) {
+        const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+        const camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+        int32_t inputStreamId) {
 
     mTagMonitor.monitorMetadata(source, frameNumber, timestamp, metadata,
-            physicalMetadata);
+            physicalMetadata, outputBuffers, numOutputBuffers, inputStreamId);
 }
 
 /**
  * HalInterface inner class methods
  */
 
-Camera3Device::HalInterface::HalInterface(
-            sp<ICameraDeviceSession> &session,
-            std::shared_ptr<RequestMetadataQueue> queue,
-            bool useHalBufManager, bool supportOfflineProcessing) :
-        mHidlSession(session),
-        mRequestMetadataQueue(queue),
-        mUseHalBufManager(useHalBufManager),
-        mIsReconfigurationQuerySupported(true),
-        mSupportOfflineProcessing(supportOfflineProcessing) {
-    // Check with hardware service manager if we can downcast these interfaces
-    // Somewhat expensive, so cache the results at startup
-    auto castResult_3_7 = device::V3_7::ICameraDeviceSession::castFrom(mHidlSession);
-    if (castResult_3_7.isOk()) {
-        mHidlSession_3_7 = castResult_3_7;
-    }
-    auto castResult_3_6 = device::V3_6::ICameraDeviceSession::castFrom(mHidlSession);
-    if (castResult_3_6.isOk()) {
-        mHidlSession_3_6 = castResult_3_6;
-    }
-    auto castResult_3_5 = device::V3_5::ICameraDeviceSession::castFrom(mHidlSession);
-    if (castResult_3_5.isOk()) {
-        mHidlSession_3_5 = castResult_3_5;
-    }
-    auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
-    if (castResult_3_4.isOk()) {
-        mHidlSession_3_4 = castResult_3_4;
-    }
-    auto castResult_3_3 = device::V3_3::ICameraDeviceSession::castFrom(mHidlSession);
-    if (castResult_3_3.isOk()) {
-        mHidlSession_3_3 = castResult_3_3;
-    }
-}
-
-Camera3Device::HalInterface::HalInterface() :
-        mUseHalBufManager(false),
-        mSupportOfflineProcessing(false) {}
-
-Camera3Device::HalInterface::HalInterface(const HalInterface& other) :
-        mHidlSession(other.mHidlSession),
-        mRequestMetadataQueue(other.mRequestMetadataQueue),
-        mUseHalBufManager(other.mUseHalBufManager),
-        mSupportOfflineProcessing(other.mSupportOfflineProcessing) {}
-
-bool Camera3Device::HalInterface::valid() {
-    return (mHidlSession != nullptr);
-}
-
-void Camera3Device::HalInterface::clear() {
-    mHidlSession_3_7.clear();
-    mHidlSession_3_6.clear();
-    mHidlSession_3_5.clear();
-    mHidlSession_3_4.clear();
-    mHidlSession_3_3.clear();
-    mHidlSession.clear();
-}
-
-status_t Camera3Device::HalInterface::constructDefaultRequestSettings(
-        camera_request_template_t templateId,
-        /*out*/ camera_metadata_t **requestTemplate) {
-    ATRACE_NAME("CameraHal::constructDefaultRequestSettings");
-    if (!valid()) return INVALID_OPERATION;
-    status_t res = OK;
-
-    common::V1_0::Status status;
-
-    auto requestCallback = [&status, &requestTemplate]
-            (common::V1_0::Status s, const device::V3_2::CameraMetadata& request) {
-            status = s;
-            if (status == common::V1_0::Status::OK) {
-                const camera_metadata *r =
-                        reinterpret_cast<const camera_metadata_t*>(request.data());
-                size_t expectedSize = request.size();
-                int ret = validate_camera_metadata_structure(r, &expectedSize);
-                if (ret == OK || ret == CAMERA_METADATA_VALIDATION_SHIFTED) {
-                    *requestTemplate = clone_camera_metadata(r);
-                    if (*requestTemplate == nullptr) {
-                        ALOGE("%s: Unable to clone camera metadata received from HAL",
-                                __FUNCTION__);
-                        status = common::V1_0::Status::INTERNAL_ERROR;
-                    }
-                } else {
-                    ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
-                    status = common::V1_0::Status::INTERNAL_ERROR;
-                }
-            }
-        };
-    hardware::Return<void> err;
-    RequestTemplate id;
-    switch (templateId) {
-        case CAMERA_TEMPLATE_PREVIEW:
-            id = RequestTemplate::PREVIEW;
-            break;
-        case CAMERA_TEMPLATE_STILL_CAPTURE:
-            id = RequestTemplate::STILL_CAPTURE;
-            break;
-        case CAMERA_TEMPLATE_VIDEO_RECORD:
-            id = RequestTemplate::VIDEO_RECORD;
-            break;
-        case CAMERA_TEMPLATE_VIDEO_SNAPSHOT:
-            id = RequestTemplate::VIDEO_SNAPSHOT;
-            break;
-        case CAMERA_TEMPLATE_ZERO_SHUTTER_LAG:
-            id = RequestTemplate::ZERO_SHUTTER_LAG;
-            break;
-        case CAMERA_TEMPLATE_MANUAL:
-            id = RequestTemplate::MANUAL;
-            break;
-        default:
-            // Unknown template ID, or this HAL is too old to support it
-            return BAD_VALUE;
-    }
-    err = mHidlSession->constructDefaultRequestSettings(id, requestCallback);
-
-    if (!err.isOk()) {
-        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-        res = DEAD_OBJECT;
-    } else {
-        res = CameraProviderManager::mapToStatusT(status);
-    }
-
-    return res;
-}
-
-bool Camera3Device::HalInterface::isReconfigurationRequired(CameraMetadata& oldSessionParams,
-        CameraMetadata& newSessionParams) {
-    // We do reconfiguration by default;
-    bool ret = true;
-    if ((mHidlSession_3_5 != nullptr) && mIsReconfigurationQuerySupported) {
-        android::hardware::hidl_vec<uint8_t> oldParams, newParams;
-        camera_metadata_t* oldSessioMeta = const_cast<camera_metadata_t*>(
-                oldSessionParams.getAndLock());
-        camera_metadata_t* newSessioMeta = const_cast<camera_metadata_t*>(
-                newSessionParams.getAndLock());
-        oldParams.setToExternal(reinterpret_cast<uint8_t*>(oldSessioMeta),
-                get_camera_metadata_size(oldSessioMeta));
-        newParams.setToExternal(reinterpret_cast<uint8_t*>(newSessioMeta),
-                get_camera_metadata_size(newSessioMeta));
-        hardware::camera::common::V1_0::Status callStatus;
-        bool required;
-        auto hidlCb = [&callStatus, &required] (hardware::camera::common::V1_0::Status s,
-                bool requiredFlag) {
-            callStatus = s;
-            required = requiredFlag;
-        };
-        auto err = mHidlSession_3_5->isReconfigurationRequired(oldParams, newParams, hidlCb);
-        oldSessionParams.unlock(oldSessioMeta);
-        newSessionParams.unlock(newSessioMeta);
-        if (err.isOk()) {
-            switch (callStatus) {
-                case hardware::camera::common::V1_0::Status::OK:
-                    ret = required;
-                    break;
-                case hardware::camera::common::V1_0::Status::METHOD_NOT_SUPPORTED:
-                    mIsReconfigurationQuerySupported = false;
-                    ret = true;
-                    break;
-                default:
-                    ALOGV("%s: Reconfiguration query failed: %d", __FUNCTION__, callStatus);
-                    ret = true;
-            }
-        } else {
-            ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, err.description().c_str());
-            ret = true;
-        }
-    }
-
-    return ret;
-}
-
-status_t Camera3Device::HalInterface::configureStreams(const camera_metadata_t *sessionParams,
-        camera_stream_configuration *config, const std::vector<uint32_t>& bufferSizes) {
-    ATRACE_NAME("CameraHal::configureStreams");
-    if (!valid()) return INVALID_OPERATION;
-    status_t res = OK;
-
-    if (config->input_is_multi_resolution && mHidlSession_3_7 == nullptr) {
-        ALOGE("%s: Camera device doesn't support multi-resolution input stream", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    // Convert stream config to HIDL
-    std::set<int> activeStreams;
-    device::V3_2::StreamConfiguration requestedConfiguration3_2;
-    device::V3_4::StreamConfiguration requestedConfiguration3_4;
-    device::V3_7::StreamConfiguration requestedConfiguration3_7;
-    requestedConfiguration3_2.streams.resize(config->num_streams);
-    requestedConfiguration3_4.streams.resize(config->num_streams);
-    requestedConfiguration3_7.streams.resize(config->num_streams);
-    for (size_t i = 0; i < config->num_streams; i++) {
-        device::V3_2::Stream &dst3_2 = requestedConfiguration3_2.streams[i];
-        device::V3_4::Stream &dst3_4 = requestedConfiguration3_4.streams[i];
-        device::V3_7::Stream &dst3_7 = requestedConfiguration3_7.streams[i];
-        camera3::camera_stream_t *src = config->streams[i];
-
-        Camera3Stream* cam3stream = Camera3Stream::cast(src);
-        cam3stream->setBufferFreedListener(this);
-        int streamId = cam3stream->getId();
-        StreamType streamType;
-        switch (src->stream_type) {
-            case CAMERA_STREAM_OUTPUT:
-                streamType = StreamType::OUTPUT;
-                break;
-            case CAMERA_STREAM_INPUT:
-                streamType = StreamType::INPUT;
-                break;
-            default:
-                ALOGE("%s: Stream %d: Unsupported stream type %d",
-                        __FUNCTION__, streamId, config->streams[i]->stream_type);
-                return BAD_VALUE;
-        }
-        dst3_2.id = streamId;
-        dst3_2.streamType = streamType;
-        dst3_2.width = src->width;
-        dst3_2.height = src->height;
-        dst3_2.usage = mapToConsumerUsage(cam3stream->getUsage());
-        dst3_2.rotation = mapToStreamRotation((camera_stream_rotation_t) src->rotation);
-        // For HidlSession version 3.5 or newer, the format and dataSpace sent
-        // to HAL are original, not the overridden ones.
-        if (mHidlSession_3_5 != nullptr) {
-            dst3_2.format = mapToPixelFormat(cam3stream->isFormatOverridden() ?
-                    cam3stream->getOriginalFormat() : src->format);
-            dst3_2.dataSpace = mapToHidlDataspace(cam3stream->isDataSpaceOverridden() ?
-                    cam3stream->getOriginalDataSpace() : src->data_space);
-        } else {
-            dst3_2.format = mapToPixelFormat(src->format);
-            dst3_2.dataSpace = mapToHidlDataspace(src->data_space);
-        }
-        dst3_4.v3_2 = dst3_2;
-        dst3_4.bufferSize = bufferSizes[i];
-        if (src->physical_camera_id != nullptr) {
-            dst3_4.physicalCameraId = src->physical_camera_id;
-        }
-        dst3_7.v3_4 = dst3_4;
-        dst3_7.groupId = cam3stream->getHalStreamGroupId();
-        dst3_7.sensorPixelModesUsed.resize(src->sensor_pixel_modes_used.size());
-        size_t j = 0;
-        for (int mode : src->sensor_pixel_modes_used) {
-            dst3_7.sensorPixelModesUsed[j++] =
-                    static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
-        }
-        activeStreams.insert(streamId);
-        // Create Buffer ID map if necessary
-        mBufferRecords.tryCreateBufferCache(streamId);
-    }
-    // remove BufferIdMap for deleted streams
-    mBufferRecords.removeInactiveBufferCaches(activeStreams);
-
-    StreamConfigurationMode operationMode;
-    res = mapToStreamConfigurationMode(
-            (camera_stream_configuration_mode_t) config->operation_mode,
-            /*out*/ &operationMode);
-    if (res != OK) {
-        return res;
-    }
-    requestedConfiguration3_2.operationMode = operationMode;
-    requestedConfiguration3_4.operationMode = operationMode;
-    requestedConfiguration3_7.operationMode = operationMode;
-    size_t sessionParamSize = get_camera_metadata_size(sessionParams);
-    requestedConfiguration3_4.sessionParams.setToExternal(
-            reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
-            sessionParamSize);
-    requestedConfiguration3_7.operationMode = operationMode;
-    requestedConfiguration3_7.sessionParams.setToExternal(
-            reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
-            sessionParamSize);
-
-    // Invoke configureStreams
-    device::V3_3::HalStreamConfiguration finalConfiguration;
-    device::V3_4::HalStreamConfiguration finalConfiguration3_4;
-    device::V3_6::HalStreamConfiguration finalConfiguration3_6;
-    common::V1_0::Status status;
-
-    auto configStream34Cb = [&status, &finalConfiguration3_4]
-            (common::V1_0::Status s, const device::V3_4::HalStreamConfiguration& halConfiguration) {
-                finalConfiguration3_4 = halConfiguration;
-                status = s;
-            };
-
-    auto configStream36Cb = [&status, &finalConfiguration3_6]
-            (common::V1_0::Status s, const device::V3_6::HalStreamConfiguration& halConfiguration) {
-                finalConfiguration3_6 = halConfiguration;
-                status = s;
-            };
-
-    auto postprocConfigStream34 = [&finalConfiguration, &finalConfiguration3_4]
-            (hardware::Return<void>& err) -> status_t {
-                if (!err.isOk()) {
-                    ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-                    return DEAD_OBJECT;
-                }
-                finalConfiguration.streams.resize(finalConfiguration3_4.streams.size());
-                for (size_t i = 0; i < finalConfiguration3_4.streams.size(); i++) {
-                    finalConfiguration.streams[i] = finalConfiguration3_4.streams[i].v3_3;
-                }
-                return OK;
-            };
-
-    auto postprocConfigStream36 = [&finalConfiguration, &finalConfiguration3_6]
-            (hardware::Return<void>& err) -> status_t {
-                if (!err.isOk()) {
-                    ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-                    return DEAD_OBJECT;
-                }
-                finalConfiguration.streams.resize(finalConfiguration3_6.streams.size());
-                for (size_t i = 0; i < finalConfiguration3_6.streams.size(); i++) {
-                    finalConfiguration.streams[i] = finalConfiguration3_6.streams[i].v3_4.v3_3;
-                }
-                return OK;
-            };
-
-    // See which version of HAL we have
-    if (mHidlSession_3_7 != nullptr) {
-        ALOGV("%s: v3.7 device found", __FUNCTION__);
-        requestedConfiguration3_7.streamConfigCounter = mNextStreamConfigCounter++;
-        requestedConfiguration3_7.multiResolutionInputImage = config->input_is_multi_resolution;
-        auto err = mHidlSession_3_7->configureStreams_3_7(
-                requestedConfiguration3_7, configStream36Cb);
-        res = postprocConfigStream36(err);
-        if (res != OK) {
-            return res;
-        }
-    } else if (mHidlSession_3_6 != nullptr) {
-        ALOGV("%s: v3.6 device found", __FUNCTION__);
-        device::V3_5::StreamConfiguration requestedConfiguration3_5;
-        requestedConfiguration3_5.v3_4 = requestedConfiguration3_4;
-        requestedConfiguration3_5.streamConfigCounter = mNextStreamConfigCounter++;
-        auto err = mHidlSession_3_6->configureStreams_3_6(
-                requestedConfiguration3_5, configStream36Cb);
-        res = postprocConfigStream36(err);
-        if (res != OK) {
-            return res;
-        }
-    } else if (mHidlSession_3_5 != nullptr) {
-        ALOGV("%s: v3.5 device found", __FUNCTION__);
-        device::V3_5::StreamConfiguration requestedConfiguration3_5;
-        requestedConfiguration3_5.v3_4 = requestedConfiguration3_4;
-        requestedConfiguration3_5.streamConfigCounter = mNextStreamConfigCounter++;
-        auto err = mHidlSession_3_5->configureStreams_3_5(
-                requestedConfiguration3_5, configStream34Cb);
-        res = postprocConfigStream34(err);
-        if (res != OK) {
-            return res;
-        }
-    } else if (mHidlSession_3_4 != nullptr) {
-        // We do; use v3.4 for the call
-        ALOGV("%s: v3.4 device found", __FUNCTION__);
-        auto err = mHidlSession_3_4->configureStreams_3_4(
-                requestedConfiguration3_4, configStream34Cb);
-        res = postprocConfigStream34(err);
-        if (res != OK) {
-            return res;
-        }
-    } else if (mHidlSession_3_3 != nullptr) {
-        // We do; use v3.3 for the call
-        ALOGV("%s: v3.3 device found", __FUNCTION__);
-        auto err = mHidlSession_3_3->configureStreams_3_3(requestedConfiguration3_2,
-            [&status, &finalConfiguration]
-            (common::V1_0::Status s, const device::V3_3::HalStreamConfiguration& halConfiguration) {
-                finalConfiguration = halConfiguration;
-                status = s;
-            });
-        if (!err.isOk()) {
-            ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-            return DEAD_OBJECT;
-        }
-    } else {
-        // We don't; use v3.2 call and construct a v3.3 HalStreamConfiguration
-        ALOGV("%s: v3.2 device found", __FUNCTION__);
-        HalStreamConfiguration finalConfiguration_3_2;
-        auto err = mHidlSession->configureStreams(requestedConfiguration3_2,
-                [&status, &finalConfiguration_3_2]
-                (common::V1_0::Status s, const HalStreamConfiguration& halConfiguration) {
-                    finalConfiguration_3_2 = halConfiguration;
-                    status = s;
-                });
-        if (!err.isOk()) {
-            ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-            return DEAD_OBJECT;
-        }
-        finalConfiguration.streams.resize(finalConfiguration_3_2.streams.size());
-        for (size_t i = 0; i < finalConfiguration_3_2.streams.size(); i++) {
-            finalConfiguration.streams[i].v3_2 = finalConfiguration_3_2.streams[i];
-            finalConfiguration.streams[i].overrideDataSpace =
-                    requestedConfiguration3_2.streams[i].dataSpace;
-        }
-    }
-
-    if (status != common::V1_0::Status::OK ) {
-        return CameraProviderManager::mapToStatusT(status);
-    }
-
-    // And convert output stream configuration from HIDL
-
-    for (size_t i = 0; i < config->num_streams; i++) {
-        camera3::camera_stream_t *dst = config->streams[i];
-        int streamId = Camera3Stream::cast(dst)->getId();
-
-        // Start scan at i, with the assumption that the stream order matches
-        size_t realIdx = i;
-        bool found = false;
-        size_t halStreamCount = finalConfiguration.streams.size();
-        for (size_t idx = 0; idx < halStreamCount; idx++) {
-            if (finalConfiguration.streams[realIdx].v3_2.id == streamId) {
-                found = true;
-                break;
-            }
-            realIdx = (realIdx >= halStreamCount - 1) ? 0 : realIdx + 1;
-        }
-        if (!found) {
-            ALOGE("%s: Stream %d not found in stream configuration response from HAL",
-                    __FUNCTION__, streamId);
-            return INVALID_OPERATION;
-        }
-        device::V3_3::HalStream &src = finalConfiguration.streams[realIdx];
-        device::V3_6::HalStream &src_36 = finalConfiguration3_6.streams[realIdx];
-
-        Camera3Stream* dstStream = Camera3Stream::cast(dst);
-        int overrideFormat = mapToFrameworkFormat(src.v3_2.overrideFormat);
-        android_dataspace overrideDataSpace = mapToFrameworkDataspace(src.overrideDataSpace);
-
-        if (mHidlSession_3_6 != nullptr) {
-            dstStream->setOfflineProcessingSupport(src_36.supportOffline);
-        }
-
-        if (dstStream->getOriginalFormat() != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
-            dstStream->setFormatOverride(false);
-            dstStream->setDataSpaceOverride(false);
-            if (dst->format != overrideFormat) {
-                ALOGE("%s: Stream %d: Format override not allowed for format 0x%x", __FUNCTION__,
-                        streamId, dst->format);
-            }
-            if (dst->data_space != overrideDataSpace) {
-                ALOGE("%s: Stream %d: DataSpace override not allowed for format 0x%x", __FUNCTION__,
-                        streamId, dst->format);
-            }
-        } else {
-            bool needFormatOverride =
-                    requestedConfiguration3_2.streams[i].format != src.v3_2.overrideFormat;
-            bool needDataspaceOverride =
-                    requestedConfiguration3_2.streams[i].dataSpace != src.overrideDataSpace;
-            // Override allowed with IMPLEMENTATION_DEFINED
-            dstStream->setFormatOverride(needFormatOverride);
-            dstStream->setDataSpaceOverride(needDataspaceOverride);
-            dst->format = overrideFormat;
-            dst->data_space = overrideDataSpace;
-        }
-
-        if (dst->stream_type == CAMERA_STREAM_INPUT) {
-            if (src.v3_2.producerUsage != 0) {
-                ALOGE("%s: Stream %d: INPUT streams must have 0 for producer usage",
-                        __FUNCTION__, streamId);
-                return INVALID_OPERATION;
-            }
-            dstStream->setUsage(
-                    mapConsumerToFrameworkUsage(src.v3_2.consumerUsage));
-        } else {
-            // OUTPUT
-            if (src.v3_2.consumerUsage != 0) {
-                ALOGE("%s: Stream %d: OUTPUT streams must have 0 for consumer usage",
-                        __FUNCTION__, streamId);
-                return INVALID_OPERATION;
-            }
-            dstStream->setUsage(
-                    mapProducerToFrameworkUsage(src.v3_2.producerUsage));
-        }
-        dst->max_buffers = src.v3_2.maxBuffers;
-    }
-
-    return res;
-}
-
-status_t Camera3Device::HalInterface::configureInjectedStreams(
-        const camera_metadata_t* sessionParams, camera_stream_configuration* config,
-        const std::vector<uint32_t>& bufferSizes,
-        const CameraMetadata& cameraCharacteristics) {
-    ATRACE_NAME("InjectionCameraHal::configureStreams");
-    if (!valid()) return INVALID_OPERATION;
-    status_t res = OK;
-
-    if (config->input_is_multi_resolution) {
-        ALOGE("%s: Injection camera device doesn't support multi-resolution input "
-                "stream", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    // Convert stream config to HIDL
-    std::set<int> activeStreams;
-    device::V3_2::StreamConfiguration requestedConfiguration3_2;
-    device::V3_4::StreamConfiguration requestedConfiguration3_4;
-    device::V3_7::StreamConfiguration requestedConfiguration3_7;
-    requestedConfiguration3_2.streams.resize(config->num_streams);
-    requestedConfiguration3_4.streams.resize(config->num_streams);
-    requestedConfiguration3_7.streams.resize(config->num_streams);
-    for (size_t i = 0; i < config->num_streams; i++) {
-        device::V3_2::Stream& dst3_2 = requestedConfiguration3_2.streams[i];
-        device::V3_4::Stream& dst3_4 = requestedConfiguration3_4.streams[i];
-        device::V3_7::Stream& dst3_7 = requestedConfiguration3_7.streams[i];
-        camera3::camera_stream_t* src = config->streams[i];
-
-        Camera3Stream* cam3stream = Camera3Stream::cast(src);
-        cam3stream->setBufferFreedListener(this);
-        int streamId = cam3stream->getId();
-        StreamType streamType;
-        switch (src->stream_type) {
-            case CAMERA_STREAM_OUTPUT:
-                streamType = StreamType::OUTPUT;
-                break;
-            case CAMERA_STREAM_INPUT:
-                streamType = StreamType::INPUT;
-                break;
-            default:
-                ALOGE("%s: Stream %d: Unsupported stream type %d", __FUNCTION__,
-                        streamId, config->streams[i]->stream_type);
-            return BAD_VALUE;
-        }
-        dst3_2.id = streamId;
-        dst3_2.streamType = streamType;
-        dst3_2.width = src->width;
-        dst3_2.height = src->height;
-        dst3_2.usage = mapToConsumerUsage(cam3stream->getUsage());
-        dst3_2.rotation =
-                mapToStreamRotation((camera_stream_rotation_t)src->rotation);
-        // For HidlSession version 3.5 or newer, the format and dataSpace sent
-        // to HAL are original, not the overridden ones.
-        if (mHidlSession_3_5 != nullptr) {
-            dst3_2.format = mapToPixelFormat(cam3stream->isFormatOverridden()
-                                            ? cam3stream->getOriginalFormat()
-                                            : src->format);
-            dst3_2.dataSpace =
-                    mapToHidlDataspace(cam3stream->isDataSpaceOverridden()
-                                    ? cam3stream->getOriginalDataSpace()
-                                    : src->data_space);
-        } else {
-            dst3_2.format = mapToPixelFormat(src->format);
-            dst3_2.dataSpace = mapToHidlDataspace(src->data_space);
-        }
-        dst3_4.v3_2 = dst3_2;
-        dst3_4.bufferSize = bufferSizes[i];
-        if (src->physical_camera_id != nullptr) {
-            dst3_4.physicalCameraId = src->physical_camera_id;
-        }
-        dst3_7.v3_4 = dst3_4;
-        dst3_7.groupId = cam3stream->getHalStreamGroupId();
-        dst3_7.sensorPixelModesUsed.resize(src->sensor_pixel_modes_used.size());
-        size_t j = 0;
-        for (int mode : src->sensor_pixel_modes_used) {
-            dst3_7.sensorPixelModesUsed[j++] =
-                    static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
-        }
-        activeStreams.insert(streamId);
-        // Create Buffer ID map if necessary
-        mBufferRecords.tryCreateBufferCache(streamId);
-    }
-    // remove BufferIdMap for deleted streams
-    mBufferRecords.removeInactiveBufferCaches(activeStreams);
-
-    StreamConfigurationMode operationMode;
-    res = mapToStreamConfigurationMode(
-            (camera_stream_configuration_mode_t)config->operation_mode,
-            /*out*/ &operationMode);
-    if (res != OK) {
-        return res;
-    }
-    requestedConfiguration3_7.operationMode = operationMode;
-    size_t sessionParamSize = get_camera_metadata_size(sessionParams);
-    requestedConfiguration3_7.operationMode = operationMode;
-    requestedConfiguration3_7.sessionParams.setToExternal(
-            reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
-            sessionParamSize);
-
-    // See which version of HAL we have
-    if (mHidlSession_3_7 != nullptr) {
-        requestedConfiguration3_7.streamConfigCounter = mNextStreamConfigCounter++;
-        requestedConfiguration3_7.multiResolutionInputImage =
-                config->input_is_multi_resolution;
-
-        const camera_metadata_t* rawMetadata = cameraCharacteristics.getAndLock();
-        ::android::hardware::camera::device::V3_2::CameraMetadata hidlChars = {};
-        hidlChars.setToExternal(
-                reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(rawMetadata)),
-                get_camera_metadata_size(rawMetadata));
-        cameraCharacteristics.unlock(rawMetadata);
-
-        sp<hardware::camera::device::V3_7::ICameraInjectionSession>
-                hidlInjectionSession_3_7;
-        auto castInjectionResult_3_7 =
-                device::V3_7::ICameraInjectionSession::castFrom(mHidlSession_3_7);
-        if (castInjectionResult_3_7.isOk()) {
-            hidlInjectionSession_3_7 = castInjectionResult_3_7;
-        } else {
-            ALOGE("%s: Transaction error: %s", __FUNCTION__,
-                    castInjectionResult_3_7.description().c_str());
-            return DEAD_OBJECT;
-        }
-
-        auto err = hidlInjectionSession_3_7->configureInjectionStreams(
-                requestedConfiguration3_7, hidlChars);
-        if (!err.isOk()) {
-            ALOGE("%s: Transaction error: %s", __FUNCTION__,
-                    err.description().c_str());
-            return DEAD_OBJECT;
-        }
-    } else {
-        ALOGE("%s: mHidlSession_3_7 does not exist, the lowest version of injection "
-                "session is 3.7", __FUNCTION__);
-        return DEAD_OBJECT;
-    }
-
-    return res;
-}
-
-status_t Camera3Device::HalInterface::wrapAsHidlRequest(camera_capture_request_t* request,
-        /*out*/device::V3_2::CaptureRequest* captureRequest,
-        /*out*/std::vector<native_handle_t*>* handlesCreated,
-        /*out*/std::vector<std::pair<int32_t, int32_t>>* inflightBuffers) {
-    ATRACE_CALL();
-    if (captureRequest == nullptr || handlesCreated == nullptr || inflightBuffers == nullptr) {
-        ALOGE("%s: captureRequest (%p), handlesCreated (%p), and inflightBuffers(%p) "
-                "must not be null", __FUNCTION__, captureRequest, handlesCreated, inflightBuffers);
-        return BAD_VALUE;
-    }
-
-    captureRequest->frameNumber = request->frame_number;
-
-    captureRequest->fmqSettingsSize = 0;
-
-    {
-        if (request->input_buffer != nullptr) {
-            int32_t streamId = Camera3Stream::cast(request->input_buffer->stream)->getId();
-            buffer_handle_t buf = *(request->input_buffer->buffer);
-            auto pair = getBufferId(buf, streamId);
-            bool isNewBuffer = pair.first;
-            uint64_t bufferId = pair.second;
-            captureRequest->inputBuffer.streamId = streamId;
-            captureRequest->inputBuffer.bufferId = bufferId;
-            captureRequest->inputBuffer.buffer = (isNewBuffer) ? buf : nullptr;
-            captureRequest->inputBuffer.status = BufferStatus::OK;
-            native_handle_t *acquireFence = nullptr;
-            if (request->input_buffer->acquire_fence != -1) {
-                acquireFence = native_handle_create(1,0);
-                acquireFence->data[0] = request->input_buffer->acquire_fence;
-                handlesCreated->push_back(acquireFence);
-            }
-            captureRequest->inputBuffer.acquireFence = acquireFence;
-            captureRequest->inputBuffer.releaseFence = nullptr;
-
-            mBufferRecords.pushInflightBuffer(captureRequest->frameNumber, streamId,
-                    request->input_buffer->buffer);
-            inflightBuffers->push_back(std::make_pair(captureRequest->frameNumber, streamId));
-        } else {
-            captureRequest->inputBuffer.streamId = -1;
-            captureRequest->inputBuffer.bufferId = BUFFER_ID_NO_BUFFER;
-        }
-
-        captureRequest->outputBuffers.resize(request->num_output_buffers);
-        for (size_t i = 0; i < request->num_output_buffers; i++) {
-            const camera_stream_buffer_t *src = request->output_buffers + i;
-            StreamBuffer &dst = captureRequest->outputBuffers[i];
-            int32_t streamId = Camera3Stream::cast(src->stream)->getId();
-            if (src->buffer != nullptr) {
-                buffer_handle_t buf = *(src->buffer);
-                auto pair = getBufferId(buf, streamId);
-                bool isNewBuffer = pair.first;
-                dst.bufferId = pair.second;
-                dst.buffer = isNewBuffer ? buf : nullptr;
-                native_handle_t *acquireFence = nullptr;
-                if (src->acquire_fence != -1) {
-                    acquireFence = native_handle_create(1,0);
-                    acquireFence->data[0] = src->acquire_fence;
-                    handlesCreated->push_back(acquireFence);
-                }
-                dst.acquireFence = acquireFence;
-            } else if (mUseHalBufManager) {
-                // HAL buffer management path
-                dst.bufferId = BUFFER_ID_NO_BUFFER;
-                dst.buffer = nullptr;
-                dst.acquireFence = nullptr;
-            } else {
-                ALOGE("%s: cannot send a null buffer in capture request!", __FUNCTION__);
-                return BAD_VALUE;
-            }
-            dst.streamId = streamId;
-            dst.status = BufferStatus::OK;
-            dst.releaseFence = nullptr;
-
-            // Output buffers are empty when using HAL buffer manager
-            if (!mUseHalBufManager) {
-                mBufferRecords.pushInflightBuffer(
-                        captureRequest->frameNumber, streamId, src->buffer);
-                inflightBuffers->push_back(std::make_pair(captureRequest->frameNumber, streamId));
-            }
-        }
-    }
-    return OK;
-}
-
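
wrapAsHidlRequest() above moves acquire fences across the HIDL boundary by wrapping each raw FD in a freshly created native_handle_t with one FD and zero ints; the wrappers are collected so they can be released after the transaction, and whether the wrapped FDs are also closed depends on who owns them afterwards. A minimal sketch of that pattern using the libcutils native_handle API (helper names are hypothetical):

#include <cutils/native_handle.h>
#include <vector>

// Wrap an acquire-fence FD (-1 means "no fence") in a one-FD handle. The handle
// borrows the FD; it does not dup it.
static native_handle_t* wrapFenceFd(int fenceFd, std::vector<native_handle_t*>* created) {
    if (fenceFd == -1) return nullptr;
    native_handle_t* handle = native_handle_create(/*numFds*/ 1, /*numInts*/ 0);
    if (handle == nullptr) return nullptr;
    handle->data[0] = fenceFd;
    created->push_back(handle);               // remember it for cleanup
    return handle;
}

// Release the wrappers. closeFd mirrors cleanupNativeHandles(): close the wrapped
// FDs only when they were handed off to a remote HAL process.
static void releaseFenceHandles(std::vector<native_handle_t*>* handles, bool closeFd) {
    for (native_handle_t* handle : *handles) {
        if (closeFd) native_handle_close(handle);
        native_handle_delete(handle);
    }
    handles->clear();
}
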
 void Camera3Device::HalInterface::cleanupNativeHandles(
         std::vector<native_handle_t*> *handles, bool closeFd) {
     if (handles == nullptr) {
@@ -3795,302 +2746,6 @@
     return;
 }
 
-status_t Camera3Device::HalInterface::processBatchCaptureRequests(
-        std::vector<camera_capture_request_t*>& requests,/*out*/uint32_t* numRequestProcessed) {
-    ATRACE_NAME("CameraHal::processBatchCaptureRequests");
-    if (!valid()) return INVALID_OPERATION;
-
-    sp<device::V3_4::ICameraDeviceSession> hidlSession_3_4;
-    sp<device::V3_7::ICameraDeviceSession> hidlSession_3_7;
-    auto castResult_3_7 = device::V3_7::ICameraDeviceSession::castFrom(mHidlSession);
-    if (castResult_3_7.isOk()) {
-        hidlSession_3_7 = castResult_3_7;
-    }
-    auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
-    if (castResult_3_4.isOk()) {
-        hidlSession_3_4 = castResult_3_4;
-    }
-
-    hardware::hidl_vec<device::V3_2::CaptureRequest> captureRequests;
-    hardware::hidl_vec<device::V3_4::CaptureRequest> captureRequests_3_4;
-    hardware::hidl_vec<device::V3_7::CaptureRequest> captureRequests_3_7;
-    size_t batchSize = requests.size();
-    if (hidlSession_3_7 != nullptr) {
-        captureRequests_3_7.resize(batchSize);
-    } else if (hidlSession_3_4 != nullptr) {
-        captureRequests_3_4.resize(batchSize);
-    } else {
-        captureRequests.resize(batchSize);
-    }
-    std::vector<native_handle_t*> handlesCreated;
-    std::vector<std::pair<int32_t, int32_t>> inflightBuffers;
-
-    status_t res = OK;
-    for (size_t i = 0; i < batchSize; i++) {
-        if (hidlSession_3_7 != nullptr) {
-            res = wrapAsHidlRequest(requests[i], /*out*/&captureRequests_3_7[i].v3_4.v3_2,
-                    /*out*/&handlesCreated, /*out*/&inflightBuffers);
-        } else if (hidlSession_3_4 != nullptr) {
-            res = wrapAsHidlRequest(requests[i], /*out*/&captureRequests_3_4[i].v3_2,
-                    /*out*/&handlesCreated, /*out*/&inflightBuffers);
-        } else {
-            res = wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i],
-                    /*out*/&handlesCreated, /*out*/&inflightBuffers);
-        }
-        if (res != OK) {
-            mBufferRecords.popInflightBuffers(inflightBuffers);
-            cleanupNativeHandles(&handlesCreated);
-            return res;
-        }
-    }
-
-    std::vector<device::V3_2::BufferCache> cachesToRemove;
-    {
-        std::lock_guard<std::mutex> lock(mFreedBuffersLock);
-        for (auto& pair : mFreedBuffers) {
-            // The stream might have been removed since onBufferFreed
-            if (mBufferRecords.isStreamCached(pair.first)) {
-                cachesToRemove.push_back({pair.first, pair.second});
-            }
-        }
-        mFreedBuffers.clear();
-    }
-
-    common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
-    *numRequestProcessed = 0;
-
-    // Write metadata to FMQ.
-    for (size_t i = 0; i < batchSize; i++) {
-        camera_capture_request_t* request = requests[i];
-        device::V3_2::CaptureRequest* captureRequest;
-        if (hidlSession_3_7 != nullptr) {
-            captureRequest = &captureRequests_3_7[i].v3_4.v3_2;
-        } else if (hidlSession_3_4 != nullptr) {
-            captureRequest = &captureRequests_3_4[i].v3_2;
-        } else {
-            captureRequest = &captureRequests[i];
-        }
-
-        if (request->settings != nullptr) {
-            size_t settingsSize = get_camera_metadata_size(request->settings);
-            if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
-                    reinterpret_cast<const uint8_t*>(request->settings), settingsSize)) {
-                captureRequest->settings.resize(0);
-                captureRequest->fmqSettingsSize = settingsSize;
-            } else {
-                if (mRequestMetadataQueue != nullptr) {
-                    ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
-                }
-                captureRequest->settings.setToExternal(
-                        reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(request->settings)),
-                        get_camera_metadata_size(request->settings));
-                captureRequest->fmqSettingsSize = 0u;
-            }
-        } else {
-            // A null request settings maps to a size-0 CameraMetadata
-            captureRequest->settings.resize(0);
-            captureRequest->fmqSettingsSize = 0u;
-        }
-
-        // hidl session 3.7 specific handling.
-        if (hidlSession_3_7 != nullptr) {
-            captureRequests_3_7[i].inputWidth = request->input_width;
-            captureRequests_3_7[i].inputHeight = request->input_height;
-        }
-
-        // hidl session 3.7 and 3.4 specific handling.
-        if (hidlSession_3_7 != nullptr || hidlSession_3_4 != nullptr) {
-            hardware::hidl_vec<device::V3_4::PhysicalCameraSetting>& physicalCameraSettings =
-                    (hidlSession_3_7 != nullptr) ?
-                    captureRequests_3_7[i].v3_4.physicalCameraSettings :
-                    captureRequests_3_4[i].physicalCameraSettings;
-            physicalCameraSettings.resize(request->num_physcam_settings);
-            for (size_t j = 0; j < request->num_physcam_settings; j++) {
-                if (request->physcam_settings != nullptr) {
-                    size_t settingsSize = get_camera_metadata_size(request->physcam_settings[j]);
-                    if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
-                                reinterpret_cast<const uint8_t*>(request->physcam_settings[j]),
-                                settingsSize)) {
-                        physicalCameraSettings[j].settings.resize(0);
-                        physicalCameraSettings[j].fmqSettingsSize = settingsSize;
-                    } else {
-                        if (mRequestMetadataQueue != nullptr) {
-                            ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
-                        }
-                        physicalCameraSettings[j].settings.setToExternal(
-                                reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(
-                                        request->physcam_settings[j])),
-                                get_camera_metadata_size(request->physcam_settings[j]));
-                        physicalCameraSettings[j].fmqSettingsSize = 0u;
-                    }
-                } else {
-                    physicalCameraSettings[j].fmqSettingsSize = 0u;
-                    physicalCameraSettings[j].settings.resize(0);
-                }
-                physicalCameraSettings[j].physicalCameraId = request->physcam_id[j];
-            }
-        }
-    }
-
-    hardware::details::return_status err;
-    auto resultCallback =
-        [&status, &numRequestProcessed] (auto s, uint32_t n) {
-                status = s;
-                *numRequestProcessed = n;
-        };
-    if (hidlSession_3_7 != nullptr) {
-        err = hidlSession_3_7->processCaptureRequest_3_7(captureRequests_3_7, cachesToRemove,
-                                                         resultCallback);
-    } else if (hidlSession_3_4 != nullptr) {
-        err = hidlSession_3_4->processCaptureRequest_3_4(captureRequests_3_4, cachesToRemove,
-                                                         resultCallback);
-    } else {
-        err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
-                                                  resultCallback);
-    }
-    if (!err.isOk()) {
-        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-        status = common::V1_0::Status::CAMERA_DISCONNECTED;
-    }
-
-    if (status == common::V1_0::Status::OK && *numRequestProcessed != batchSize) {
-        ALOGE("%s: processCaptureRequest returns OK but processed %d/%zu requests",
-                __FUNCTION__, *numRequestProcessed, batchSize);
-        status = common::V1_0::Status::INTERNAL_ERROR;
-    }
-
-    res = CameraProviderManager::mapToStatusT(status);
-    if (res == OK) {
-        if (mHidlSession->isRemote()) {
-            // Only close acquire fence FDs when the HIDL transaction succeeds (so the FDs have been
-            // sent to camera HAL processes)
-            cleanupNativeHandles(&handlesCreated, /*closeFd*/true);
-        } else {
-            // In passthrough mode the FDs are now owned by HAL
-            cleanupNativeHandles(&handlesCreated);
-        }
-    } else {
-        mBufferRecords.popInflightBuffers(inflightBuffers);
-        cleanupNativeHandles(&handlesCreated);
-    }
-    return res;
-}
-
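
For each request, the removed processBatchCaptureRequests() above prefers the request metadata queue (FMQ) and only falls back to sending the settings in-band over hwbinder when the FMQ write fails; a null settings pointer maps to size-0 metadata. A generic sketch of that decision, written against a hypothetical queue type rather than the real MessageQueue class:

#include <cstddef>
#include <cstdint>
#include <vector>

struct WireSettings {
    std::vector<uint8_t> inBandSettings;   // stands in for the HIDL CameraMetadata vec
    uint64_t fmqSettingsSize = 0;          // non-zero tells the HAL to read the FMQ
};

// Queue only needs: bool write(const uint8_t* data, size_t count)
template <typename Queue>
void packSettings(Queue* fmq, const uint8_t* settings, size_t size, WireSettings* out) {
    if (settings == nullptr || size == 0) {
        out->inBandSettings.clear();       // null settings -> size-0 metadata
        out->fmqSettingsSize = 0;
    } else if (fmq != nullptr && fmq->write(settings, size)) {
        out->inBandSettings.clear();       // payload travels through the FMQ
        out->fmqSettingsSize = size;
    } else {
        out->inBandSettings.assign(settings, settings + size);  // hwbinder fallback
        out->fmqSettingsSize = 0;
    }
}
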
-status_t Camera3Device::HalInterface::flush() {
-    ATRACE_NAME("CameraHal::flush");
-    if (!valid()) return INVALID_OPERATION;
-    status_t res = OK;
-
-    auto err = mHidlSession->flush();
-    if (!err.isOk()) {
-        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-        res = DEAD_OBJECT;
-    } else {
-        res = CameraProviderManager::mapToStatusT(err);
-    }
-
-    return res;
-}
-
-status_t Camera3Device::HalInterface::dump(int /*fd*/) {
-    ATRACE_NAME("CameraHal::dump");
-    if (!valid()) return INVALID_OPERATION;
-
-    // Handled by CameraProviderManager::dump
-
-    return OK;
-}
-
-status_t Camera3Device::HalInterface::close() {
-    ATRACE_NAME("CameraHal::close()");
-    if (!valid()) return INVALID_OPERATION;
-    status_t res = OK;
-
-    auto err = mHidlSession->close();
-    // Interface will be dead shortly anyway, so don't log errors
-    if (!err.isOk()) {
-        res = DEAD_OBJECT;
-    }
-
-    return res;
-}
-
-void Camera3Device::HalInterface::signalPipelineDrain(const std::vector<int>& streamIds) {
-    ATRACE_NAME("CameraHal::signalPipelineDrain");
-    if (!valid() || mHidlSession_3_5 == nullptr) {
-        ALOGE("%s called on invalid camera!", __FUNCTION__);
-        return;
-    }
-
-    auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter - 1);
-    if (!err.isOk()) {
-        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-        return;
-    }
-}
-
-status_t Camera3Device::HalInterface::switchToOffline(
-        const std::vector<int32_t>& streamsToKeep,
-        /*out*/hardware::camera::device::V3_6::CameraOfflineSessionInfo* offlineSessionInfo,
-        /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
-        /*out*/camera3::BufferRecords* bufferRecords) {
-    ATRACE_NAME("CameraHal::switchToOffline");
-    if (!valid() || mHidlSession_3_6 == nullptr) {
-        ALOGE("%s called on invalid camera!", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-
-    if (offlineSessionInfo == nullptr || offlineSession == nullptr || bufferRecords == nullptr) {
-        ALOGE("%s: output arguments must not be null!", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-
-    common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
-    auto resultCallback =
-        [&status, &offlineSessionInfo, &offlineSession] (auto s, auto info, auto session) {
-                status = s;
-                *offlineSessionInfo = info;
-                *offlineSession = session;
-        };
-    auto err = mHidlSession_3_6->switchToOffline(streamsToKeep, resultCallback);
-
-    if (!err.isOk()) {
-        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
-        return DEAD_OBJECT;
-    }
-
-    status_t ret = CameraProviderManager::mapToStatusT(status);
-    if (ret != OK) {
-        return ret;
-    }
-
-    // TODO: assert no ongoing requestBuffer/returnBuffer call here
-    // TODO: update RequestBufferStateMachine to block requestBuffer/returnBuffer once HAL
-    //       returns from switchToOffline.
-
-
-    // Validate buffer caches
-    std::vector<int32_t> streams;
-    streams.reserve(offlineSessionInfo->offlineStreams.size());
-    for (auto offlineStream : offlineSessionInfo->offlineStreams) {
-        int32_t id = offlineStream.id;
-        streams.push_back(id);
-        // Verify buffer caches
-        std::vector<uint64_t> bufIds(offlineStream.circulatingBufferIds.begin(),
-                offlineStream.circulatingBufferIds.end());
-        if (!verifyBufferIds(id, bufIds)) {
-            ALOGE("%s: stream ID %d buffer cache records mismatch!", __FUNCTION__, id);
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    // Move buffer records
-    bufferRecords->takeBufferCaches(mBufferRecords, streams);
-    bufferRecords->takeInflightBufferMap(mBufferRecords);
-    bufferRecords->takeRequestedBufferMap(mBufferRecords);
-    return ret;
-}
-
 void Camera3Device::HalInterface::getInflightBufferKeys(
         std::vector<std::pair<int32_t, int32_t>>* out) {
     mBufferRecords.getInflightBufferKeys(out);
@@ -4132,6 +2787,11 @@
     return mBufferRecords.getBufferId(buf, streamId);
 }
 
+uint64_t Camera3Device::HalInterface::removeOneBufferCache(int streamId,
+        const native_handle_t* handle) {
+    return mBufferRecords.removeOneBufferCache(streamId, handle);
+}
+
 void Camera3Device::HalInterface::onBufferFreed(
         int streamId, const native_handle_t* handle) {
     uint32_t bufferId = mBufferRecords.removeOneBufferCache(streamId, handle);
@@ -4174,6 +2834,7 @@
         mCurrentAfTriggerId(0),
         mCurrentPreCaptureTriggerId(0),
         mRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE),
+        mComposerOutput(false),
         mCameraMute(ANDROID_SENSOR_TEST_PATTERN_MODE_OFF),
         mCameraMuteChanged(false),
         mRepeatingLastFrameNumber(
@@ -4329,10 +2990,20 @@
 }
 
 status_t Camera3Device::RequestThread::clearRepeatingRequestsLocked(/*out*/int64_t *lastFrameNumber) {
+    std::vector<int32_t> streamIds;
+    for (const auto& request : mRepeatingRequests) {
+        for (const auto& stream : request->mOutputStreams) {
+            streamIds.push_back(stream->getId());
+        }
+    }
+
     mRepeatingRequests.clear();
     if (lastFrameNumber != NULL) {
         *lastFrameNumber = mRepeatingLastFrameNumber;
     }
+
+    mInterface->repeatingRequestEnd(mRepeatingLastFrameNumber, streamIds);
+
     mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
     return OK;
 }
@@ -4578,9 +3249,15 @@
 
         sp<Camera3Device> parent = mParent.promote();
         if (parent != NULL) {
+            int32_t inputStreamId = -1;
+            if (halRequest.input_buffer != nullptr) {
+              inputStreamId = Camera3Stream::cast(halRequest.input_buffer->stream)->getId();
+            }
+
             parent->monitorMetadata(TagMonitor::REQUEST,
                     halRequest.frame_number,
-                    0, mLatestRequest, mLatestPhysicalRequest);
+                    0, mLatestRequest, mLatestPhysicalRequest, halRequest.output_buffers,
+                    halRequest.num_output_buffers, inputStreamId);
         }
     }
 
@@ -4778,6 +3455,26 @@
     return submitRequestSuccess;
 }
 
+status_t Camera3Device::removeFwkOnlyRegionKeys(CameraMetadata *request) {
+    static const std::array<uint32_t, 4> kFwkOnlyRegionKeys = {ANDROID_CONTROL_AF_REGIONS_SET,
+        ANDROID_CONTROL_AE_REGIONS_SET, ANDROID_CONTROL_AWB_REGIONS_SET,
+        ANDROID_SCALER_CROP_REGION_SET};
+    if (request == nullptr) {
+        ALOGE("%s request metadata nullptr", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    status_t res = OK;
+    for (const auto &key : kFwkOnlyRegionKeys) {
+        if (request->exists(key)) {
+            res = request->erase(key);
+            if (res != OK) {
+                return res;
+            }
+        }
+    }
+    return OK;
+}
+
 status_t Camera3Device::RequestThread::prepareHalRequests() {
     ATRACE_CALL();
 
@@ -4804,7 +3501,11 @@
         bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
         mPrevTriggers = triggerCount;
 
-        bool rotateAndCropChanged = overrideAutoRotateAndCrop(captureRequest);
+        // Do not override rotate&crop for stream configurations that include
+        // SurfaceView (HW_COMPOSER) outputs. The display rotation there will be
+        // compensated by NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY.
+        bool rotateAndCropChanged = mComposerOutput ? false :
+            overrideAutoRotateAndCrop(captureRequest);
         bool testPatternChanged = overrideTestPattern(captureRequest);
 
         // If the request is the same as last, or we had triggers now or last time or
@@ -4837,6 +3538,12 @@
                             it != captureRequest->mSettingsList.end(); it++) {
                         if (parent->mUHRCropAndMeteringRegionMappers.find(it->cameraId) ==
                                 parent->mUHRCropAndMeteringRegionMappers.end()) {
+                            if (removeFwkOnlyRegionKeys(&(it->metadata)) != OK) {
+                                SET_ERR("RequestThread: Unable to remove fwk-only region keys "
+                                        "from request %d",
+                                        halRequest->frame_number);
+                                return INVALID_OPERATION;
+                            }
                             continue;
                         }
 
@@ -4851,6 +3558,12 @@
                                 return INVALID_OPERATION;
                             }
                             captureRequest->mUHRCropAndMeteringRegionsUpdated = true;
+                            if (removeFwkOnlyRegionKeys(&(it->metadata)) != OK) {
+                                SET_ERR("RequestThread: Unable to remove fwk-only region keys "
+                                        "from request %d",
+                                        halRequest->frame_number);
+                                return INVALID_OPERATION;
+                            }
                         }
                     }
 
@@ -5255,38 +3968,6 @@
     mPrevRequest.clear();
 }
 
-status_t Camera3Device::RequestThread::switchToOffline(
-        const std::vector<int32_t>& streamsToKeep,
-        /*out*/hardware::camera::device::V3_6::CameraOfflineSessionInfo* offlineSessionInfo,
-        /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
-        /*out*/camera3::BufferRecords* bufferRecords) {
-    Mutex::Autolock l(mRequestLock);
-    clearRepeatingRequestsLocked(/*lastFrameNumber*/nullptr);
-
-    // Wait until request thread is fully stopped
-    // TBD: check if request thread is being paused by other APIs (shouldn't be)
-
-    // We could also check for mRepeatingRequests.empty(), but the API interface
-    // is serialized by Camera3Device::mInterfaceLock so no one should be able to submit any
-    // new requests during the call; hence skip that check.
-    bool queueEmpty = mNextRequests.empty() && mRequestQueue.empty();
-    while (!queueEmpty) {
-        status_t res = mRequestSubmittedSignal.waitRelative(mRequestLock, kRequestSubmitTimeout);
-        if (res == TIMED_OUT) {
-            ALOGE("%s: request thread failed to submit one request within timeout!", __FUNCTION__);
-            return res;
-        } else if (res != OK) {
-            ALOGE("%s: request thread failed to submit a request: %s (%d)!",
-                    __FUNCTION__, strerror(-res), res);
-            return res;
-        }
-        queueEmpty = mNextRequests.empty() && mRequestQueue.empty();
-    }
-
-    return mInterface->switchToOffline(
-            streamsToKeep, offlineSessionInfo, offlineSession, bufferRecords);
-}
-
 status_t Camera3Device::RequestThread::setRotateAndCropAutoBehavior(
         camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue) {
     ATRACE_CALL();
@@ -5298,6 +3979,13 @@
     return OK;
 }
 
+status_t Camera3Device::RequestThread::setComposerSurface(bool composerSurfacePresent) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mTriggerMutex);
+    mComposerOutput = composerSurfacePresent;
+    return OK;
+}
+
 status_t Camera3Device::RequestThread::setCameraMute(int32_t muteMode) {
     ATRACE_CALL();
     Mutex::Autolock l(mTriggerMutex);
@@ -5376,7 +4064,8 @@
                     outputBuffers->editItemAt(i).acquire_fence = -1;
                 }
                 outputBuffers->editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
-                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0,
+                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i],
+                        /*timestamp*/0, /*readoutTimestamp*/0,
                         /*timestampIncreasing*/true, std::vector<size_t> (),
                         captureRequest->mResultExtras.frameNumber);
             }
@@ -5871,48 +4560,53 @@
 
     bool changed = false;
 
-    int32_t testPatternMode = request->mOriginalTestPatternMode;
-    int32_t testPatternData[4] = {
-        request->mOriginalTestPatternData[0],
-        request->mOriginalTestPatternData[1],
-        request->mOriginalTestPatternData[2],
-        request->mOriginalTestPatternData[3]
-    };
+    // For a multi-camera, the physical cameras support the same set of
+    // test pattern modes as the logical camera.
+    for (auto& settings : request->mSettingsList) {
+        CameraMetadata &metadata = settings.metadata;
 
-    if (mCameraMute != ANDROID_SENSOR_TEST_PATTERN_MODE_OFF) {
-        testPatternMode = mCameraMute;
-        testPatternData[0] = 0;
-        testPatternData[1] = 0;
-        testPatternData[2] = 0;
-        testPatternData[3] = 0;
-    }
-
-    CameraMetadata &metadata = request->mSettingsList.begin()->metadata;
-
-    auto testPatternEntry = metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
-    if (testPatternEntry.count > 0) {
-        if (testPatternEntry.data.i32[0] != testPatternMode) {
-            testPatternEntry.data.i32[0] = testPatternMode;
-            changed = true;
+        int32_t testPatternMode = settings.mOriginalTestPatternMode;
+        int32_t testPatternData[4] = {
+            settings.mOriginalTestPatternData[0],
+            settings.mOriginalTestPatternData[1],
+            settings.mOriginalTestPatternData[2],
+            settings.mOriginalTestPatternData[3]
+        };
+        if (mCameraMute != ANDROID_SENSOR_TEST_PATTERN_MODE_OFF) {
+            testPatternMode = mCameraMute;
+            testPatternData[0] = 0;
+            testPatternData[1] = 0;
+            testPatternData[2] = 0;
+            testPatternData[3] = 0;
         }
-    } else {
-        metadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE,
-                &testPatternMode, 1);
-        changed = true;
-    }
 
-    auto testPatternColor = metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
-    if (testPatternColor.count >= 4) {
-        for (size_t i = 0; i < 4; i++) {
-            if (testPatternColor.data.i32[i] != testPatternData[i]) {
-                testPatternColor.data.i32[i] = testPatternData[i];
+        auto testPatternEntry = metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
+        bool supportTestPatternModeKey = settings.mHasTestPatternModeTag;
+        if (testPatternEntry.count > 0) {
+            if (testPatternEntry.data.i32[0] != testPatternMode) {
+                testPatternEntry.data.i32[0] = testPatternMode;
                 changed = true;
             }
+        } else if (supportTestPatternModeKey) {
+            metadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE,
+                    &testPatternMode, 1);
+            changed = true;
         }
-    } else {
-        metadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA,
-                testPatternData, 4);
-        changed = true;
+
+        auto testPatternColor = metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
+        bool supportTestPatternDataKey = settings.mHasTestPatternDataTag;
+        if (testPatternColor.count >= 4) {
+            for (size_t i = 0; i < 4; i++) {
+                if (testPatternColor.data.i32[i] != testPatternData[i]) {
+                    testPatternColor.data.i32[i] = testPatternData[i];
+                    changed = true;
+                }
+            }
+        } else if (supportTestPatternDataKey) {
+            metadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA,
+                    testPatternData, 4);
+            changed = true;
+        }
     }
 
     return changed;
@@ -6304,220 +4998,6 @@
     return ret;
 }
 
-status_t Camera3Device::switchToOffline(
-        const std::vector<int32_t>& streamsToKeep,
-        /*out*/ sp<CameraOfflineSessionBase>* session) {
-    ATRACE_CALL();
-    if (session == nullptr) {
-        ALOGE("%s: session must not be null", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    Mutex::Autolock il(mInterfaceLock);
-
-    bool hasInputStream = mInputStream != nullptr;
-    int32_t inputStreamId = hasInputStream ? mInputStream->getId() : -1;
-    bool inputStreamSupportsOffline = hasInputStream ?
-            mInputStream->getOfflineProcessingSupport() : false;
-    auto outputStreamIds = mOutputStreams.getStreamIds();
-    auto streamIds = outputStreamIds;
-    if (hasInputStream) {
-        streamIds.push_back(mInputStream->getId());
-    }
-
-    // Check all streams in streamsToKeep supports offline mode
-    for (auto id : streamsToKeep) {
-        if (std::find(streamIds.begin(), streamIds.end(), id) == streamIds.end()) {
-            ALOGE("%s: Unknown stream ID %d", __FUNCTION__, id);
-            return BAD_VALUE;
-        } else if (id == inputStreamId) {
-            if (!inputStreamSupportsOffline) {
-                ALOGE("%s: input stream %d cannot be switched to offline",
-                        __FUNCTION__, id);
-                return BAD_VALUE;
-            }
-        } else {
-            sp<camera3::Camera3OutputStreamInterface> stream = mOutputStreams.get(id);
-            if (!stream->getOfflineProcessingSupport()) {
-                ALOGE("%s: output stream %d cannot be switched to offline",
-                        __FUNCTION__, id);
-                return BAD_VALUE;
-            }
-        }
-    }
-
-    // TODO: block surface sharing and surface group streams until we can support them
-
-    // Stop repeating request, wait until all remaining requests are submitted, then call into
-    // HAL switchToOffline
-    hardware::camera::device::V3_6::CameraOfflineSessionInfo offlineSessionInfo;
-    sp<hardware::camera::device::V3_6::ICameraOfflineSession> offlineSession;
-    camera3::BufferRecords bufferRecords;
-    status_t ret = mRequestThread->switchToOffline(
-            streamsToKeep, &offlineSessionInfo, &offlineSession, &bufferRecords);
-
-    if (ret != OK) {
-        SET_ERR("Switch to offline failed: %s (%d)", strerror(-ret), ret);
-        return ret;
-    }
-
-    bool succ = mRequestBufferSM.onSwitchToOfflineSuccess();
-    if (!succ) {
-        SET_ERR("HAL must not be calling requestStreamBuffers call");
-        // TODO: block ALL callbacks from HAL till app configured new streams?
-        return UNKNOWN_ERROR;
-    }
-
-    // Verify offlineSessionInfo
-    std::vector<int32_t> offlineStreamIds;
-    offlineStreamIds.reserve(offlineSessionInfo.offlineStreams.size());
-    for (auto offlineStream : offlineSessionInfo.offlineStreams) {
-        // verify stream IDs
-        int32_t id = offlineStream.id;
-        if (std::find(streamIds.begin(), streamIds.end(), id) == streamIds.end()) {
-            SET_ERR("stream ID %d not found!", id);
-            return UNKNOWN_ERROR;
-        }
-
-        // When not using HAL buf manager, only allow streams requested by app to be preserved
-        if (!mUseHalBufManager) {
-            if (std::find(streamsToKeep.begin(), streamsToKeep.end(), id) == streamsToKeep.end()) {
-                SET_ERR("stream ID %d must not be switched to offline!", id);
-                return UNKNOWN_ERROR;
-            }
-        }
-
-        offlineStreamIds.push_back(id);
-        sp<Camera3StreamInterface> stream = (id == inputStreamId) ?
-                static_cast<sp<Camera3StreamInterface>>(mInputStream) :
-                static_cast<sp<Camera3StreamInterface>>(mOutputStreams.get(id));
-        // Verify number of outstanding buffers
-        if (stream->getOutstandingBuffersCount() != offlineStream.numOutstandingBuffers) {
-            SET_ERR("Offline stream %d # of remaining buffer mismatch: (%zu,%d) (service/HAL)",
-                    id, stream->getOutstandingBuffersCount(), offlineStream.numOutstandingBuffers);
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    // Verify all streams to be deleted don't have any outstanding buffers
-    if (hasInputStream && std::find(offlineStreamIds.begin(), offlineStreamIds.end(),
-                inputStreamId) == offlineStreamIds.end()) {
-        if (mInputStream->hasOutstandingBuffers()) {
-            SET_ERR("Input stream %d still has %zu outstanding buffer!",
-                    inputStreamId, mInputStream->getOutstandingBuffersCount());
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    for (const auto& outStreamId : outputStreamIds) {
-        if (std::find(offlineStreamIds.begin(), offlineStreamIds.end(),
-                outStreamId) == offlineStreamIds.end()) {
-            auto outStream = mOutputStreams.get(outStreamId);
-            if (outStream->hasOutstandingBuffers()) {
-                SET_ERR("Output stream %d still has %zu outstanding buffer!",
-                        outStreamId, outStream->getOutstandingBuffersCount());
-                return UNKNOWN_ERROR;
-            }
-        }
-    }
-
-    InFlightRequestMap offlineReqs;
-    // Verify inflight requests and their pending buffers
-    {
-        std::lock_guard<std::mutex> l(mInFlightLock);
-        for (auto offlineReq : offlineSessionInfo.offlineRequests) {
-            int idx = mInFlightMap.indexOfKey(offlineReq.frameNumber);
-            if (idx == NAME_NOT_FOUND) {
-                SET_ERR("Offline request frame number %d not found!", offlineReq.frameNumber);
-                return UNKNOWN_ERROR;
-            }
-
-            const auto& inflightReq = mInFlightMap.valueAt(idx);
-            // TODO: check specific stream IDs
-            size_t numBuffersLeft = static_cast<size_t>(inflightReq.numBuffersLeft);
-            if (numBuffersLeft != offlineReq.pendingStreams.size()) {
-                SET_ERR("Offline request # of remaining buffer mismatch: (%d,%d) (service/HAL)",
-                        inflightReq.numBuffersLeft, offlineReq.pendingStreams.size());
-                return UNKNOWN_ERROR;
-            }
-            offlineReqs.add(offlineReq.frameNumber, inflightReq);
-        }
-    }
-
-    // Create Camera3OfflineSession and transfer object ownership
-    //   (streams, inflight requests, buffer caches)
-    camera3::StreamSet offlineStreamSet;
-    sp<camera3::Camera3Stream> inputStream;
-    for (auto offlineStream : offlineSessionInfo.offlineStreams) {
-        int32_t id = offlineStream.id;
-        if (mInputStream != nullptr && id == mInputStream->getId()) {
-            inputStream = mInputStream;
-        } else {
-            offlineStreamSet.add(id, mOutputStreams.get(id));
-        }
-    }
-
-    // TODO: check if we need to lock before copying states
-    //       though technically no other thread should be talking to Camera3Device at this point
-    Camera3OfflineStates offlineStates(
-            mTagMonitor, mVendorTagId, mUseHalBufManager, mNeedFixupMonochromeTags,
-            mUsePartialResult, mNumPartialResults, mLastCompletedRegularFrameNumber,
-            mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-            mNextResultFrameNumber, mNextReprocessResultFrameNumber,
-            mNextZslStillResultFrameNumber, mNextShutterFrameNumber,
-            mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-            mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers,
-            mZoomRatioMappers, mRotateAndCropMappers);
-
-    *session = new Camera3OfflineSession(mId, inputStream, offlineStreamSet,
-            std::move(bufferRecords), offlineReqs, offlineStates, offlineSession);
-
-    // Delete all streams that has been transferred to offline session
-    Mutex::Autolock l(mLock);
-    for (auto offlineStream : offlineSessionInfo.offlineStreams) {
-        int32_t id = offlineStream.id;
-        if (mInputStream != nullptr && id == mInputStream->getId()) {
-            mInputStream.clear();
-        } else {
-            mOutputStreams.remove(id);
-        }
-    }
-
-    // disconnect all other streams and switch to UNCONFIGURED state
-    if (mInputStream != nullptr) {
-        ret = mInputStream->disconnect();
-        if (ret != OK) {
-            SET_ERR_L("disconnect input stream failed!");
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    for (auto streamId : mOutputStreams.getStreamIds()) {
-        sp<Camera3StreamInterface> stream = mOutputStreams.get(streamId);
-        ret = stream->disconnect();
-        if (ret != OK) {
-            SET_ERR_L("disconnect output stream %d failed!", streamId);
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    mInputStream.clear();
-    mOutputStreams.clear();
-    mNeedConfig = true;
-    internalUpdateStatusLocked(STATUS_UNCONFIGURED);
-    mOperatingMode = NO_MODE;
-    mIsConstrainedHighSpeedConfiguration = false;
-    mRequestThread->clearPreviousRequest();
-
-    return OK;
-    // TO be done by CameraDeviceClient/Camera3OfflineSession
-    // register the offline client to camera service
-    // Setup result passthing threads etc
-    // Initialize offline session so HAL can start sending callback to it (result Fmq)
-    // TODO: check how many onIdle callback will be sent
-    // Java side to make sure the CameraCaptureSession is properly closed
-}
-
 void Camera3Device::getOfflineStreamIds(std::vector<int> *offlineStreamIds) {
     ATRACE_CALL();
 
@@ -6582,6 +5062,13 @@
     ALOGI("%s Injection camera: injectedCamId = %s", __FUNCTION__, injectedCamId.string());
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
+    // When the camera device is active, injectCamera() and stopInjection() call
+    // internalPauseAndWaitLocked() and internalResumeLocked(), which in turn call
+    // mStatusChanged.waitRelative(mLock, timeout) inside waitUntilStateThenRelock().
+    // waitRelative() requires that its mutex argument, mLock, already be held by the
+    // caller, so acquire mLock here before any call that can reach
+    // waitUntilStateThenRelock().
+    Mutex::Autolock l(mLock);
 
     status_t res = NO_ERROR;
     if (mInjectionMethods->isInjecting()) {
@@ -6597,23 +5084,32 @@
         }
     }
 
-    res = mInjectionMethods->injectionInitialize(injectedCamId, manager, this);
+    res = injectionCameraInitialize(injectedCamId, manager);
     if (res != OK) {
         ALOGE("%s: Failed to initialize the injection camera! ret != NO_ERROR: %d",
                 __FUNCTION__, res);
         return res;
     }
 
-    camera3::camera_stream_configuration injectionConfig;
-    std::vector<uint32_t> injectionBufferSizes;
-    mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
     // When the second display of Android is cast to a remote device and the opened camera is
     // also cast to that display, the camera has already configured its streams at this point,
     // so injectCamera() can be called directly to replace the internal camera with the
     // injection camera.
-    if (mOperatingMode >= 0 && injectionConfig.num_streams > 0
-                && injectionBufferSizes.size() > 0) {
-        ALOGV("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+    if (mInjectionMethods->isStreamConfigCompleteButNotInjected()) {
+        ALOGD("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+
+        camera3::camera_stream_configuration injectionConfig;
+        std::vector<uint32_t> injectionBufferSizes;
+        mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
+        if (mOperatingMode < 0 || injectionConfig.num_streams <= 0
+                    || injectionBufferSizes.size() <= 0) {
+            ALOGE("Failed to inject camera due to abandoned configuration! "
+                    "mOperatingMode: %d injectionConfig.num_streams: %d "
+                    "injectionBufferSizes.size(): %zu", mOperatingMode,
+                    injectionConfig.num_streams, injectionBufferSizes.size());
+            return DEAD_OBJECT;
+        }
+
         res = mInjectionMethods->injectCamera(
                 injectionConfig, injectionBufferSizes);
         if (res != OK) {
@@ -6628,6 +5124,7 @@
 status_t Camera3Device::stopInjection() {
     ALOGI("%s: Injection camera: stopInjection", __FUNCTION__);
     Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
     return mInjectionMethods->stopInjection();
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 39714f0..6c4ba49 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -29,20 +29,9 @@
 #include <utils/KeyedVector.h>
 #include <utils/Timers.h>
 
-#include <android/hardware/camera/device/3.2/ICameraDevice.h>
-#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
-#include <android/hardware/camera/device/3.3/ICameraDeviceSession.h>
-#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
-#include <android/hardware/camera/device/3.5/ICameraDeviceSession.h>
-#include <android/hardware/camera/device/3.6/ICameraDeviceSession.h>
-#include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
-#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
-#include <android/hardware/camera/device/3.4/ICameraDeviceCallback.h>
-#include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
-#include <fmq/MessageQueue.h>
-
 #include <camera/CaptureResult.h>
 
+#include "android/hardware/camera/metadata/3.8/types.h"
 #include "common/CameraDeviceBase.h"
 #include "device3/BufferUtils.h"
 #include "device3/StatusTracker.h"
@@ -56,6 +45,7 @@
 #include "device3/Camera3OfflineSession.h"
 #include "device3/Camera3StreamInterface.h"
 #include "utils/TagMonitor.h"
+#include "utils/IPCTransport.h"
 #include "utils/LatencyHistogram.h"
 #include <camera_metadata_hidden.h>
 
@@ -83,14 +73,14 @@
  */
 class Camera3Device :
             public CameraDeviceBase,
-            virtual public hardware::camera::device::V3_5::ICameraDeviceCallback,
             public camera3::SetErrorInterface,
             public camera3::InflightRequestUpdateInterface,
             public camera3::RequestBufferInterface,
             public camera3::FlushBufferInterface {
+  friend class HidlCamera3Device;
   public:
 
-    explicit Camera3Device(const String8& id, bool overrideForPerfClass);
+    explicit Camera3Device(const String8& id, bool overrideForPerfClass, bool legacyClient = false);
 
     virtual ~Camera3Device();
 
@@ -103,9 +93,14 @@
     metadata_vendor_id_t getVendorTagId() const override { return mVendorTagId; }
 
     // Transitions to idle state on success.
-    status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags) override;
+    virtual status_t initialize(sp<CameraProviderManager> /*manager*/,
+            const String8& /*monitorTags*/) = 0;
+
     status_t disconnect() override;
     status_t dump(int fd, const Vector<String16> &args) override;
+    status_t startWatchingTags(const String8 &tags) override;
+    status_t stopWatchingTags() override;
+    status_t dumpWatchedEventsToVector(std::vector<std::string> &out) override;
     const CameraMetadata& info() const override;
     const CameraMetadata& infoPhysical(const String8& physicalId) const override;
 
@@ -137,7 +132,9 @@
             std::vector<int> *surfaceIds = nullptr,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             bool isShared = false, bool isMultiResolution = false,
-            uint64_t consumerUsage = 0) override;
+            uint64_t consumerUsage = 0,
+            int dynamicRangeProfile =
+            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) override;
 
     status_t createStream(const std::vector<sp<Surface>>& consumers,
             bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
@@ -147,7 +144,9 @@
             std::vector<int> *surfaceIds = nullptr,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             bool isShared = false, bool isMultiResolution = false,
-            uint64_t consumerUsage = 0) override;
+            uint64_t consumerUsage = 0,
+            int dynamicRangeProfile =
+            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) override;
 
     status_t createInputStream(
             uint32_t width, uint32_t height, int format, bool isMultiResolution,
@@ -192,9 +191,11 @@
 
     status_t prepare(int maxCount, int streamId) override;
 
-    ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const override;
-    ssize_t getPointCloudBufferSize() const;
-    ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height, bool maxResolution) const;
+    ssize_t getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+            uint32_t height) const override;
+    ssize_t getPointCloudBufferSize(const CameraMetadata &info) const;
+    ssize_t getRawOpaqueBufferSize(const CameraMetadata &info, int32_t width, int32_t height,
+            bool maxResolution) const;
 
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
@@ -223,8 +224,10 @@
 
     nsecs_t getExpectedInFlightDuration() override;
 
-    status_t switchToOffline(const std::vector<int32_t>& streamsToKeep,
-            /*out*/ sp<CameraOfflineSessionBase>* session) override;
+    virtual status_t switchToOffline(const std::vector<int32_t>& /*streamsToKeep*/,
+            /*out*/ sp<CameraOfflineSessionBase>* /*session*/) override {
+        return INVALID_OPERATION;
+    }
 
     // RequestBufferInterface
     bool startRequestBuffer() override;
@@ -275,31 +278,9 @@
      */
     status_t stopInjection();
 
-    /**
-     * Helper functions to map between framework and HIDL values
-     */
-    static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
-    static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
-            android_dataspace dataSpace);
-    static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint64_t usage);
-    static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
-            camera_stream_rotation_t rotation);
-    // Returns a negative error code if the passed-in operation mode is not valid.
-    static status_t mapToStreamConfigurationMode(camera_stream_configuration_mode_t operationMode,
-            /*out*/ hardware::camera::device::V3_2::StreamConfigurationMode *mode);
-    static int mapToFrameworkFormat(hardware::graphics::common::V1_0::PixelFormat pixelFormat);
-    static android_dataspace mapToFrameworkDataspace(
-            hardware::camera::device::V3_2::DataspaceFlags);
-    static uint64_t mapConsumerToFrameworkUsage(
-            hardware::camera::device::V3_2::BufferUsageFlags usage);
-    static uint64_t mapProducerToFrameworkUsage(
-            hardware::camera::device::V3_2::BufferUsageFlags usage);
-
-  private:
+  protected:
     status_t disconnectImpl();
-
-    // internal typedefs
-    using RequestMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
+    static status_t removeFwkOnlyRegionKeys(CameraMetadata *request);
 
     static const size_t        kDumpLockAttempts  = 10;
     static const size_t        kDumpSleepDuration = 100000; // 0.10 sec
@@ -332,6 +313,9 @@
     // Camera device ID
     const String8              mId;
 
+    // Legacy camera client flag
+    bool                       mLegacyClient;
+
     // Current stream configuration mode;
     int                        mOperatingMode;
     // Current session wide parameters
@@ -343,68 +327,65 @@
     // Flag indicating is the current active stream configuration is constrained high speed.
     bool                       mIsConstrainedHighSpeedConfiguration;
 
-    // FMQ to write result on. Must be guarded by mProcessCaptureResultLock.
-    std::unique_ptr<ResultMetadataQueue> mResultMetadataQueue;
-
     /**** Scope for mLock ****/
 
-    /**
-     * Adapter for legacy HAL / HIDL HAL interface calls; calls either into legacy HALv3 or the
-     * HIDL HALv3 interfaces.
-     */
     class HalInterface : public camera3::Camera3StreamBufferFreedListener,
             public camera3::BufferRecordsInterface {
       public:
-        HalInterface(sp<hardware::camera::device::V3_2::ICameraDeviceSession> &session,
-                     std::shared_ptr<RequestMetadataQueue> queue,
-                     bool useHalBufManager, bool supportOfflineProcessing);
+        HalInterface(bool useHalBufManager, bool supportOfflineProcessing) :
+                mUseHalBufManager(useHalBufManager),
+                mIsReconfigurationQuerySupported(true),
+                mSupportOfflineProcessing(supportOfflineProcessing) {
+        }
         HalInterface(const HalInterface &other);
         HalInterface();
 
+        virtual IPCTransport getTransportType() = 0;
+
         // Returns true if constructed with a valid device or session, and not yet cleared
-        bool valid();
+        virtual bool valid() = 0;
 
         // Reset this HalInterface object (does not call close())
-        void clear();
+        virtual void clear() = 0;
 
         // Calls into the HAL interface
 
         // Caller takes ownership of requestTemplate
-        status_t constructDefaultRequestSettings(camera_request_template templateId,
-                /*out*/ camera_metadata_t **requestTemplate);
-        status_t configureStreams(const camera_metadata_t *sessionParams,
-                /*inout*/ camera_stream_configuration_t *config,
-                const std::vector<uint32_t>& bufferSizes);
+        virtual status_t constructDefaultRequestSettings(camera_request_template templateId,
+                /*out*/ camera_metadata_t **requestTemplate) = 0;
+
+        virtual status_t configureStreams(const camera_metadata_t * sessionParams,
+                /*inout*/ camera_stream_configuration_t * config,
+                const std::vector<uint32_t>& bufferSizes) = 0;
 
         // The injection camera configures the streams to hal.
-        status_t configureInjectedStreams(
+        virtual status_t configureInjectedStreams(
                 const camera_metadata_t* sessionParams,
                 /*inout*/ camera_stream_configuration_t* config,
                 const std::vector<uint32_t>& bufferSizes,
-                const CameraMetadata& cameraCharacteristics);
+                const CameraMetadata& cameraCharacteristics) = 0;
 
         // When the call succeeds, the ownership of acquire fences in requests is transferred to
         // HalInterface. More specifically, the current implementation will send the fence to
         // HAL process and close the FD in cameraserver process. When the call fails, the ownership
         // of the acquire fence still belongs to the caller.
-        status_t processBatchCaptureRequests(
+        virtual status_t processBatchCaptureRequests(
                 std::vector<camera_capture_request_t*>& requests,
-                /*out*/uint32_t* numRequestProcessed);
-        status_t flush();
-        status_t dump(int fd);
-        status_t close();
+                /*out*/uint32_t* numRequestProcessed) = 0;
 
-        void signalPipelineDrain(const std::vector<int>& streamIds);
-        bool isReconfigurationRequired(CameraMetadata& oldSessionParams,
-                CameraMetadata& newSessionParams);
+        virtual status_t flush() = 0;
 
-        // Upon successful return, HalInterface will return buffer maps needed for offline
-        // processing, and clear all its internal buffer maps.
-        status_t switchToOffline(
-                const std::vector<int32_t>& streamsToKeep,
-                /*out*/hardware::camera::device::V3_6::CameraOfflineSessionInfo* offlineSessionInfo,
-                /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
-                /*out*/camera3::BufferRecords* bufferRecords);
+        virtual status_t dump(int fd) = 0;
+
+        virtual status_t close() = 0;
+
+        virtual void signalPipelineDrain(const std::vector<int>& streamIds) = 0;
+
+        virtual bool isReconfigurationRequired(CameraMetadata& oldSessionParams,
+                CameraMetadata& newSessionParams) = 0;
+
+        virtual status_t repeatingRequestEnd(uint32_t frameNumber,
+                const std::vector<int32_t> &streamIds) = 0;
 
         /////////////////////////////////////////////////////////////////////
         // Implements BufferRecordsInterface
@@ -412,6 +393,8 @@
         std::pair<bool, uint64_t> getBufferId(
                 const buffer_handle_t& buf, int streamId) override;
 
+        uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) override;
+
         status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
                 /*out*/ buffer_handle_t **buffer) override;
 
@@ -433,38 +416,36 @@
 
         void onStreamReConfigured(int streamId);
 
-      private:
-        // Always valid
-        sp<hardware::camera::device::V3_2::ICameraDeviceSession> mHidlSession;
-        // Valid if ICameraDeviceSession is @3.3 or newer
-        sp<hardware::camera::device::V3_3::ICameraDeviceSession> mHidlSession_3_3;
-        // Valid if ICameraDeviceSession is @3.4 or newer
-        sp<hardware::camera::device::V3_4::ICameraDeviceSession> mHidlSession_3_4;
-        // Valid if ICameraDeviceSession is @3.5 or newer
-        sp<hardware::camera::device::V3_5::ICameraDeviceSession> mHidlSession_3_5;
-        // Valid if ICameraDeviceSession is @3.6 or newer
-        sp<hardware::camera::device::V3_6::ICameraDeviceSession> mHidlSession_3_6;
-        // Valid if ICameraDeviceSession is @3.7 or newer
-        sp<hardware::camera::device::V3_7::ICameraDeviceSession> mHidlSession_3_7;
-
-        std::shared_ptr<RequestMetadataQueue> mRequestMetadataQueue;
-
-        // The output HIDL request still depends on input camera_capture_request_t
-        // Do not free input camera_capture_request_t before output HIDL request
-        status_t wrapAsHidlRequest(camera_capture_request_t* in,
-                /*out*/hardware::camera::device::V3_2::CaptureRequest* out,
-                /*out*/std::vector<native_handle_t*>* handlesCreated,
-                /*out*/std::vector<std::pair<int32_t, int32_t>>* inflightBuffers);
-
-        status_t pushInflightBufferLocked(int32_t frameNumber, int32_t streamId,
-                buffer_handle_t *buffer);
-
-        // Pop inflight buffers based on pairs of (frameNumber,streamId)
-        void popInflightBuffers(const std::vector<std::pair<int32_t, int32_t>>& buffers);
+      protected:
 
         // Return true if the input caches match what we have; otherwise false
         bool verifyBufferIds(int32_t streamId, std::vector<uint64_t>& inBufIds);
 
+        template <typename OfflineSessionInfoT>
+        status_t verifyBufferCaches(
+            const OfflineSessionInfoT *offlineSessionInfo, camera3::BufferRecords *bufferRecords) {
+            // Validate buffer caches
+            std::vector<int32_t> streams;
+            streams.reserve(offlineSessionInfo->offlineStreams.size());
+            for (auto offlineStream : offlineSessionInfo->offlineStreams) {
+                int32_t id = offlineStream.id;
+                streams.push_back(id);
+                // Verify buffer caches
+                std::vector<uint64_t> bufIds(offlineStream.circulatingBufferIds.begin(),
+                        offlineStream.circulatingBufferIds.end());
+                if (!verifyBufferIds(id, bufIds)) {
+                    ALOGE("%s: stream ID %d buffer cache records mismatch!", __FUNCTION__, id);
+                    return UNKNOWN_ERROR;
+                }
+            }
+
+            // Move buffer records
+            bufferRecords->takeBufferCaches(mBufferRecords, streams);
+            bufferRecords->takeInflightBufferMap(mBufferRecords);
+            bufferRecords->takeRequestedBufferMap(mBufferRecords);
+            return OK;
+        }
+
         // Delete and optionally close native handles and clear the input vector afterward
         static void cleanupNativeHandles(
                 std::vector<native_handle_t*> *handles, bool closeFd = false);
@@ -483,7 +464,7 @@
         bool mIsReconfigurationQuerySupported;
 
         const bool mSupportOfflineProcessing;
-    };
+    }; // class HalInterface
 
     sp<HalInterface> mInterface;
 
@@ -570,10 +551,6 @@
         // overriding of ROTATE_AND_CROP value and adjustment of coordinates
         // in several other controls in both the request and the result
         bool                                mRotateAndCropAuto;
-        // Original value of TEST_PATTERN_MODE and DATA so that they can be
-        // restored when sensor muting is turned off
-        int32_t                             mOriginalTestPatternMode;
-        int32_t                             mOriginalTestPatternData[4];
 
         // Whether this capture request has its zoom ratio set to 1.0x before
         // the framework overrides it for camera HAL consumption.
@@ -581,7 +558,6 @@
         // The systemTime timestamp when the request is created.
         nsecs_t                             mRequestTimeNs;
 
-
         // Whether this capture request's distortion correction update has
         // been done.
         bool                                mDistortionCorrectionUpdated = false;
@@ -614,33 +590,6 @@
                                   bool repeating,
                                   int64_t *lastFrameNumber = NULL);
 
-
-    /**
-     * Implementation of android::hardware::camera::device::V3_5::ICameraDeviceCallback
-     */
-
-    hardware::Return<void> processCaptureResult_3_4(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_4::CaptureResult>& results) override;
-    hardware::Return<void> processCaptureResult(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_2::CaptureResult>& results) override;
-    hardware::Return<void> notify(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_2::NotifyMsg>& msgs) override;
-
-    hardware::Return<void> requestStreamBuffers(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_5::BufferRequest>& bufReqs,
-            requestStreamBuffers_cb _hidl_cb) override;
-
-    hardware::Return<void> returnStreamBuffers(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
-
-    // Handle one notify message
-    void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
-
     // lock to ensure only one processCaptureResult is called at a time.
     Mutex mProcessCaptureResultLock;
 
@@ -658,6 +607,9 @@
      */
     virtual CameraMetadata getLatestRequestLocked();
 
+    virtual status_t injectionCameraInitialize(const String8 &injectCamId,
+            sp<CameraProviderManager> manager) = 0;
+
     /**
      * Update the current device status and wake all waiting threads.
      *
@@ -906,16 +858,11 @@
         void signalPipelineDrain(const std::vector<int>& streamIds);
         void resetPipelineDrain();
 
-        status_t switchToOffline(
-                const std::vector<int32_t>& streamsToKeep,
-                /*out*/hardware::camera::device::V3_6::CameraOfflineSessionInfo* offlineSessionInfo,
-                /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
-                /*out*/camera3::BufferRecords* bufferRecords);
-
         void clearPreviousRequest();
 
         status_t setRotateAndCropAutoBehavior(
                 camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue);
+        status_t setComposerSurface(bool composerSurfacePresent);
 
         status_t setCameraMute(int32_t muteMode);
 
@@ -925,7 +872,6 @@
 
         virtual bool threadLoop();
 
-      private:
         static const String8& getId(const wp<Camera3Device> &device);
 
         status_t           queueTriggerLocked(RequestTrigger trigger);
@@ -1068,6 +1014,7 @@
         uint32_t           mCurrentAfTriggerId;
         uint32_t           mCurrentPreCaptureTriggerId;
         camera_metadata_enum_android_scaler_rotate_and_crop_t mRotateAndCropOverride;
+        bool               mComposerOutput;
         int32_t            mCameraMute; // 0 = no mute, otherwise the TEST_PATTERN_MODE to use
         bool               mCameraMuteChanged;
 
@@ -1089,6 +1036,14 @@
         const bool         mUseHalBufManager;
         const bool         mSupportCameraMute;
     };
+
+    virtual sp<RequestThread> createNewRequestThread(wp<Camera3Device> /*parent*/,
+                sp<camera3::StatusTracker> /*statusTracker*/,
+                sp<HalInterface> /*interface*/,
+                const Vector<int32_t>& /*sessionParamKeys*/,
+                bool /*useHalBufManager*/,
+                bool /*supportCameraMute*/) = 0;
+
     sp<RequestThread> mRequestThread;
 
     /**
@@ -1249,7 +1204,9 @@
 
     void monitorMetadata(TagMonitor::eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const CameraMetadata& metadata,
-            const std::unordered_map<std::string, CameraMetadata>& physicalMetadata);
+            const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+            const camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+            int32_t inputStreamId);
 
     metadata_vendor_id_t mVendorTagId;
 
@@ -1362,44 +1319,46 @@
 
         ~Camera3DeviceInjectionMethods();
 
-        // Initialize the injection camera and generate an hal interface.
-        status_t injectionInitialize(
-                const String8& injectedCamId, sp<CameraProviderManager> manager,
-                const sp<
-                    android::hardware::camera::device::V3_2 ::ICameraDeviceCallback>&
-                    callback);
-
         // Injection camera will replace the internal camera and configure streams
         // when device is IDLE and request thread is paused.
         status_t injectCamera(
                 camera3::camera_stream_configuration& injectionConfig,
-                std::vector<uint32_t>& injectionBufferSizes);
+                const std::vector<uint32_t>& injectionBufferSizes);
 
         // Stop the injection camera and switch back to backup hal interface.
         status_t stopInjection();
 
         bool isInjecting();
 
+        bool isStreamConfigCompleteButNotInjected();
+
         const String8& getInjectedCamId() const;
 
         void getInjectionConfig(/*out*/ camera3::camera_stream_configuration* injectionConfig,
                 /*out*/ std::vector<uint32_t>* injectionBufferSizes);
 
-      private:
+        // When stream configuration has completed and the camera device is active, but the
+        // injection camera has not yet been injected, store the internal camera's stream
+        // configuration first.
+        void storeInjectionConfig(
+                const camera3::camera_stream_configuration& injectionConfig,
+                const std::vector<uint32_t>& injectionBufferSizes);
+
+      protected:
         // Configure the streams of the injection camera; this needs to wait until the
         // output streams are created and configured for the original camera before
         // proceeding.
         status_t injectionConfigureStreams(
                 camera3::camera_stream_configuration& injectionConfig,
-                std::vector<uint32_t>& injectionBufferSizes);
+                const std::vector<uint32_t>& injectionBufferSizes);
 
         // Disconnect the injection camera and delete the hal interface.
         void injectionDisconnectImpl();
 
         // Use injection camera hal interface to replace and backup original
         // camera hal interface.
-        status_t replaceHalInterface(sp<HalInterface> newHalInterface,
-                bool keepBackup);
+        virtual status_t replaceHalInterface(sp<HalInterface> /*newHalInterface*/,
+                bool /*keepBackup*/) = 0;
 
         wp<Camera3Device> mParent;
 
@@ -1409,9 +1368,16 @@
         // Generated injection camera hal interface.
         sp<HalInterface> mInjectedCamHalInterface;
 
+        // Flag indicating that stream configuration is complete and the camera device is
+        // active, but the injection camera has not yet been injected.
+        bool mIsStreamConfigCompleteButNotInjected = false;
+
         // Copy the configuration of the internal camera.
         camera3::camera_stream_configuration mInjectionConfig;
 
+        // Copy the streams of the internal camera.
+        Vector<camera3::camera_stream_t*> mInjectionStreams;
+
         // Copy the bufferSizes of the output streams of the internal camera.
         std::vector<uint32_t> mInjectionBufferSizes;
 
@@ -1422,6 +1388,10 @@
         // The injection camera ID.
         String8 mInjectedCamId;
     };
+
+    virtual sp<Camera3DeviceInjectionMethods>
+            createCamera3DeviceInjectionMethods(wp<Camera3Device>) = 0;
+
     sp<Camera3DeviceInjectionMethods> mInjectionMethods;
 
 }; // class Camera3Device
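
The header changes above strip the HIDL-specific callback overrides and session members out of Camera3Device and replace them with pure virtual hooks (injectionCameraInitialize, createNewRequestThread, createCamera3DeviceInjectionMethods) plus the templated HalInterface::verifyBufferCaches helper, so transport-specific subclasses can supply their own session plumbing. Below is a minimal sketch of how such a subclass might call verifyBufferCaches when switching to offline mode; MyHalInterface, MyOfflineSessionInfo, MyOfflineStream, and the switchToOffline wrapper are illustrative assumptions, not code from this patch.

    // Illustrative only: assumes the surrounding AOSP headers (Camera3Device.h and the
    // usual status_t/ALOGE definitions) are available. The structs below stand in for the
    // transport's offline-session info type, which just needs "offlineStreams" entries with
    // "id" and "circulatingBufferIds" members, as required by verifyBufferCaches().
    struct MyOfflineStream {
        int32_t id;
        std::vector<uint64_t> circulatingBufferIds;
    };
    struct MyOfflineSessionInfo {
        std::vector<MyOfflineStream> offlineStreams;
    };

    status_t MyHalInterface::switchToOffline(
            const MyOfflineSessionInfo& info,
            /*out*/ camera3::BufferRecords* bufferRecords) {
        // verifyBufferCaches() checks each offline stream's circulating buffer IDs against
        // mBufferRecords and, on success, moves the buffer caches and the in-flight and
        // requested buffer maps into *bufferRecords.
        status_t res = verifyBufferCaches(&info, bufferRecords);
        if (res != OK) {
            return res;
        }
        // ... transport-specific offline-session creation would follow here ...
        return OK;
    }
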
diff --git a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
index f145dac..6818acf 100644
--- a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
@@ -39,98 +39,10 @@
     injectionDisconnectImpl();
 }
 
-status_t Camera3Device::Camera3DeviceInjectionMethods::injectionInitialize(
-        const String8& injectedCamId, sp<CameraProviderManager> manager,
-        const sp<android::hardware::camera::device::V3_2::ICameraDeviceCallback>&
-                callback) {
-    ATRACE_CALL();
-    Mutex::Autolock lock(mInjectionLock);
-
-    if (manager == nullptr) {
-        ALOGE("%s: manager does not exist!", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-
-    sp<Camera3Device> parent = mParent.promote();
-    if (parent == nullptr) {
-        ALOGE("%s: parent does not exist!", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-
-    mInjectedCamId = injectedCamId;
-    sp<ICameraDeviceSession> session;
-    ATRACE_BEGIN("Injection CameraHal::openSession");
-    status_t res = manager->openSession(injectedCamId.string(), callback,
-                                          /*out*/ &session);
-    ATRACE_END();
-    if (res != OK) {
-        ALOGE("Injection camera could not open camera session: %s (%d)",
-                strerror(-res), res);
-        return res;
-    }
-
-    std::shared_ptr<RequestMetadataQueue> queue;
-    auto requestQueueRet =
-        session->getCaptureRequestMetadataQueue([&queue](const auto& descriptor) {
-            queue = std::make_shared<RequestMetadataQueue>(descriptor);
-            if (!queue->isValid() || queue->availableToWrite() <= 0) {
-                ALOGE("Injection camera HAL returns empty request metadata fmq, not "
-                        "use it");
-                queue = nullptr;
-                // don't use the queue onwards.
-            }
-        });
-    if (!requestQueueRet.isOk()) {
-        ALOGE("Injection camera transaction error when getting request metadata fmq: "
-                "%s, not use it", requestQueueRet.description().c_str());
-        return DEAD_OBJECT;
-    }
-
-    std::unique_ptr<ResultMetadataQueue>& resQueue = parent->mResultMetadataQueue;
-    auto resultQueueRet = session->getCaptureResultMetadataQueue(
-        [&resQueue](const auto& descriptor) {
-            resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
-            if (!resQueue->isValid() || resQueue->availableToWrite() <= 0) {
-                ALOGE("Injection camera HAL returns empty result metadata fmq, not use "
-                        "it");
-                resQueue = nullptr;
-                // Don't use the resQueue onwards.
-            }
-        });
-    if (!resultQueueRet.isOk()) {
-        ALOGE("Injection camera transaction error when getting result metadata queue "
-                "from camera session: %s", resultQueueRet.description().c_str());
-        return DEAD_OBJECT;
-    }
-    IF_ALOGV() {
-        session->interfaceChain(
-                [](::android::hardware::hidl_vec<::android::hardware::hidl_string>
-                        interfaceChain) {
-                        ALOGV("Injection camera session interface chain:");
-                        for (const auto& iface : interfaceChain) {
-                            ALOGV("  %s", iface.c_str());
-                        }
-                });
-    }
-
-    ALOGV("%s: Injection camera interface = new HalInterface()", __FUNCTION__);
-    mInjectedCamHalInterface =
-            new HalInterface(session, queue, parent->mUseHalBufManager,
-                       parent->mSupportOfflineProcessing);
-    if (mInjectedCamHalInterface == nullptr) {
-        ALOGE("%s: mInjectedCamHalInterface does not exist!", __FUNCTION__);
-        return DEAD_OBJECT;
-    }
-
-    return OK;
-}
-
 status_t Camera3Device::Camera3DeviceInjectionMethods::injectCamera(
         camera3::camera_stream_configuration& injectionConfig,
-        std::vector<uint32_t>& injectionBufferSizes) {
+        const std::vector<uint32_t>& injectionBufferSizes) {
     status_t res = NO_ERROR;
-    mInjectionConfig = injectionConfig;
-    mInjectionBufferSizes = injectionBufferSizes;
 
     if (mInjectedCamHalInterface == nullptr) {
         ALOGE("%s: mInjectedCamHalInterface does not exist!", __FUNCTION__);
@@ -148,7 +60,6 @@
     if (parent->mStatus == STATUS_ACTIVE) {
         ALOGV("%s: Let the device be IDLE and the request thread is paused",
                 __FUNCTION__);
-        parent->mPauseStateNotify = true;
         res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
         if (res != OK) {
             ALOGE("%s: Can't pause captures to inject camera!", __FUNCTION__);
@@ -188,7 +99,7 @@
         ALOGV("%s: Restarting activity to inject camera", __FUNCTION__);
         // Reuse current operating mode and session parameters for new stream
         // config.
-        parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+        parent->internalResumeLocked();
     }
 
     return OK;
@@ -208,7 +119,6 @@
     if (parent->mStatus == STATUS_ACTIVE) {
         ALOGV("%s: Let the device be IDLE and the request thread is paused",
                 __FUNCTION__);
-        parent->mPauseStateNotify = true;
         res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
         if (res != OK) {
             ALOGE("%s: Can't pause captures to stop injection!", __FUNCTION__);
@@ -229,7 +139,7 @@
         ALOGV("%s: Restarting activity to stop injection", __FUNCTION__);
         // Reuse current operating mode and session parameters for new stream
         // config.
-        parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+        parent->internalResumeLocked();
     }
 
     return OK;
@@ -243,6 +153,10 @@
     }
 }
 
+bool Camera3Device::Camera3DeviceInjectionMethods::isStreamConfigCompleteButNotInjected() {
+    return mIsStreamConfigCompleteButNotInjected;
+}
+
 const String8& Camera3Device::Camera3DeviceInjectionMethods::getInjectedCamId()
         const {
     return mInjectedCamId;
@@ -260,10 +174,22 @@
     *injectionBufferSizes = mInjectionBufferSizes;
 }
 
+void Camera3Device::Camera3DeviceInjectionMethods::storeInjectionConfig(
+        const camera3::camera_stream_configuration& injectionConfig,
+        const std::vector<uint32_t>& injectionBufferSizes) {
+    mIsStreamConfigCompleteButNotInjected = true;
+    mInjectionConfig = injectionConfig;
+    mInjectionStreams.clear();
+    for (size_t i = 0; i < injectionConfig.num_streams; i++) {
+        mInjectionStreams.push_back(injectionConfig.streams[i]);
+    }
+    mInjectionConfig.streams = mInjectionStreams.editArray();
+    mInjectionBufferSizes = injectionBufferSizes;
+}
 
 status_t Camera3Device::Camera3DeviceInjectionMethods::injectionConfigureStreams(
         camera3::camera_stream_configuration& injectionConfig,
-        std::vector<uint32_t>& injectionBufferSizes) {
+        const std::vector<uint32_t>& injectionBufferSizes) {
     ATRACE_CALL();
     status_t res = NO_ERROR;
 
@@ -326,7 +252,6 @@
             mInjectedCamId.string());
 
     auto rc = parent->mPreparerThread->resume();
-
     if (rc != OK) {
         ALOGE("%s: Injection camera %s: Preparer thread failed to resume!",
                  __FUNCTION__, mInjectedCamId.string());
@@ -339,6 +264,9 @@
 void Camera3Device::Camera3DeviceInjectionMethods::injectionDisconnectImpl() {
     ATRACE_CALL();
     ALOGI("%s: Injection camera disconnect", __FUNCTION__);
+    mIsStreamConfigCompleteButNotInjected = false;
+    mInjectionStreams.clear();
+    mInjectionConfig.streams = nullptr;
 
     mBackupHalInterface = nullptr;
     HalInterface* interface = nullptr;
@@ -365,29 +293,4 @@
     }
 }
 
-status_t Camera3Device::Camera3DeviceInjectionMethods::replaceHalInterface(
-        sp<HalInterface> newHalInterface, bool keepBackup) {
-    Mutex::Autolock lock(mInjectionLock);
-    if (newHalInterface.get() == nullptr) {
-        ALOGE("%s: The newHalInterface does not exist, to stop replacing.",
-                __FUNCTION__);
-        return DEAD_OBJECT;
-    }
-
-    sp<Camera3Device> parent = mParent.promote();
-    if (parent == nullptr) {
-        ALOGE("%s: parent does not exist!", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-
-    if (keepBackup && mBackupHalInterface == nullptr) {
-        mBackupHalInterface = parent->mInterface;
-    } else if (!keepBackup) {
-        mBackupHalInterface = nullptr;
-    }
-    parent->mInterface = newHalInterface;
-
-    return OK;
-}
-
 };  // namespace android
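
One detail worth calling out in the new storeInjectionConfig(): camera_stream_configuration only carries a raw camera_stream_t** array, so the method copies the caller's stream pointers into the member Vector mInjectionStreams and re-points the stored config at that owned array, and injectionDisconnectImpl() clears both. A standalone sketch of the same ownership pattern, with hypothetical names, just to make the dangling-pointer hazard it avoids explicit:

    // Hypothetical holder type illustrating the pattern; not part of the patch.
    #include <utils/Vector.h>

    struct StoredStreamConfig {
        camera3::camera_stream_configuration mConfig{};
        Vector<camera3::camera_stream_t*> mStreams;  // owns the pointer array, not the streams

        void store(const camera3::camera_stream_configuration& config) {
            mConfig = config;  // shallow copy, including the caller's raw streams pointer
            mStreams.clear();
            for (size_t i = 0; i < config.num_streams; i++) {
                mStreams.push_back(config.streams[i]);
            }
            // Point the stored config at the array we own, so it stays valid after the
            // caller's array goes away.
            mConfig.streams = mStreams.editArray();
        }

        void clear() {
            mStreams.clear();
            mConfig.streams = nullptr;  // mirrors injectionDisconnectImpl()
        }
    };
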
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
index 8cc6833..61e43cb 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
@@ -48,7 +48,7 @@
 
 status_t Camera3FakeStream::returnBufferLocked(
         const camera_stream_buffer &,
-        nsecs_t, const std::vector<size_t>&) {
+        nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
     ATRACE_CALL();
     ALOGE("%s: Stream %d: Fake stream cannot return buffers!", __FUNCTION__, mId);
     return INVALID_OPERATION;
@@ -56,8 +56,9 @@
 
 status_t Camera3FakeStream::returnBufferCheckedLocked(
             const camera_stream_buffer &,
-            nsecs_t,
+            nsecs_t, nsecs_t,
             bool,
+            int32_t,
             const std::vector<size_t>&,
             /*out*/
             sp<Fence>*) {
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index 914ccbf..df19c3d 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -108,7 +108,9 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
+            int32_t transform,
             const std::vector<size_t>& surface_ids,
             /*out*/
             sp<Fence> *releaseFenceOut);
@@ -134,7 +136,8 @@
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
     virtual status_t returnBufferLocked(
             const camera_stream_buffer &buffer,
-            nsecs_t timestamp, const std::vector<size_t>& surface_ids);
+            nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
+            const std::vector<size_t>& surface_ids);
 
     virtual status_t configureQueueLocked();
 
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 0204d49..ba97367 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -34,10 +34,11 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
         Camera3Stream(id, type,
                 width, height, maxSize, format, dataSpace, rotation,
-                physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
+                physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
+                dynamicRangeProfile),
         mTotalBufferCount(0),
         mHandoutTotalBufferCount(0),
         mHandoutOutputBufferCount(0),
@@ -82,11 +83,12 @@
             camera_stream::width, camera_stream::height,
             camera_stream::format, camera_stream::data_space);
     lines.appendFormat("      Max size: %zu\n", mMaxSize);
-    lines.appendFormat("      Combined usage: %" PRIu64 ", max HAL buffers: %d\n",
+    lines.appendFormat("      Combined usage: 0x%" PRIx64 ", max HAL buffers: %d\n",
             mUsage | consumerUsage, camera_stream::max_buffers);
     if (strlen(camera_stream::physical_camera_id) > 0) {
         lines.appendFormat("      Physical camera id: %s\n", camera_stream::physical_camera_id);
     }
+    lines.appendFormat("      Dynamic Range Profile: 0x%x\n", camera_stream::dynamic_range_profile);
     lines.appendFormat("      Frames produced: %d, last timestamp: %" PRId64 " ns\n",
             mFrameCount, mLastTimestamp);
     lines.appendFormat("      Total buffers: %zu, currently dequeued: %zu\n",
@@ -224,7 +226,9 @@
 status_t Camera3IOStreamBase::returnAnyBufferLocked(
         const camera_stream_buffer &buffer,
         nsecs_t timestamp,
+        nsecs_t readoutTimestamp,
         bool output,
+        int32_t transform,
         const std::vector<size_t>& surface_ids) {
     status_t res;
 
@@ -241,7 +245,8 @@
     }
 
     sp<Fence> releaseFence;
-    res = returnBufferCheckedLocked(buffer, timestamp, output, surface_ids,
+    res = returnBufferCheckedLocked(buffer, timestamp, readoutTimestamp,
+                                    output, transform, surface_ids,
                                     &releaseFence);
     // Res may be an error, but we still want to decrement our owned count
     // to enable clean shutdown. So we'll just return the error but otherwise
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 90c8a7b..518ee42 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -37,7 +37,8 @@
             android_dataspace dataSpace, camera_stream_rotation_t rotation,
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
 
   public:
 
@@ -67,13 +68,17 @@
     status_t         returnAnyBufferLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
+            int32_t transform,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
 
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
+            int32_t transform,
             const std::vector<size_t>& surface_ids,
             /*out*/
             sp<Fence> *releaseFenceOut) = 0;
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 6d8317b..9a3f7ed 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -105,12 +105,15 @@
 status_t Camera3InputStream::returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
+            int32_t /*transform*/,
             const std::vector<size_t>&,
             /*out*/
             sp<Fence> *releaseFenceOut) {
 
     (void)timestamp;
+    (void)readoutTimestamp;
     (void)output;
     ALOG_ASSERT(!output, "Expected output to be false");
 
@@ -175,7 +178,8 @@
         const camera_stream_buffer &buffer) {
     ATRACE_CALL();
 
-    return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false);
+    return returnAnyBufferLocked(buffer, /*timestamp*/0, /*readoutTimestamp*/0,
+                                 /*output*/false, /*transform*/ -1);
 }
 
 status_t Camera3InputStream::getInputBufferProducerLocked(
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 46221d1..5e0587b 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -61,7 +61,9 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
+            int32_t transform,
             const std::vector<size_t>& surface_ids,
             /*out*/
             sp<Fence> *releaseFenceOut);
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
index a7e64ce..7cfa255 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
@@ -47,14 +47,12 @@
         const camera3::StreamSet& offlineStreamSet,
         camera3::BufferRecords&& bufferRecords,
         const camera3::InFlightRequestMap& offlineReqs,
-        const Camera3OfflineStates& offlineStates,
-        sp<hardware::camera::device::V3_6::ICameraOfflineSession> offlineSession) :
+        const Camera3OfflineStates& offlineStates) :
         mId(id),
         mInputStream(inputStream),
         mOutputStreams(offlineStreamSet),
         mBufferRecords(std::move(bufferRecords)),
         mOfflineReqs(offlineReqs),
-        mSession(offlineSession),
         mTagMonitor(offlineStates.mTagMonitor),
         mVendorTagId(offlineStates.mVendorTagId),
         mUseHalBufManager(offlineStates.mUseHalBufManager),
@@ -90,43 +88,6 @@
     return mId;
 }
 
-status_t Camera3OfflineSession::initialize(wp<NotificationListener> listener) {
-    ATRACE_CALL();
-
-    if (mSession == nullptr) {
-        ALOGE("%s: HIDL session is null!", __FUNCTION__);
-        return DEAD_OBJECT;
-    }
-
-    {
-        std::lock_guard<std::mutex> lock(mLock);
-
-        mListener = listener;
-
-        // setup result FMQ
-        std::unique_ptr<ResultMetadataQueue>& resQueue = mResultMetadataQueue;
-        auto resultQueueRet = mSession->getCaptureResultMetadataQueue(
-            [&resQueue](const auto& descriptor) {
-                resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
-                if (!resQueue->isValid() || resQueue->availableToWrite() <= 0) {
-                    ALOGE("HAL returns empty result metadata fmq, not use it");
-                    resQueue = nullptr;
-                    // Don't use resQueue onwards.
-                }
-            });
-        if (!resultQueueRet.isOk()) {
-            ALOGE("Transaction error when getting result metadata queue from camera session: %s",
-                    resultQueueRet.description().c_str());
-            return DEAD_OBJECT;
-        }
-        mStatus = STATUS_ACTIVE;
-    }
-
-    mSession->setCallback(this);
-
-    return OK;
-}
-
 status_t Camera3OfflineSession::dump(int /*fd*/) {
     ATRACE_CALL();
     std::lock_guard<std::mutex> il(mInterfaceLock);
@@ -135,6 +96,7 @@
 
 status_t Camera3OfflineSession::disconnect() {
     ATRACE_CALL();
+    disconnectSession();
     return disconnectImpl();
 }
 
@@ -170,10 +132,6 @@
         streams.push_back(mInputStream);
     }
 
-    if (mSession != nullptr) {
-        mSession->close();
-    }
-
     FlushInflightReqStates states {
         mId, mOfflineReqsLock, mOfflineReqs, mUseHalBufManager,
         listener, *this, mBufferRecords, *this, mSessionStatsBuilder};
@@ -182,7 +140,6 @@
 
     {
         std::lock_guard<std::mutex> lock(mLock);
-        mSession.clear();
         mOutputStreams.clear();
         mInputStream.clear();
         mStatus = STATUS_CLOSED;
@@ -235,149 +192,6 @@
     return OK;
 }
 
-hardware::Return<void> Camera3OfflineSession::processCaptureResult_3_4(
-        const hardware::hidl_vec<
-                hardware::camera::device::V3_4::CaptureResult>& results) {
-    sp<NotificationListener> listener;
-    {
-        std::lock_guard<std::mutex> lock(mLock);
-        if (mStatus != STATUS_ACTIVE) {
-            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
-            return hardware::Void();
-        }
-        listener = mListener.promote();
-    }
-
-    CaptureOutputStates states {
-        mId,
-        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
-        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
-        mNextShutterFrameNumber,
-        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-        mNextResultFrameNumber,
-        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
-        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
-        mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
-        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        mBufferRecords
-    };
-
-    std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
-    for (const auto& result : results) {
-        processOneCaptureResultLocked(states, result.v3_2, result.physicalCameraMetadata);
-    }
-    return hardware::Void();
-}
-
-hardware::Return<void> Camera3OfflineSession::processCaptureResult(
-        const hardware::hidl_vec<
-                hardware::camera::device::V3_2::CaptureResult>& results) {
-    // TODO: changed impl to call into processCaptureResult_3_4 instead?
-    //       might need to figure how to reduce copy though.
-    sp<NotificationListener> listener;
-    {
-        std::lock_guard<std::mutex> lock(mLock);
-        if (mStatus != STATUS_ACTIVE) {
-            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
-            return hardware::Void();
-        }
-        listener = mListener.promote();
-    }
-
-    hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata;
-
-    CaptureOutputStates states {
-        mId,
-        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
-        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
-        mNextShutterFrameNumber,
-        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-        mNextResultFrameNumber,
-        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
-        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
-        mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
-        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        mBufferRecords
-    };
-
-    std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
-    for (const auto& result : results) {
-        processOneCaptureResultLocked(states, result, noPhysMetadata);
-    }
-    return hardware::Void();
-}
-
-hardware::Return<void> Camera3OfflineSession::notify(
-        const hardware::hidl_vec<hardware::camera::device::V3_2::NotifyMsg>& msgs) {
-    sp<NotificationListener> listener;
-    {
-        std::lock_guard<std::mutex> lock(mLock);
-        if (mStatus != STATUS_ACTIVE) {
-            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
-            return hardware::Void();
-        }
-        listener = mListener.promote();
-    }
-
-    CaptureOutputStates states {
-        mId,
-        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
-        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
-        mNextShutterFrameNumber,
-        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-        mNextResultFrameNumber,
-        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
-        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
-        mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
-        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        mBufferRecords
-    };
-    for (const auto& msg : msgs) {
-        camera3::notify(states, msg);
-    }
-    return hardware::Void();
-}
-
-hardware::Return<void> Camera3OfflineSession::requestStreamBuffers(
-        const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
-        requestStreamBuffers_cb _hidl_cb) {
-    {
-        std::lock_guard<std::mutex> lock(mLock);
-        if (mStatus != STATUS_ACTIVE) {
-            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
-            return hardware::Void();
-        }
-    }
-
-    RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
-        *this, mBufferRecords, *this};
-    camera3::requestStreamBuffers(states, bufReqs, _hidl_cb);
-    return hardware::Void();
-}
-
-hardware::Return<void> Camera3OfflineSession::returnStreamBuffers(
-        const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
-    {
-        std::lock_guard<std::mutex> lock(mLock);
-        if (mStatus != STATUS_ACTIVE) {
-            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
-            return hardware::Void();
-        }
-    }
-
-    ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder, mBufferRecords};
-    camera3::returnStreamBuffers(states, buffers);
-    return hardware::Void();
-}
-
 void Camera3OfflineSession::setErrorState(const char *fmt, ...) {
     ATRACE_CALL();
     std::lock_guard<std::mutex> lock(mLock);
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.h b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
index 5581964..0f7d145 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.h
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
@@ -131,7 +131,6 @@
  */
 class Camera3OfflineSession :
             public CameraOfflineSessionBase,
-            virtual public hardware::camera::device::V3_5::ICameraDeviceCallback,
             public camera3::SetErrorInterface,
             public camera3::InflightRequestUpdateInterface,
             public camera3::RequestBufferInterface,
@@ -144,12 +143,11 @@
             const camera3::StreamSet& offlineStreamSet,
             camera3::BufferRecords&& bufferRecords,
             const camera3::InFlightRequestMap& offlineReqs,
-            const Camera3OfflineStates& offlineStates,
-            sp<hardware::camera::device::V3_6::ICameraOfflineSession> offlineSession);
+            const Camera3OfflineStates& offlineStates);
 
     virtual ~Camera3OfflineSession();
 
-    virtual status_t initialize(wp<NotificationListener> listener) override;
+    virtual status_t initialize(wp<NotificationListener> /*listener*/) = 0;
 
     /**
      * CameraOfflineSessionBase interface
@@ -171,38 +169,7 @@
      * End of CameraOfflineSessionBase interface
      */
 
-    /**
-     * HIDL ICameraDeviceCallback interface
-     */
-
-    /**
-     * Implementation of android::hardware::camera::device::V3_5::ICameraDeviceCallback
-     */
-
-    hardware::Return<void> processCaptureResult_3_4(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_4::CaptureResult>& results) override;
-    hardware::Return<void> processCaptureResult(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_2::CaptureResult>& results) override;
-    hardware::Return<void> notify(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_2::NotifyMsg>& msgs) override;
-
-    hardware::Return<void> requestStreamBuffers(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_5::BufferRequest>& bufReqs,
-            requestStreamBuffers_cb _hidl_cb) override;
-
-    hardware::Return<void> returnStreamBuffers(
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
-
-    /**
-     * End of CameraOfflineSessionBase interface
-     */
-
-  private:
+  protected:
     // Camera device ID
     const String8 mId;
     sp<camera3::Camera3Stream> mInputStream;
@@ -213,8 +180,6 @@
     std::mutex mOfflineReqsLock;
     camera3::InFlightRequestMap mOfflineReqs;
 
-    sp<hardware::camera::device::V3_6::ICameraOfflineSession> mSession;
-
     TagMonitor mTagMonitor;
     const metadata_vendor_id_t mVendorTagId;
 
@@ -269,8 +234,6 @@
     // End of mLock protect scope
 
     std::mutex mProcessCaptureResultLock;
-    // FMQ to write result on. Must be guarded by mProcessCaptureResultLock.
-    std::unique_ptr<ResultMetadataQueue> mResultMetadataQueue;
 
     // Tracking cause of fatal errors when in STATUS_ERROR
     String8 mErrorCause;
@@ -305,6 +268,8 @@
     void setErrorStateLockedV(const char *fmt, va_list args);
 
     status_t disconnectImpl();
+    virtual void disconnectSession() = 0;
+
 }; // class Camera3OfflineSession
 
 }; // namespace android
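
With the HIDL callback interface and the mSession member removed, Camera3OfflineSession becomes an abstract, transport-agnostic base: initialize() and the new disconnectSession() are pure virtual, the data members moved from private to protected, and disconnect() runs the subclass's disconnectSession() before the shared disconnectImpl() teardown. A skeletal, hypothetical subclass (the name and body are illustrative, not the actual HIDL implementation) showing where those hooks slot in:

    // Hypothetical subclass sketch; assumes the now-protected members (mLock, mListener,
    // mStatus) that the removed initialize() used are still present in the base class.
    class MyTransportOfflineSession : public Camera3OfflineSession {
      public:
        using Camera3OfflineSession::Camera3OfflineSession;

        status_t initialize(wp<NotificationListener> listener) override {
            std::lock_guard<std::mutex> lock(mLock);
            mListener = listener;
            // ... transport-specific result-queue and callback registration here ...
            mStatus = STATUS_ACTIVE;
            return OK;
        }

      protected:
        void disconnectSession() override {
            // Close the transport-specific session object; disconnect() then calls the
            // shared disconnectImpl() to flush in-flight requests and tear down streams.
        }
    };
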
diff --git a/services/camera/libcameraservice/device3/Camera3OutputInterface.h b/services/camera/libcameraservice/device3/Camera3OutputInterface.h
index 8817833..40eef1d 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputInterface.h
@@ -50,6 +50,10 @@
         // return pair of (newlySeenBuffer?, bufferId)
         virtual std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId) = 0;
 
+        // Return the removed buffer ID if input cache is found.
+        // Otherwise return BUFFER_ID_NO_BUFFER
+        virtual uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) = 0;
+
         // Find a buffer_handle_t based on frame number and stream ID
         virtual status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
                 /*out*/ buffer_handle_t **buffer) = 0;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 03b77fc..69723b6 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -22,6 +22,7 @@
 #include <fstream>
 
 #include <android-base/unique_fd.h>
+#include <cutils/properties.h>
 #include <ui/GraphicBuffer.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -45,10 +46,11 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                             /*maxSize*/0, format, dataSpace, rotation,
-                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
+                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
+                            dynamicRangeProfile),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -73,10 +75,10 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
-                            setId, isMultiResolution),
+                            setId, isMultiResolution, dynamicRangeProfile),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -108,10 +110,11 @@
         camera_stream_rotation_t rotation, nsecs_t timestampOffset,
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                             /*maxSize*/0, format, dataSpace, rotation,
-                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
+                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
+                            dynamicRangeProfile),
         mConsumer(nullptr),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -148,11 +151,13 @@
                                          const String8& physicalCameraId,
                                         const std::unordered_set<int32_t> &sensorPixelModesUsed,
                                          uint64_t consumerUsage, nsecs_t timestampOffset,
-                                         int setId, bool isMultiResolution) :
+                                         int setId, bool isMultiResolution,
+                                         int dynamicRangeProfile) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
                             format, dataSpace, rotation,
-                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
+                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
+                            dynamicRangeProfile),
         mTransform(0),
         mTraceFirstBuffer(true),
         mUseMonoTimestamp(false),
@@ -263,14 +268,16 @@
 
 status_t Camera3OutputStream::returnBufferLocked(
         const camera_stream_buffer &buffer,
-        nsecs_t timestamp, const std::vector<size_t>& surface_ids) {
+        nsecs_t timestamp, nsecs_t readoutTimestamp,
+        int32_t transform, const std::vector<size_t>& surface_ids) {
     ATRACE_HFR_CALL();
 
     if (mHandoutTotalBufferCount == 1) {
         returnPrefetchedBuffersLocked();
     }
 
-    status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true, surface_ids);
+    status_t res = returnAnyBufferLocked(buffer, timestamp, readoutTimestamp,
+                                         /*output*/true, transform, surface_ids);
 
     if (res != OK) {
         return res;
@@ -285,7 +292,9 @@
 status_t Camera3OutputStream::returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
+            int32_t transform,
             const std::vector<size_t>& surface_ids,
             /*out*/
             sp<Fence> *releaseFenceOut) {
@@ -346,16 +355,6 @@
             mTraceFirstBuffer = false;
         }
 
-        /* Certain consumers (such as AudioSource or HardwareComposer) use
-         * MONOTONIC time, causing time misalignment if camera timestamp is
-         * in BOOTTIME. Do the conversion if necessary. */
-        res = native_window_set_buffers_timestamp(mConsumer.get(),
-                mUseMonoTimestamp ? timestamp - mTimestampOffset : timestamp);
-        if (res != OK) {
-            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
-                  __FUNCTION__, mId, strerror(-res), res);
-            return res;
-        }
         // If this is a JPEG output, and image dump mask is set, save image to
         // disk.
         if (getFormat() == HAL_PIXEL_FORMAT_BLOB && getDataSpace() == HAL_DATASPACE_V0_JFIF &&
@@ -363,10 +362,35 @@
             dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
         }
 
-        res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
-        if (shouldLogError(res, state)) {
-            ALOGE("%s: Stream %d: Error queueing buffer to native window:"
-                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+        /* Certain consumers (such as AudioSource or HardwareComposer) use
+         * MONOTONIC time, causing time misalignment if camera timestamp is
+         * in BOOTTIME. Do the conversion if necessary. */
+        nsecs_t t = mPreviewFrameScheduler != nullptr ? readoutTimestamp : timestamp;
+        nsecs_t adjustedTs = mUseMonoTimestamp ? t - mTimestampOffset : t;
+        if (mPreviewFrameScheduler != nullptr) {
+            res = mPreviewFrameScheduler->queuePreviewBuffer(adjustedTs, transform,
+                    anwBuffer, anwReleaseFence);
+            if (res != OK) {
+                ALOGE("%s: Stream %d: Error queuing buffer to preview buffer scheduler: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+                return res;
+            }
+        } else {
+            setTransform(transform);
+            res = native_window_set_buffers_timestamp(mConsumer.get(), adjustedTs);
+            if (res != OK) {
+                ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+                      __FUNCTION__, mId, strerror(-res), res);
+                return res;
+            }
+
+            queueHDRMetadata(anwBuffer->handle, currentConsumer, dynamic_range_profile);
+
+            res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
+            if (shouldLogError(res, state)) {
+                ALOGE("%s: Stream %d: Error queueing buffer to native window:"
+                      " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+            }
         }
     }
     mLock.lock();
@@ -407,6 +431,9 @@
 
 status_t Camera3OutputStream::setTransformLocked(int transform) {
     status_t res = OK;
+
+    if (transform == -1) return res;
+
     if (mState == STATE_ERROR) {
         ALOGE("%s: Stream in error state", __FUNCTION__);
         return INVALID_OPERATION;
@@ -432,7 +459,7 @@
         return res;
     }
 
-    if ((res = configureConsumerQueueLocked()) != OK) {
+    if ((res = configureConsumerQueueLocked(true /*allowPreviewScheduler*/)) != OK) {
         return res;
     }
 
@@ -456,7 +483,7 @@
     return OK;
 }
 
-status_t Camera3OutputStream::configureConsumerQueueLocked() {
+status_t Camera3OutputStream::configureConsumerQueueLocked(bool allowPreviewScheduler) {
     status_t res;
 
     mTraceFirstBuffer = true;
@@ -542,6 +569,15 @@
     }
 
     mTotalBufferCount = maxConsumerBuffers + camera_stream::max_buffers;
+    if (allowPreviewScheduler && isConsumedByHWComposer()) {
+        // We cannot distinguish between a SurfaceView and an ImageReader that uses the
+        // preview buffer format, so the PreviewFrameScheduler needs to handle both.
+        if (!property_get_bool("camera.disable_preview_scheduler", false)) {
+            mPreviewFrameScheduler = std::make_unique<PreviewFrameScheduler>(*this, mConsumer);
+            mTotalBufferCount += PreviewFrameScheduler::kQueueDepthWatermark;
+        }
+    }
+
     mHandoutTotalBufferCount = 0;
     mFrameCount = 0;
     mLastTimestamp = 0;
@@ -1180,6 +1216,11 @@
     }
 }
 
+bool Camera3OutputStream::shouldLogError(status_t res) {
+    Mutex::Autolock l(mLock);
+    return shouldLogError(res, mState);
+}
+
 }; // namespace camera3
 
 }; // namespace android
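
The main behavioral change in returnBufferCheckedLocked() above is the timestamp handling: when a PreviewFrameScheduler is attached the frame is queued with the readout timestamp, otherwise with the shutter timestamp, and in either case the BOOTTIME-to-MONOTONIC offset is still subtracted for consumers that expect MONOTONIC time. A small free-function restatement of just that selection, under the assumption that useMonoTimestamp mirrors mUseMonoTimestamp and timestampOffset mirrors mTimestampOffset:

    #include <utils/Timers.h>  // nsecs_t

    // Sketch only: the patch computes this inline inside returnBufferCheckedLocked().
    static nsecs_t selectQueueTimestamp(bool hasPreviewScheduler, bool useMonoTimestamp,
                                        nsecs_t timestampOffset, nsecs_t shutterTimestamp,
                                        nsecs_t readoutTimestamp) {
        // Preview-scheduled buffers are queued on the readout timestamp; everything else
        // keeps the start-of-exposure (shutter) timestamp, as before.
        const nsecs_t t = hasPreviewScheduler ? readoutTimestamp : shutterTimestamp;
        // Certain consumers (AudioSource, HardwareComposer) expect MONOTONIC time while the
        // camera timestamps may be in BOOTTIME, so subtract the offset when needed.
        return useMonoTimestamp ? t - timestampOffset : t;
    }
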
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index ad03b53..d9bf62a 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -27,6 +27,7 @@
 #include "Camera3IOStreamBase.h"
 #include "Camera3OutputStreamInterface.h"
 #include "Camera3BufferManager.h"
+#include "PreviewFrameScheduler.h"
 
 namespace android {
 
@@ -88,7 +89,8 @@
             android_dataspace dataSpace, camera_stream_rotation_t rotation,
             nsecs_t timestampOffset, const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
     /**
      * Set up a stream for formats that have a variable buffer size for the same
      * dimensions, such as compressed JPEG.
@@ -100,7 +102,8 @@
             android_dataspace dataSpace, camera_stream_rotation_t rotation,
             nsecs_t timestampOffset, const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
     /**
      * Set up a stream with deferred consumer for formats that have 2 dimensions, such as
      * RAW and YUV. The consumer must be set before using this stream for output. A valid
@@ -111,7 +114,8 @@
             camera_stream_rotation_t rotation, nsecs_t timestampOffset,
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
 
     virtual ~Camera3OutputStream();
 
@@ -229,6 +233,7 @@
     static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
 
     void setImageDumpMask(int mask) { mImageDumpMask = mask; }
+    bool shouldLogError(status_t res);
 
   protected:
     Camera3OutputStream(int id, camera_stream_type_t type,
@@ -237,7 +242,8 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             uint64_t consumerUsage = 0, nsecs_t timestampOffset = 0,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
 
     /**
      * Note that we release the lock briefly in this function
@@ -245,7 +251,9 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
+            int32_t transform,
             const std::vector<size_t>& surface_ids,
             /*out*/
             sp<Fence> *releaseFenceOut);
@@ -254,7 +262,7 @@
 
     status_t getEndpointUsageForSurface(uint64_t *usage,
             const sp<Surface>& surface) const;
-    status_t configureConsumerQueueLocked();
+    status_t configureConsumerQueueLocked(bool allowPreviewScheduler);
 
     // Consumer as the output of camera HAL
     sp<Surface> mConsumer;
@@ -332,7 +340,8 @@
 
     virtual status_t returnBufferLocked(
             const camera_stream_buffer &buffer,
-            nsecs_t timestamp, const std::vector<size_t>& surface_ids);
+            nsecs_t timestamp, nsecs_t readoutTimestamp,
+            int32_t transform, const std::vector<size_t>& surface_ids);
 
     virtual status_t queueBufferToConsumer(sp<ANativeWindow>& consumer,
             ANativeWindowBuffer* buffer, int anwReleaseFence,
@@ -369,6 +378,8 @@
 
     int mImageDumpMask = 0;
 
+    // The preview stream scheduler for re-timing frames
+    std::unique_ptr<PreviewFrameScheduler> mPreviewFrameScheduler;
 }; // class Camera3OutputStream
 
 } // namespace camera3
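
The new mPreviewFrameScheduler member is only created in configureConsumerQueueLocked() when the caller allows it and the output is consumed by the hardware composer, with the camera.disable_preview_scheduler system property as an opt-out. A minimal sketch of that gate, assuming only property_get_bool() from <cutils/properties.h>, which the .cpp change adds:

    #include <cutils/properties.h>

    // Sketch of the gating condition; in the patch it guards the
    // std::make_unique<PreviewFrameScheduler>(...) call.
    static bool shouldAttachPreviewScheduler(bool allowPreviewScheduler,
                                             bool consumedByHWComposer) {
        if (!allowPreviewScheduler || !consumedByHWComposer) {
            return false;
        }
        // Enabled by default; set camera.disable_preview_scheduler to true to opt out.
        return !property_get_bool("camera.disable_preview_scheduler", false);
    }
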
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 9f225d0..d8cc685 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -42,10 +42,13 @@
 #include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
 #include <android/hardware/camera/device/3.5/ICameraDeviceSession.h>
 
+#include <camera/CameraUtils.h>
 #include <camera_metadata_hidden.h>
 
 #include "device3/Camera3OutputUtils.h"
 
+#include "system/camera_metadata.h"
+
 using namespace android::camera3;
 using namespace android::hardware::camera;
 
@@ -373,43 +376,6 @@
     insertResultLocked(states, &captureResult, frameNumber);
 }
 
-// Reading one camera metadata from result argument via fmq or from the result
-// Assuming the fmq is protected by a lock already
-status_t readOneCameraMetadataLocked(
-        std::unique_ptr<ResultMetadataQueue>& fmq,
-        uint64_t fmqResultSize,
-        hardware::camera::device::V3_2::CameraMetadata& resultMetadata,
-        const hardware::camera::device::V3_2::CameraMetadata& result) {
-    if (fmqResultSize > 0) {
-        resultMetadata.resize(fmqResultSize);
-        if (fmq == nullptr) {
-            return NO_MEMORY; // logged in initialize()
-        }
-        if (!fmq->read(resultMetadata.data(), fmqResultSize)) {
-            ALOGE("%s: Cannot read camera metadata from fmq, size = %" PRIu64,
-                    __FUNCTION__, fmqResultSize);
-            return INVALID_OPERATION;
-        }
-    } else {
-        resultMetadata.setToExternal(const_cast<uint8_t *>(result.data()),
-                result.size());
-    }
-
-    if (resultMetadata.size() != 0) {
-        status_t res;
-        const camera_metadata_t* metadata =
-                reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
-        size_t expected_metadata_size = resultMetadata.size();
-        if ((res = validate_camera_metadata_structure(metadata, &expected_metadata_size)) != OK) {
-            ALOGE("%s: Invalid camera metadata received by camera service from HAL: %s (%d)",
-                    __FUNCTION__, strerror(-res), res);
-            return INVALID_OPERATION;
-        }
-    }
-
-    return OK;
-}
-
 void removeInFlightMapEntryLocked(CaptureOutputStates& states, int idx) {
     ATRACE_CALL();
     InFlightRequestMap& inflightMap = states.inflightMap;
@@ -460,16 +426,16 @@
         returnOutputBuffers(
             states.useHalBufManager, states.listener,
             request.pendingOutputBuffers.array(),
-            request.pendingOutputBuffers.size(), 0,
+            request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
             /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
             /*timestampIncreasing*/true,
             request.outputSurfaces, request.resultExtras,
-            request.errorBufStrategy);
+            request.errorBufStrategy, request.transform);
 
         // Note down the just completed frame number
         if (request.hasInputBuffer) {
             states.lastCompletedReprocessFrameNumber = frameNumber;
-        } else if (request.zslCapture) {
+        } else if (request.zslCapture && request.stillCapture) {
             states.lastCompletedZslFrameNumber = frameNumber;
         } else {
             states.lastCompletedRegularFrameNumber = frameNumber;
@@ -555,6 +521,31 @@
         if (result->partial_result != 0)
             request.resultExtras.partialResultCount = result->partial_result;
 
+        if ((result->result != nullptr) && !states.legacyClient) {
+            camera_metadata_ro_entry entry;
+            auto ret = find_camera_metadata_ro_entry(result->result,
+                    ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
+            if ((ret == OK) && (entry.count > 0)) {
+                std::string physicalId(reinterpret_cast<const char *>(entry.data.u8));
+                auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
+                if (deviceInfo != states.physicalDeviceInfoMap.end()) {
+                    auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
+                    if (orientation.count > 0) {
+                        ret = CameraUtils::getRotationTransform(deviceInfo->second,
+                                &request.transform);
+                        if (ret != OK) {
+                            ALOGE("%s: Failed to calculate current stream transformation: %s (%d)",
+                                    __FUNCTION__, strerror(-ret), ret);
+                        }
+                    } else {
+                        ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
+                    }
+                } else {
+                    ALOGE("%s: Physical device not found in device info map!", __FUNCTION__);
+                }
+            }
+        }
+
         // Check if this result carries only partial metadata
         if (states.usePartialResult && result->result != NULL) {
             if (result->partial_result > states.numPartialResults || result->partial_result < 1) {
@@ -691,162 +682,15 @@
     }
 }
 
-void processOneCaptureResultLocked(
-        CaptureOutputStates& states,
-        const hardware::camera::device::V3_2::CaptureResult& result,
-        const hardware::hidl_vec<
-                hardware::camera::device::V3_4::PhysicalCameraMetadata> physicalCameraMetadata) {
-    using hardware::camera::device::V3_2::StreamBuffer;
-    using hardware::camera::device::V3_2::BufferStatus;
-    std::unique_ptr<ResultMetadataQueue>& fmq = states.fmq;
-    BufferRecordsInterface& bufferRecords = states.bufferRecordsIntf;
-    camera_capture_result r;
-    status_t res;
-    r.frame_number = result.frameNumber;
-
-    // Read and validate the result metadata.
-    hardware::camera::device::V3_2::CameraMetadata resultMetadata;
-    res = readOneCameraMetadataLocked(
-            fmq, result.fmqResultSize,
-            resultMetadata, result.result);
-    if (res != OK) {
-        ALOGE("%s: Frame %d: Failed to read capture result metadata",
-                __FUNCTION__, result.frameNumber);
-        return;
-    }
-    r.result = reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
-
-    // Read and validate physical camera metadata
-    size_t physResultCount = physicalCameraMetadata.size();
-    std::vector<const char*> physCamIds(physResultCount);
-    std::vector<const camera_metadata_t *> phyCamMetadatas(physResultCount);
-    std::vector<hardware::camera::device::V3_2::CameraMetadata> physResultMetadata;
-    physResultMetadata.resize(physResultCount);
-    for (size_t i = 0; i < physicalCameraMetadata.size(); i++) {
-        res = readOneCameraMetadataLocked(fmq, physicalCameraMetadata[i].fmqMetadataSize,
-                physResultMetadata[i], physicalCameraMetadata[i].metadata);
-        if (res != OK) {
-            ALOGE("%s: Frame %d: Failed to read capture result metadata for camera %s",
-                    __FUNCTION__, result.frameNumber,
-                    physicalCameraMetadata[i].physicalCameraId.c_str());
-            return;
-        }
-        physCamIds[i] = physicalCameraMetadata[i].physicalCameraId.c_str();
-        phyCamMetadatas[i] = reinterpret_cast<const camera_metadata_t*>(
-                physResultMetadata[i].data());
-    }
-    r.num_physcam_metadata = physResultCount;
-    r.physcam_ids = physCamIds.data();
-    r.physcam_metadata = phyCamMetadatas.data();
-
-    std::vector<camera_stream_buffer_t> outputBuffers(result.outputBuffers.size());
-    std::vector<buffer_handle_t> outputBufferHandles(result.outputBuffers.size());
-    for (size_t i = 0; i < result.outputBuffers.size(); i++) {
-        auto& bDst = outputBuffers[i];
-        const StreamBuffer &bSrc = result.outputBuffers[i];
-
-        sp<Camera3StreamInterface> stream = states.outputStreams.get(bSrc.streamId);
-        if (stream == nullptr) {
-            ALOGE("%s: Frame %d: Buffer %zu: Invalid output stream id %d",
-                    __FUNCTION__, result.frameNumber, i, bSrc.streamId);
-            return;
-        }
-        bDst.stream = stream->asHalStream();
-
-        bool noBufferReturned = false;
-        buffer_handle_t *buffer = nullptr;
-        if (states.useHalBufManager) {
-            // This is suspicious most of the time but can be correct during flush where HAL
-            // has to return capture result before a buffer is requested
-            if (bSrc.bufferId == BUFFER_ID_NO_BUFFER) {
-                if (bSrc.status == BufferStatus::OK) {
-                    ALOGE("%s: Frame %d: Buffer %zu: No bufferId for stream %d",
-                            __FUNCTION__, result.frameNumber, i, bSrc.streamId);
-                    // Still proceeds so other buffers can be returned
-                }
-                noBufferReturned = true;
-            }
-            if (noBufferReturned) {
-                res = OK;
-            } else {
-                res = bufferRecords.popInflightRequestBuffer(bSrc.bufferId, &buffer);
-            }
-        } else {
-            res = bufferRecords.popInflightBuffer(result.frameNumber, bSrc.streamId, &buffer);
-        }
-
-        if (res != OK) {
-            ALOGE("%s: Frame %d: Buffer %zu: No in-flight buffer for stream %d",
-                    __FUNCTION__, result.frameNumber, i, bSrc.streamId);
-            return;
-        }
-
-        bDst.buffer = buffer;
-        bDst.status = mapHidlBufferStatus(bSrc.status);
-        bDst.acquire_fence = -1;
-        if (bSrc.releaseFence == nullptr) {
-            bDst.release_fence = -1;
-        } else if (bSrc.releaseFence->numFds == 1) {
-            if (noBufferReturned) {
-                ALOGE("%s: got releaseFence without output buffer!", __FUNCTION__);
-            }
-            bDst.release_fence = dup(bSrc.releaseFence->data[0]);
-        } else {
-            ALOGE("%s: Frame %d: Invalid release fence for buffer %zu, fd count is %d, not 1",
-                    __FUNCTION__, result.frameNumber, i, bSrc.releaseFence->numFds);
-            return;
-        }
-    }
-    r.num_output_buffers = outputBuffers.size();
-    r.output_buffers = outputBuffers.data();
-
-    camera_stream_buffer_t inputBuffer;
-    if (result.inputBuffer.streamId == -1) {
-        r.input_buffer = nullptr;
-    } else {
-        if (states.inputStream->getId() != result.inputBuffer.streamId) {
-            ALOGE("%s: Frame %d: Invalid input stream id %d", __FUNCTION__,
-                    result.frameNumber, result.inputBuffer.streamId);
-            return;
-        }
-        inputBuffer.stream = states.inputStream->asHalStream();
-        buffer_handle_t *buffer;
-        res = bufferRecords.popInflightBuffer(result.frameNumber, result.inputBuffer.streamId,
-                &buffer);
-        if (res != OK) {
-            ALOGE("%s: Frame %d: Input buffer: No in-flight buffer for stream %d",
-                    __FUNCTION__, result.frameNumber, result.inputBuffer.streamId);
-            return;
-        }
-        inputBuffer.buffer = buffer;
-        inputBuffer.status = mapHidlBufferStatus(result.inputBuffer.status);
-        inputBuffer.acquire_fence = -1;
-        if (result.inputBuffer.releaseFence == nullptr) {
-            inputBuffer.release_fence = -1;
-        } else if (result.inputBuffer.releaseFence->numFds == 1) {
-            inputBuffer.release_fence = dup(result.inputBuffer.releaseFence->data[0]);
-        } else {
-            ALOGE("%s: Frame %d: Invalid release fence for input buffer, fd count is %d, not 1",
-                    __FUNCTION__, result.frameNumber, result.inputBuffer.releaseFence->numFds);
-            return;
-        }
-        r.input_buffer = &inputBuffer;
-    }
-
-    r.partial_result = result.partialResult;
-
-    processCaptureResult(states, &r);
-}
-
 void returnOutputBuffers(
         bool useHalBufManager,
         sp<NotificationListener> listener,
         const camera_stream_buffer_t *outputBuffers, size_t numBuffers,
-        nsecs_t timestamp, bool requested, nsecs_t requestTimeNs,
-        SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing,
-        const SurfaceMap& outputSurfaces,
+        nsecs_t timestamp, nsecs_t readoutTimestamp, bool requested,
+        nsecs_t requestTimeNs, SessionStatsBuilder& sessionStatsBuilder,
+        bool timestampIncreasing, const SurfaceMap& outputSurfaces,
         const CaptureResultExtras &inResultExtras,
-        ERROR_BUF_STRATEGY errorBufStrategy) {
+        ERROR_BUF_STRATEGY errorBufStrategy, int32_t transform) {
 
     for (size_t i = 0; i < numBuffers; i++)
     {
@@ -888,12 +732,12 @@
                 errorBufStrategy != ERROR_BUF_CACHE) {
             if (it != outputSurfaces.end()) {
                 res = stream->returnBuffer(
-                        outputBuffers[i], timestamp, timestampIncreasing, it->second,
-                        inResultExtras.frameNumber);
+                        outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
+                        it->second, inResultExtras.frameNumber, transform);
             } else {
                 res = stream->returnBuffer(
-                        outputBuffers[i], timestamp, timestampIncreasing, std::vector<size_t> (),
-                        inResultExtras.frameNumber);
+                        outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
+                        std::vector<size_t> (), inResultExtras.frameNumber, transform);
             }
         }
         // Note: stream may be deallocated at this point, if this buffer was
@@ -923,9 +767,9 @@
             // cancel the buffer
             camera_stream_buffer_t sb = outputBuffers[i];
             sb.status = CAMERA_BUFFER_STATUS_ERROR;
-            stream->returnBuffer(sb, /*timestamp*/0,
+            stream->returnBuffer(sb, /*timestamp*/0, /*readoutTimestamp*/0,
                     timestampIncreasing, std::vector<size_t> (),
-                    inResultExtras.frameNumber);
+                    inResultExtras.frameNumber, transform);
 
             if (listener != nullptr) {
                 CaptureResultExtras extras = inResultExtras;
@@ -941,14 +785,15 @@
 void returnAndRemovePendingOutputBuffers(bool useHalBufManager,
         sp<NotificationListener> listener, InFlightRequest& request,
         SessionStatsBuilder& sessionStatsBuilder) {
-    bool timestampIncreasing = !(request.zslCapture || request.hasInputBuffer);
+    bool timestampIncreasing =
+            !((request.zslCapture && request.stillCapture) || request.hasInputBuffer);
     returnOutputBuffers(useHalBufManager, listener,
             request.pendingOutputBuffers.array(),
             request.pendingOutputBuffers.size(),
-            request.shutterTimestamp, /*requested*/true,
-            request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
+            request.shutterTimestamp, request.shutterReadoutTimestamp,
+            /*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
             request.outputSurfaces, request.resultExtras,
-            request.errorBufStrategy);
+            request.errorBufStrategy, request.transform);
 
     // Remove error buffers that are not cached.
     for (auto iter = request.pendingOutputBuffers.begin();
@@ -1007,6 +852,7 @@
             }
 
             r.shutterTimestamp = msg.timestamp;
+            r.shutterReadoutTimestamp = msg.readout_timestamp;
             if (r.hasCallback) {
                 ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
                     states.cameraId.string(), __FUNCTION__,
@@ -1164,289 +1010,6 @@
     }
 }
 
-void notify(CaptureOutputStates& states,
-        const hardware::camera::device::V3_2::NotifyMsg& msg) {
-    using android::hardware::camera::device::V3_2::MsgType;
-    using android::hardware::camera::device::V3_2::ErrorCode;
-
-    ATRACE_CALL();
-    camera_notify_msg m;
-    switch (msg.type) {
-        case MsgType::ERROR:
-            m.type = CAMERA_MSG_ERROR;
-            m.message.error.frame_number = msg.msg.error.frameNumber;
-            if (msg.msg.error.errorStreamId >= 0) {
-                sp<Camera3StreamInterface> stream =
-                        states.outputStreams.get(msg.msg.error.errorStreamId);
-                if (stream == nullptr) {
-                    ALOGE("%s: Frame %d: Invalid error stream id %d", __FUNCTION__,
-                            m.message.error.frame_number, msg.msg.error.errorStreamId);
-                    return;
-                }
-                m.message.error.error_stream = stream->asHalStream();
-            } else {
-                m.message.error.error_stream = nullptr;
-            }
-            switch (msg.msg.error.errorCode) {
-                case ErrorCode::ERROR_DEVICE:
-                    m.message.error.error_code = CAMERA_MSG_ERROR_DEVICE;
-                    break;
-                case ErrorCode::ERROR_REQUEST:
-                    m.message.error.error_code = CAMERA_MSG_ERROR_REQUEST;
-                    break;
-                case ErrorCode::ERROR_RESULT:
-                    m.message.error.error_code = CAMERA_MSG_ERROR_RESULT;
-                    break;
-                case ErrorCode::ERROR_BUFFER:
-                    m.message.error.error_code = CAMERA_MSG_ERROR_BUFFER;
-                    break;
-            }
-            break;
-        case MsgType::SHUTTER:
-            m.type = CAMERA_MSG_SHUTTER;
-            m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
-            m.message.shutter.timestamp = msg.msg.shutter.timestamp;
-            break;
-    }
-    notify(states, &m);
-}
-
-void requestStreamBuffers(RequestBufferStates& states,
-        const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
-        hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb _hidl_cb) {
-    using android::hardware::camera::device::V3_2::BufferStatus;
-    using android::hardware::camera::device::V3_2::StreamBuffer;
-    using android::hardware::camera::device::V3_5::BufferRequestStatus;
-    using android::hardware::camera::device::V3_5::StreamBufferRet;
-    using android::hardware::camera::device::V3_5::StreamBufferRequestError;
-
-    std::lock_guard<std::mutex> lock(states.reqBufferLock);
-
-    hardware::hidl_vec<StreamBufferRet> bufRets;
-    if (!states.useHalBufManager) {
-        ALOGE("%s: Camera %s does not support HAL buffer management",
-                __FUNCTION__, states.cameraId.string());
-        _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
-        return;
-    }
-
-    SortedVector<int32_t> streamIds;
-    ssize_t sz = streamIds.setCapacity(bufReqs.size());
-    if (sz < 0 || static_cast<size_t>(sz) != bufReqs.size()) {
-        ALOGE("%s: failed to allocate memory for %zu buffer requests",
-                __FUNCTION__, bufReqs.size());
-        _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
-        return;
-    }
-
-    if (bufReqs.size() > states.outputStreams.size()) {
-        ALOGE("%s: too many buffer requests (%zu > # of output streams %zu)",
-                __FUNCTION__, bufReqs.size(), states.outputStreams.size());
-        _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
-        return;
-    }
-
-    // Check for repeated streamId
-    for (const auto& bufReq : bufReqs) {
-        if (streamIds.indexOf(bufReq.streamId) != NAME_NOT_FOUND) {
-            ALOGE("%s: Stream %d appear multiple times in buffer requests",
-                    __FUNCTION__, bufReq.streamId);
-            _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
-            return;
-        }
-        streamIds.add(bufReq.streamId);
-    }
-
-    if (!states.reqBufferIntf.startRequestBuffer()) {
-        ALOGE("%s: request buffer disallowed while camera service is configuring",
-                __FUNCTION__);
-        _hidl_cb(BufferRequestStatus::FAILED_CONFIGURING, bufRets);
-        return;
-    }
-
-    bufRets.resize(bufReqs.size());
-
-    bool allReqsSucceeds = true;
-    bool oneReqSucceeds = false;
-    for (size_t i = 0; i < bufReqs.size(); i++) {
-        const auto& bufReq = bufReqs[i];
-        auto& bufRet = bufRets[i];
-        int32_t streamId = bufReq.streamId;
-        sp<Camera3OutputStreamInterface> outputStream = states.outputStreams.get(streamId);
-        if (outputStream == nullptr) {
-            ALOGE("%s: Output stream id %d not found!", __FUNCTION__, streamId);
-            hardware::hidl_vec<StreamBufferRet> emptyBufRets;
-            _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, emptyBufRets);
-            states.reqBufferIntf.endRequestBuffer();
-            return;
-        }
-
-        bufRet.streamId = streamId;
-        if (outputStream->isAbandoned()) {
-            bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
-            allReqsSucceeds = false;
-            continue;
-        }
-
-        size_t handOutBufferCount = outputStream->getOutstandingBuffersCount();
-        uint32_t numBuffersRequested = bufReq.numBuffersRequested;
-        size_t totalHandout = handOutBufferCount + numBuffersRequested;
-        uint32_t maxBuffers = outputStream->asHalStream()->max_buffers;
-        if (totalHandout > maxBuffers) {
-            // Not able to allocate enough buffer. Exit early for this stream
-            ALOGE("%s: request too much buffers for stream %d: at HAL: %zu + requesting: %d"
-                    " > max: %d", __FUNCTION__, streamId, handOutBufferCount,
-                    numBuffersRequested, maxBuffers);
-            bufRet.val.error(StreamBufferRequestError::MAX_BUFFER_EXCEEDED);
-            allReqsSucceeds = false;
-            continue;
-        }
-
-        hardware::hidl_vec<StreamBuffer> tmpRetBuffers(numBuffersRequested);
-        bool currentReqSucceeds = true;
-        std::vector<camera_stream_buffer_t> streamBuffers(numBuffersRequested);
-        size_t numAllocatedBuffers = 0;
-        size_t numPushedInflightBuffers = 0;
-        for (size_t b = 0; b < numBuffersRequested; b++) {
-            camera_stream_buffer_t& sb = streamBuffers[b];
-            // Since this method can run concurrently with request thread
-            // We need to update the wait duration everytime we call getbuffer
-            nsecs_t waitDuration =  states.reqBufferIntf.getWaitDuration();
-            status_t res = outputStream->getBuffer(&sb, waitDuration);
-            if (res != OK) {
-                if (res == NO_INIT || res == DEAD_OBJECT) {
-                    ALOGV("%s: Can't get output buffer for stream %d: %s (%d)",
-                            __FUNCTION__, streamId, strerror(-res), res);
-                    bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
-                    states.sessionStatsBuilder.stopCounter(streamId);
-                } else {
-                    ALOGE("%s: Can't get output buffer for stream %d: %s (%d)",
-                            __FUNCTION__, streamId, strerror(-res), res);
-                    if (res == TIMED_OUT || res == NO_MEMORY) {
-                        bufRet.val.error(StreamBufferRequestError::NO_BUFFER_AVAILABLE);
-                    } else {
-                        bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
-                    }
-                }
-                currentReqSucceeds = false;
-                break;
-            }
-            numAllocatedBuffers++;
-
-            buffer_handle_t *buffer = sb.buffer;
-            auto pair = states.bufferRecordsIntf.getBufferId(*buffer, streamId);
-            bool isNewBuffer = pair.first;
-            uint64_t bufferId = pair.second;
-            StreamBuffer& hBuf = tmpRetBuffers[b];
-
-            hBuf.streamId = streamId;
-            hBuf.bufferId = bufferId;
-            hBuf.buffer = (isNewBuffer) ? *buffer : nullptr;
-            hBuf.status = BufferStatus::OK;
-            hBuf.releaseFence = nullptr;
-
-            native_handle_t *acquireFence = nullptr;
-            if (sb.acquire_fence != -1) {
-                acquireFence = native_handle_create(1,0);
-                acquireFence->data[0] = sb.acquire_fence;
-            }
-            hBuf.acquireFence.setTo(acquireFence, /*shouldOwn*/true);
-            hBuf.releaseFence = nullptr;
-
-            res = states.bufferRecordsIntf.pushInflightRequestBuffer(bufferId, buffer, streamId);
-            if (res != OK) {
-                ALOGE("%s: Can't get register request buffers for stream %d: %s (%d)",
-                        __FUNCTION__, streamId, strerror(-res), res);
-                bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
-                currentReqSucceeds = false;
-                break;
-            }
-            numPushedInflightBuffers++;
-        }
-        if (currentReqSucceeds) {
-            bufRet.val.buffers(std::move(tmpRetBuffers));
-            oneReqSucceeds = true;
-        } else {
-            allReqsSucceeds = false;
-            for (size_t b = 0; b < numPushedInflightBuffers; b++) {
-                StreamBuffer& hBuf = tmpRetBuffers[b];
-                buffer_handle_t* buffer;
-                status_t res = states.bufferRecordsIntf.popInflightRequestBuffer(
-                        hBuf.bufferId, &buffer);
-                if (res != OK) {
-                    SET_ERR("%s: popInflightRequestBuffer failed for stream %d: %s (%d)",
-                            __FUNCTION__, streamId, strerror(-res), res);
-                }
-            }
-            for (size_t b = 0; b < numAllocatedBuffers; b++) {
-                camera_stream_buffer_t& sb = streamBuffers[b];
-                sb.acquire_fence = -1;
-                sb.status = CAMERA_BUFFER_STATUS_ERROR;
-            }
-            returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
-                    streamBuffers.data(), numAllocatedBuffers, 0, /*requested*/false,
-                    /*requestTimeNs*/0, states.sessionStatsBuilder);
-        }
-    }
-
-    _hidl_cb(allReqsSucceeds ? BufferRequestStatus::OK :
-            oneReqSucceeds ? BufferRequestStatus::FAILED_PARTIAL :
-                             BufferRequestStatus::FAILED_UNKNOWN,
-            bufRets);
-    states.reqBufferIntf.endRequestBuffer();
-}
-
-void returnStreamBuffers(ReturnBufferStates& states,
-        const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
-    if (!states.useHalBufManager) {
-        ALOGE("%s: Camera %s does not support HAL buffer managerment",
-                __FUNCTION__, states.cameraId.string());
-        return;
-    }
-
-    for (const auto& buf : buffers) {
-        if (buf.bufferId == BUFFER_ID_NO_BUFFER) {
-            ALOGE("%s: cannot return a buffer without bufferId", __FUNCTION__);
-            continue;
-        }
-
-        buffer_handle_t* buffer;
-        status_t res = states.bufferRecordsIntf.popInflightRequestBuffer(buf.bufferId, &buffer);
-
-        if (res != OK) {
-            ALOGE("%s: cannot find in-flight buffer %" PRIu64 " for stream %d",
-                    __FUNCTION__, buf.bufferId, buf.streamId);
-            continue;
-        }
-
-        camera_stream_buffer_t streamBuffer;
-        streamBuffer.buffer = buffer;
-        streamBuffer.status = CAMERA_BUFFER_STATUS_ERROR;
-        streamBuffer.acquire_fence = -1;
-        streamBuffer.release_fence = -1;
-
-        if (buf.releaseFence == nullptr) {
-            streamBuffer.release_fence = -1;
-        } else if (buf.releaseFence->numFds == 1) {
-            streamBuffer.release_fence = dup(buf.releaseFence->data[0]);
-        } else {
-            ALOGE("%s: Invalid release fence, fd count is %d, not 1",
-                    __FUNCTION__, buf.releaseFence->numFds);
-            continue;
-        }
-
-        sp<Camera3StreamInterface> stream = states.outputStreams.get(buf.streamId);
-        if (stream == nullptr) {
-            ALOGE("%s: Output stream id %d not found!", __FUNCTION__, buf.streamId);
-            continue;
-        }
-        streamBuffer.stream = stream->asHalStream();
-        returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
-                &streamBuffer, /*size*/1, /*timestamp*/ 0, /*requested*/false,
-                /*requestTimeNs*/0, states.sessionStatsBuilder);
-    }
-}
-
 void flushInflightRequests(FlushInflightReqStates& states) {
     ATRACE_CALL();
     { // First return buffers cached in inFlightMap
@@ -1456,9 +1019,10 @@
             returnOutputBuffers(
                 states.useHalBufManager, states.listener,
                 request.pendingOutputBuffers.array(),
-                request.pendingOutputBuffers.size(), 0, /*requested*/true,
-                request.requestTimeNs, states.sessionStatsBuilder, /*timestampIncreasing*/true,
-                request.outputSurfaces, request.resultExtras, request.errorBufStrategy);
+                request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
+                /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
+                /*timestampIncreasing*/true, request.outputSurfaces, request.resultExtras,
+                request.errorBufStrategy);
             ALOGW("%s: Frame %d |  Timestamp: %" PRId64 ", metadata"
                     " arrived: %s, buffers left: %d.\n", __FUNCTION__,
                     states.inflightMap.keyAt(idx), request.shutterTimestamp,
@@ -1530,7 +1094,7 @@
                 switch (halStream->stream_type) {
                     case CAMERA_STREAM_OUTPUT:
                         res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0,
-                                /*timestampIncreasing*/true,
+                                /*readoutTimestamp*/0, /*timestampIncreasing*/true,
                                 std::vector<size_t> (), frameNumber);
                         if (res != OK) {
                             ALOGE("%s: Can't return output buffer for frame %d to"
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 142889a..4d1eb75 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -26,6 +26,8 @@
 
 #include <common/CameraDeviceBase.h>
 
+#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
+
 #include "device3/BufferUtils.h"
 #include "device3/DistortionMapper.h"
 #include "device3/ZoomRatioMapper.h"
@@ -38,70 +40,8 @@
 
 namespace android {
 
-using ResultMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
-
 namespace camera3 {
 
-    typedef struct camera_stream_configuration {
-        uint32_t num_streams;
-        camera_stream_t **streams;
-        uint32_t operation_mode;
-        bool input_is_multi_resolution;
-    } camera_stream_configuration_t;
-
-    typedef struct camera_capture_request {
-        uint32_t frame_number;
-        const camera_metadata_t *settings;
-        camera_stream_buffer_t *input_buffer;
-        uint32_t num_output_buffers;
-        const camera_stream_buffer_t *output_buffers;
-        uint32_t num_physcam_settings;
-        const char **physcam_id;
-        const camera_metadata_t **physcam_settings;
-        int32_t input_width;
-        int32_t input_height;
-    } camera_capture_request_t;
-
-    typedef struct camera_capture_result {
-        uint32_t frame_number;
-        const camera_metadata_t *result;
-        uint32_t num_output_buffers;
-        const camera_stream_buffer_t *output_buffers;
-        const camera_stream_buffer_t *input_buffer;
-        uint32_t partial_result;
-        uint32_t num_physcam_metadata;
-        const char **physcam_ids;
-        const camera_metadata_t **physcam_metadata;
-    } camera_capture_result_t;
-
-    typedef struct camera_shutter_msg {
-        uint32_t frame_number;
-        uint64_t timestamp;
-    } camera_shutter_msg_t;
-
-    typedef struct camera_error_msg {
-        uint32_t frame_number;
-        camera_stream_t *error_stream;
-        int error_code;
-    } camera_error_msg_t;
-
-    typedef enum camera_error_msg_code {
-        CAMERA_MSG_ERROR_DEVICE = 1,
-        CAMERA_MSG_ERROR_REQUEST = 2,
-        CAMERA_MSG_ERROR_RESULT = 3,
-        CAMERA_MSG_ERROR_BUFFER = 4,
-        CAMERA_MSG_NUM_ERRORS
-    } camera_error_msg_code_t;
-
-    typedef struct camera_notify_msg {
-        int type;
-
-        union {
-            camera_error_msg_t error;
-            camera_shutter_msg_t shutter;
-        } message;
-    } camera_notify_msg_t;
-
     /**
      * Helper methods shared between Camera3Device/Camera3OfflineSession for HAL callbacks
      */
@@ -112,13 +52,15 @@
             bool useHalBufManager,
             sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
             const camera_stream_buffer_t *outputBuffers,
-            size_t numBuffers, nsecs_t timestamp, bool requested, nsecs_t requestTimeNs,
+            size_t numBuffers, nsecs_t timestamp,
+            nsecs_t readoutTimestamp, bool requested, nsecs_t requestTimeNs,
             SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing = true,
             // The following arguments are only meant for surface sharing use case
             const SurfaceMap& outputSurfaces = SurfaceMap{},
             // Used to send buffer error callback when failing to return buffer
             const CaptureResultExtras &resultExtras = CaptureResultExtras{},
-            ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN);
+            ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN,
+            int32_t transform = -1);
 
     // helper function to return the output buffers to output streams, and
     // remove the returned buffers from the inflight request's pending buffers
@@ -153,7 +95,6 @@
         const metadata_vendor_id_t vendorTagId;
         const CameraMetadata& deviceInfo;
         const std::unordered_map<std::string, CameraMetadata>& physicalDeviceInfoMap;
-        std::unique_ptr<ResultMetadataQueue>& fmq;
         std::unordered_map<std::string, camera3::DistortionMapper>& distortionMappers;
         std::unordered_map<std::string, camera3::ZoomRatioMapper>& zoomRatioMappers;
         std::unordered_map<std::string, camera3::RotateAndCropMapper>& rotateAndCropMappers;
@@ -165,19 +106,11 @@
         SetErrorInterface& setErrIntf;
         InflightRequestUpdateInterface& inflightIntf;
         BufferRecordsInterface& bufferRecordsIntf;
+        bool legacyClient;
     };
 
-    // Handle one capture result. Assume callers hold the lock to serialize all
-    // processCaptureResult calls
-    void processOneCaptureResultLocked(
-            CaptureOutputStates& states,
-            const hardware::camera::device::V3_2::CaptureResult& result,
-            const hardware::hidl_vec<
-                    hardware::camera::device::V3_4::PhysicalCameraMetadata> physicalCameraMetadata);
-
-    // Handle one notify message
-    void notify(CaptureOutputStates& states,
-            const hardware::camera::device::V3_2::NotifyMsg& msg);
+    void processCaptureResult(CaptureOutputStates& states, const camera_capture_result *result);
+    void notify(CaptureOutputStates& states, const camera_notify_msg *msg);
 
     struct RequestBufferStates {
         const String8& cameraId;
@@ -190,10 +123,6 @@
         RequestBufferInterface& reqBufferIntf;
     };
 
-    void requestStreamBuffers(RequestBufferStates& states,
-            const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
-            hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb _hidl_cb);
-
     struct ReturnBufferStates {
         const String8& cameraId;
         const bool useHalBufManager;
@@ -202,9 +131,6 @@
         BufferRecordsInterface& bufferRecordsIntf;
     };
 
-    void returnStreamBuffers(ReturnBufferStates& states,
-            const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers);
-
     struct FlushInflightReqStates {
         const String8& cameraId;
         std::mutex& inflightLock;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h b/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
new file mode 100644
index 0000000..7dc8e10
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_OUTPUT_TEMPLUTILS_H
+#define ANDROID_SERVERS_CAMERA3_OUTPUT_TEMPLUTILS_H
+
+#include <inttypes.h>
+
+#include <utils/Log.h>
+#include <utils/SortedVector.h>
+#include <utils/Trace.h>
+
+#include <aidl/android/hardware/common/NativeHandle.h>
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+
+#include <android/hardware/camera/device/3.4/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.5/ICameraDeviceSession.h>
+
+#include <camera/CameraUtils.h>
+#include <camera_metadata_hidden.h>
+
+#include "device3/Camera3OutputUtils.h"
+
+#include "system/camera_metadata.h"
+
+using namespace android::camera3;
+using namespace android::hardware::camera;
+
+namespace android {
+namespace camera3 {
+
+template <class BufferStatusType>
+camera_buffer_status_t mapBufferStatus(BufferStatusType status) {
+    switch (status) {
+        case BufferStatusType::OK: return CAMERA_BUFFER_STATUS_OK;
+        case BufferStatusType::ERROR: return CAMERA_BUFFER_STATUS_ERROR;
+    }
+    return CAMERA_BUFFER_STATUS_ERROR;
+}
+
+inline void readBufferFromVec(hardware::hidl_vec<uint8_t> &dst,
+        const hardware::hidl_vec<uint8_t> &src) {
+    // Not cloning here since that will be done in processCaptureResult while
+    // assigning to CameraMetadata.
+    dst.setToExternal(const_cast<uint8_t *>(src.data()), src.size());
+}
+
+inline void readBufferFromVec(std::vector<uint8_t> &dst, const std::vector<uint8_t> &src) {
+    // TODO: Check if we're really supposed to copy
+    dst = src;
+}
+// Reading one camera metadata from result argument via fmq or from the result
+// Assuming the fmq is protected by a lock already
+template <class FmqType, class MetadataType>
+status_t readOneCameraMetadataLockedT(
+        std::unique_ptr<FmqType>& fmq,
+        uint64_t fmqResultSize,
+        MetadataType& resultMetadata,
+        const MetadataType& result) {
+    if (fmqResultSize > 0) {
+        resultMetadata.resize(fmqResultSize);
+        if (fmq == nullptr) {
+            return NO_MEMORY; // logged in initialize()
+        }
+        if (!fmq->read(resultMetadata.data(), fmqResultSize)) {
+            ALOGE("%s: Cannot read camera metadata from fmq, size = %" PRIu64,
+                    __FUNCTION__, fmqResultSize);
+            return INVALID_OPERATION;
+        }
+    } else {
+        readBufferFromVec(resultMetadata, result);
+    }
+
+    if (resultMetadata.size() != 0) {
+        status_t res;
+        const camera_metadata_t* metadata =
+                reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
+        size_t expected_metadata_size = resultMetadata.size();
+        if ((res = validate_camera_metadata_structure(metadata, &expected_metadata_size)) != OK) {
+            ALOGE("%s: Invalid camera metadata received by camera service from HAL: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return INVALID_OPERATION;
+        }
+    }
+
+    return OK;
+}
+
+inline bool isHandleNull(const hardware::hidl_handle &handle) {
+    return handle == nullptr;
+}
+
+inline bool isHandleNull(const aidl::android::hardware::common::NativeHandle &handle) {
+    return (handle.fds.size() == 0) && (handle.ints.size() == 0);
+}
+
+inline size_t numFdsInHandle(const hardware::hidl_handle &handle) {
+    return handle->numFds;
+}
+
+inline size_t numFdsInHandle(const aidl::android::hardware::common::NativeHandle &handle) {
+    return handle.fds.size();
+}
+
+inline int32_t getHandleFirstFd(const hardware::hidl_handle &handle) {
+    if (handle->numFds != 1) {
+        return -1;
+    }
+    return handle->data[0];
+}
+
+inline int32_t getHandleFirstFd(const aidl::android::hardware::common::NativeHandle &handle) {
+    if (handle.fds.size() != 1) {
+        return -1;
+    }
+    return handle.fds[0].get();
+}
+
+template <class StatesType, class CaptureResultType, class PhysMetadataType, class MetadataType,
+        class FmqType, class BufferStatusType>
+void processOneCaptureResultLockedT(
+        StatesType& states,
+        const CaptureResultType& result,
+        const PhysMetadataType &physicalCameraMetadata) {
+    std::unique_ptr<FmqType>& fmq = states.fmq;
+    BufferRecordsInterface& bufferRecords = states.bufferRecordsIntf;
+    camera_capture_result r;
+    status_t res;
+    r.frame_number = result.frameNumber;
+
+    // Read and validate the result metadata.
+    MetadataType resultMetadata;
+    res = readOneCameraMetadataLockedT(
+            fmq, result.fmqResultSize,
+            resultMetadata, result.result);
+    if (res != OK) {
+        ALOGE("%s: Frame %d: Failed to read capture result metadata",
+                __FUNCTION__, result.frameNumber);
+        return;
+    }
+    r.result = reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
+
+    // Read and validate physical camera metadata
+    size_t physResultCount = physicalCameraMetadata.size();
+    std::vector<const char*> physCamIds(physResultCount);
+    std::vector<const camera_metadata_t *> phyCamMetadatas(physResultCount);
+    std::vector<MetadataType> physResultMetadata;
+    physResultMetadata.resize(physResultCount);
+    for (size_t i = 0; i < physicalCameraMetadata.size(); i++) {
+        res = readOneCameraMetadataLockedT(fmq, physicalCameraMetadata[i].fmqMetadataSize,
+                physResultMetadata[i], physicalCameraMetadata[i].metadata);
+        if (res != OK) {
+            ALOGE("%s: Frame %d: Failed to read capture result metadata for camera %s",
+                    __FUNCTION__, result.frameNumber,
+                    physicalCameraMetadata[i].physicalCameraId.c_str());
+            return;
+        }
+        physCamIds[i] = physicalCameraMetadata[i].physicalCameraId.c_str();
+        phyCamMetadatas[i] = reinterpret_cast<const camera_metadata_t*>(
+                physResultMetadata[i].data());
+    }
+    r.num_physcam_metadata = physResultCount;
+    r.physcam_ids = physCamIds.data();
+    r.physcam_metadata = phyCamMetadatas.data();
+
+    std::vector<camera_stream_buffer_t> outputBuffers(result.outputBuffers.size());
+    std::vector<buffer_handle_t> outputBufferHandles(result.outputBuffers.size());
+    for (size_t i = 0; i < result.outputBuffers.size(); i++) {
+        auto& bDst = outputBuffers[i];
+        const auto &bSrc = result.outputBuffers[i];
+
+        sp<Camera3StreamInterface> stream = states.outputStreams.get(bSrc.streamId);
+        if (stream == nullptr) {
+            ALOGE("%s: Frame %d: Buffer %zu: Invalid output stream id %d",
+                    __FUNCTION__, result.frameNumber, i, bSrc.streamId);
+            return;
+        }
+        bDst.stream = stream->asHalStream();
+
+        bool noBufferReturned = false;
+        buffer_handle_t *buffer = nullptr;
+        if (states.useHalBufManager) {
+            // This is suspicious most of the time but can be correct during flush, where the
+            // HAL has to return a capture result before a buffer is requested
+            if (bSrc.bufferId == BUFFER_ID_NO_BUFFER) {
+                if (bSrc.status == BufferStatusType::OK) {
+                    ALOGE("%s: Frame %d: Buffer %zu: No bufferId for stream %d",
+                            __FUNCTION__, result.frameNumber, i, bSrc.streamId);
+                    // Still proceeds so other buffers can be returned
+                }
+                noBufferReturned = true;
+            }
+            if (noBufferReturned) {
+                res = OK;
+            } else {
+                res = bufferRecords.popInflightRequestBuffer(bSrc.bufferId, &buffer);
+            }
+        } else {
+            res = bufferRecords.popInflightBuffer(result.frameNumber, bSrc.streamId, &buffer);
+        }
+
+        if (res != OK) {
+            ALOGE("%s: Frame %d: Buffer %zu: No in-flight buffer for stream %d",
+                    __FUNCTION__, result.frameNumber, i, bSrc.streamId);
+            return;
+        }
+
+        bDst.buffer = buffer;
+        bDst.status = mapBufferStatus<BufferStatusType>(bSrc.status);
+        bDst.acquire_fence = -1;
+        if (isHandleNull(bSrc.releaseFence)) {
+            bDst.release_fence = -1;
+        } else if (numFdsInHandle(bSrc.releaseFence) == 1) {
+            if (noBufferReturned) {
+                ALOGE("%s: got releaseFence without output buffer!", __FUNCTION__);
+            }
+            bDst.release_fence = dup(getHandleFirstFd(bSrc.releaseFence));
+        } else {
+            ALOGE("%s: Frame %d: Invalid release fence for buffer %zu, fd count is %d, not 1",
+                    __FUNCTION__, result.frameNumber, i, (int)numFdsInHandle(bSrc.releaseFence));
+            return;
+        }
+    }
+    r.num_output_buffers = outputBuffers.size();
+    r.output_buffers = outputBuffers.data();
+
+    camera_stream_buffer_t inputBuffer;
+    if (result.inputBuffer.streamId == -1) {
+        r.input_buffer = nullptr;
+    } else {
+        if (states.inputStream->getId() != result.inputBuffer.streamId) {
+            ALOGE("%s: Frame %d: Invalid input stream id %d", __FUNCTION__,
+                    result.frameNumber, result.inputBuffer.streamId);
+            return;
+        }
+        inputBuffer.stream = states.inputStream->asHalStream();
+        buffer_handle_t *buffer;
+        res = bufferRecords.popInflightBuffer(result.frameNumber, result.inputBuffer.streamId,
+                &buffer);
+        if (res != OK) {
+            ALOGE("%s: Frame %d: Input buffer: No in-flight buffer for stream %d",
+                    __FUNCTION__, result.frameNumber, result.inputBuffer.streamId);
+            return;
+        }
+        inputBuffer.buffer = buffer;
+        inputBuffer.status = mapBufferStatus<BufferStatusType>(result.inputBuffer.status);
+        inputBuffer.acquire_fence = -1;
+        if (isHandleNull(result.inputBuffer.releaseFence)) {
+            inputBuffer.release_fence = -1;
+        } else if (numFdsInHandle(result.inputBuffer.releaseFence) == 1) {
+            inputBuffer.release_fence = dup(getHandleFirstFd(result.inputBuffer.releaseFence));
+        } else {
+            ALOGE("%s: Frame %d: Invalid release fence for input buffer, fd count is %d, not 1",
+                    __FUNCTION__, result.frameNumber,
+                    (int)numFdsInHandle(result.inputBuffer.releaseFence));
+            return;
+        }
+        r.input_buffer = &inputBuffer;
+    }
+
+    r.partial_result = result.partialResult;
+
+    processCaptureResult(states, &r);
+}
+
+template <class VecStreamBufferType>
+void returnStreamBuffersT(ReturnBufferStates& states,
+        const VecStreamBufferType& buffers) {
+    if (!states.useHalBufManager) {
+        ALOGE("%s: Camera %s does not support HAL buffer management",
+                __FUNCTION__, states.cameraId.string());
+        return;
+    }
+
+    for (const auto& buf : buffers) {
+        if (buf.bufferId == BUFFER_ID_NO_BUFFER) {
+            ALOGE("%s: cannot return a buffer without bufferId", __FUNCTION__);
+            continue;
+        }
+
+        buffer_handle_t* buffer;
+        status_t res = states.bufferRecordsIntf.popInflightRequestBuffer(buf.bufferId, &buffer);
+
+        if (res != OK) {
+            ALOGE("%s: cannot find in-flight buffer %" PRIu64 " for stream %d",
+                    __FUNCTION__, buf.bufferId, buf.streamId);
+            continue;
+        }
+
+        camera_stream_buffer_t streamBuffer;
+        streamBuffer.buffer = buffer;
+        streamBuffer.status = CAMERA_BUFFER_STATUS_ERROR;
+        streamBuffer.acquire_fence = -1;
+        streamBuffer.release_fence = -1;
+
+        if (isHandleNull(buf.releaseFence)) {
+            streamBuffer.release_fence = -1;
+        } else if (numFdsInHandle(buf.releaseFence) == 1) {
+            streamBuffer.release_fence = dup(getHandleFirstFd(buf.releaseFence));
+        } else {
+            ALOGE("%s: Invalid release fence, fd count is %d, not 1",
+                    __FUNCTION__, (int)numFdsInHandle(buf.releaseFence));
+            continue;
+        }
+
+        sp<Camera3StreamInterface> stream = states.outputStreams.get(buf.streamId);
+        if (stream == nullptr) {
+            ALOGE("%s: Output stream id %d not found!", __FUNCTION__, buf.streamId);
+            continue;
+        }
+        streamBuffer.stream = stream->asHalStream();
+        returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
+                &streamBuffer, /*size*/1, /*timestamp*/ 0, /*readoutTimestamp*/0,
+                /*requested*/false, /*requestTimeNs*/0, states.sessionStatsBuilder);
+    }
+}
+
+} // namespace camera3
+} // namespace android
+
+#endif
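Note (editor, not part of the patch): the new Camera3OutputUtilsTemplated.h header shares one capture-result path between the HIDL and AIDL transports by routing every type-specific operation through small overloaded helpers (isHandleNull, numFdsInHandle, getHandleFirstFd, readBufferFromVec) so the templates never name a concrete handle type. A minimal, self-contained sketch of that overload-based technique follows; the HidlishHandle/AidlishHandle types and releaseFenceFd helper are hypothetical stand-ins for illustration only, not the real HIDL/AIDL types used above.

    // Illustrative sketch only: overload-based abstraction over two handle
    // representations, mirroring the pattern in Camera3OutputUtilsTemplated.h.
    // All types here are hypothetical stand-ins, not Android APIs.
    #include <cstdio>
    #include <vector>

    struct HidlishHandle {          // stand-in for a hidl_handle-like type
        std::vector<int> fds;
    };
    struct AidlishHandle {          // stand-in for an AIDL NativeHandle-like type
        std::vector<int> fds;
        std::vector<int> ints;
    };

    // One overload per transport; the template below never names the concrete type.
    inline bool isHandleNull(const HidlishHandle& h) { return h.fds.empty(); }
    inline bool isHandleNull(const AidlishHandle& h) {
        return h.fds.empty() && h.ints.empty();
    }
    inline int getHandleFirstFd(const HidlishHandle& h) {
        return h.fds.size() == 1 ? h.fds[0] : -1;
    }
    inline int getHandleFirstFd(const AidlishHandle& h) {
        return h.fds.size() == 1 ? h.fds[0] : -1;
    }

    // Shared logic written once; overload resolution picks the right helper.
    template <class HandleType>
    int releaseFenceFd(const HandleType& releaseFence) {
        if (isHandleNull(releaseFence)) {
            return -1;              // no fence attached
        }
        return getHandleFirstFd(releaseFence);
    }

    int main() {
        HidlishHandle hidl{{42}};
        AidlishHandle aidl{};
        std::printf("hidl fd=%d, aidl fd=%d\n",
                    releaseFenceFd(hidl), releaseFenceFd(aidl));
        return 0;
    }

The payoff of this design choice is visible in the templated processOneCaptureResultLockedT above: the release-fence handling compiles unchanged for either transport, and adding a new transport only requires another overload set.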
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 15cf7f4..0e2671a 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -33,10 +33,10 @@
         camera_stream_rotation_t rotation,
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool useHalBufManager) :
+        int setId, bool useHalBufManager, int dynamicProfile) :
         Camera3OutputStream(id, CAMERA_STREAM_OUTPUT, width, height,
                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
-                            consumerUsage, timestampOffset, setId),
+                            consumerUsage, timestampOffset, setId, dynamicProfile),
         mUseHalBufManager(useHalBufManager) {
     size_t consumerCount = std::min(surfaces.size(), kMaxOutputs);
     if (surfaces.size() > consumerCount) {
@@ -67,7 +67,7 @@
     }
 
     res = mStreamSplitter->connect(initialSurfaces, usage, mUsage, camera_stream::max_buffers,
-            getWidth(), getHeight(), getFormat(), &mConsumer);
+            getWidth(), getHeight(), getFormat(), &mConsumer, camera_stream::dynamic_range_profile);
     if (res != OK) {
         ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
                 __FUNCTION__, strerror(-res), res);
@@ -247,7 +247,7 @@
         return res;
     }
 
-    res = configureConsumerQueueLocked();
+    res = configureConsumerQueueLocked(false/*allowPreviewScheduler*/);
     if (res != OK) {
         ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
         return res;
@@ -388,13 +388,15 @@
         bool sizeMismatch = ((static_cast<uint32_t>(infoIt.width) != getWidth()) ||
                                 (static_cast<uint32_t> (infoIt.height) != getHeight())) ?
                                 true : false;
-        if ((imgReaderUsage && sizeMismatch) ||
+        bool dynamicRangeMismatch = dynamic_range_profile != infoIt.dynamicRangeProfile;
+        if ((imgReaderUsage && sizeMismatch) || dynamicRangeMismatch ||
                 (infoIt.format != getOriginalFormat() && infoIt.format != getFormat()) ||
                 (infoIt.dataSpace != getDataSpace() &&
                  infoIt.dataSpace != getOriginalDataSpace())) {
-            ALOGE("%s: Shared surface parameters format: 0x%x dataSpace: 0x%x "
-                    " don't match source stream format: 0x%x  dataSpace: 0x%x", __FUNCTION__,
-                    infoIt.format, infoIt.dataSpace, getFormat(), getDataSpace());
+            ALOGE("%s: Shared surface parameters format: 0x%x dataSpace: 0x%x dynamic range: 0x%x"
+                    " don't match source stream format: 0x%x dataSpace: 0x%x dynamic range: 0x%x",
+                    __FUNCTION__, infoIt.format, infoIt.dataSpace, infoIt.dynamicRangeProfile,
+                    getFormat(), getDataSpace(), dynamic_range_profile);
             return BAD_VALUE;
         }
     }
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index 4b6341b..fafa26f 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -40,7 +40,8 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             int setId = CAMERA3_STREAM_SET_ID_INVALID,
-            bool useHalBufManager = false);
+            bool useHalBufManager = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
 
     virtual ~Camera3SharedOutputStream();
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 02b6585..83f9a98 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <vector>
+#include "system/window.h"
 #define LOG_TAG "Camera3-Stream"
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
@@ -23,6 +25,7 @@
 #include "device3/Camera3Stream.h"
 #include "device3/StatusTracker.h"
 #include "utils/TraceHFR.h"
+#include "ui/GraphicBufferMapper.h"
 
 #include <cutils/properties.h>
 
@@ -51,7 +54,7 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
     camera_stream(),
     mId(id),
     mSetId(setId),
@@ -87,6 +90,7 @@
     camera_stream::max_buffers = 0;
     camera_stream::physical_camera_id = mPhysicalCameraId.string();
     camera_stream::sensor_pixel_modes_used = sensorPixelModesUsed;
+    camera_stream::dynamic_range_profile = dynamicRangeProfile;
 
     if ((format == HAL_PIXEL_FORMAT_BLOB || format == HAL_PIXEL_FORMAT_RAW_OPAQUE) &&
             maxSize == 0) {
@@ -147,6 +151,10 @@
     return mOriginalFormat;
 }
 
+int Camera3Stream::getDynamicRangeProfile() const {
+    return camera_stream::dynamic_range_profile;
+}
+
 void Camera3Stream::setDataSpaceOverride(bool dataSpaceOverridden) {
     mDataSpaceOverridden = dataSpaceOverridden;
 }
@@ -557,7 +565,8 @@
     for (size_t i = 0; i < mPreparedBufferIdx; i++) {
         mPreparedBuffers.editItemAt(i).release_fence = -1;
         mPreparedBuffers.editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
-        returnBufferLocked(mPreparedBuffers[i], 0);
+        returnBufferLocked(mPreparedBuffers[i], /*timestamp*/0, /*readoutTimestamp*/0,
+                /*transform*/ -1);
     }
     mPreparedBuffers.clear();
     mPreparedBufferIdx = 0;
@@ -713,8 +722,8 @@
 }
 
 status_t Camera3Stream::returnBuffer(const camera_stream_buffer &buffer,
-        nsecs_t timestamp, bool timestampIncreasing,
-         const std::vector<size_t>& surface_ids, uint64_t frameNumber) {
+        nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing,
+         const std::vector<size_t>& surface_ids, uint64_t frameNumber, int32_t transform) {
     ATRACE_HFR_CALL();
     Mutex::Autolock l(mLock);
 
@@ -743,7 +752,7 @@
      *
      * Do this for getBuffer as well.
      */
-    status_t res = returnBufferLocked(b, timestamp, surface_ids);
+    status_t res = returnBufferLocked(b, timestamp, readoutTimestamp, transform, surface_ids);
     if (res == OK) {
         fireBufferListenersLocked(b, /*acquired*/false, /*output*/true, timestamp, frameNumber);
     }
@@ -931,7 +940,7 @@
 }
 
 status_t Camera3Stream::returnBufferLocked(const camera_stream_buffer &,
-                                           nsecs_t, const std::vector<size_t>&) {
+                                           nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
     ALOGE("%s: This type of stream does not support output", __FUNCTION__);
     return INVALID_OPERATION;
 }
@@ -1077,6 +1086,52 @@
     return res;
 }
 
+void Camera3Stream::queueHDRMetadata(buffer_handle_t buffer, sp<ANativeWindow>& anw,
+        int dynamicRangeProfile) {
+    auto& mapper = GraphicBufferMapper::get();
+    switch (dynamicRangeProfile) {
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10: {
+            std::optional<ui::Smpte2086> smpte2086;
+            auto res = mapper.getSmpte2086(buffer, &smpte2086);
+            if ((res == OK) && smpte2086.has_value()) {
+                const auto& metaValue = smpte2086.value();
+                android_smpte2086_metadata meta = {
+                    .displayPrimaryRed.x = metaValue.primaryRed.x,
+                    .displayPrimaryRed.y = metaValue.primaryRed.y,
+                    .displayPrimaryGreen.x = metaValue.primaryGreen.x,
+                    .displayPrimaryGreen.y = metaValue.primaryGreen.y,
+                    .displayPrimaryBlue.x = metaValue.primaryBlue.x,
+                    .displayPrimaryBlue.y = metaValue.primaryBlue.y,
+                    .whitePoint.x = metaValue.whitePoint.x,
+                    .whitePoint.y = metaValue.whitePoint.y,
+                    .maxLuminance = metaValue.maxLuminance,
+                    .minLuminance = metaValue.minLuminance};
+                native_window_set_buffers_smpte2086_metadata(anw.get(), &meta);
+            } else {
+                ALOGE("%s Couldn't retrieve Smpte2086 metadata %s (%d)",
+                        __FUNCTION__, strerror(-res), res);
+            }
+            break;
+        }
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS: {
+            std::optional<std::vector<uint8_t>> smpte2094_40;
+            auto res = mapper.getSmpte2094_40(buffer, &smpte2094_40);
+            if ((res == OK) && smpte2094_40.has_value()) {
+                native_window_set_buffers_hdr10_plus_metadata(anw.get(),
+                        smpte2094_40.value().size(), smpte2094_40.value().data());
+            } else {
+                ALOGE("%s Couldn't retrieve Smpte2094_40 metadata %s (%d)",
+                        __FUNCTION__, strerror(-res), res);
+            }
+            break;
+        }
+        default:
+            // No-op
+            break;
+    }
+}
+
+
 }; // namespace camera3
 
 }; // namespace android
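Note (editor, not part of the patch): the new Camera3Stream::queueHDRMetadata helper above selects which static HDR metadata to forward to the consumer based on the stream's dynamic range profile (SMPTE 2086 for HDR10, the SMPTE 2094-40 blob for HDR10+, nothing otherwise). The sketch below shows only that dispatch-on-profile control flow in a self-contained form; the DynamicProfile enum and query/attach functions are hypothetical stand-ins, not GraphicBufferMapper or ANativeWindow APIs.

    // Illustrative sketch only: dispatch on a dynamic-range profile to decide
    // which HDR metadata to attach, mirroring queueHDRMetadata's structure.
    // All types below are hypothetical stand-ins, not Android APIs.
    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <vector>

    enum class DynamicProfile { kStandard, kHdr10, kHdr10Plus };

    struct Smpte2086 { float maxLuminance = 0.f; float minLuminance = 0.f; };

    // Stand-ins for the buffer-metadata queries.
    std::optional<Smpte2086> querySmpte2086() { return Smpte2086{1000.f, 0.005f}; }
    std::optional<std::vector<uint8_t>> querySmpte2094_40() {
        return std::vector<uint8_t>{0x01, 0x02};
    }

    void attachHdrMetadata(DynamicProfile profile) {
        switch (profile) {
            case DynamicProfile::kHdr10: {
                // Static metadata path: query SMPTE 2086 and hand it to the consumer.
                if (auto meta = querySmpte2086()) {
                    std::printf("HDR10: maxLum=%.1f minLum=%.3f\n",
                                meta->maxLuminance, meta->minLuminance);
                }
                break;
            }
            case DynamicProfile::kHdr10Plus: {
                // Dynamic metadata path: forward the raw SMPTE 2094-40 blob.
                if (auto blob = querySmpte2094_40()) {
                    std::printf("HDR10+: %zu metadata bytes\n", blob->size());
                }
                break;
            }
            default:
                break;  // SDR and other profiles: nothing to attach
        }
    }

    int main() {
        attachHdrMetadata(DynamicProfile::kHdr10);
        attachHdrMetadata(DynamicProfile::kHdr10Plus);
        return 0;
    }
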
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 5a364ab..bbbea8d 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -138,6 +138,10 @@
     static Camera3Stream*       cast(camera_stream *stream);
     static const Camera3Stream* cast(const camera_stream *stream);
 
+    // Queue corresponding HDR metadata to given native window.
+    static void queueHDRMetadata(buffer_handle_t buffer, sp<ANativeWindow>& anw,
+            int dynamicRangeProfile);
+
     /**
      * Get the stream's ID
      */
@@ -168,6 +172,7 @@
     void              setFormatOverride(bool formatOverriden);
     bool              isFormatOverridden() const;
     int               getOriginalFormat() const;
+    int               getDynamicRangeProfile() const;
     void              setDataSpaceOverride(bool dataSpaceOverriden);
     bool              isDataSpaceOverridden() const;
     android_dataspace getOriginalDataSpace() const;
@@ -352,9 +357,9 @@
      * For bidirectional streams, this method applies to the output-side buffers
      */
     status_t         returnBuffer(const camera_stream_buffer &buffer,
-            nsecs_t timestamp, bool timestampIncreasing,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing,
             const std::vector<size_t>& surface_ids = std::vector<size_t>(),
-            uint64_t frameNumber = 0);
+            uint64_t frameNumber = 0, int32_t transform = -1);
 
     /**
      * Fill in the camera_stream_buffer with the next valid buffer for this
@@ -500,7 +505,7 @@
             android_dataspace dataSpace, camera_stream_rotation_t rotation,
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId, bool isMultiResolution);
+            int setId, bool isMultiResolution, int dynamicRangeProfile);
 
     wp<Camera3StreamBufferFreedListener> mBufferFreedListener;
 
@@ -517,7 +522,7 @@
     virtual status_t getBufferLocked(camera_stream_buffer *buffer,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
     virtual status_t returnBufferLocked(const camera_stream_buffer &buffer,
-            nsecs_t timestamp,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
 
     virtual status_t getBuffersLocked(std::vector<OutstandingBuffer>*);
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 2d3397c..ef10f0d 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -64,6 +64,7 @@
     const char* physical_camera_id;
 
     std::unordered_set<int32_t> sensor_pixel_modes_used;
+    int dynamic_range_profile;
 } camera_stream_t;
 
 typedef struct camera_stream_buffer {
@@ -107,14 +108,17 @@
         bool finalized = false;
         bool supportsOffline = false;
         std::unordered_set<int32_t> sensorPixelModesUsed;
+        int dynamicRangeProfile;
         OutputStreamInfo() :
             width(-1), height(-1), format(-1), dataSpace(HAL_DATASPACE_UNKNOWN),
-            consumerUsage(0) {}
+            consumerUsage(0),
+            dynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {}
         OutputStreamInfo(int _width, int _height, int _format, android_dataspace _dataSpace,
-                uint64_t _consumerUsage, const std::unordered_set<int32_t>& _sensorPixelModesUsed) :
+                uint64_t _consumerUsage, const std::unordered_set<int32_t>& _sensorPixelModesUsed,
+                int _dynamicRangeProfile) :
             width(_width), height(_height), format(_format),
             dataSpace(_dataSpace), consumerUsage(_consumerUsage),
-            sensorPixelModesUsed(_sensorPixelModesUsed) {}
+            sensorPixelModesUsed(_sensorPixelModesUsed), dynamicRangeProfile(_dynamicRangeProfile){}
 };
 
 /**
@@ -154,6 +158,7 @@
     virtual uint32_t getWidth() const = 0;
     virtual uint32_t getHeight() const = 0;
     virtual int      getFormat() const = 0;
+    virtual int      getDynamicRangeProfile() const = 0;
     virtual android_dataspace getDataSpace() const = 0;
     virtual void setFormatOverride(bool formatOverriden) = 0;
     virtual bool isFormatOverridden() const = 0;
@@ -357,9 +362,9 @@
      * For bidirectional streams, this method applies to the output-side buffers
      */
     virtual status_t returnBuffer(const camera_stream_buffer &buffer,
-            nsecs_t timestamp, bool timestampIncreasing = true,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing = true,
             const std::vector<size_t>& surface_ids = std::vector<size_t>(),
-            uint64_t frameNumber = 0) = 0;
+            uint64_t frameNumber = 0, int32_t transform = -1) = 0;
 
     /**
      * Fill in the camera_stream_buffer with the next valid buffer for this
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
index 5c6c518..1149d13 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -34,13 +34,16 @@
 
 #include <cutils/atomic.h>
 
+#include "Camera3Stream.h"
+
 #include "Camera3StreamSplitter.h"
 
 namespace android {
 
 status_t Camera3StreamSplitter::connect(const std::unordered_map<size_t, sp<Surface>> &surfaces,
         uint64_t consumerUsage, uint64_t producerUsage, size_t halMaxBuffers, uint32_t width,
-        uint32_t height, android::PixelFormat format, sp<Surface>* consumer) {
+        uint32_t height, android::PixelFormat format, sp<Surface>* consumer,
+        int dynamicRangeProfile) {
     ATRACE_CALL();
     if (consumer == nullptr) {
         SP_LOGE("%s: consumer pointer is NULL", __FUNCTION__);
@@ -61,6 +64,7 @@
 
     mMaxHalBuffers = halMaxBuffers;
     mConsumerName = getUniqueConsumerName();
+    mDynamicRangeProfile = dynamicRangeProfile;
     // Add output surfaces. This has to be before creating internal buffer queue
     // in order to get max consumer side buffers.
     for (auto &it : surfaces) {
@@ -136,6 +140,7 @@
         }
     }
     mOutputs.clear();
+    mOutputSurfaces.clear();
     mOutputSlots.clear();
     mConsumerBufferCount.clear();
 
@@ -258,6 +263,7 @@
 
     // Add new entry into mOutputs
     mOutputs[surfaceId] = gbp;
+    mOutputSurfaces[surfaceId] = outputQueue;
     mConsumerBufferCount[surfaceId] = maxConsumerBuffers;
     if (mConsumerBufferCount[surfaceId] > mMaxHalBuffers) {
         SP_LOGW("%s: Consumer buffer count %zu larger than max. Hal buffers: %zu", __FUNCTION__,
@@ -316,6 +322,7 @@
         }
     }
     mOutputs[surfaceId] = nullptr;
+    mOutputSurfaces[surfaceId] = nullptr;
     mOutputSlots[gbp] = nullptr;
     for (const auto &id : pendingBufferIds) {
         decrementBufRefCountLocked(id, surfaceId);
@@ -356,6 +363,14 @@
     const BufferTracker& tracker = *(mBuffers[bufferId]);
     int slot = getSlotForOutputLocked(output, tracker.getBuffer());
 
+    if (mOutputSurfaces[surfaceId] != nullptr) {
+        sp<ANativeWindow> anw = mOutputSurfaces[surfaceId];
+        camera3::Camera3Stream::queueHDRMetadata(
+                bufferItem.mGraphicBuffer->getNativeBuffer()->handle, anw, mDynamicRangeProfile);
+    } else {
+        SP_LOGE("%s: Invalid surface id: %zu!", __FUNCTION__, surfaceId);
+    }
+
     // In case the output BufferQueue has its own lock, if we hold splitter lock while calling
     // queueBuffer (which will try to acquire the output lock), the output could be holding its
     // own lock calling releaseBuffer (which  will try to acquire the splitter lock), running into
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
index 4eb455a..827865c 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -19,6 +19,8 @@
 
 #include <unordered_set>
 
+#include <camera/CameraMetadata.h>
+
 #include <gui/IConsumerListener.h>
 #include <gui/IProducerListener.h>
 #include <gui/BufferItemConsumer.h>
@@ -55,7 +57,8 @@
     // with output surfaces.
     status_t connect(const std::unordered_map<size_t, sp<Surface>> &surfaces,
             uint64_t consumerUsage, uint64_t producerUsage, size_t halMaxBuffers, uint32_t width,
-            uint32_t height, android::PixelFormat format, sp<Surface>* consumer);
+            uint32_t height, android::PixelFormat format, sp<Surface>* consumer,
+            int dynamicRangeProfile);
 
     // addOutput adds an output BufferQueue to the splitter. The splitter
     // connects to outputQueue as a CPU producer, and any buffers queued
@@ -232,6 +235,7 @@
     uint32_t mHeight = 0;
     android::PixelFormat mFormat = android::PIXEL_FORMAT_NONE;
     uint64_t mProducerUsage = 0;
+    int mDynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
 
     // The attachBuffer call will happen on different thread according to mUseHalBufManager and have
     // different timing constraint.
@@ -251,6 +255,9 @@
     //Map surface ids -> gbp outputs
     std::unordered_map<int, sp<IGraphicBufferProducer> > mOutputs;
 
+    //Map surface ids -> output surfaces
+    std::unordered_map<int, sp<Surface>> mOutputSurfaces;
+
     //Map surface ids -> consumer buffer count
     std::unordered_map<int, size_t > mConsumerBufferCount;
 
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
index 89dd115..15807bf 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.cpp
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -22,7 +22,7 @@
 #include <cmath>
 
 #include "device3/DistortionMapper.h"
-#include "utils/SessionConfigurationUtils.h"
+#include "utils/SessionConfigurationUtilsHost.h"
 
 namespace android {
 
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 523a2c7..0c97f3e 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -30,6 +30,67 @@
 
 namespace camera3 {
 
+typedef struct camera_stream_configuration {
+    uint32_t num_streams;
+    camera_stream_t **streams;
+    uint32_t operation_mode;
+    bool input_is_multi_resolution;
+} camera_stream_configuration_t;
+
+typedef struct camera_capture_request {
+    uint32_t frame_number;
+    const camera_metadata_t *settings;
+    camera_stream_buffer_t *input_buffer;
+    uint32_t num_output_buffers;
+    const camera_stream_buffer_t *output_buffers;
+    uint32_t num_physcam_settings;
+    const char **physcam_id;
+    const camera_metadata_t **physcam_settings;
+    int32_t input_width;
+    int32_t input_height;
+} camera_capture_request_t;
+
+typedef struct camera_capture_result {
+    uint32_t frame_number;
+    const camera_metadata_t *result;
+    uint32_t num_output_buffers;
+    const camera_stream_buffer_t *output_buffers;
+    const camera_stream_buffer_t *input_buffer;
+    uint32_t partial_result;
+    uint32_t num_physcam_metadata;
+    const char **physcam_ids;
+    const camera_metadata_t **physcam_metadata;
+} camera_capture_result_t;
+
+typedef struct camera_shutter_msg {
+    uint32_t frame_number;
+    uint64_t timestamp;
+    uint64_t readout_timestamp;
+} camera_shutter_msg_t;
+
+typedef struct camera_error_msg {
+    uint32_t frame_number;
+    camera_stream_t *error_stream;
+    int error_code;
+} camera_error_msg_t;
+
+typedef enum camera_error_msg_code {
+    CAMERA_MSG_ERROR_DEVICE = 1,
+    CAMERA_MSG_ERROR_REQUEST = 2,
+    CAMERA_MSG_ERROR_RESULT = 3,
+    CAMERA_MSG_ERROR_BUFFER = 4,
+    CAMERA_MSG_NUM_ERRORS
+} camera_error_msg_code_t;
+
+typedef struct camera_notify_msg {
+    int type;
+
+    union {
+        camera_error_msg_t error;
+        camera_shutter_msg_t shutter;
+    } message;
+} camera_notify_msg_t;
+
 typedef enum {
     // Cache the buffers with STATUS_ERROR within InFlightRequest
     ERROR_BUF_CACHE,
@@ -41,9 +102,10 @@
 } ERROR_BUF_STRATEGY;
 
 struct InFlightRequest {
-
     // Set by notify() SHUTTER call.
     nsecs_t shutterTimestamp;
+    // Set by notify() SHUTTER call with readout time.
+    nsecs_t shutterReadoutTimestamp;
     // Set by process_capture_result().
     nsecs_t sensorTimestamp;
     int     requestStatus;
@@ -122,6 +184,9 @@
     // What shared surfaces an output should go to
     SurfaceMap outputSurfaces;
 
+    // Current output transformation
+    int32_t transform;
+
     // TODO: dedupe
     static const nsecs_t kDefaultExpectedDuration = 100000000; // 100 ms
 
@@ -140,7 +205,8 @@
             stillCapture(false),
             zslCapture(false),
             rotateAndCropAuto(false),
-            requestTimeNs(0) {
+            requestTimeNs(0),
+            transform(-1) {
     }
 
     InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
@@ -165,7 +231,8 @@
             rotateAndCropAuto(rotateAndCropAuto),
             cameraIdsWithZoom(idsWithZoom),
             requestTimeNs(requestNs),
-            outputSurfaces(outSurfaces) {
+            outputSurfaces(outSurfaces),
+            transform(-1) {
     }
 };
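To make the new readout timestamp plumbing concrete, here is a minimal sketch (not part of the patch) of folding a camera_shutter_msg_t into the two InFlightRequest fields above. It assumes this header (InFlightRequest.h) is included, and kShutterMsgType is a placeholder for whatever SHUTTER message-type constant the device code actually uses:

    #include <utils/Timers.h>  // nsecs_t

    namespace android {
    namespace camera3 {

    constexpr int kShutterMsgType = 2;  // placeholder value, not from this patch

    inline void applyShutterMsg(const camera_notify_msg_t& msg, InFlightRequest& req) {
        if (msg.type != kShutterMsgType) {
            return;  // only shutter messages carry the two timestamps
        }
        // Start-of-exposure time, later matched against the sensor timestamp.
        req.shutterTimestamp = static_cast<nsecs_t>(msg.message.shutter.timestamp);
        // Start-of-readout time, available to consumers such as preview pacing.
        req.shutterReadoutTimestamp =
                static_cast<nsecs_t>(msg.message.shutter.readout_timestamp);
    }

    }  // namespace camera3
    }  // namespace android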
 
diff --git a/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp b/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp
new file mode 100644
index 0000000..6135f9e
--- /dev/null
+++ b/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-PreviewFrameScheduler"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <android/looper.h>
+#include "PreviewFrameScheduler.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Internal Choreographer thread implementation for polling and handling callbacks
+ */
+
+// Callback function for Choreographer
+static void frameCallback(const AChoreographerFrameCallbackData* callbackData, void* data) {
+    PreviewFrameScheduler* parent = static_cast<PreviewFrameScheduler*>(data);
+    if (parent == nullptr) {
+        ALOGE("%s: Invalid data for Choreographer callback!", __FUNCTION__);
+        return;
+    }
+
+    size_t length = AChoreographerFrameCallbackData_getFrameTimelinesLength(callbackData);
+    std::vector<nsecs_t> timeline(length);
+    for (size_t i = 0; i < length; i++) {
+        nsecs_t timestamp = AChoreographerFrameCallbackData_getFrameTimelineExpectedPresentTimeNanos(
+                callbackData, i);
+        timeline[i] = timestamp;
+    }
+
+    parent->onNewPresentationTime(timeline);
+
+    AChoreographer_postExtendedFrameCallback(AChoreographer_getInstance(), frameCallback, data);
+}
+
+struct ChoreographerThread : public Thread {
+    ChoreographerThread();
+    status_t start(PreviewFrameScheduler* parent);
+    virtual status_t readyToRun() override;
+    virtual bool threadLoop() override;
+
+protected:
+    virtual ~ChoreographerThread() {}
+
+private:
+    ChoreographerThread &operator=(const ChoreographerThread &);
+
+    // This only impacts the shutdown time. It won't impact the choreographer
+    // callback frequency.
+    static constexpr nsecs_t kPollingTimeoutMs = 5;
+    PreviewFrameScheduler* mParent = nullptr;
+};
+
+ChoreographerThread::ChoreographerThread() : Thread(false /*canCallJava*/) {
+}
+
+status_t ChoreographerThread::start(PreviewFrameScheduler* parent) {
+    mParent = parent;
+    return run("PreviewChoreographer");
+}
+
+status_t ChoreographerThread::readyToRun() {
+    ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+    if (AChoreographer_getInstance() == NULL) {
+        return NO_INIT;
+    }
+
+    AChoreographer_postExtendedFrameCallback(
+            AChoreographer_getInstance(), frameCallback, mParent);
+    return OK;
+}
+
+bool ChoreographerThread::threadLoop() {
+    if (exitPending()) {
+        return false;
+    }
+    ALooper_pollOnce(kPollingTimeoutMs, nullptr, nullptr, nullptr);
+    return true;
+}
+
+/**
+ * PreviewFrameScheduler implementation
+ */
+
+PreviewFrameScheduler::PreviewFrameScheduler(Camera3OutputStream& parent, sp<Surface> consumer) :
+        mParent(parent),
+        mConsumer(consumer),
+        mChoreographerThread(new ChoreographerThread()) {
+}
+
+PreviewFrameScheduler::~PreviewFrameScheduler() {
+    {
+        Mutex::Autolock l(mLock);
+        mChoreographerThread->requestExit();
+    }
+    mChoreographerThread->join();
+}
+
+status_t PreviewFrameScheduler::queuePreviewBuffer(nsecs_t timestamp, int32_t transform,
+        ANativeWindowBuffer* anwBuffer, int releaseFence) {
+    // Start choreographer thread if it's not already running.
+    if (!mChoreographerThread->isRunning()) {
+        status_t res = mChoreographerThread->start(this);
+        if (res != OK) {
+            ALOGE("%s: Failed to init choreographer thread!", __FUNCTION__);
+            return res;
+        }
+    }
+
+    {
+        Mutex::Autolock l(mLock);
+        mPendingBuffers.emplace(timestamp, transform, anwBuffer, releaseFence);
+
+        // Queue the oldest buffer to the client right away if there are more
+        // pending buffers than the queue depth watermark.
+        if (mPendingBuffers.size() > kQueueDepthWatermark) {
+            auto oldBuffer = mPendingBuffers.front();
+            mPendingBuffers.pop();
+
+            status_t res = queueBufferToClientLocked(oldBuffer, oldBuffer.timestamp);
+            if (res != OK) {
+                return res;
+            }
+
+            // Reset the last capture and presentation time
+            mLastCameraCaptureTime = 0;
+            mLastCameraPresentTime = 0;
+        } else {
+            ATRACE_INT(kPendingBufferTraceName, mPendingBuffers.size());
+        }
+    }
+    return OK;
+}
+
+void PreviewFrameScheduler::onNewPresentationTime(const std::vector<nsecs_t>& timeline) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    if (mPendingBuffers.size() > 0) {
+        auto nextBuffer = mPendingBuffers.front();
+        mPendingBuffers.pop();
+
+        // Find the best presentation time by finding the element in the
+        // choreographer timeline that's closest to the ideal presentation time.
+        // The ideal presentation time is the last presentation time + frame
+        // interval.
+        nsecs_t cameraInterval = nextBuffer.timestamp - mLastCameraCaptureTime;
+        nsecs_t idealPresentTime = (cameraInterval < kSpacingResetIntervalNs) ?
+                (mLastCameraPresentTime + cameraInterval) : nextBuffer.timestamp;
+        nsecs_t presentTime = *std::min_element(timeline.begin(), timeline.end(),
+                [idealPresentTime](nsecs_t p1, nsecs_t p2) {
+                        return std::abs(p1 - idealPresentTime) < std::abs(p2 - idealPresentTime);
+                });
+
+        status_t res = queueBufferToClientLocked(nextBuffer, presentTime);
+        ATRACE_INT(kPendingBufferTraceName, mPendingBuffers.size());
+
+        if (mParent.shouldLogError(res)) {
+            ALOGE("%s: Preview Stream: Error queueing buffer to native window:"
+                    " %s (%d)", __FUNCTION__, strerror(-res), res);
+        }
+
+        mLastCameraCaptureTime = nextBuffer.timestamp;
+        mLastCameraPresentTime = presentTime;
+    }
+}
+
+status_t PreviewFrameScheduler::queueBufferToClientLocked(
+        const BufferHolder& bufferHolder, nsecs_t timestamp) {
+    mParent.setTransform(bufferHolder.transform);
+
+    status_t res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
+    if (res != OK) {
+        ALOGE("%s: Preview Stream: Error setting timestamp: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    Camera3Stream::queueHDRMetadata(bufferHolder.anwBuffer.get()->handle, mConsumer,
+            mParent.getDynamicRangeProfile());
+
+    res = mConsumer->queueBuffer(mConsumer.get(), bufferHolder.anwBuffer.get(),
+            bufferHolder.releaseFence);
+    if (res != OK) {
+        close(bufferHolder.releaseFence);
+    }
+
+    return res;
+}
+
+}; // namespace camera3
+
+}; // namespace android
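The timestamp matching in onNewPresentationTime() reduces to the following standalone sketch (illustrative only; pickPresentTime and Nsec are not names from the patch). With a roughly 33 ms capture cadence and a display timeline in 16.6 ms steps, the timeline entry nearest to (last presentation time + 33 ms) is chosen:

    #include <algorithm>
    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    using Nsec = int64_t;

    // Pick the choreographer timeline entry closest to the ideal presentation
    // time, i.e. last presentation time + camera capture interval.
    Nsec pickPresentTime(const std::vector<Nsec>& timeline, Nsec lastCaptureTime,
                         Nsec lastPresentTime, Nsec newCaptureTime) {
        const Nsec interval = newCaptureTime - lastCaptureTime;
        // A large gap means a new preview session; fall back to the capture
        // timestamp itself (mirrors kSpacingResetIntervalNs above).
        const Nsec ideal = (interval < 1000000000LL) ? lastPresentTime + interval
                                                     : newCaptureTime;
        return *std::min_element(timeline.begin(), timeline.end(),
                [ideal](Nsec a, Nsec b) {
                    return std::llabs(a - ideal) < std::llabs(b - ideal);
                });
    }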
diff --git a/services/camera/libcameraservice/device3/PreviewFrameScheduler.h b/services/camera/libcameraservice/device3/PreviewFrameScheduler.h
new file mode 100644
index 0000000..c0574fd
--- /dev/null
+++ b/services/camera/libcameraservice/device3/PreviewFrameScheduler.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_PREVIEWFRAMESCHEDULER_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_PREVIEWFRAMESCHEDULER_H
+
+#include <queue>
+
+#include <android/choreographer.h>
+#include <gui/Surface.h>
+#include <gui/ISurfaceComposer.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/Looper.h>
+#include <utils/Thread.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3OutputStream;
+struct ChoreographerThread;
+
+/***
+ * Preview stream scheduler for better preview display synchronization
+ *
+ * The ideal viewfinder user experience is that frames are presented to the
+ * user in the same cadence as output by the camera sensor. However, the
+ * processing latency between frames can vary due to factors such
+ * as CPU load and differences in request settings. This frame processing
+ * latency results in variation in the presentation of frames to the user.
+ *
+ * The PreviewFrameScheduler improves the viewfinder user experience by:
+ * 1. Caching preview buffers in the scheduler.
+ * 2. For each choreographer callback, queueing the oldest cached buffer with
+ *    the best matching presentation timestamp. Frame N's presentation timestamp
+ *    is the choreographer timeline timestamp closest to (Frame N-1's
+ *    presentation time + camera capture interval between frame N-1 and frame N).
+ * 3. Maintaining at most 2 queue-able buffers. If a third preview buffer becomes
+ *    available, queueing the oldest cached buffer to the buffer queue right away
+ *    (see the illustration after this header).
+ */
+class PreviewFrameScheduler {
+  public:
+    explicit PreviewFrameScheduler(Camera3OutputStream& parent, sp<Surface> consumer);
+    virtual ~PreviewFrameScheduler();
+
+    // Queue preview buffer locally
+    status_t queuePreviewBuffer(nsecs_t timestamp, int32_t transform,
+            ANativeWindowBuffer* anwBuffer, int releaseFence);
+
+    // Callback function with a new presentation timeline from the choreographer. This
+    // will trigger a locally queued buffer to be sent to the buffer queue.
+    void onNewPresentationTime(const std::vector<nsecs_t>& presentationTimeline);
+
+    // Maintain at most 2 queue-able buffers
+    static constexpr int32_t kQueueDepthWatermark = 2;
+
+  private:
+    // structure holding cached preview buffer info
+    struct BufferHolder {
+        nsecs_t timestamp;
+        int32_t transform;
+        sp<ANativeWindowBuffer> anwBuffer;
+        int releaseFence;
+
+        BufferHolder(nsecs_t t, int32_t tr, ANativeWindowBuffer* anwb, int rf) :
+                timestamp(t), transform(tr), anwBuffer(anwb), releaseFence(rf) {}
+    };
+
+    status_t queueBufferToClientLocked(const BufferHolder& bufferHolder,
+            nsecs_t presentTime);
+
+    static constexpr char kPendingBufferTraceName[] = "pending_preview_buffers";
+
+    // Camera capture interval for resetting frame spacing between preview sessions
+    static constexpr nsecs_t kSpacingResetIntervalNs = 1000000000L; // 1 second
+
+    Camera3OutputStream& mParent;
+    sp<ANativeWindow> mConsumer;
+    mutable Mutex mLock;
+
+    std::queue<BufferHolder> mPendingBuffers;
+    nsecs_t mLastCameraCaptureTime = 0;
+    nsecs_t mLastCameraPresentTime = 0;
+
+    // Choreographer related
+    sp<Looper> mLooper;
+    sp<ChoreographerThread> mChoreographerThread;
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
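The watermark rule in point 3 of the class comment amounts to the small illustration below (a generic sketch, not code from the patch): keep at most "watermark" buffers pending and flush the oldest one as soon as that limit is exceeded.

    #include <cstddef>
    #include <queue>
    #include <utility>

    template <typename Buffer, typename FlushFn>
    void addWithWatermark(std::queue<Buffer>& pending, Buffer next,
                          std::size_t watermark, FlushFn flushOldest) {
        pending.push(std::move(next));
        if (pending.size() > watermark) {
            Buffer oldest = std::move(pending.front());
            pending.pop();
            flushOldest(std::move(oldest));  // queued immediately, bypassing pacing
        }
    }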
diff --git a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
index 7ec0956..27b00c9 100644
--- a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
+++ b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
@@ -20,7 +20,7 @@
 #include <algorithm>
 
 #include "device3/ZoomRatioMapper.h"
-#include "utils/SessionConfigurationUtils.h"
+#include "utils/SessionConfigurationUtilsHost.h"
 
 namespace android {
 
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
new file mode 100644
index 0000000..7b7a2a2
--- /dev/null
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -0,0 +1,1893 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HidlCamera3-Device"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0  // Per-frame verbose logging
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+// Convenience macro for transient errors
+#define CLOGE(fmt, ...) ALOGE("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
+            ##__VA_ARGS__)
+
+// Convenience macros for transitioning to the error state
+#define SET_ERR(fmt, ...) setErrorState(   \
+    "%s: " fmt, __FUNCTION__,              \
+    ##__VA_ARGS__)
+#define SET_ERR_L(fmt, ...) setErrorStateLocked( \
+    "%s: " fmt, __FUNCTION__,                    \
+    ##__VA_ARGS__)
+
+
+#include <inttypes.h>
+
+#include <utility>
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <utils/Timers.h>
+#include <cutils/properties.h>
+
+#include <android/hardware/camera/device/3.7/ICameraInjectionSession.h>
+#include <android/hardware/camera2/ICameraDeviceUser.h>
+
+#include "device3/hidl/HidlCamera3OutputUtils.h"
+#include "device3/hidl/HidlCamera3OfflineSession.h"
+#include "utils/SessionConfigurationUtils.h"
+#include "utils/TraceHFR.h"
+
+#include "../../common/hidl/HidlProviderInfo.h"
+#include "HidlCamera3Device.h"
+
+#include <algorithm>
+#include <tuple>
+
+using namespace android::camera3;
+using namespace android::hardware::camera;
+using namespace android::hardware::camera::device::V3_2;
+using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
+
+namespace android {
+
+CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap
+HidlCamera3Device::mapToHidlDynamicProfile(int dynamicRangeProfile) {
+    return static_cast<CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap>(
+            dynamicRangeProfile);
+}
+
+hardware::graphics::common::V1_0::PixelFormat HidlCamera3Device::mapToPixelFormat(
+        int frameworkFormat) {
+    return (hardware::graphics::common::V1_0::PixelFormat) frameworkFormat;
+}
+
+DataspaceFlags HidlCamera3Device::mapToHidlDataspace(
+        android_dataspace dataSpace) {
+    return dataSpace;
+}
+
+BufferUsageFlags HidlCamera3Device::mapToConsumerUsage(
+        uint64_t usage) {
+    return usage;
+}
+
+StreamRotation HidlCamera3Device::mapToStreamRotation(camera_stream_rotation_t rotation) {
+    switch (rotation) {
+        case CAMERA_STREAM_ROTATION_0:
+            return StreamRotation::ROTATION_0;
+        case CAMERA_STREAM_ROTATION_90:
+            return StreamRotation::ROTATION_90;
+        case CAMERA_STREAM_ROTATION_180:
+            return StreamRotation::ROTATION_180;
+        case CAMERA_STREAM_ROTATION_270:
+            return StreamRotation::ROTATION_270;
+    }
+    ALOGE("%s: Unknown stream rotation %d", __FUNCTION__, rotation);
+    return StreamRotation::ROTATION_0;
+}
+
+status_t HidlCamera3Device::mapToStreamConfigurationMode(
+        camera_stream_configuration_mode_t operationMode, StreamConfigurationMode *mode) {
+    if (mode == nullptr) return BAD_VALUE;
+    if (operationMode < CAMERA_VENDOR_STREAM_CONFIGURATION_MODE_START) {
+        switch(operationMode) {
+            case CAMERA_STREAM_CONFIGURATION_NORMAL_MODE:
+                *mode = StreamConfigurationMode::NORMAL_MODE;
+                break;
+            case CAMERA_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE:
+                *mode = StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE;
+                break;
+            default:
+                ALOGE("%s: Unknown stream configuration mode %d", __FUNCTION__, operationMode);
+                return BAD_VALUE;
+        }
+    } else {
+        *mode = static_cast<StreamConfigurationMode>(operationMode);
+    }
+    return OK;
+}
+
+int HidlCamera3Device::mapToFrameworkFormat(
+        hardware::graphics::common::V1_0::PixelFormat pixelFormat) {
+    return static_cast<uint32_t>(pixelFormat);
+}
+
+android_dataspace HidlCamera3Device::mapToFrameworkDataspace(
+        DataspaceFlags dataSpace) {
+    return static_cast<android_dataspace>(dataSpace);
+}
+
+uint64_t HidlCamera3Device::mapConsumerToFrameworkUsage(
+        BufferUsageFlags usage) {
+    return usage;
+}
+
+uint64_t HidlCamera3Device::mapProducerToFrameworkUsage(
+        BufferUsageFlags usage) {
+    return usage;
+}
+
+status_t HidlCamera3Device::initialize(sp<CameraProviderManager> manager,
+        const String8& monitorTags) {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    ALOGV("%s: Initializing HIDL device for camera %s", __FUNCTION__, mId.string());
+    if (mStatus != STATUS_UNINITIALIZED) {
+        CLOGE("Already initialized!");
+        return INVALID_OPERATION;
+    }
+    if (manager == nullptr) return INVALID_OPERATION;
+
+    sp<ICameraDeviceSession> session;
+    ATRACE_BEGIN("CameraHal::openSession");
+    status_t res = manager->openHidlSession(mId.string(), this,
+            /*out*/ &session);
+    ATRACE_END();
+    if (res != OK) {
+        SET_ERR_L("Could not open camera session: %s (%d)", strerror(-res), res);
+        return res;
+    }
+
+    res = manager->getCameraCharacteristics(mId.string(), mOverrideForPerfClass, &mDeviceInfo);
+    if (res != OK) {
+        SET_ERR_L("Could not retrieve camera characteristics: %s (%d)", strerror(-res), res);
+        session->close();
+        return res;
+    }
+    mSupportNativeZoomRatio = manager->supportNativeZoomRatio(mId.string());
+
+    std::vector<std::string> physicalCameraIds;
+    bool isLogical = manager->isLogicalCamera(mId.string(), &physicalCameraIds);
+    if (isLogical) {
+        for (auto& physicalId : physicalCameraIds) {
+            // Do not override characteristics for physical cameras
+            res = manager->getCameraCharacteristics(
+                    physicalId, /*overrideForPerfClass*/false, &mPhysicalDeviceInfoMap[physicalId]);
+            if (res != OK) {
+                SET_ERR_L("Could not retrieve camera %s characteristics: %s (%d)",
+                        physicalId.c_str(), strerror(-res), res);
+                session->close();
+                return res;
+            }
+
+            bool usePrecorrectArray =
+                    DistortionMapper::isDistortionSupported(mPhysicalDeviceInfoMap[physicalId]);
+            if (usePrecorrectArray) {
+                res = mDistortionMappers[physicalId].setupStaticInfo(
+                        mPhysicalDeviceInfoMap[physicalId]);
+                if (res != OK) {
+                    SET_ERR_L("Unable to read camera %s's calibration fields for distortion "
+                            "correction", physicalId.c_str());
+                    session->close();
+                    return res;
+                }
+            }
+
+            mZoomRatioMappers[physicalId] = ZoomRatioMapper(
+                    &mPhysicalDeviceInfoMap[physicalId],
+                    mSupportNativeZoomRatio, usePrecorrectArray);
+
+            if (SessionConfigurationUtils::isUltraHighResolutionSensor(
+                    mPhysicalDeviceInfoMap[physicalId])) {
+                mUHRCropAndMeteringRegionMappers[physicalId] =
+                        UHRCropAndMeteringRegionMapper(mPhysicalDeviceInfoMap[physicalId],
+                                usePrecorrectArray);
+            }
+        }
+    }
+
+    std::shared_ptr<RequestMetadataQueue> queue;
+    auto requestQueueRet = session->getCaptureRequestMetadataQueue(
+        [&queue](const auto& descriptor) {
+            queue = std::make_shared<RequestMetadataQueue>(descriptor);
+            if (!queue->isValid() || queue->availableToWrite() <= 0) {
+                ALOGE("HAL returns empty request metadata fmq, not use it");
+                queue = nullptr;
+                // don't use the queue onwards.
+            }
+        });
+    if (!requestQueueRet.isOk()) {
+        ALOGE("Transaction error when getting request metadata fmq: %s, not use it",
+                requestQueueRet.description().c_str());
+        return DEAD_OBJECT;
+    }
+
+    std::unique_ptr<ResultMetadataQueue>& resQueue = mResultMetadataQueue;
+    auto resultQueueRet = session->getCaptureResultMetadataQueue(
+        [&resQueue](const auto& descriptor) {
+            resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
+            if (!resQueue->isValid() || resQueue->availableToWrite() <= 0) {
+                ALOGE("HAL returns empty result metadata fmq, not use it");
+                resQueue = nullptr;
+                // Don't use the resQueue onwards.
+            }
+        });
+    if (!resultQueueRet.isOk()) {
+        ALOGE("Transaction error when getting result metadata queue from camera session: %s",
+                resultQueueRet.description().c_str());
+        return DEAD_OBJECT;
+    }
+    IF_ALOGV() {
+        session->interfaceChain([](
+            ::android::hardware::hidl_vec<::android::hardware::hidl_string> interfaceChain) {
+                ALOGV("Session interface chain:");
+                for (const auto& iface : interfaceChain) {
+                    ALOGV("  %s", iface.c_str());
+                }
+            });
+    }
+
+    camera_metadata_entry bufMgrMode =
+            mDeviceInfo.find(ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION);
+    if (bufMgrMode.count > 0) {
+         mUseHalBufManager = (bufMgrMode.data.u8[0] ==
+            ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION_HIDL_DEVICE_3_5);
+    }
+
+    camera_metadata_entry_t capabilities = mDeviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    for (size_t i = 0; i < capabilities.count; i++) {
+        uint8_t capability = capabilities.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_OFFLINE_PROCESSING) {
+            mSupportOfflineProcessing = true;
+        }
+    }
+
+    mInterface = new HidlHalInterface(session, queue, mUseHalBufManager, mSupportOfflineProcessing);
+
+    std::string providerType;
+    mVendorTagId = manager->getProviderTagIdLocked(mId.string());
+    mTagMonitor.initialize(mVendorTagId);
+    if (!monitorTags.isEmpty()) {
+        mTagMonitor.parseTagsToMonitor(String8(monitorTags));
+    }
+
+    // Metadata tags need fixup for monochrome cameras with device version less
+    // than 3.5.
+    hardware::hidl_version maxVersion{0,0};
+    res = manager->getHighestSupportedVersion(mId.string(), &maxVersion);
+    if (res != OK) {
+        ALOGE("%s: Error in getting camera device version id: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+    int deviceVersion = HARDWARE_DEVICE_API_VERSION(
+            maxVersion.get_major(), maxVersion.get_minor());
+
+    bool isMonochrome = false;
+    for (size_t i = 0; i < capabilities.count; i++) {
+        uint8_t capability = capabilities.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME) {
+            isMonochrome = true;
+        }
+    }
+    mNeedFixupMonochromeTags = (isMonochrome && deviceVersion < CAMERA_DEVICE_API_VERSION_3_5);
+
+    return initializeCommonLocked();
+}
+
+hardware::Return<void> HidlCamera3Device::requestStreamBuffers(
+        const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
+        requestStreamBuffers_cb _hidl_cb) {
+    RequestBufferStates states {
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        *this, *mInterface, *this};
+    camera3::requestStreamBuffers(states, bufReqs, _hidl_cb);
+    return hardware::Void();
+}
+
+hardware::Return<void> HidlCamera3Device::returnStreamBuffers(
+        const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
+    ReturnBufferStates states {
+        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder, *mInterface};
+    camera3::returnStreamBuffers(states, buffers);
+    return hardware::Void();
+}
+
+hardware::Return<void> HidlCamera3Device::processCaptureResult_3_4(
+        const hardware::hidl_vec<
+                hardware::camera::device::V3_4::CaptureResult>& results) {
+    // Ideally we should grab mLock, but that can lead to deadlock, and
+    // it's not super important to get up to date value of mStatus for this
+    // warning print, hence skipping the lock here
+    if (mStatus == STATUS_ERROR) {
+        // Per API contract, HAL should act as closed after device error
+        // But mStatus can be set to error by framework as well, so just log
+        // a warning here.
+        ALOGW("%s: received capture result in error state.", __FUNCTION__);
+    }
+
+    sp<NotificationListener> listener;
+    {
+        std::lock_guard<std::mutex> l(mOutputLock);
+        listener = mListener.promote();
+    }
+
+    if (mProcessCaptureResultLock.tryLock() != OK) {
+        // This should never happen; it indicates a wrong client implementation
+        // that doesn't follow the contract. But, we can be tolerant here.
+        ALOGE("%s: callback overlapped! waiting 1s...",
+                __FUNCTION__);
+        if (mProcessCaptureResultLock.timedLock(1000000000 /* 1s */) != OK) {
+            ALOGE("%s: cannot acquire lock in 1s, dropping results",
+                    __FUNCTION__);
+            // really don't know what to do, so bail out.
+            return hardware::Void();
+        }
+    }
+    HidlCaptureOutputStates states {
+       {
+        mId,
+        mInFlightLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
+        mNextShutterFrameNumber,
+        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
+        mNextResultFrameNumber,
+        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
+        mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
+        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
+        *mInterface, mLegacyClient}, mResultMetadataQueue
+    };
+
+
+    for (const auto& result : results) {
+        processOneCaptureResultLocked(states, result.v3_2, result.physicalCameraMetadata);
+    }
+    mProcessCaptureResultLock.unlock();
+    return hardware::Void();
+}
+
+// Only one processCaptureResult should be called at a time, so
+// the locks won't block. The locks are present here simply to enforce this.
+hardware::Return<void> HidlCamera3Device::processCaptureResult(
+        const hardware::hidl_vec<
+                hardware::camera::device::V3_2::CaptureResult>& results) {
+    hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata;
+
+    // Ideally we should grab mLock, but that can lead to deadlock, and
+    // it's not super important to get up to date value of mStatus for this
+    // warning print, hence skipping the lock here
+    if (mStatus == STATUS_ERROR) {
+        // Per API contract, HAL should act as closed after device error
+        // But mStatus can be set to error by framework as well, so just log
+        // a warning here.
+        ALOGW("%s: received capture result in error state.", __FUNCTION__);
+    }
+
+    sp<NotificationListener> listener;
+    {
+        std::lock_guard<std::mutex> l(mOutputLock);
+        listener = mListener.promote();
+    }
+
+    if (mProcessCaptureResultLock.tryLock() != OK) {
+        // This should never happen; it indicates a wrong client implementation
+        // that doesn't follow the contract. But, we can be tolerant here.
+        ALOGE("%s: callback overlapped! waiting 1s...",
+                __FUNCTION__);
+        if (mProcessCaptureResultLock.timedLock(1000000000 /* 1s */) != OK) {
+            ALOGE("%s: cannot acquire lock in 1s, dropping results",
+                    __FUNCTION__);
+            // really don't know what to do, so bail out.
+            return hardware::Void();
+        }
+    }
+
+    HidlCaptureOutputStates states {
+      {mId,
+        mInFlightLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
+        mNextShutterFrameNumber,
+        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
+        mNextResultFrameNumber,
+        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
+        mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
+        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
+        *mInterface, mLegacyClient}, mResultMetadataQueue
+    };
+
+    for (const auto& result : results) {
+        processOneCaptureResultLocked(states, result, noPhysMetadata);
+    }
+    mProcessCaptureResultLock.unlock();
+    return hardware::Void();
+}
+
+hardware::Return<void> HidlCamera3Device::notify(
+        const hardware::hidl_vec<hardware::camera::device::V3_2::NotifyMsg>& msgs) {
+    return notifyHelper<hardware::camera::device::V3_2::NotifyMsg>(msgs);
+}
+
+hardware::Return<void> HidlCamera3Device::notify_3_8(
+        const hardware::hidl_vec<hardware::camera::device::V3_8::NotifyMsg>& msgs) {
+    return notifyHelper<hardware::camera::device::V3_8::NotifyMsg>(msgs);
+}
+
+template<typename NotifyMsgType>
+hardware::Return<void> HidlCamera3Device::notifyHelper(
+        const hardware::hidl_vec<NotifyMsgType>& msgs) {
+    // Ideally we should grab mLock, but that can lead to deadlock, and
+    // it's not super important to get up to date value of mStatus for this
+    // warning print, hence skipping the lock here
+    if (mStatus == STATUS_ERROR) {
+        // Per API contract, HAL should act as closed after device error
+        // But mStatus can be set to error by framework as well, so just log
+        // a warning here.
+        ALOGW("%s: received notify message in error state.", __FUNCTION__);
+    }
+
+    sp<NotificationListener> listener;
+    {
+        std::lock_guard<std::mutex> l(mOutputLock);
+        listener = mListener.promote();
+    }
+
+    HidlCaptureOutputStates states {
+      {mId,
+        mInFlightLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
+        mNextShutterFrameNumber,
+        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
+        mNextResultFrameNumber,
+        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
+        mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
+        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
+        *mInterface, mLegacyClient}, mResultMetadataQueue
+    };
+    for (const auto& msg : msgs) {
+        camera3::notify(states, msg);
+    }
+    return hardware::Void();
+}
+
+status_t HidlCamera3Device::switchToOffline(
+        const std::vector<int32_t>& streamsToKeep,
+        /*out*/ sp<CameraOfflineSessionBase>* session) {
+    ATRACE_CALL();
+    if (session == nullptr) {
+        ALOGE("%s: session must not be null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock il(mInterfaceLock);
+
+    bool hasInputStream = mInputStream != nullptr;
+    int32_t inputStreamId = hasInputStream ? mInputStream->getId() : -1;
+    bool inputStreamSupportsOffline = hasInputStream ?
+            mInputStream->getOfflineProcessingSupport() : false;
+    auto outputStreamIds = mOutputStreams.getStreamIds();
+    auto streamIds = outputStreamIds;
+    if (hasInputStream) {
+        streamIds.push_back(mInputStream->getId());
+    }
+
+    // Check that all streams in streamsToKeep support offline mode
+    for (auto id : streamsToKeep) {
+        if (std::find(streamIds.begin(), streamIds.end(), id) == streamIds.end()) {
+            ALOGE("%s: Unknown stream ID %d", __FUNCTION__, id);
+            return BAD_VALUE;
+        } else if (id == inputStreamId) {
+            if (!inputStreamSupportsOffline) {
+                ALOGE("%s: input stream %d cannot be switched to offline",
+                        __FUNCTION__, id);
+                return BAD_VALUE;
+            }
+        } else {
+            sp<camera3::Camera3OutputStreamInterface> stream = mOutputStreams.get(id);
+            if (!stream->getOfflineProcessingSupport()) {
+                ALOGE("%s: output stream %d cannot be switched to offline",
+                        __FUNCTION__, id);
+                return BAD_VALUE;
+            }
+        }
+    }
+    // TODO: block surface sharing and surface group streams until we can support them
+
+    // Stop repeating request, wait until all remaining requests are submitted, then call into
+    // HAL switchToOffline
+    hardware::camera::device::V3_6::CameraOfflineSessionInfo offlineSessionInfo;
+    sp<hardware::camera::device::V3_6::ICameraOfflineSession> offlineSession;
+    camera3::BufferRecords bufferRecords;
+    status_t ret = static_cast<HidlRequestThread *>(mRequestThread.get())->switchToOffline(
+            streamsToKeep, &offlineSessionInfo, &offlineSession, &bufferRecords);
+
+    if (ret != OK) {
+        SET_ERR("Switch to offline failed: %s (%d)", strerror(-ret), ret);
+        return ret;
+    }
+
+    bool succ = mRequestBufferSM.onSwitchToOfflineSuccess();
+    if (!succ) {
+        SET_ERR("HAL must not be calling requestStreamBuffers call");
+        // TODO: block ALL callbacks from HAL till app configured new streams?
+        return UNKNOWN_ERROR;
+    }
+
+    // Verify offlineSessionInfo
+    std::vector<int32_t> offlineStreamIds;
+    offlineStreamIds.reserve(offlineSessionInfo.offlineStreams.size());
+    for (auto offlineStream : offlineSessionInfo.offlineStreams) {
+        // verify stream IDs
+        int32_t id = offlineStream.id;
+        if (std::find(streamIds.begin(), streamIds.end(), id) == streamIds.end()) {
+            SET_ERR("stream ID %d not found!", id);
+            return UNKNOWN_ERROR;
+        }
+
+        // When not using HAL buf manager, only allow streams requested by app to be preserved
+        if (!mUseHalBufManager) {
+            if (std::find(streamsToKeep.begin(), streamsToKeep.end(), id) == streamsToKeep.end()) {
+                SET_ERR("stream ID %d must not be switched to offline!", id);
+                return UNKNOWN_ERROR;
+            }
+        }
+
+        offlineStreamIds.push_back(id);
+        sp<Camera3StreamInterface> stream = (id == inputStreamId) ?
+                static_cast<sp<Camera3StreamInterface>>(mInputStream) :
+                static_cast<sp<Camera3StreamInterface>>(mOutputStreams.get(id));
+        // Verify number of outstanding buffers
+        if (stream->getOutstandingBuffersCount() != offlineStream.numOutstandingBuffers) {
+            SET_ERR("Offline stream %d # of remaining buffer mismatch: (%zu,%d) (service/HAL)",
+                    id, stream->getOutstandingBuffersCount(), offlineStream.numOutstandingBuffers);
+            return UNKNOWN_ERROR;
+        }
+    }
+
+    // Verify all streams to be deleted don't have any outstanding buffers
+    if (hasInputStream && std::find(offlineStreamIds.begin(), offlineStreamIds.end(),
+                inputStreamId) == offlineStreamIds.end()) {
+        if (mInputStream->hasOutstandingBuffers()) {
+            SET_ERR("Input stream %d still has %zu outstanding buffer!",
+                    inputStreamId, mInputStream->getOutstandingBuffersCount());
+            return UNKNOWN_ERROR;
+        }
+    }
+
+    for (const auto& outStreamId : outputStreamIds) {
+        if (std::find(offlineStreamIds.begin(), offlineStreamIds.end(),
+                outStreamId) == offlineStreamIds.end()) {
+            auto outStream = mOutputStreams.get(outStreamId);
+            if (outStream->hasOutstandingBuffers()) {
+                SET_ERR("Output stream %d still has %zu outstanding buffer!",
+                        outStreamId, outStream->getOutstandingBuffersCount());
+                return UNKNOWN_ERROR;
+            }
+        }
+    }
+
+    InFlightRequestMap offlineReqs;
+    // Verify inflight requests and their pending buffers
+    {
+        std::lock_guard<std::mutex> l(mInFlightLock);
+        for (auto offlineReq : offlineSessionInfo.offlineRequests) {
+            int idx = mInFlightMap.indexOfKey(offlineReq.frameNumber);
+            if (idx == NAME_NOT_FOUND) {
+                SET_ERR("Offline request frame number %d not found!", offlineReq.frameNumber);
+                return UNKNOWN_ERROR;
+            }
+
+            const auto& inflightReq = mInFlightMap.valueAt(idx);
+            // TODO: check specific stream IDs
+            size_t numBuffersLeft = static_cast<size_t>(inflightReq.numBuffersLeft);
+            if (numBuffersLeft != offlineReq.pendingStreams.size()) {
+                SET_ERR("Offline request # of remaining buffer mismatch: (%d,%d) (service/HAL)",
+                        inflightReq.numBuffersLeft, offlineReq.pendingStreams.size());
+                return UNKNOWN_ERROR;
+            }
+            offlineReqs.add(offlineReq.frameNumber, inflightReq);
+        }
+    }
+
+    // Create Camera3OfflineSession and transfer object ownership
+    //   (streams, inflight requests, buffer caches)
+    camera3::StreamSet offlineStreamSet;
+    sp<camera3::Camera3Stream> inputStream;
+    for (auto offlineStream : offlineSessionInfo.offlineStreams) {
+        int32_t id = offlineStream.id;
+        if (mInputStream != nullptr && id == mInputStream->getId()) {
+            inputStream = mInputStream;
+        } else {
+            offlineStreamSet.add(id, mOutputStreams.get(id));
+        }
+    }
+
+    // TODO: check if we need to lock before copying states
+    //       though technically no other thread should be talking to Camera3Device at this point
+    Camera3OfflineStates offlineStates(
+            mTagMonitor, mVendorTagId, mUseHalBufManager, mNeedFixupMonochromeTags,
+            mUsePartialResult, mNumPartialResults, mLastCompletedRegularFrameNumber,
+            mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+            mNextResultFrameNumber, mNextReprocessResultFrameNumber,
+            mNextZslStillResultFrameNumber, mNextShutterFrameNumber,
+            mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
+            mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers,
+            mZoomRatioMappers, mRotateAndCropMappers);
+
+    *session = new HidlCamera3OfflineSession(mId, inputStream, offlineStreamSet,
+            std::move(bufferRecords), offlineReqs, offlineStates, offlineSession);
+
+    // Delete all streams that have been transferred to the offline session
+    Mutex::Autolock l(mLock);
+    for (auto offlineStream : offlineSessionInfo.offlineStreams) {
+        int32_t id = offlineStream.id;
+        if (mInputStream != nullptr && id == mInputStream->getId()) {
+            mInputStream.clear();
+        } else {
+            mOutputStreams.remove(id);
+        }
+    }
+
+    // disconnect all other streams and switch to UNCONFIGURED state
+    if (mInputStream != nullptr) {
+        ret = mInputStream->disconnect();
+        if (ret != OK) {
+            SET_ERR_L("disconnect input stream failed!");
+            return UNKNOWN_ERROR;
+        }
+    }
+
+    for (auto streamId : mOutputStreams.getStreamIds()) {
+        sp<Camera3StreamInterface> stream = mOutputStreams.get(streamId);
+        ret = stream->disconnect();
+        if (ret != OK) {
+            SET_ERR_L("disconnect output stream %d failed!", streamId);
+            return UNKNOWN_ERROR;
+        }
+    }
+
+    mInputStream.clear();
+    mOutputStreams.clear();
+    mNeedConfig = true;
+    internalUpdateStatusLocked(STATUS_UNCONFIGURED);
+    mOperatingMode = NO_MODE;
+    mIsConstrainedHighSpeedConfiguration = false;
+    mRequestThread->clearPreviousRequest();
+
+    return OK;
+    // To be done by CameraDeviceClient/Camera3OfflineSession
+    // register the offline client to camera service
+    // Setup result passing threads etc
+    // Initialize offline session so HAL can start sending callbacks to it (result Fmq)
+    // TODO: check how many onIdle callbacks will be sent
+    // Java side to make sure the CameraCaptureSession is properly closed
+}
+
+sp<Camera3Device::RequestThread> HidlCamera3Device::createNewRequestThread(
+                wp<Camera3Device> parent, sp<camera3::StatusTracker> statusTracker,
+                sp<Camera3Device::HalInterface> interface,
+                const Vector<int32_t>& sessionParamKeys,
+                bool useHalBufManager,
+                bool supportCameraMute) {
+        return new HidlRequestThread(parent, statusTracker, interface, sessionParamKeys,
+                useHalBufManager, supportCameraMute);
+};
+
+sp<Camera3Device::Camera3DeviceInjectionMethods>
+HidlCamera3Device::createCamera3DeviceInjectionMethods(wp<Camera3Device> parent) {
+    return new HidlCamera3DeviceInjectionMethods(parent);
+}
+
+status_t HidlCamera3Device::injectionCameraInitialize(const String8 &injectedCamId,
+            sp<CameraProviderManager> manager) {
+        return (static_cast<HidlCamera3DeviceInjectionMethods *>(
+                mInjectionMethods.get()))->injectionInitialize(injectedCamId, manager, this);
+};
+
+
+HidlCamera3Device::HidlHalInterface::HidlHalInterface(
+            sp<device::V3_2::ICameraDeviceSession> &session,
+            std::shared_ptr<RequestMetadataQueue> queue,
+            bool useHalBufManager, bool supportOfflineProcessing) :
+        HalInterface(useHalBufManager, supportOfflineProcessing),
+        mHidlSession(session),
+        mRequestMetadataQueue(queue) {
+    // Check with hardware service manager if we can downcast these interfaces
+    // Somewhat expensive, so cache the results at startup
+    auto castResult_3_8 = device::V3_8::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_8.isOk()) {
+        mHidlSession_3_8 = castResult_3_8;
+    }
+    auto castResult_3_7 = device::V3_7::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_7.isOk()) {
+        mHidlSession_3_7 = castResult_3_7;
+    }
+    auto castResult_3_6 = device::V3_6::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_6.isOk()) {
+        mHidlSession_3_6 = castResult_3_6;
+    }
+    auto castResult_3_5 = device::V3_5::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_5.isOk()) {
+        mHidlSession_3_5 = castResult_3_5;
+    }
+    auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_4.isOk()) {
+        mHidlSession_3_4 = castResult_3_4;
+    }
+    auto castResult_3_3 = device::V3_3::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_3.isOk()) {
+        mHidlSession_3_3 = castResult_3_3;
+    }
+}
+
+bool HidlCamera3Device::HidlHalInterface::valid() {
+    return (mHidlSession != nullptr);
+}
+
+void HidlCamera3Device::HidlHalInterface::clear() {
+    mHidlSession_3_8.clear();
+    mHidlSession_3_7.clear();
+    mHidlSession_3_6.clear();
+    mHidlSession_3_5.clear();
+    mHidlSession_3_4.clear();
+    mHidlSession_3_3.clear();
+    mHidlSession.clear();
+}
+
+status_t HidlCamera3Device::HidlHalInterface::constructDefaultRequestSettings(
+        camera_request_template_t templateId,
+        /*out*/ camera_metadata_t **requestTemplate) {
+    ATRACE_NAME("CameraHidlHal::constructDefaultRequestSettings");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    common::V1_0::Status status;
+
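+    // The HAL delivers the constructed template through this callback; validate the metadata
+    // structure and clone it so the caller owns an independent copy.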
+    auto requestCallback = [&status, &requestTemplate]
+            (common::V1_0::Status s, const device::V3_2::CameraMetadata& request) {
+            status = s;
+            if (status == common::V1_0::Status::OK) {
+                const camera_metadata *r =
+                        reinterpret_cast<const camera_metadata_t*>(request.data());
+                size_t expectedSize = request.size();
+                int ret = validate_camera_metadata_structure(r, &expectedSize);
+                if (ret == OK || ret == CAMERA_METADATA_VALIDATION_SHIFTED) {
+                    *requestTemplate = clone_camera_metadata(r);
+                    if (*requestTemplate == nullptr) {
+                        ALOGE("%s: Unable to clone camera metadata received from HAL",
+                                __FUNCTION__);
+                        status = common::V1_0::Status::INTERNAL_ERROR;
+                    }
+                } else {
+                    ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+                    status = common::V1_0::Status::INTERNAL_ERROR;
+                }
+            }
+        };
+    hardware::Return<void> err;
+    RequestTemplate id;
+    switch (templateId) {
+        case CAMERA_TEMPLATE_PREVIEW:
+            id = RequestTemplate::PREVIEW;
+            break;
+        case CAMERA_TEMPLATE_STILL_CAPTURE:
+            id = RequestTemplate::STILL_CAPTURE;
+            break;
+        case CAMERA_TEMPLATE_VIDEO_RECORD:
+            id = RequestTemplate::VIDEO_RECORD;
+            break;
+        case CAMERA_TEMPLATE_VIDEO_SNAPSHOT:
+            id = RequestTemplate::VIDEO_SNAPSHOT;
+            break;
+        case CAMERA_TEMPLATE_ZERO_SHUTTER_LAG:
+            id = RequestTemplate::ZERO_SHUTTER_LAG;
+            break;
+        case CAMERA_TEMPLATE_MANUAL:
+            id = RequestTemplate::MANUAL;
+            break;
+        default:
+            // Unknown template ID, or this HAL is too old to support it
+            return BAD_VALUE;
+    }
+    err = mHidlSession->constructDefaultRequestSettings(id, requestCallback);
+
+    if (!err.isOk()) {
+        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+        res = DEAD_OBJECT;
+    } else {
+        res = HidlProviderInfo::mapToStatusT(status);
+    }
+
+    return res;
+}
+
+bool HidlCamera3Device::HidlHalInterface::isReconfigurationRequired(
+        CameraMetadata& oldSessionParams, CameraMetadata& newSessionParams) {
+    // Assume reconfiguration is required by default.
+    bool ret = true;
+    if ((mHidlSession_3_5 != nullptr) && mIsReconfigurationQuerySupported) {
+        android::hardware::hidl_vec<uint8_t> oldParams, newParams;
+        camera_metadata_t* oldSessionMeta = const_cast<camera_metadata_t*>(
+                oldSessionParams.getAndLock());
+        camera_metadata_t* newSessionMeta = const_cast<camera_metadata_t*>(
+                newSessionParams.getAndLock());
+        oldParams.setToExternal(reinterpret_cast<uint8_t*>(oldSessionMeta),
+                get_camera_metadata_size(oldSessionMeta));
+        newParams.setToExternal(reinterpret_cast<uint8_t*>(newSessionMeta),
+                get_camera_metadata_size(newSessionMeta));
+        hardware::camera::common::V1_0::Status callStatus;
+        bool required;
+        auto hidlCb = [&callStatus, &required] (hardware::camera::common::V1_0::Status s,
+                bool requiredFlag) {
+            callStatus = s;
+            required = requiredFlag;
+        };
+        auto err = mHidlSession_3_5->isReconfigurationRequired(oldParams, newParams, hidlCb);
+        oldSessionParams.unlock(oldSessionMeta);
+        newSessionParams.unlock(newSessionMeta);
+        if (err.isOk()) {
+            switch (callStatus) {
+                case hardware::camera::common::V1_0::Status::OK:
+                    ret = required;
+                    break;
+                case hardware::camera::common::V1_0::Status::METHOD_NOT_SUPPORTED:
+                    mIsReconfigurationQuerySupported = false;
+                    ret = true;
+                    break;
+                default:
+                    ALOGV("%s: Reconfiguration query failed: %d", __FUNCTION__, callStatus);
+                    ret = true;
+            }
+        } else {
+            ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, err.description().c_str());
+            ret = true;
+        }
+    }
+
+    return ret;
+}
+
+status_t HidlCamera3Device::HidlHalInterface::configureStreams(
+        const camera_metadata_t *sessionParams,
+        camera_stream_configuration *config, const std::vector<uint32_t>& bufferSizes) {
+    ATRACE_NAME("CameraHal::configureStreams");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    if (config->input_is_multi_resolution && mHidlSession_3_7 == nullptr) {
+        ALOGE("%s: Camera device doesn't support multi-resolution input stream", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    // Convert stream config to HIDL
+    std::set<int> activeStreams;
+    device::V3_2::StreamConfiguration requestedConfiguration3_2;
+    device::V3_4::StreamConfiguration requestedConfiguration3_4;
+    device::V3_7::StreamConfiguration requestedConfiguration3_7;
+    device::V3_8::StreamConfiguration requestedConfiguration3_8;
+    requestedConfiguration3_2.streams.resize(config->num_streams);
+    requestedConfiguration3_4.streams.resize(config->num_streams);
+    requestedConfiguration3_7.streams.resize(config->num_streams);
+    requestedConfiguration3_8.streams.resize(config->num_streams);
+    for (size_t i = 0; i < config->num_streams; i++) {
+        device::V3_2::Stream &dst3_2 = requestedConfiguration3_2.streams[i];
+        device::V3_4::Stream &dst3_4 = requestedConfiguration3_4.streams[i];
+        device::V3_7::Stream &dst3_7 = requestedConfiguration3_7.streams[i];
+        device::V3_8::Stream &dst3_8 = requestedConfiguration3_8.streams[i];
+        camera3::camera_stream_t *src = config->streams[i];
+
+        Camera3Stream* cam3stream = Camera3Stream::cast(src);
+        cam3stream->setBufferFreedListener(this);
+        int streamId = cam3stream->getId();
+        StreamType streamType;
+        switch (src->stream_type) {
+            case CAMERA_STREAM_OUTPUT:
+                streamType = StreamType::OUTPUT;
+                break;
+            case CAMERA_STREAM_INPUT:
+                streamType = StreamType::INPUT;
+                break;
+            default:
+                ALOGE("%s: Stream %d: Unsupported stream type %d",
+                        __FUNCTION__, streamId, config->streams[i]->stream_type);
+                return BAD_VALUE;
+        }
+        dst3_2.id = streamId;
+        dst3_2.streamType = streamType;
+        dst3_2.width = src->width;
+        dst3_2.height = src->height;
+        dst3_2.usage = mapToConsumerUsage(cam3stream->getUsage());
+        dst3_2.rotation = mapToStreamRotation((camera_stream_rotation_t) src->rotation);
+        // For HidlSession version 3.5 or newer, the format and dataSpace sent
+        // to HAL are original, not the overridden ones.
+        if (mHidlSession_3_5 != nullptr) {
+            dst3_2.format = mapToPixelFormat(cam3stream->isFormatOverridden() ?
+                    cam3stream->getOriginalFormat() : src->format);
+            dst3_2.dataSpace = mapToHidlDataspace(cam3stream->isDataSpaceOverridden() ?
+                    cam3stream->getOriginalDataSpace() : src->data_space);
+        } else {
+            dst3_2.format = mapToPixelFormat(src->format);
+            dst3_2.dataSpace = mapToHidlDataspace(src->data_space);
+        }
+        dst3_4.v3_2 = dst3_2;
+        dst3_4.bufferSize = bufferSizes[i];
+        if (src->physical_camera_id != nullptr) {
+            dst3_4.physicalCameraId = src->physical_camera_id;
+        }
+        dst3_7.v3_4 = dst3_4;
+        dst3_7.groupId = cam3stream->getHalStreamGroupId();
+        dst3_7.sensorPixelModesUsed.resize(src->sensor_pixel_modes_used.size());
+        size_t j = 0;
+        for (int mode : src->sensor_pixel_modes_used) {
+            dst3_7.sensorPixelModesUsed[j++] =
+                    static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
+        }
+        if ((src->dynamic_range_profile !=
+                    ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) &&
+                (mHidlSession_3_8 == nullptr)) {
+            ALOGE("%s: Camera device doesn't support non-standard dynamic range profiles: %d",
+                    __FUNCTION__, src->dynamic_range_profile);
+            return BAD_VALUE;
+        }
+        dst3_8.v3_7 = dst3_7;
+        dst3_8.dynamicRangeProfile = mapToHidlDynamicProfile(src->dynamic_range_profile);
+        activeStreams.insert(streamId);
+        // Create Buffer ID map if necessary
+        mBufferRecords.tryCreateBufferCache(streamId);
+    }
+    // remove BufferIdMap for deleted streams
+    mBufferRecords.removeInactiveBufferCaches(activeStreams);
+
+    StreamConfigurationMode operationMode;
+    res = mapToStreamConfigurationMode(
+            (camera_stream_configuration_mode_t) config->operation_mode,
+            /*out*/ &operationMode);
+    if (res != OK) {
+        return res;
+    }
+    requestedConfiguration3_2.operationMode = operationMode;
+    requestedConfiguration3_4.operationMode = operationMode;
+    requestedConfiguration3_7.operationMode = operationMode;
+    size_t sessionParamSize = get_camera_metadata_size(sessionParams);
+    requestedConfiguration3_4.sessionParams.setToExternal(
+            reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
+            sessionParamSize);
+    requestedConfiguration3_7.sessionParams.setToExternal(
+            reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
+            sessionParamSize);
+    requestedConfiguration3_8.operationMode = operationMode;
+    requestedConfiguration3_8.sessionParams.setToExternal(
+            reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
+            sessionParamSize);
+
+    // Invoke configureStreams
+    device::V3_3::HalStreamConfiguration finalConfiguration;
+    device::V3_4::HalStreamConfiguration finalConfiguration3_4;
+    device::V3_6::HalStreamConfiguration finalConfiguration3_6;
+    common::V1_0::Status status;
+
+    auto configStream34Cb = [&status, &finalConfiguration3_4]
+            (common::V1_0::Status s, const device::V3_4::HalStreamConfiguration& halConfiguration) {
+                finalConfiguration3_4 = halConfiguration;
+                status = s;
+            };
+
+    auto configStream36Cb = [&status, &finalConfiguration3_6]
+            (common::V1_0::Status s, const device::V3_6::HalStreamConfiguration& halConfiguration) {
+                finalConfiguration3_6 = halConfiguration;
+                status = s;
+            };
+
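+    // These helpers surface transport errors from the configure call and fold the v3.4 / v3.6
+    // results into the common v3.3 HalStreamConfiguration consumed by the code further below.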
+    auto postprocConfigStream34 = [&finalConfiguration, &finalConfiguration3_4]
+            (hardware::Return<void>& err) -> status_t {
+                if (!err.isOk()) {
+                    ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+                    return DEAD_OBJECT;
+                }
+                finalConfiguration.streams.resize(finalConfiguration3_4.streams.size());
+                for (size_t i = 0; i < finalConfiguration3_4.streams.size(); i++) {
+                    finalConfiguration.streams[i] = finalConfiguration3_4.streams[i].v3_3;
+                }
+                return OK;
+            };
+
+    auto postprocConfigStream36 = [&finalConfiguration, &finalConfiguration3_6]
+            (hardware::Return<void>& err) -> status_t {
+                if (!err.isOk()) {
+                    ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+                    return DEAD_OBJECT;
+                }
+                finalConfiguration.streams.resize(finalConfiguration3_6.streams.size());
+                for (size_t i = 0; i < finalConfiguration3_6.streams.size(); i++) {
+                    finalConfiguration.streams[i] = finalConfiguration3_6.streams[i].v3_4.v3_3;
+                }
+                return OK;
+            };
+
+    // See which version of HAL we have
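+    // Dispatch to the newest configureStreams variant the session supports; each branch
+    // reuses the nested older configuration structs built above.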
+    if (mHidlSession_3_8 != nullptr) {
+        ALOGV("%s: v3.8 device found", __FUNCTION__);
+        requestedConfiguration3_8.streamConfigCounter = mNextStreamConfigCounter++;
+        requestedConfiguration3_8.multiResolutionInputImage = config->input_is_multi_resolution;
+        auto err = mHidlSession_3_8->configureStreams_3_8(requestedConfiguration3_8,
+                configStream36Cb);
+        res = postprocConfigStream36(err);
+        if (res != OK) {
+            return res;
+        }
+    } else if (mHidlSession_3_7 != nullptr) {
+        ALOGV("%s: v3.7 device found", __FUNCTION__);
+        requestedConfiguration3_7.streamConfigCounter = mNextStreamConfigCounter++;
+        requestedConfiguration3_7.multiResolutionInputImage = config->input_is_multi_resolution;
+        auto err = mHidlSession_3_7->configureStreams_3_7(
+                requestedConfiguration3_7, configStream36Cb);
+        res = postprocConfigStream36(err);
+        if (res != OK) {
+            return res;
+        }
+    } else if (mHidlSession_3_6 != nullptr) {
+        ALOGV("%s: v3.6 device found", __FUNCTION__);
+        device::V3_5::StreamConfiguration requestedConfiguration3_5;
+        requestedConfiguration3_5.v3_4 = requestedConfiguration3_4;
+        requestedConfiguration3_5.streamConfigCounter = mNextStreamConfigCounter++;
+        auto err = mHidlSession_3_6->configureStreams_3_6(
+                requestedConfiguration3_5, configStream36Cb);
+        res = postprocConfigStream36(err);
+        if (res != OK) {
+            return res;
+        }
+    } else if (mHidlSession_3_5 != nullptr) {
+        ALOGV("%s: v3.5 device found", __FUNCTION__);
+        device::V3_5::StreamConfiguration requestedConfiguration3_5;
+        requestedConfiguration3_5.v3_4 = requestedConfiguration3_4;
+        requestedConfiguration3_5.streamConfigCounter = mNextStreamConfigCounter++;
+        auto err = mHidlSession_3_5->configureStreams_3_5(
+                requestedConfiguration3_5, configStream34Cb);
+        res = postprocConfigStream34(err);
+        if (res != OK) {
+            return res;
+        }
+    } else if (mHidlSession_3_4 != nullptr) {
+        // v3.4 HAL session found; use the v3.4 configure call
+        ALOGV("%s: v3.4 device found", __FUNCTION__);
+        auto err = mHidlSession_3_4->configureStreams_3_4(
+                requestedConfiguration3_4, configStream34Cb);
+        res = postprocConfigStream34(err);
+        if (res != OK) {
+            return res;
+        }
+    } else if (mHidlSession_3_3 != nullptr) {
+        // v3.3 HAL session found; use the v3.3 configure call
+        ALOGV("%s: v3.3 device found", __FUNCTION__);
+        auto err = mHidlSession_3_3->configureStreams_3_3(requestedConfiguration3_2,
+            [&status, &finalConfiguration]
+            (common::V1_0::Status s, const device::V3_3::HalStreamConfiguration& halConfiguration) {
+                finalConfiguration = halConfiguration;
+                status = s;
+            });
+        if (!err.isOk()) {
+            ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+            return DEAD_OBJECT;
+        }
+    } else {
+        // Only v3.2 available; use the v3.2 call and build a v3.3 HalStreamConfiguration from it
+        ALOGV("%s: v3.2 device found", __FUNCTION__);
+        HalStreamConfiguration finalConfiguration_3_2;
+        auto err = mHidlSession->configureStreams(requestedConfiguration3_2,
+                [&status, &finalConfiguration_3_2]
+                (common::V1_0::Status s, const HalStreamConfiguration& halConfiguration) {
+                    finalConfiguration_3_2 = halConfiguration;
+                    status = s;
+                });
+        if (!err.isOk()) {
+            ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+            return DEAD_OBJECT;
+        }
+        finalConfiguration.streams.resize(finalConfiguration_3_2.streams.size());
+        for (size_t i = 0; i < finalConfiguration_3_2.streams.size(); i++) {
+            finalConfiguration.streams[i].v3_2 = finalConfiguration_3_2.streams[i];
+            finalConfiguration.streams[i].overrideDataSpace =
+                    requestedConfiguration3_2.streams[i].dataSpace;
+        }
+    }
+
+    if (status != common::V1_0::Status::OK) {
+        return HidlProviderInfo::mapToStatusT(status);
+    }
+
+    // And convert output stream configuration from HIDL
+
+    for (size_t i = 0; i < config->num_streams; i++) {
+        camera3::camera_stream_t *dst = config->streams[i];
+        int streamId = Camera3Stream::cast(dst)->getId();
+
+        // Start scan at i, with the assumption that the stream order matches
+        size_t realIdx = i;
+        bool found = false;
+        size_t halStreamCount = finalConfiguration.streams.size();
+        for (size_t idx = 0; idx < halStreamCount; idx++) {
+            if (finalConfiguration.streams[realIdx].v3_2.id == streamId) {
+                found = true;
+                break;
+            }
+            realIdx = (realIdx >= halStreamCount - 1) ? 0 : realIdx + 1;
+        }
+        if (!found) {
+            ALOGE("%s: Stream %d not found in stream configuration response from HAL",
+                    __FUNCTION__, streamId);
+            return INVALID_OPERATION;
+        }
+        device::V3_3::HalStream &src = finalConfiguration.streams[realIdx];
+        device::V3_6::HalStream &src_36 = finalConfiguration3_6.streams[realIdx];
+
+        Camera3Stream* dstStream = Camera3Stream::cast(dst);
+        int overrideFormat = mapToFrameworkFormat(src.v3_2.overrideFormat);
+        android_dataspace overrideDataSpace = mapToFrameworkDataspace(src.overrideDataSpace);
+
+        if (mHidlSession_3_6 != nullptr) {
+            dstStream->setOfflineProcessingSupport(src_36.supportOffline);
+        }
+
+        if (dstStream->getOriginalFormat() != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+            dstStream->setFormatOverride(false);
+            dstStream->setDataSpaceOverride(false);
+            if (dst->format != overrideFormat) {
+                ALOGE("%s: Stream %d: Format override not allowed for format 0x%x", __FUNCTION__,
+                        streamId, dst->format);
+            }
+            if (dst->data_space != overrideDataSpace) {
+                ALOGE("%s: Stream %d: DataSpace override not allowed for format 0x%x", __FUNCTION__,
+                        streamId, dst->format);
+            }
+        } else {
+            bool needFormatOverride =
+                    requestedConfiguration3_2.streams[i].format != src.v3_2.overrideFormat;
+            bool needDataspaceOverride =
+                    requestedConfiguration3_2.streams[i].dataSpace != src.overrideDataSpace;
+            // Override allowed with IMPLEMENTATION_DEFINED
+            dstStream->setFormatOverride(needFormatOverride);
+            dstStream->setDataSpaceOverride(needDataspaceOverride);
+            dst->format = overrideFormat;
+            dst->data_space = overrideDataSpace;
+        }
+
+        if (dst->stream_type == CAMERA_STREAM_INPUT) {
+            if (src.v3_2.producerUsage != 0) {
+                ALOGE("%s: Stream %d: INPUT streams must have 0 for producer usage",
+                        __FUNCTION__, streamId);
+                return INVALID_OPERATION;
+            }
+            dstStream->setUsage(
+                    mapConsumerToFrameworkUsage(src.v3_2.consumerUsage));
+        } else {
+            // OUTPUT
+            if (src.v3_2.consumerUsage != 0) {
+                ALOGE("%s: Stream %d: OUTPUT streams must have 0 for consumer usage",
+                        __FUNCTION__, streamId);
+                return INVALID_OPERATION;
+            }
+            dstStream->setUsage(
+                    mapProducerToFrameworkUsage(src.v3_2.producerUsage));
+        }
+        dst->max_buffers = src.v3_2.maxBuffers;
+    }
+
+    return res;
+}
+
+status_t HidlCamera3Device::HidlHalInterface::configureInjectedStreams(
+        const camera_metadata_t* sessionParams, camera_stream_configuration* config,
+        const std::vector<uint32_t>& bufferSizes,
+        const CameraMetadata& cameraCharacteristics) {
+    ATRACE_NAME("InjectionCameraHal::configureStreams");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    if (config->input_is_multi_resolution) {
+        ALOGE("%s: Injection camera device doesn't support multi-resolution input "
+                "stream", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    // Convert stream config to HIDL
+    std::set<int> activeStreams;
+    device::V3_2::StreamConfiguration requestedConfiguration3_2;
+    device::V3_4::StreamConfiguration requestedConfiguration3_4;
+    device::V3_7::StreamConfiguration requestedConfiguration3_7;
+    requestedConfiguration3_2.streams.resize(config->num_streams);
+    requestedConfiguration3_4.streams.resize(config->num_streams);
+    requestedConfiguration3_7.streams.resize(config->num_streams);
+    for (size_t i = 0; i < config->num_streams; i++) {
+        device::V3_2::Stream& dst3_2 = requestedConfiguration3_2.streams[i];
+        device::V3_4::Stream& dst3_4 = requestedConfiguration3_4.streams[i];
+        device::V3_7::Stream& dst3_7 = requestedConfiguration3_7.streams[i];
+        camera3::camera_stream_t* src = config->streams[i];
+
+        Camera3Stream* cam3stream = Camera3Stream::cast(src);
+        cam3stream->setBufferFreedListener(this);
+        int streamId = cam3stream->getId();
+        StreamType streamType;
+        switch (src->stream_type) {
+            case CAMERA_STREAM_OUTPUT:
+                streamType = StreamType::OUTPUT;
+                break;
+            case CAMERA_STREAM_INPUT:
+                streamType = StreamType::INPUT;
+                break;
+            default:
+                ALOGE("%s: Stream %d: Unsupported stream type %d", __FUNCTION__,
+                        streamId, config->streams[i]->stream_type);
+                return BAD_VALUE;
+        }
+        dst3_2.id = streamId;
+        dst3_2.streamType = streamType;
+        dst3_2.width = src->width;
+        dst3_2.height = src->height;
+        dst3_2.usage = mapToConsumerUsage(cam3stream->getUsage());
+        dst3_2.rotation =
+                mapToStreamRotation((camera_stream_rotation_t)src->rotation);
+        // For HidlSession version 3.5 or newer, the format and dataSpace sent
+        // to HAL are original, not the overridden ones.
+        if (mHidlSession_3_5 != nullptr) {
+            dst3_2.format = mapToPixelFormat(cam3stream->isFormatOverridden()
+                                            ? cam3stream->getOriginalFormat()
+                                            : src->format);
+            dst3_2.dataSpace =
+                    mapToHidlDataspace(cam3stream->isDataSpaceOverridden()
+                                    ? cam3stream->getOriginalDataSpace()
+                                    : src->data_space);
+        } else {
+            dst3_2.format = mapToPixelFormat(src->format);
+            dst3_2.dataSpace = mapToHidlDataspace(src->data_space);
+        }
+        dst3_4.v3_2 = dst3_2;
+        dst3_4.bufferSize = bufferSizes[i];
+        if (src->physical_camera_id != nullptr) {
+            dst3_4.physicalCameraId = src->physical_camera_id;
+        }
+        dst3_7.v3_4 = dst3_4;
+        dst3_7.groupId = cam3stream->getHalStreamGroupId();
+        dst3_7.sensorPixelModesUsed.resize(src->sensor_pixel_modes_used.size());
+        size_t j = 0;
+        for (int mode : src->sensor_pixel_modes_used) {
+            dst3_7.sensorPixelModesUsed[j++] =
+                    static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
+        }
+        activeStreams.insert(streamId);
+        // Create Buffer ID map if necessary
+        mBufferRecords.tryCreateBufferCache(streamId);
+    }
+    // remove BufferIdMap for deleted streams
+    mBufferRecords.removeInactiveBufferCaches(activeStreams);
+
+    StreamConfigurationMode operationMode;
+    res = mapToStreamConfigurationMode(
+            (camera_stream_configuration_mode_t)config->operation_mode,
+            /*out*/ &operationMode);
+    if (res != OK) {
+        return res;
+    }
+    requestedConfiguration3_7.operationMode = operationMode;
+    size_t sessionParamSize = get_camera_metadata_size(sessionParams);
+    requestedConfiguration3_7.sessionParams.setToExternal(
+            reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
+            sessionParamSize);
+
+    // See which version of HAL we have
+    if (mHidlSession_3_7 != nullptr) {
+        requestedConfiguration3_7.streamConfigCounter = mNextStreamConfigCounter++;
+        requestedConfiguration3_7.multiResolutionInputImage =
+                config->input_is_multi_resolution;
+
+        const camera_metadata_t* rawMetadata = cameraCharacteristics.getAndLock();
+        ::android::hardware::camera::device::V3_2::CameraMetadata hidlChars = {};
+        hidlChars.setToExternal(
+                reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(rawMetadata)),
+                get_camera_metadata_size(rawMetadata));
+        cameraCharacteristics.unlock(rawMetadata);
+
+        sp<hardware::camera::device::V3_7::ICameraInjectionSession>
+                hidlInjectionSession_3_7;
+        auto castInjectionResult_3_7 =
+                device::V3_7::ICameraInjectionSession::castFrom(mHidlSession_3_7);
+        if (castInjectionResult_3_7.isOk()) {
+            hidlInjectionSession_3_7 = castInjectionResult_3_7;
+        } else {
+            ALOGE("%s: Transaction error: %s", __FUNCTION__,
+                    castInjectionResult_3_7.description().c_str());
+            return DEAD_OBJECT;
+        }
+
+        auto err = hidlInjectionSession_3_7->configureInjectionStreams(
+                requestedConfiguration3_7, hidlChars);
+        if (!err.isOk()) {
+            ALOGE("%s: Transaction error: %s", __FUNCTION__,
+                    err.description().c_str());
+            return DEAD_OBJECT;
+        }
+    } else {
+        ALOGE("%s: mHidlSession_3_7 does not exist, the lowest version of injection "
+                "session is 3.7", __FUNCTION__);
+        return DEAD_OBJECT;
+    }
+
+    return res;
+}
+
+status_t HidlCamera3Device::HidlHalInterface::wrapAsHidlRequest(camera_capture_request_t* request,
+        /*out*/device::V3_2::CaptureRequest* captureRequest,
+        /*out*/std::vector<native_handle_t*>* handlesCreated,
+        /*out*/std::vector<std::pair<int32_t, int32_t>>* inflightBuffers) {
+    ATRACE_CALL();
+    if (captureRequest == nullptr || handlesCreated == nullptr || inflightBuffers == nullptr) {
+        ALOGE("%s: captureRequest (%p), handlesCreated (%p), and inflightBuffers(%p) "
+                "must not be null", __FUNCTION__, captureRequest, handlesCreated, inflightBuffers);
+        return BAD_VALUE;
+    }
+
+    captureRequest->frameNumber = request->frame_number;
+
+    captureRequest->fmqSettingsSize = 0;
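+    // Request settings are marshaled later, in processBatchCaptureRequests (over the FMQ when
+    // available), so no FMQ payload is recorded here.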
+
+    {
+        if (request->input_buffer != nullptr) {
+            int32_t streamId = Camera3Stream::cast(request->input_buffer->stream)->getId();
+            buffer_handle_t buf = *(request->input_buffer->buffer);
+            auto pair = getBufferId(buf, streamId);
+            bool isNewBuffer = pair.first;
+            uint64_t bufferId = pair.second;
+            captureRequest->inputBuffer.streamId = streamId;
+            captureRequest->inputBuffer.bufferId = bufferId;
+            captureRequest->inputBuffer.buffer = (isNewBuffer) ? buf : nullptr;
+            captureRequest->inputBuffer.status = BufferStatus::OK;
+            native_handle_t *acquireFence = nullptr;
+            if (request->input_buffer->acquire_fence != -1) {
+                acquireFence = native_handle_create(1,0);
+                acquireFence->data[0] = request->input_buffer->acquire_fence;
+                handlesCreated->push_back(acquireFence);
+            }
+            captureRequest->inputBuffer.acquireFence = acquireFence;
+            captureRequest->inputBuffer.releaseFence = nullptr;
+
+            mBufferRecords.pushInflightBuffer(captureRequest->frameNumber, streamId,
+                    request->input_buffer->buffer);
+            inflightBuffers->push_back(std::make_pair(captureRequest->frameNumber, streamId));
+        } else {
+            captureRequest->inputBuffer.streamId = -1;
+            captureRequest->inputBuffer.bufferId = BUFFER_ID_NO_BUFFER;
+        }
+
+        captureRequest->outputBuffers.resize(request->num_output_buffers);
+        for (size_t i = 0; i < request->num_output_buffers; i++) {
+            const camera_stream_buffer_t *src = request->output_buffers + i;
+            StreamBuffer &dst = captureRequest->outputBuffers[i];
+            int32_t streamId = Camera3Stream::cast(src->stream)->getId();
+            if (src->buffer != nullptr) {
+                buffer_handle_t buf = *(src->buffer);
+                auto pair = getBufferId(buf, streamId);
+                bool isNewBuffer = pair.first;
+                dst.bufferId = pair.second;
+                dst.buffer = isNewBuffer ? buf : nullptr;
+                native_handle_t *acquireFence = nullptr;
+                if (src->acquire_fence != -1) {
+                    acquireFence = native_handle_create(1,0);
+                    acquireFence->data[0] = src->acquire_fence;
+                    handlesCreated->push_back(acquireFence);
+                }
+                dst.acquireFence = acquireFence;
+            } else if (mUseHalBufManager) {
+                // HAL buffer management path
+                dst.bufferId = BUFFER_ID_NO_BUFFER;
+                dst.buffer = nullptr;
+                dst.acquireFence = nullptr;
+            } else {
+                ALOGE("%s: cannot send a null buffer in capture request!", __FUNCTION__);
+                return BAD_VALUE;
+            }
+            dst.streamId = streamId;
+            dst.status = BufferStatus::OK;
+            dst.releaseFence = nullptr;
+
+            // Output buffers are empty when using HAL buffer manager
+            if (!mUseHalBufManager) {
+                mBufferRecords.pushInflightBuffer(
+                        captureRequest->frameNumber, streamId, src->buffer);
+                inflightBuffers->push_back(std::make_pair(captureRequest->frameNumber, streamId));
+            }
+        }
+    }
+    return OK;
+}
+
+status_t HidlCamera3Device::HidlHalInterface::flush() {
+    ATRACE_NAME("CameraHal::flush");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    auto err = mHidlSession->flush();
+    if (!err.isOk()) {
+        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+        res = DEAD_OBJECT;
+    } else {
+        res = HidlProviderInfo::mapToStatusT(err);
+    }
+
+    return res;
+}
+
+status_t HidlCamera3Device::HidlHalInterface::dump(int /*fd*/) {
+    ATRACE_NAME("CameraHal::dump");
+    if (!valid()) return INVALID_OPERATION;
+
+    // Handled by CameraProviderManager::dump
+
+    return OK;
+}
+
+status_t HidlCamera3Device::HidlHalInterface::repeatingRequestEnd(uint32_t frameNumber,
+        const std::vector<int32_t> &streamIds) {
+    ATRACE_NAME("CameraHal::repeatingRequestEnd");
+    if (!valid()) return INVALID_OPERATION;
+
+    if (mHidlSession_3_8.get() != nullptr) {
+        mHidlSession_3_8->repeatingRequestEnd(frameNumber, streamIds);
+    }
+    return OK;
+}
+
+status_t HidlCamera3Device::HidlHalInterface::close() {
+    ATRACE_NAME("CameraHal::close()");
+    if (!valid()) return INVALID_OPERATION;
+    status_t res = OK;
+
+    auto err = mHidlSession->close();
+    // Interface will be dead shortly anyway, so don't log errors
+    if (!err.isOk()) {
+        res = DEAD_OBJECT;
+    }
+
+    return res;
+}
+
+void HidlCamera3Device::HidlHalInterface::signalPipelineDrain(const std::vector<int>& streamIds) {
+    ATRACE_NAME("CameraHal::signalPipelineDrain");
+    if (!valid() || mHidlSession_3_5 == nullptr) {
+        ALOGE("%s called on invalid camera!", __FUNCTION__);
+        return;
+    }
+
+    auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter - 1);
+    if (!err.isOk()) {
+        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+        return;
+    }
+}
+
+status_t HidlCamera3Device::HidlHalInterface::processBatchCaptureRequests(
+        std::vector<camera_capture_request_t*>& requests,/*out*/uint32_t* numRequestProcessed) {
+    ATRACE_NAME("CameraHal::processBatchCaptureRequests");
+    if (!valid()) return INVALID_OPERATION;
+
+    sp<device::V3_4::ICameraDeviceSession> hidlSession_3_4;
+    sp<device::V3_7::ICameraDeviceSession> hidlSession_3_7;
+    auto castResult_3_7 = device::V3_7::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_7.isOk()) {
+        hidlSession_3_7 = castResult_3_7;
+    }
+    auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_4.isOk()) {
+        hidlSession_3_4 = castResult_3_4;
+    }
+
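+    // Build the batch with the newest CaptureRequest version the session supports; newer
+    // versions embed the older structs (3.7 wraps 3.4, which wraps 3.2).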
+    hardware::hidl_vec<device::V3_2::CaptureRequest> captureRequests;
+    hardware::hidl_vec<device::V3_4::CaptureRequest> captureRequests_3_4;
+    hardware::hidl_vec<device::V3_7::CaptureRequest> captureRequests_3_7;
+    size_t batchSize = requests.size();
+    if (hidlSession_3_7 != nullptr) {
+        captureRequests_3_7.resize(batchSize);
+    } else if (hidlSession_3_4 != nullptr) {
+        captureRequests_3_4.resize(batchSize);
+    } else {
+        captureRequests.resize(batchSize);
+    }
+    std::vector<native_handle_t*> handlesCreated;
+    std::vector<std::pair<int32_t, int32_t>> inflightBuffers;
+
+    status_t res = OK;
+    for (size_t i = 0; i < batchSize; i++) {
+        if (hidlSession_3_7 != nullptr) {
+            res = wrapAsHidlRequest(requests[i], /*out*/&captureRequests_3_7[i].v3_4.v3_2,
+                    /*out*/&handlesCreated, /*out*/&inflightBuffers);
+        } else if (hidlSession_3_4 != nullptr) {
+            res = wrapAsHidlRequest(requests[i], /*out*/&captureRequests_3_4[i].v3_2,
+                    /*out*/&handlesCreated, /*out*/&inflightBuffers);
+        } else {
+            res = wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i],
+                    /*out*/&handlesCreated, /*out*/&inflightBuffers);
+        }
+        if (res != OK) {
+            mBufferRecords.popInflightBuffers(inflightBuffers);
+            cleanupNativeHandles(&handlesCreated);
+            return res;
+        }
+    }
+
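+    // Collect buffer caches freed since the last submission so the HAL can drop them along
+    // with this batch of requests.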
+    std::vector<device::V3_2::BufferCache> cachesToRemove;
+    {
+        std::lock_guard<std::mutex> lock(mFreedBuffersLock);
+        for (auto& pair : mFreedBuffers) {
+            // The stream might have been removed since onBufferFreed
+            if (mBufferRecords.isStreamCached(pair.first)) {
+                cachesToRemove.push_back({pair.first, pair.second});
+            }
+        }
+        mFreedBuffers.clear();
+    }
+
+    common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
+    *numRequestProcessed = 0;
+
+    // Write metadata to FMQ.
+    for (size_t i = 0; i < batchSize; i++) {
+        camera_capture_request_t* request = requests[i];
+        device::V3_2::CaptureRequest* captureRequest;
+        if (hidlSession_3_7 != nullptr) {
+            captureRequest = &captureRequests_3_7[i].v3_4.v3_2;
+        } else if (hidlSession_3_4 != nullptr) {
+            captureRequest = &captureRequests_3_4[i].v3_2;
+        } else {
+            captureRequest = &captureRequests[i];
+        }
+
+        if (request->settings != nullptr) {
+            size_t settingsSize = get_camera_metadata_size(request->settings);
+            if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
+                    reinterpret_cast<const uint8_t*>(request->settings), settingsSize)) {
+                captureRequest->settings.resize(0);
+                captureRequest->fmqSettingsSize = settingsSize;
+            } else {
+                if (mRequestMetadataQueue != nullptr) {
+                    ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
+                }
+                captureRequest->settings.setToExternal(
+                        reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(
+                                request->settings)),
+                        get_camera_metadata_size(request->settings));
+                captureRequest->fmqSettingsSize = 0u;
+            }
+        } else {
+            // Null request settings map to a size-0 CameraMetadata
+            captureRequest->settings.resize(0);
+            captureRequest->fmqSettingsSize = 0u;
+        }
+
+        // hidl session 3.7 specific handling.
+        if (hidlSession_3_7 != nullptr) {
+            captureRequests_3_7[i].inputWidth = request->input_width;
+            captureRequests_3_7[i].inputHeight = request->input_height;
+        }
+
+        // hidl session 3.7 and 3.4 specific handling.
+        if (hidlSession_3_7 != nullptr || hidlSession_3_4 != nullptr) {
+            hardware::hidl_vec<device::V3_4::PhysicalCameraSetting>& physicalCameraSettings =
+                    (hidlSession_3_7 != nullptr) ?
+                    captureRequests_3_7[i].v3_4.physicalCameraSettings :
+                    captureRequests_3_4[i].physicalCameraSettings;
+            physicalCameraSettings.resize(request->num_physcam_settings);
+            for (size_t j = 0; j < request->num_physcam_settings; j++) {
+                if (request->physcam_settings != nullptr) {
+                    size_t settingsSize = get_camera_metadata_size(request->physcam_settings[j]);
+                    if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
+                                reinterpret_cast<const uint8_t*>(request->physcam_settings[j]),
+                                settingsSize)) {
+                        physicalCameraSettings[j].settings.resize(0);
+                        physicalCameraSettings[j].fmqSettingsSize = settingsSize;
+                    } else {
+                        if (mRequestMetadataQueue != nullptr) {
+                            ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
+                        }
+                        physicalCameraSettings[j].settings.setToExternal(
+                                reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(
+                                        request->physcam_settings[j])),
+                                get_camera_metadata_size(request->physcam_settings[j]));
+                        physicalCameraSettings[j].fmqSettingsSize = 0u;
+                    }
+                } else {
+                    physicalCameraSettings[j].fmqSettingsSize = 0u;
+                    physicalCameraSettings[j].settings.resize(0);
+                }
+                physicalCameraSettings[j].physicalCameraId = request->physcam_id[j];
+            }
+        }
+    }
+
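+    // Submit the batch through the matching processCaptureRequest variant; the callback
+    // reports the HAL status and how many requests were accepted.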
+    hardware::details::return_status err;
+    auto resultCallback =
+        [&status, &numRequestProcessed] (auto s, uint32_t n) {
+                status = s;
+                *numRequestProcessed = n;
+        };
+    if (hidlSession_3_7 != nullptr) {
+        err = hidlSession_3_7->processCaptureRequest_3_7(captureRequests_3_7, cachesToRemove,
+                                                         resultCallback);
+    } else if (hidlSession_3_4 != nullptr) {
+        err = hidlSession_3_4->processCaptureRequest_3_4(captureRequests_3_4, cachesToRemove,
+                                                         resultCallback);
+    } else {
+        err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
+                                                  resultCallback);
+    }
+    if (!err.isOk()) {
+        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+        status = common::V1_0::Status::CAMERA_DISCONNECTED;
+    }
+
+    if (status == common::V1_0::Status::OK && *numRequestProcessed != batchSize) {
+        ALOGE("%s: processCaptureRequest returns OK but processed %d/%zu requests",
+                __FUNCTION__, *numRequestProcessed, batchSize);
+        status = common::V1_0::Status::INTERNAL_ERROR;
+    }
+
+    res = HidlProviderInfo::mapToStatusT(status);
+    if (res == OK) {
+        if (mHidlSession->isRemote()) {
+            // Only close acquire fence FDs when the HIDL transaction succeeds (so the FDs have been
+            // sent to camera HAL processes)
+            cleanupNativeHandles(&handlesCreated, /*closeFd*/true);
+        } else {
+            // In passthrough mode the FDs are now owned by HAL
+            cleanupNativeHandles(&handlesCreated);
+        }
+    } else {
+        mBufferRecords.popInflightBuffers(inflightBuffers);
+        cleanupNativeHandles(&handlesCreated);
+    }
+    return res;
+}
+
+status_t HidlCamera3Device::HidlHalInterface::switchToOffline(
+        const std::vector<int32_t>& streamsToKeep,
+        /*out*/hardware::camera::device::V3_6::CameraOfflineSessionInfo* offlineSessionInfo,
+        /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
+        /*out*/camera3::BufferRecords* bufferRecords) {
+    ATRACE_NAME("CameraHal::switchToOffline");
+    if (!valid() || mHidlSession_3_6 == nullptr) {
+        ALOGE("%s called on invalid camera!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    if (offlineSessionInfo == nullptr || offlineSession == nullptr || bufferRecords == nullptr) {
+        ALOGE("%s: output arguments must not be null!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
+    auto resultCallback =
+        [&status, &offlineSessionInfo, &offlineSession] (auto s, auto info, auto session) {
+                status = s;
+                *offlineSessionInfo = info;
+                *offlineSession = session;
+        };
+    auto err = mHidlSession_3_6->switchToOffline(streamsToKeep, resultCallback);
+
+    if (!err.isOk()) {
+        ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+        return DEAD_OBJECT;
+    }
+
+    status_t ret = HidlProviderInfo::mapToStatusT(status);
+    if (ret != OK) {
+        return ret;
+    }
+
+    return verifyBufferCaches(offlineSessionInfo, bufferRecords);
+}
+
+HidlCamera3Device::HidlRequestThread::HidlRequestThread(wp<Camera3Device> parent,
+                sp<camera3::StatusTracker> statusTracker,
+                sp<HalInterface> interface,
+                const Vector<int32_t>& sessionParamKeys,
+                bool useHalBufManager,
+                bool supportCameraMute) :
+          RequestThread(parent, statusTracker, interface, sessionParamKeys, useHalBufManager,
+                  supportCameraMute) {}
+
+status_t HidlCamera3Device::HidlRequestThread::switchToOffline(
+        const std::vector<int32_t>& streamsToKeep,
+        /*out*/hardware::camera::device::V3_6::CameraOfflineSessionInfo* offlineSessionInfo,
+        /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
+        /*out*/camera3::BufferRecords* bufferRecords) {
+    Mutex::Autolock l(mRequestLock);
+    clearRepeatingRequestsLocked(/*lastFrameNumber*/nullptr);
+
+    // Wait until request thread is fully stopped
+    // TBD: check if request thread is being paused by other APIs (shouldn't be)
+
+    // We could also check for mRepeatingRequests.empty(), but the API interface
+    // is serialized by Camera3Device::mInterfaceLock so no one should be able to submit any
+    // new requests during the call; hence skip that check.
+    bool queueEmpty = mNextRequests.empty() && mRequestQueue.empty();
+    while (!queueEmpty) {
+        status_t res = mRequestSubmittedSignal.waitRelative(mRequestLock, kRequestSubmitTimeout);
+        if (res == TIMED_OUT) {
+            ALOGE("%s: request thread failed to submit one request within timeout!", __FUNCTION__);
+            return res;
+        } else if (res != OK) {
+            ALOGE("%s: request thread failed to submit a request: %s (%d)!",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        queueEmpty = mNextRequests.empty() && mRequestQueue.empty();
+    }
+    return (static_cast<HidlHalInterface *>(mInterface.get()))->switchToOffline(
+            streamsToKeep, offlineSessionInfo, offlineSession, bufferRecords);
+}
+
+status_t HidlCamera3Device::HidlCamera3DeviceInjectionMethods::injectionInitialize(
+        const String8& injectedCamId, sp<CameraProviderManager> manager,
+        const sp<android::hardware::camera::device::V3_2::ICameraDeviceCallback>&
+                callback) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mInjectionLock);
+
+    if (manager == nullptr) {
+        ALOGE("%s: manager does not exist!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    sp<Camera3Device> parent = mParent.promote();
+    if (parent == nullptr) {
+        ALOGE("%s: parent does not exist!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    mInjectedCamId = injectedCamId;
+    sp<ICameraDeviceSession> session;
+    ATRACE_BEGIN("Injection CameraHal::openSession");
+    status_t res = manager->openHidlSession(injectedCamId.string(), callback,
+                                          /*out*/ &session);
+    ATRACE_END();
+    if (res != OK) {
+        ALOGE("Injection camera could not open camera session: %s (%d)",
+                strerror(-res), res);
+        return res;
+    }
+
+    std::shared_ptr<RequestMetadataQueue> queue;
+    auto requestQueueRet =
+        session->getCaptureRequestMetadataQueue([&queue](const auto& descriptor) {
+            queue = std::make_shared<RequestMetadataQueue>(descriptor);
+            if (!queue->isValid() || queue->availableToWrite() <= 0) {
+                ALOGE("Injection camera HAL returns empty request metadata fmq, not "
+                        "use it");
+                queue = nullptr;
+                // don't use the queue onwards.
+            }
+        });
+    if (!requestQueueRet.isOk()) {
+        ALOGE("Injection camera transaction error when getting request metadata fmq: "
+                "%s, not use it", requestQueueRet.description().c_str());
+        return DEAD_OBJECT;
+    }
+
+    std::unique_ptr<ResultMetadataQueue>& resQueue = mInjectionResultMetadataQueue;
+    auto resultQueueRet = session->getCaptureResultMetadataQueue(
+        [&resQueue](const auto& descriptor) {
+            resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
+            if (!resQueue->isValid() || resQueue->availableToWrite() <= 0) {
+                ALOGE("Injection camera HAL returns empty result metadata fmq, not use "
+                        "it");
+                resQueue = nullptr;
+                // Don't use the resQueue onwards.
+            }
+        });
+    if (!resultQueueRet.isOk()) {
+        ALOGE("Injection camera transaction error when getting result metadata queue "
+                "from camera session: %s", resultQueueRet.description().c_str());
+        return DEAD_OBJECT;
+    }
+    IF_ALOGV() {
+        session->interfaceChain(
+                [](::android::hardware::hidl_vec<::android::hardware::hidl_string>
+                        interfaceChain) {
+                        ALOGV("Injection camera session interface chain:");
+                        for (const auto& iface : interfaceChain) {
+                            ALOGV("  %s", iface.c_str());
+                        }
+                });
+    }
+
+    ALOGV("%s: Injection camera interface = new HalInterface()", __FUNCTION__);
+
+    mInjectedCamHalInterface =
+            new HidlHalInterface(session, queue, parent->mUseHalBufManager,
+                       parent->mSupportOfflineProcessing);
+    if (mInjectedCamHalInterface == nullptr) {
+        ALOGE("%s: mInjectedCamHalInterface does not exist!", __FUNCTION__);
+        return DEAD_OBJECT;
+    }
+
+    return OK;
+}
+
+status_t HidlCamera3Device::HidlCamera3DeviceInjectionMethods::replaceHalInterface(
+        sp<HalInterface> newHalInterface, bool keepBackup) {
+    Mutex::Autolock lock(mInjectionLock);
+    if (newHalInterface.get() == nullptr) {
+        ALOGE("%s: The newHalInterface does not exist, to stop replacing.",
+                __FUNCTION__);
+        return DEAD_OBJECT;
+    }
+
+    sp<Camera3Device> parent = mParent.promote();
+    if (parent == nullptr) {
+        ALOGE("%s: parent does not exist!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    if (newHalInterface->getTransportType() != IPCTransport::HIDL) {
+        ALOGE("%s Replacing HIDL HalInterface with another transport unsupported", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    HidlCamera3Device *hidlParent = static_cast<HidlCamera3Device *>(parent.get());
+    if (keepBackup) {
+        if (mBackupHalInterface == nullptr) {
+            mBackupHalInterface = parent->mInterface;
+        }
+        if (mBackupResultMetadataQueue == nullptr) {
+            mBackupResultMetadataQueue = std::move(hidlParent->mResultMetadataQueue);
+            hidlParent->mResultMetadataQueue = std::move(mInjectionResultMetadataQueue);
+        }
+    } else {
+        mBackupHalInterface = nullptr;
+        hidlParent->mResultMetadataQueue = std::move(mBackupResultMetadataQueue);
+        mBackupResultMetadataQueue = nullptr;
+    }
+    parent->mInterface = newHalInterface;
+
+    return OK;
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.h b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.h
new file mode 100644
index 0000000..a83080b
--- /dev/null
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_HIDLCAMERA3DEVICE_H
+#define ANDROID_SERVERS_HIDLCAMERA3DEVICE_H
+
+#include "../Camera3Device.h"
+#include "HidlCamera3OutputUtils.h"
+
+namespace android {
+
+using android::hardware::camera::metadata::V3_8::CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap;
+
+/**
+ * CameraDevice for HIDL HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 or higher.
+ */
+class HidlCamera3Device :
+            virtual public hardware::camera::device::V3_8::ICameraDeviceCallback,
+            public Camera3Device {
+  public:
+
+    explicit HidlCamera3Device(const String8& id, bool overrideForPerfClass,
+            bool legacyClient = false) : Camera3Device(id, overrideForPerfClass, legacyClient) { }
+
+    virtual ~HidlCamera3Device() {}
+
+    /**
+     * Helper functions to map between framework and HIDL values
+     */
+    static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
+    static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
+            android_dataspace dataSpace);
+    static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint64_t usage);
+    static CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap mapToHidlDynamicProfile(
+                    int dynamicRangeProfile);
+    static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
+            camera_stream_rotation_t rotation);
+    // Returns a negative error code if the passed-in operation mode is not valid.
+    static status_t mapToStreamConfigurationMode(camera_stream_configuration_mode_t operationMode,
+            /*out*/ hardware::camera::device::V3_2::StreamConfigurationMode *mode);
+    static int mapToFrameworkFormat(hardware::graphics::common::V1_0::PixelFormat pixelFormat);
+    static android_dataspace mapToFrameworkDataspace(
+            hardware::camera::device::V3_2::DataspaceFlags);
+    static uint64_t mapConsumerToFrameworkUsage(
+            hardware::camera::device::V3_2::BufferUsageFlags usage);
+    static uint64_t mapProducerToFrameworkUsage(
+            hardware::camera::device::V3_2::BufferUsageFlags usage);
+
+    status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags) override;
+
+    /**
+     * Implementation of android::hardware::camera::device::V3_5::ICameraDeviceCallback
+     */
+
+    hardware::Return<void> processCaptureResult_3_4(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_4::CaptureResult>& results) override;
+    hardware::Return<void> processCaptureResult(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_2::CaptureResult>& results) override;
+    hardware::Return<void> notify(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_2::NotifyMsg>& msgs) override;
+
+    hardware::Return<void> requestStreamBuffers(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_5::BufferRequest>& bufReqs,
+            requestStreamBuffers_cb _hidl_cb) override;
+
+    hardware::Return<void> returnStreamBuffers(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
+
+    hardware::Return<void> notify_3_8(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_8::NotifyMsg>& msgs) override;
+
+    // Handle one notify message
+    void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
+
+    status_t switchToOffline(const std::vector<int32_t>& streamsToKeep,
+            /*out*/ sp<CameraOfflineSessionBase>* session) override;
+
+    using RequestMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
+
+    class HidlHalInterface : public Camera3Device::HalInterface {
+     public:
+        HidlHalInterface(sp<hardware::camera::device::V3_2::ICameraDeviceSession> &session,
+                     std::shared_ptr<RequestMetadataQueue> queue,
+                     bool useHalBufManager, bool supportOfflineProcessing);
+
+        virtual IPCTransport getTransportType() override { return IPCTransport::HIDL; }
+        // Returns true if constructed with a valid device or session, and not yet cleared
+        virtual bool valid() override;
+
+        // Reset this HalInterface object (does not call close())
+        virtual void clear() override;
+
+        // Calls into the HAL interface
+
+        // Caller takes ownership of requestTemplate
+        virtual status_t constructDefaultRequestSettings(camera_request_template templateId,
+                /*out*/ camera_metadata_t **requestTemplate) override;
+
+        virtual status_t configureStreams(const camera_metadata_t *sessionParams,
+                /*inout*/ camera_stream_configuration_t *config,
+                const std::vector<uint32_t>& bufferSizes) override;
+
+        // Configures the streams of the injection camera to the HAL.
+        virtual status_t configureInjectedStreams(
+                const camera_metadata_t* sessionParams,
+                /*inout*/ camera_stream_configuration_t* config,
+                const std::vector<uint32_t>& bufferSizes,
+                const CameraMetadata& cameraCharacteristics) override;
+
+        // When the call succeeds, ownership of the acquire fences in the requests is transferred
+        // to HalInterface. More specifically, the current implementation sends the fence to the
+        // HAL process and closes the FD in the cameraserver process. When the call fails,
+        // ownership of the acquire fences remains with the caller (see the sketch below).
+        virtual status_t processBatchCaptureRequests(
+                std::vector<camera_capture_request_t*>& requests,
+                /*out*/uint32_t* numRequestProcessed) override;
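+        // Illustrative call pattern for the ownership rule above (editor's sketch, not part
+        // of the original change; "halInterface" and "req" are hypothetical):
+        //
+        //   std::vector<camera_capture_request_t*> requests = {&req};
+        //   uint32_t numProcessed = 0;
+        //   status_t res = halInterface->processBatchCaptureRequests(requests, &numProcessed);
+        //   if (res == OK) {
+        //       // The acquire fence FDs in "req" now belong to HalInterface; don't close them.
+        //   } else {
+        //       // Ownership of the acquire fences stays with the caller.
+        //   }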
+        virtual status_t flush() override;
+        virtual status_t dump(int fd) override;
+        virtual status_t close() override;
+
+        virtual void signalPipelineDrain(const std::vector<int>& streamIds) override;
+        virtual bool isReconfigurationRequired(CameraMetadata& oldSessionParams,
+                CameraMetadata& newSessionParams) override;
+
+        virtual status_t repeatingRequestEnd(uint32_t frameNumber,
+                const std::vector<int32_t> &streamIds) override;
+
+        status_t switchToOffline(
+                const std::vector<int32_t>& streamsToKeep,
+                /*out*/hardware::camera::device::V3_6::CameraOfflineSessionInfo* offlineSessionInfo,
+                /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
+                /*out*/camera3::BufferRecords* bufferRecords);
+
+     private:
+
+        // Always valid
+        sp<hardware::camera::device::V3_2::ICameraDeviceSession> mHidlSession;
+        // Valid if ICameraDeviceSession is @3.3 or newer
+        sp<hardware::camera::device::V3_3::ICameraDeviceSession> mHidlSession_3_3;
+        // Valid if ICameraDeviceSession is @3.4 or newer
+        sp<hardware::camera::device::V3_4::ICameraDeviceSession> mHidlSession_3_4;
+        // Valid if ICameraDeviceSession is @3.5 or newer
+        sp<hardware::camera::device::V3_5::ICameraDeviceSession> mHidlSession_3_5;
+        // Valid if ICameraDeviceSession is @3.6 or newer
+        sp<hardware::camera::device::V3_6::ICameraDeviceSession> mHidlSession_3_6;
+        // Valid if ICameraDeviceSession is @3.7 or newer
+        sp<hardware::camera::device::V3_7::ICameraDeviceSession> mHidlSession_3_7;
+        // Valid if ICameraDeviceSession is @3.8 or newer
+        sp<hardware::camera::device::V3_8::ICameraDeviceSession> mHidlSession_3_8;
+
+        std::shared_ptr<RequestMetadataQueue> mRequestMetadataQueue;
+
+        // The output HIDL request still depends on the input camera_capture_request_t.
+        // Do not free the input camera_capture_request_t before the output HIDL request.
+        status_t wrapAsHidlRequest(camera_capture_request_t* in,
+                /*out*/hardware::camera::device::V3_2::CaptureRequest* out,
+                /*out*/std::vector<native_handle_t*>* handlesCreated,
+                /*out*/std::vector<std::pair<int32_t, int32_t>>* inflightBuffers);
+    }; // class HidlHalInterface
+
+    class HidlRequestThread : public Camera3Device::RequestThread {
+      public:
+        HidlRequestThread(wp<Camera3Device> parent,
+                sp<camera3::StatusTracker> statusTracker,
+                sp<HalInterface> interface,
+                const Vector<int32_t>& sessionParamKeys,
+                bool useHalBufManager,
+                bool supportCameraMute);
+
+        status_t switchToOffline(
+                const std::vector<int32_t>& streamsToKeep,
+                /*out*/hardware::camera::device::V3_6::CameraOfflineSessionInfo* offlineSessionInfo,
+                /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
+                /*out*/camera3::BufferRecords* bufferRecords);
+    }; // class HidlRequestThread
+
+    class HidlCamera3DeviceInjectionMethods : public Camera3DeviceInjectionMethods {
+     public:
+        // Initialize the injection camera and generate a HAL interface.
+        status_t injectionInitialize(
+                const String8& injectedCamId, sp<CameraProviderManager> manager,
+                const sp<
+                    android::hardware::camera::device::V3_2::ICameraDeviceCallback>&
+                    callback);
+        HidlCamera3DeviceInjectionMethods(wp<Camera3Device> parent) :
+                Camera3DeviceInjectionMethods(parent) { };
+        ~HidlCamera3DeviceInjectionMethods() {}
+     private:
+        // Backup of the original camera hal result FMQ.
+        std::unique_ptr<ResultMetadataQueue> mBackupResultMetadataQueue;
+
+        // FMQ to write results on for the injection camera. Must be guarded by
+        // mProcessCaptureResultLock.
+        std::unique_ptr<ResultMetadataQueue> mInjectionResultMetadataQueue;
+
+        // Use the injection camera HAL interface to replace and back up the
+        // original camera HAL interface.
+        virtual status_t replaceHalInterface(sp<HalInterface> newHalInterface,
+                bool keepBackup) override;
+    };
+
+  private:
+    template<typename NotifyMsgType>
+    hardware::Return<void> notifyHelper(
+            const hardware::hidl_vec<NotifyMsgType>& msgs);
+
+    virtual status_t injectionCameraInitialize(const String8 &injectCamId,
+            sp<CameraProviderManager> manager) override;
+
+    virtual sp<RequestThread> createNewRequestThread(wp<Camera3Device> parent,
+                sp<camera3::StatusTracker> statusTracker,
+                sp<HalInterface> interface,
+                const Vector<int32_t>& sessionParamKeys,
+                bool useHalBufManager,
+                bool supportCameraMute) override;
+
+    virtual sp<Camera3DeviceInjectionMethods>
+            createCamera3DeviceInjectionMethods(wp<Camera3Device>) override;
+
+    // FMQ to write result on. Must be guarded by mProcessCaptureResultLock.
+    std::unique_ptr<ResultMetadataQueue> mResultMetadataQueue;
+
+}; // class HidlCamera3Device
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
new file mode 100644
index 0000000..d517c8d
--- /dev/null
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Hidl-Camera3-OffLnSsn"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <inttypes.h>
+
+#include <utils/Trace.h>
+
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+
+#include "device3/hidl/HidlCamera3OfflineSession.h"
+#include "device3/Camera3OutputStream.h"
+#include "device3/hidl/HidlCamera3OutputUtils.h"
+#include "device3/Camera3InputStream.h"
+#include "device3/Camera3SharedOutputStream.h"
+#include "utils/CameraTraces.h"
+
+using namespace android::camera3;
+using namespace android::hardware::camera;
+
+namespace android {
+
+HidlCamera3OfflineSession::~HidlCamera3OfflineSession() {
+    ATRACE_CALL();
+    ALOGV("%s: Tearing down hidl offline session for camera id %s", __FUNCTION__, mId.string());
+    HidlCamera3OfflineSession::disconnectSession();
+}
+
+status_t HidlCamera3OfflineSession::initialize(wp<NotificationListener> listener) {
+    ATRACE_CALL();
+
+    if (mSession == nullptr) {
+        ALOGE("%s: HIDL session is null!", __FUNCTION__);
+        return DEAD_OBJECT;
+    }
+
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+
+        mListener = listener;
+
+        // setup result FMQ
+        std::unique_ptr<ResultMetadataQueue>& resQueue = mResultMetadataQueue;
+        auto resultQueueRet = mSession->getCaptureResultMetadataQueue(
+            [&resQueue](const auto& descriptor) {
+                resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
+                if (!resQueue->isValid() || resQueue->availableToWrite() <= 0) {
+                    ALOGE("HAL returned an empty result metadata FMQ; not using it");
+                    resQueue = nullptr;
+                    // Don't use resQueue from here onwards.
+                }
+            });
+        if (!resultQueueRet.isOk()) {
+            ALOGE("Transaction error when getting result metadata queue from camera session: %s",
+                    resultQueueRet.description().c_str());
+            return DEAD_OBJECT;
+        }
+        mStatus = STATUS_ACTIVE;
+    }
+
+    mSession->setCallback(this);
+
+    return OK;
+}
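+// Editor's note (not part of the original change): the queue set up above is the FMQ that
+// serialized result metadata is later read from. A minimal read sketch, assuming the
+// standard android::hardware::MessageQueue API and a hypothetical "metadataSize" taken
+// from an incoming capture result:
+//
+//   std::vector<uint8_t> blob(metadataSize);
+//   if (mResultMetadataQueue != nullptr &&
+//           mResultMetadataQueue->read(blob.data(), metadataSize)) {
+//       // "blob" now holds the serialized camera metadata for this result.
+//   }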
+
+hardware::Return<void> HidlCamera3OfflineSession::processCaptureResult_3_4(
+        const hardware::hidl_vec<
+                hardware::camera::device::V3_4::CaptureResult>& results) {
+    sp<NotificationListener> listener;
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStatus != STATUS_ACTIVE) {
+            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
+            return hardware::Void();
+        }
+        listener = mListener.promote();
+    }
+
+    HidlCaptureOutputStates states {
+      {mId,
+        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
+        mNextShutterFrameNumber,
+        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
+        mNextResultFrameNumber,
+        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
+        mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
+        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
+        mBufferRecords, /*legacyClient*/ false}, mResultMetadataQueue
+    };
+
+    std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
+    for (const auto& result : results) {
+        processOneCaptureResultLocked(states, result.v3_2, result.physicalCameraMetadata);
+    }
+    return hardware::Void();
+}
+
+hardware::Return<void> HidlCamera3OfflineSession::processCaptureResult(
+        const hardware::hidl_vec<
+                hardware::camera::device::V3_2::CaptureResult>& results) {
+    // TODO: change the impl to call into processCaptureResult_3_4 instead?
+    //       Might need to figure out how to reduce copying, though.
+    sp<NotificationListener> listener;
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStatus != STATUS_ACTIVE) {
+            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
+            return hardware::Void();
+        }
+        listener = mListener.promote();
+    }
+
+    hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata;
+
+    HidlCaptureOutputStates states {
+      {mId,
+        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
+        mNextShutterFrameNumber,
+        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
+        mNextResultFrameNumber,
+        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
+        mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
+        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
+        mBufferRecords, /*legacyClient*/ false}, mResultMetadataQueue
+    };
+
+    std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
+    for (const auto& result : results) {
+        processOneCaptureResultLocked(states, result, noPhysMetadata);
+    }
+    return hardware::Void();
+}
+
+hardware::Return<void> HidlCamera3OfflineSession::notify(
+        const hardware::hidl_vec<hardware::camera::device::V3_2::NotifyMsg>& msgs) {
+    sp<NotificationListener> listener;
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStatus != STATUS_ACTIVE) {
+            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
+            return hardware::Void();
+        }
+        listener = mListener.promote();
+    }
+
+    HidlCaptureOutputStates states {
+      {mId,
+        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
+        mNextShutterFrameNumber,
+        mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
+        mNextResultFrameNumber,
+        mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
+        mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
+        mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
+        mBufferRecords, /*legacyClient*/ false}, mResultMetadataQueue
+    };
+    for (const auto& msg : msgs) {
+        camera3::notify(states, msg);
+    }
+    return hardware::Void();
+}
+
+hardware::Return<void> HidlCamera3OfflineSession::requestStreamBuffers(
+        const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
+        requestStreamBuffers_cb _hidl_cb) {
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStatus != STATUS_ACTIVE) {
+            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
+            return hardware::Void();
+        }
+    }
+
+    RequestBufferStates states {
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        *this, mBufferRecords, *this};
+    camera3::requestStreamBuffers(states, bufReqs, _hidl_cb);
+    return hardware::Void();
+}
+
+hardware::Return<void> HidlCamera3OfflineSession::returnStreamBuffers(
+        const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStatus != STATUS_ACTIVE) {
+            ALOGE("%s called in wrong state %d", __FUNCTION__, mStatus);
+            return hardware::Void();
+        }
+    }
+
+    ReturnBufferStates states {
+        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder, mBufferRecords};
+
+    camera3::returnStreamBuffers(states, buffers);
+    return hardware::Void();
+}
+
+void HidlCamera3OfflineSession::disconnectSession() {
+    // TODO: Make sure this locking is correct.
+    std::lock_guard<std::mutex> lock(mLock);
+    if (mSession != nullptr) {
+        mSession->close();
+    }
+    mSession.clear();
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.h b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.h
new file mode 100644
index 0000000..597cc5d
--- /dev/null
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_HIDL_CAMERA3OFFLINESESSION_H
+#define ANDROID_SERVERS_HIDL_CAMERA3OFFLINESESSION_H
+
+#include <memory>
+#include <mutex>
+
+#include <utils/String8.h>
+#include <utils/String16.h>
+
+#include <android/hardware/camera/device/3.6/ICameraOfflineSession.h>
+
+#include <fmq/MessageQueue.h>
+
+#include "HidlCamera3OutputUtils.h"
+#include "common/CameraOfflineSessionBase.h"
+
+#include "device3/Camera3BufferManager.h"
+#include "device3/Camera3OfflineSession.h"
+#include "device3/InFlightRequest.h"
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3Stream;
+class Camera3OutputStreamInterface;
+class Camera3StreamInterface;
+
+} // namespace camera3
+
+/**
+ * HidlCamera3OfflineSession for offline sessions defined by HIDL ICameraOfflineSession@3.6 or higher
+ */
+class HidlCamera3OfflineSession :
+            public Camera3OfflineSession,
+            virtual public hardware::camera::device::V3_5::ICameraDeviceCallback {
+  public:
+
+    // Initialized by Camera3Device.
+    explicit HidlCamera3OfflineSession(const String8& id,
+            const sp<camera3::Camera3Stream>& inputStream,
+            const camera3::StreamSet& offlineStreamSet,
+            camera3::BufferRecords&& bufferRecords,
+            const camera3::InFlightRequestMap& offlineReqs,
+            const Camera3OfflineStates& offlineStates,
+            sp<hardware::camera::device::V3_6::ICameraOfflineSession> offlineSession) :
+      Camera3OfflineSession(id, inputStream, offlineStreamSet, std::move(bufferRecords),
+              offlineReqs, offlineStates),
+      mSession(offlineSession) {};
+
+    virtual ~HidlCamera3OfflineSession();
+
+    virtual status_t initialize(wp<NotificationListener> listener) override;
+
+    /**
+     * HIDL ICameraDeviceCallback interface
+     * Implementation of android::hardware::camera::device::V3_5::ICameraDeviceCallback
+     */
+
+    hardware::Return<void> processCaptureResult_3_4(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_4::CaptureResult>& results) override;
+    hardware::Return<void> processCaptureResult(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_2::CaptureResult>& results) override;
+    hardware::Return<void> notify(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_2::NotifyMsg>& msgs) override;
+
+    hardware::Return<void> requestStreamBuffers(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_5::BufferRequest>& bufReqs,
+            requestStreamBuffers_cb _hidl_cb) override;
+
+    hardware::Return<void> returnStreamBuffers(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
+
+    /**
+     * End of CameraOfflineSessionBase interface
+     */
+
+  private:
+    sp<hardware::camera::device::V3_6::ICameraOfflineSession> mSession;
+    // FMQ to write result on. Must be guarded by mProcessCaptureResultLock.
+    std::unique_ptr<ResultMetadataQueue> mResultMetadataQueue;
+
+    virtual void disconnectSession() override;
+}; // class HidlCamera3OfflineSession
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
new file mode 100644
index 0000000..afe9d56
--- /dev/null
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HidlCamera3-OutputUtils"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+// Convenience macro for transitioning to the error state
+#define SET_ERR(fmt, ...) states.setErrIntf.setErrorState(   \
+    "%s: " fmt, __FUNCTION__,                         \
+    ##__VA_ARGS__)
+
+#include <inttypes.h>
+
+#include <utils/Log.h>
+#include <utils/SortedVector.h>
+#include <utils/Trace.h>
+
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+
+#include <android/hardware/camera/device/3.4/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.5/ICameraDeviceSession.h>
+
+#include <camera/CameraUtils.h>
+#include <camera_metadata_hidden.h>
+
+#include "device3/hidl/HidlCamera3OutputUtils.h"
+#include "device3/Camera3OutputUtilsTemplated.h"
+
+#include "system/camera_metadata.h"
+
+using namespace android::camera3;
+using namespace android::hardware::camera;
+
+namespace android {
+namespace camera3 {
+
+void processOneCaptureResultLocked(
+        HidlCaptureOutputStates& states,
+        const hardware::camera::device::V3_2::CaptureResult& result,
+        const hardware::hidl_vec<
+                hardware::camera::device::V3_4::PhysicalCameraMetadata> &physicalCameraMetadata) {
+    processOneCaptureResultLockedT<HidlCaptureOutputStates,
+        hardware::camera::device::V3_2::CaptureResult,
+        hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata>,
+        hardware::hidl_vec<uint8_t>, ResultMetadataQueue,
+        hardware::camera::device::V3_2::BufferStatus>(states, result, physicalCameraMetadata);
+}
+
+void notify(CaptureOutputStates& states,
+        const hardware::camera::device::V3_8::NotifyMsg& msg) {
+    using android::hardware::camera::device::V3_2::MsgType;
+
+    hardware::camera::device::V3_2::NotifyMsg msg_3_2;
+    msg_3_2.type = msg.type;
+    bool hasReadoutTime = false;
+    uint64_t readoutTime = 0;
+    switch (msg.type) {
+        case MsgType::ERROR:
+            msg_3_2.msg.error = msg.msg.error;
+            break;
+        case MsgType::SHUTTER:
+            msg_3_2.msg.shutter = msg.msg.shutter.v3_2;
+            hasReadoutTime = true;
+            readoutTime = msg.msg.shutter.readoutTimestamp;
+            break;
+    }
+    notify(states, msg_3_2, hasReadoutTime, readoutTime);
+}
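+// Editor's illustrative example (not part of the original change): for a V3_8 SHUTTER
+// message with msg.msg.shutter.v3_2.timestamp == T and msg.msg.shutter.readoutTimestamp == R,
+// the conversion above forwards a V3_2 message carrying T and passes hasReadoutTime = true,
+// readoutTime = R, so the V3_2 overload below fills readout_timestamp with R instead of
+// falling back to the start-of-exposure timestamp T.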
+
+void notify(CaptureOutputStates& states,
+        const hardware::camera::device::V3_2::NotifyMsg& msg,
+        bool hasReadoutTime, uint64_t readoutTime) {
+
+    using android::hardware::camera::device::V3_2::MsgType;
+    using android::hardware::camera::device::V3_2::ErrorCode;
+
+    ATRACE_CALL();
+    camera_notify_msg m;
+    switch (msg.type) {
+        case MsgType::ERROR:
+            m.type = CAMERA_MSG_ERROR;
+            m.message.error.frame_number = msg.msg.error.frameNumber;
+            if (msg.msg.error.errorStreamId >= 0) {
+                sp<Camera3StreamInterface> stream =
+                        states.outputStreams.get(msg.msg.error.errorStreamId);
+                if (stream == nullptr) {
+                    ALOGE("%s: Frame %d: Invalid error stream id %d", __FUNCTION__,
+                            m.message.error.frame_number, msg.msg.error.errorStreamId);
+                    return;
+                }
+                m.message.error.error_stream = stream->asHalStream();
+            } else {
+                m.message.error.error_stream = nullptr;
+            }
+            switch (msg.msg.error.errorCode) {
+                case ErrorCode::ERROR_DEVICE:
+                    m.message.error.error_code = CAMERA_MSG_ERROR_DEVICE;
+                    break;
+                case ErrorCode::ERROR_REQUEST:
+                    m.message.error.error_code = CAMERA_MSG_ERROR_REQUEST;
+                    break;
+                case ErrorCode::ERROR_RESULT:
+                    m.message.error.error_code = CAMERA_MSG_ERROR_RESULT;
+                    break;
+                case ErrorCode::ERROR_BUFFER:
+                    m.message.error.error_code = CAMERA_MSG_ERROR_BUFFER;
+                    break;
+            }
+            break;
+        case MsgType::SHUTTER:
+            m.type = CAMERA_MSG_SHUTTER;
+            m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
+            m.message.shutter.timestamp = msg.msg.shutter.timestamp;
+            m.message.shutter.readout_timestamp = hasReadoutTime ?
+                    readoutTime : m.message.shutter.timestamp;
+            break;
+    }
+    notify(states, &m);
+}
+
+
+
+// The buffers requested through this call are not tied to any particular CaptureRequest.
+// They may be used by the HAL for a particular frame's output buffer or for its internal
+// use. If the HAL does use a buffer from the requested list for a particular frame's
+// output, that buffer will be returned with the processCaptureResult call corresponding
+// to the frame. The other buffers will be returned through returnStreamBuffers. The
+// buffers returned via returnStreamBuffers will not have a valid timestamp (it is 0) and
+// will be dropped by the BufferQueue.
+void requestStreamBuffers(RequestBufferStates& states,
+        const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
+        hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb _hidl_cb) {
+    using android::hardware::camera::device::V3_2::BufferStatus;
+    using android::hardware::camera::device::V3_2::StreamBuffer;
+    using android::hardware::camera::device::V3_5::BufferRequestStatus;
+    using android::hardware::camera::device::V3_5::StreamBufferRet;
+    using android::hardware::camera::device::V3_5::StreamBufferRequestError;
+
+    std::lock_guard<std::mutex> lock(states.reqBufferLock);
+
+    hardware::hidl_vec<StreamBufferRet> bufRets;
+    if (!states.useHalBufManager) {
+        ALOGE("%s: Camera %s does not support HAL buffer management",
+                __FUNCTION__, states.cameraId.string());
+        _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
+        return;
+    }
+
+    SortedVector<int32_t> streamIds;
+    ssize_t sz = streamIds.setCapacity(bufReqs.size());
+    if (sz < 0 || static_cast<size_t>(sz) != bufReqs.size()) {
+        ALOGE("%s: failed to allocate memory for %zu buffer requests",
+                __FUNCTION__, bufReqs.size());
+        _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
+        return;
+    }
+
+    if (bufReqs.size() > states.outputStreams.size()) {
+        ALOGE("%s: too many buffer requests (%zu > # of output streams %zu)",
+                __FUNCTION__, bufReqs.size(), states.outputStreams.size());
+        _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
+        return;
+    }
+
+    // Check for repeated streamId
+    for (const auto& bufReq : bufReqs) {
+        if (streamIds.indexOf(bufReq.streamId) != NAME_NOT_FOUND) {
+            ALOGE("%s: Stream %d appears multiple times in buffer requests",
+                    __FUNCTION__, bufReq.streamId);
+            _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
+            return;
+        }
+        streamIds.add(bufReq.streamId);
+    }
+
+    if (!states.reqBufferIntf.startRequestBuffer()) {
+        ALOGE("%s: request buffer disallowed while camera service is configuring",
+                __FUNCTION__);
+        _hidl_cb(BufferRequestStatus::FAILED_CONFIGURING, bufRets);
+        return;
+    }
+
+    bufRets.resize(bufReqs.size());
+
+    bool allReqsSucceeds = true;
+    bool oneReqSucceeds = false;
+    for (size_t i = 0; i < bufReqs.size(); i++) {
+        const auto& bufReq = bufReqs[i];
+        auto& bufRet = bufRets[i];
+        int32_t streamId = bufReq.streamId;
+        sp<Camera3OutputStreamInterface> outputStream = states.outputStreams.get(streamId);
+        if (outputStream == nullptr) {
+            ALOGE("%s: Output stream id %d not found!", __FUNCTION__, streamId);
+            hardware::hidl_vec<StreamBufferRet> emptyBufRets;
+            _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, emptyBufRets);
+            states.reqBufferIntf.endRequestBuffer();
+            return;
+        }
+
+        bufRet.streamId = streamId;
+        if (outputStream->isAbandoned()) {
+            bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
+            allReqsSucceeds = false;
+            continue;
+        }
+
+        size_t handOutBufferCount = outputStream->getOutstandingBuffersCount();
+        uint32_t numBuffersRequested = bufReq.numBuffersRequested;
+        size_t totalHandout = handOutBufferCount + numBuffersRequested;
+        uint32_t maxBuffers = outputStream->asHalStream()->max_buffers;
+        if (totalHandout > maxBuffers) {
+            // Not able to allocate enough buffer. Exit early for this stream
+            ALOGE("%s: requesting too many buffers for stream %d: at HAL: %zu + requesting: %d"
+                    " > max: %d", __FUNCTION__, streamId, handOutBufferCount,
+                    numBuffersRequested, maxBuffers);
+            bufRet.val.error(StreamBufferRequestError::MAX_BUFFER_EXCEEDED);
+            allReqsSucceeds = false;
+            continue;
+        }
+
+        hardware::hidl_vec<StreamBuffer> tmpRetBuffers(numBuffersRequested);
+        bool currentReqSucceeds = true;
+        std::vector<camera_stream_buffer_t> streamBuffers(numBuffersRequested);
+        std::vector<buffer_handle_t> newBuffers;
+        size_t numAllocatedBuffers = 0;
+        size_t numPushedInflightBuffers = 0;
+        for (size_t b = 0; b < numBuffersRequested; b++) {
+            camera_stream_buffer_t& sb = streamBuffers[b];
+            // Since this method can run concurrently with the request thread,
+            // we need to update the wait duration every time we call getBuffer.
+            nsecs_t waitDuration = states.reqBufferIntf.getWaitDuration();
+            status_t res = outputStream->getBuffer(&sb, waitDuration);
+            if (res != OK) {
+                if (res == NO_INIT || res == DEAD_OBJECT) {
+                    ALOGV("%s: Can't get output buffer for stream %d: %s (%d)",
+                            __FUNCTION__, streamId, strerror(-res), res);
+                    bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
+                    states.sessionStatsBuilder.stopCounter(streamId);
+                } else {
+                    ALOGE("%s: Can't get output buffer for stream %d: %s (%d)",
+                            __FUNCTION__, streamId, strerror(-res), res);
+                    if (res == TIMED_OUT || res == NO_MEMORY) {
+                        bufRet.val.error(StreamBufferRequestError::NO_BUFFER_AVAILABLE);
+                    } else {
+                        bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
+                    }
+                }
+                currentReqSucceeds = false;
+                break;
+            }
+            numAllocatedBuffers++;
+
+            buffer_handle_t *buffer = sb.buffer;
+            auto pair = states.bufferRecordsIntf.getBufferId(*buffer, streamId);
+            bool isNewBuffer = pair.first;
+            uint64_t bufferId = pair.second;
+            StreamBuffer& hBuf = tmpRetBuffers[b];
+
+            hBuf.streamId = streamId;
+            hBuf.bufferId = bufferId;
+            hBuf.buffer = (isNewBuffer) ? *buffer : nullptr;
+            hBuf.status = BufferStatus::OK;
+            hBuf.releaseFence = nullptr;
+            if (isNewBuffer) {
+                newBuffers.push_back(*buffer);
+            }
+
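+            // Editor's comment (not part of the original change): wrap the raw acquire fence
+            // FD in a single-FD native_handle so it can be sent over HIDL; setTo() with
+            // shouldOwn == true hands ownership of the created handle to the hidl_handle.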
+            native_handle_t *acquireFence = nullptr;
+            if (sb.acquire_fence != -1) {
+                acquireFence = native_handle_create(1,0);
+                acquireFence->data[0] = sb.acquire_fence;
+            }
+            hBuf.acquireFence.setTo(acquireFence, /*shouldOwn*/true);
+            hBuf.releaseFence = nullptr;
+
+            res = states.bufferRecordsIntf.pushInflightRequestBuffer(bufferId, buffer, streamId);
+            if (res != OK) {
+                ALOGE("%s: Can't register request buffer for stream %d: %s (%d)",
+                        __FUNCTION__, streamId, strerror(-res), res);
+                bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
+                currentReqSucceeds = false;
+                break;
+            }
+            numPushedInflightBuffers++;
+        }
+        if (currentReqSucceeds) {
+            bufRet.val.buffers(std::move(tmpRetBuffers));
+            oneReqSucceeds = true;
+        } else {
+            allReqsSucceeds = false;
+            for (size_t b = 0; b < numPushedInflightBuffers; b++) {
+                StreamBuffer& hBuf = tmpRetBuffers[b];
+                buffer_handle_t* buffer;
+                status_t res = states.bufferRecordsIntf.popInflightRequestBuffer(
+                        hBuf.bufferId, &buffer);
+                if (res != OK) {
+                    SET_ERR("%s: popInflightRequestBuffer failed for stream %d: %s (%d)",
+                            __FUNCTION__, streamId, strerror(-res), res);
+                }
+            }
+            for (size_t b = 0; b < numAllocatedBuffers; b++) {
+                camera_stream_buffer_t& sb = streamBuffers[b];
+                sb.acquire_fence = -1;
+                sb.status = CAMERA_BUFFER_STATUS_ERROR;
+            }
+            returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
+                    streamBuffers.data(), numAllocatedBuffers, /*timestamp*/0,
+                    /*readoutTimestamp*/0, /*requested*/false,
+                    /*requestTimeNs*/0, states.sessionStatsBuilder);
+            for (auto buf : newBuffers) {
+                states.bufferRecordsIntf.removeOneBufferCache(streamId, buf);
+            }
+        }
+    }
+
+    _hidl_cb(allReqsSucceeds ? BufferRequestStatus::OK :
+            oneReqSucceeds ? BufferRequestStatus::FAILED_PARTIAL :
+                             BufferRequestStatus::FAILED_UNKNOWN,
+            bufRets);
+    states.reqBufferIntf.endRequestBuffer();
+}
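+// Editor's worked example of the status mapping above (not part of the original change):
+// with two buffer requests where the first stream hands out its buffers successfully and
+// the second stream is abandoned, allReqsSucceeds ends up false while oneReqSucceeds stays
+// true, so the callback reports BufferRequestStatus::FAILED_PARTIAL, with per-stream
+// results in bufRets (filled buffers for the first stream, STREAM_DISCONNECTED for the
+// second).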
+
+void returnStreamBuffers(ReturnBufferStates& states,
+        const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
+    returnStreamBuffersT(states, buffers);
+}
+
+} // namespace camera3
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.h b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.h
new file mode 100644
index 0000000..583d738
--- /dev/null
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_HIDL_CAMERA3_OUTPUT_UTILS_H
+#define ANDROID_SERVERS_HIDL_CAMERA3_OUTPUT_UTILS_H
+
+#include <memory>
+#include <mutex>
+
+#include <cutils/native_handle.h>
+
+#include <fmq/MessageQueue.h>
+
+#include <common/CameraDeviceBase.h>
+
+#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
+
+#include "device3/BufferUtils.h"
+//#include "device3/DistortionMapper.h"
+//#include "device3/ZoomRatioMapper.h"
+//#include "device3/RotateAndCropMapper.h"
+#include "device3/InFlightRequest.h"
+#include "device3/Camera3Stream.h"
+//#include "device3/Camera3OutputStreamInterface.h"
+#include "device3/Camera3OutputUtils.h"
+//#include "utils/SessionStatsBuilder.h"
+//#include "utils/TagMonitor.h"
+
+namespace android {
+
+using ResultMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
+
+namespace camera3 {
+
+    /**
+     * Helper methods shared between HidlCamera3Device/HidlCamera3OfflineSession for HAL callbacks
+     */
+    // Camera3Device/Camera3OfflineSession internal states used in notify/processCaptureResult
+    // callbacks
+    struct HidlCaptureOutputStates : public CaptureOutputStates {
+        std::unique_ptr<ResultMetadataQueue>& fmq;
+    };
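+    // Editor's note (not part of the original change): HidlCaptureOutputStates is
+    // brace-initialized as an aggregate by its users, e.g. in HidlCamera3OfflineSession:
+    //   HidlCaptureOutputStates states {{ /* CaptureOutputStates members */ }, mResultMetadataQueue};
+    // so "fmq" binds by reference to the owner's result metadata queue.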
+
+    // Handle one capture result. Assume callers hold the lock to serialize all
+    // processCaptureResult calls
+    void processOneCaptureResultLocked(
+            HidlCaptureOutputStates& states,
+            const hardware::camera::device::V3_2::CaptureResult& result,
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_4::PhysicalCameraMetadata>
+                            &physicalCameraMetadata);
+
+    // Handle one notify message
+    void notify(CaptureOutputStates& states,
+            const hardware::camera::device::V3_2::NotifyMsg& msg,
+            bool hasReadoutTime = false, uint64_t readoutTime = 0LL);
+    void notify(CaptureOutputStates& states,
+            const hardware::camera::device::V3_8::NotifyMsg& msg);
+
+    void requestStreamBuffers(RequestBufferStates& states,
+            const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
+            hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb
+                    _hidl_cb);
+    void returnStreamBuffers(ReturnBufferStates& states,
+            const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers);
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
index 8e619e1..cca3f2e 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
@@ -70,6 +70,11 @@
   return binder::Status::ok();
 }
 
+::android::binder::Status H2BCameraServiceListener::onTorchStrengthLevelChanged(
+    const ::android::String16&, int32_t) {
+  return binder::Status::ok();
+}
+
 } // implementation
 } // V2_0
 } // common
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
index 7148035..7ef413f 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
@@ -54,6 +54,8 @@
 
     virtual ::android::binder::Status onTorchStatusChanged(
             int32_t status, const ::android::String16& cameraId) override;
+    virtual ::android::binder::Status onTorchStrengthLevelChanged(
+            const ::android::String16& cameraId, int32_t newStrengthLevel) override;
     virtual binder::Status onCameraAccessPrioritiesChanged() {
         // TODO: no implementation yet.
         return binder::Status::ok();
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 7d1b3cf..a812587 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -279,6 +279,9 @@
         size_t numSections = sectionNames->size();
         std::vector<std::vector<HVendorTag>> tagsBySection(numSections);
         int tagCount = desc->getTagCount();
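+        // Editor's note (not part of the original change): skip sections that report no
+        // tags (or an error) so the tag vector below is never sized from a non-positive
+        // count.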
+        if (tagCount <= 0) {
+            continue;
+        }
         std::vector<uint32_t> tags(tagCount);
         desc->getTagArray(tags.data());
         for (int i = 0; i < tagCount; i++) {
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
index 3d74f0b..ca73e4c 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
@@ -59,11 +59,17 @@
         "android.hardware.camera.device@3.5",
         "android.hardware.camera.device@3.6",
         "android.hardware.camera.device@3.7",
+        "android.hardware.camera.device@3.8",
     ],
     fuzz_config: {
         cc: [
             "android-media-fuzzing-reports@google.com",
         ],
         componentid: 155276,
+        libfuzzer_options: [
+            // based on b/187360866
+            "timeout=770",
+        ],
     },
 }
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
index e46bf74..97d7bf4 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
@@ -466,6 +466,12 @@
         // No op
         return binder::Status::ok();
     }
+
+    virtual binder::Status onTorchStrengthLevelChanged(const String16& /*cameraId*/,
+            int32_t /*torchStrength*/) {
+        // No op
+        return binder::Status::ok();
+    }
 };
 
 class TestCameraDeviceCallbacks : public hardware::camera2::BnCameraDeviceCallbacks {
diff --git a/services/camera/libcameraservice/tests/Android.bp b/services/camera/libcameraservice/tests/Android.bp
new file mode 100644
index 0000000..c3f0620
--- /dev/null
+++ b/services/camera/libcameraservice/tests/Android.bp
@@ -0,0 +1,125 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    default_applicable_licenses: [
+        "frameworks_av_services_camera_libcameraservice_license",
+    ],
+}
+
+cc_test {
+    name: "cameraservice_test",
+
+    include_dirs: [
+        "system/media/private/camera/include",
+        "external/dynamic_depth/includes",
+        "external/dynamic_depth/internal",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libcutils",
+        "libcameraservice",
+        "libhidlbase",
+        "liblog",
+        "libcamera_client",
+        "libcamera_metadata",
+        "libui",
+        "libutils",
+        "libjpeg",
+        "libexif",
+        "android.hardware.camera.common@1.0",
+        "android.hardware.camera.provider@2.4",
+        "android.hardware.camera.provider@2.5",
+        "android.hardware.camera.provider@2.6",
+        "android.hardware.camera.provider@2.7",
+        "android.hardware.camera.device@1.0",
+        "android.hardware.camera.device@3.2",
+        "android.hardware.camera.device@3.4",
+        "android.hardware.camera.device@3.7",
+        "android.hardware.camera.device@3.8",
+        "android.hidl.token@1.0-utils",
+    ],
+
+    static_libs: [
+        "libgmock",
+    ],
+
+    srcs: [
+        "CameraProviderManagerTest.cpp",
+        "ClientManagerTest.cpp",
+        "DepthProcessorTest.cpp",
+        "DistortionMapperTest.cpp",
+        "ExifUtilsTest.cpp",
+        "NV12Compressor.cpp",
+        "RotateAndCropMapperTest.cpp",
+        "ZoomRatioTest.cpp",
+    ],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+
+    test_suites: ["device-tests"],
+
+}
+
+cc_test_host {
+    name: "cameraservice_test_host",
+
+    include_dirs: [
+        "frameworks/av/camera/include",
+        "frameworks/av/camera/include/camera",
+        "frameworks/native/libs/binder/include_activitymanager"
+    ],
+
+    shared_libs: [
+        "libactivity_manager_procstate_aidl-cpp",
+        "libbase",
+        "libbinder",
+        "libcamera_metadata",
+        "libdynamic_depth",
+        "libexif",
+        "libjpeg",
+        "liblog",
+        "libutils",
+    ],
+
+    static_libs: [
+        "libcamera_client_host",
+        "libcameraservice_device_independent",
+        "libgmock",
+    ],
+
+    srcs: [
+        "ClientManagerTest.cpp",
+        "DepthProcessorTest.cpp",
+        "DistortionMapperTest.cpp",
+        "ExifUtilsTest.cpp",
+        "NV12Compressor.cpp",
+        "RotateAndCropMapperTest.cpp",
+        "ZoomRatioTest.cpp",
+    ],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+
+    test_suites: ["device-tests"],
+
+}
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
deleted file mode 100644
index 0b5ad79..0000000
--- a/services/camera/libcameraservice/tests/Android.mk
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= $(call all-cpp-files-under, .)
-
-LOCAL_SHARED_LIBRARIES := \
-    libbase \
-    libcutils \
-    libcameraservice \
-    libhidlbase \
-    liblog \
-    libcamera_client \
-    libcamera_metadata \
-    libui \
-    libutils \
-    libjpeg \
-    libexif \
-    android.hardware.camera.common@1.0 \
-    android.hardware.camera.provider@2.4 \
-    android.hardware.camera.provider@2.5 \
-    android.hardware.camera.provider@2.6 \
-    android.hardware.camera.provider@2.7 \
-    android.hardware.camera.device@1.0 \
-    android.hardware.camera.device@3.2 \
-    android.hardware.camera.device@3.4 \
-    android.hardware.camera.device@3.7 \
-    android.hidl.token@1.0-utils
-
-LOCAL_STATIC_LIBRARIES := \
-    libgmock
-
-LOCAL_C_INCLUDES += \
-    system/media/private/camera/include \
-    external/dynamic_depth/includes \
-    external/dynamic_depth/internal \
-
-LOCAL_CFLAGS += -Wall -Wextra -Werror
-
-LOCAL_SANITIZE := address
-
-LOCAL_MODULE:= cameraservice_test
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/../NOTICE
-LOCAL_COMPATIBILITY_SUITE := device-tests
-LOCAL_MODULE_TAGS := tests
-
-include $(BUILD_NATIVE_TEST)
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index a74fd9d..e9f6979 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -212,7 +212,7 @@
  * Simple test version of the interaction proxy, to use to inject onRegistered calls to the
  * CameraProviderManager
  */
-struct TestInteractionProxy : public CameraProviderManager::ServiceInteractionProxy {
+struct TestInteractionProxy : public CameraProviderManager::HidlServiceInteractionProxy {
     sp<hidl::manager::V1_0::IServiceNotification> mManagerNotificationInterface;
     sp<TestICameraProvider> mTestCameraProvider;
 
@@ -269,11 +269,13 @@
     ~TestStatusListener() {}
 
     void onDeviceStatusChanged(const String8 &,
-            hardware::camera::common::V1_0::CameraDeviceStatus) override {}
+            CameraDeviceStatus) override {}
     void onDeviceStatusChanged(const String8 &, const String8 &,
-            hardware::camera::common::V1_0::CameraDeviceStatus) override {}
+            CameraDeviceStatus) override {}
     void onTorchStatusChanged(const String8 &,
-            hardware::camera::common::V1_0::TorchModeStatus) override {}
+            TorchModeStatus) override {}
+    void onTorchStatusChanged(const String8 &,
+            TorchModeStatus, SystemCameraKind) override {}
     void onNewProviderRegistered() override {}
 };
 
diff --git a/services/camera/libcameraservice/tests/ExifUtilsTest.cpp b/services/camera/libcameraservice/tests/ExifUtilsTest.cpp
new file mode 100644
index 0000000..3de4bf2
--- /dev/null
+++ b/services/camera/libcameraservice/tests/ExifUtilsTest.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ExifUtilsTest"
+
+#include <camera/CameraMetadata.h>
+#include "../utils/ExifUtils.h"
+#include <gtest/gtest.h>
+
+using android::camera3::ExifUtils;
+using android::camera3::ExifOrientation;
+using android::CameraMetadata;
+
+constexpr uint32_t kImageWidth = 1920;
+constexpr uint32_t kImageHeight = 1440;
+constexpr ExifOrientation kExifOrientation = ExifOrientation::ORIENTATION_0_DEGREES;
+
+// Test that setFromMetadata works correctly, without errors.
+TEST(ExifUtilsTest, SetFromMetadataTest) {
+    std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+    uint8_t invalidSensorPixelMode = 2;
+    uint8_t validSensorPixelMode = ANDROID_SENSOR_PIXEL_MODE_DEFAULT;
+    CameraMetadata metadata;
+    // Empty staticInfo
+    CameraMetadata staticInfo;
+    ASSERT_TRUE(utils->initializeEmpty());
+    ASSERT_TRUE(
+            metadata.update(ANDROID_SENSOR_PIXEL_MODE, &invalidSensorPixelMode, 1) == android::OK);
+    ASSERT_FALSE(utils->setFromMetadata(metadata, staticInfo, kImageWidth, kImageHeight));
+    ASSERT_TRUE(
+            metadata.update(ANDROID_SENSOR_PIXEL_MODE, &validSensorPixelMode, 1) == android::OK);
+    ASSERT_TRUE(utils->setFromMetadata(metadata, staticInfo, kImageWidth, kImageHeight));
+    ASSERT_TRUE(utils->setImageWidth(kImageWidth));
+    ASSERT_TRUE(utils->setImageHeight(kImageHeight));
+    ASSERT_TRUE(utils->setOrientationValue(kExifOrientation));
+    ASSERT_TRUE(utils->generateApp1());
+    const uint8_t* exifBuffer = utils->getApp1Buffer();
+    ASSERT_NE(exifBuffer, nullptr);
+    size_t exifBufferSize = utils->getApp1Length();
+    ASSERT_TRUE(exifBufferSize != 0);
+}
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.h b/services/camera/libcameraservice/tests/NV12Compressor.h
index ee22d5e..a959871 100644
--- a/services/camera/libcameraservice/tests/NV12Compressor.h
+++ b/services/camera/libcameraservice/tests/NV12Compressor.h
@@ -19,6 +19,7 @@
 
 #include <setjmp.h>
 #include <stdlib.h>
+#include <stdio.h>
 extern "C" {
 #include <jpeglib.h>
 #include <jerror.h>
diff --git a/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp b/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp
new file mode 100644
index 0000000..025521a
--- /dev/null
+++ b/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PreviewSchedulerTest"
+
+#include <chrono>
+#include <thread>
+#include <utility>
+
+#include <gtest/gtest.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+
+#include <gui/BufferItemConsumer.h>
+#include <gui/BufferQueue.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/Surface.h>
+
+#include "../device3/Camera3OutputStream.h"
+#include "../device3/PreviewFrameScheduler.h"
+
+using namespace android;
+using namespace android::camera3;
+
+// Consumer buffer available listener
+class SimpleListener : public BufferItemConsumer::FrameAvailableListener {
+public:
+    SimpleListener(size_t frameCount): mFrameCount(frameCount) {}
+
+    void waitForFrames() {
+        Mutex::Autolock lock(mMutex);
+        while (mFrameCount > 0) {
+            mCondition.wait(mMutex);
+        }
+    }
+
+    void onFrameAvailable(const BufferItem& /*item*/) override {
+        Mutex::Autolock lock(mMutex);
+        if (mFrameCount > 0) {
+            mFrameCount--;
+            mCondition.signal();
+        }
+    }
+
+    void reset(size_t frameCount) {
+        Mutex::Autolock lock(mMutex);
+        mFrameCount = frameCount;
+    }
+private:
+    size_t mFrameCount;
+    Mutex mMutex;
+    Condition mCondition;
+};
+
+// Test the PreviewFrameScheduler functionality of re-timing buffers
+TEST(PreviewSchedulerTest, BasicPreviewSchedulerTest) {
+    const int ID = 0;
+    const int FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    const uint32_t WIDTH = 640;
+    const uint32_t HEIGHT = 480;
+    const int32_t TRANSFORM = 0;
+    const nsecs_t T_OFFSET = 0;
+    const android_dataspace DATASPACE = HAL_DATASPACE_UNKNOWN;
+    const camera_stream_rotation_t ROTATION = CAMERA_STREAM_ROTATION_0;
+    const String8 PHY_ID;
+    const std::unordered_set<int32_t> PIX_MODES;
+    const int BUFFER_COUNT = 4;
+    const int TOTAL_BUFFER_COUNT = BUFFER_COUNT * 2;
+
+    // Create buffer queue
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    ASSERT_NE(producer, nullptr);
+    ASSERT_NE(consumer, nullptr);
+    ASSERT_EQ(NO_ERROR, consumer->setDefaultBufferSize(WIDTH, HEIGHT));
+
+    // Set up consumer
+    sp<BufferItemConsumer> bufferConsumer = new BufferItemConsumer(consumer,
+            GRALLOC_USAGE_HW_COMPOSER, BUFFER_COUNT);
+    ASSERT_NE(bufferConsumer, nullptr);
+    sp<SimpleListener> consumerListener = new SimpleListener(BUFFER_COUNT);
+    bufferConsumer->setFrameAvailableListener(consumerListener);
+
+    // Set up producer
+    sp<Surface> surface = new Surface(producer);
+    sp<StubProducerListener> listener = new StubProducerListener();
+    ASSERT_EQ(NO_ERROR, surface->connect(NATIVE_WINDOW_API_CPU, listener));
+    sp<ANativeWindow> anw(surface);
+    ASSERT_EQ(NO_ERROR, native_window_set_buffer_count(anw.get(), TOTAL_BUFFER_COUNT));
+
+    // Create Camera3OutputStream and PreviewFrameScheduler
+    sp<Camera3OutputStream> stream = new Camera3OutputStream(ID, surface, WIDTH, HEIGHT,
+            FORMAT, DATASPACE, ROTATION, T_OFFSET, PHY_ID, PIX_MODES);
+    ASSERT_NE(stream, nullptr);
+    std::unique_ptr<PreviewFrameScheduler> scheduler =
+            std::make_unique<PreviewFrameScheduler>(*stream, surface);
+    ASSERT_NE(scheduler, nullptr);
+
+    // Each pair of nsecs_t values is {camera timestamp delta (negative means in the past), frame interval}
+    const std::pair<nsecs_t, nsecs_t> inputTimestamps[][BUFFER_COUNT] = {
+        // 30fps, 33ms interval
+        {{-100000000LL, 33333333LL}, {-66666667LL, 33333333LL},
+          {-33333333LL, 33333333LL}, {0, 0}},
+        // 30fps, variable interval
+        {{-100000000LL, 16666667LL}, {-66666667LL, 33333333LL},
+          {-33333333LL, 50000000LL}, {0, 0}},
+        // 60fps, 16.7ms interval
+        {{-50000000LL, 16666667LL}, {-33333333LL, 16666667LL},
+          {-16666667LL, 16666667LL}, {0, 0}},
+        // 60fps, variable interval
+        {{-50000000LL, 8666667LL}, {-33333333LL, 19666667LL},
+          {-16666667LL, 20666667LL}, {0, 0}},
+    };
+
+    // Go through different use cases, and check the buffer timestamp
+    size_t iterations = sizeof(inputTimestamps)/sizeof(inputTimestamps[0]);
+    for (size_t i = 0; i < iterations; i++) {
+        // Space out different test sets to reset the frame scheduler
+        nsecs_t timeBase = systemTime() - s2ns(1) * (iterations - i);
+        nsecs_t lastQueueTime = 0;
+        nsecs_t duration = 0;
+        for (size_t j = 0; j < BUFFER_COUNT; j++) {
+            ANativeWindowBuffer* buffer = nullptr;
+            int fenceFd;
+            ASSERT_EQ(NO_ERROR, anw->dequeueBuffer(anw.get(), &buffer, &fenceFd));
+
+            // Sleep to space out queuePreviewBuffer
+            nsecs_t currentTime = systemTime();
+            if (duration > 0 && duration > currentTime - lastQueueTime) {
+                std::this_thread::sleep_for(
+                        std::chrono::nanoseconds(duration + lastQueueTime - currentTime));
+            }
+            nsecs_t timestamp = timeBase + inputTimestamps[i][j].first;
+            ASSERT_EQ(NO_ERROR,
+                    scheduler->queuePreviewBuffer(timestamp, TRANSFORM, buffer, fenceFd));
+
+            lastQueueTime = systemTime();
+            duration = inputTimestamps[i][j].second;
+        }
+
+        // Collect output timestamps, making sure they are set either by the
+        // producer or by the scheduler.
+        consumerListener->waitForFrames();
+        nsecs_t outputTimestamps[BUFFER_COUNT];
+        for (size_t j = 0; j < BUFFER_COUNT; j++) {
+            BufferItem bufferItem;
+            ASSERT_EQ(NO_ERROR, bufferConsumer->acquireBuffer(&bufferItem, 0/*presentWhen*/));
+
+            outputTimestamps[j] = bufferItem.mTimestamp;
+            ALOGV("%s: [%zu][%zu]: input: %" PRId64 ", output: %" PRId64, __FUNCTION__,
+                  i, j, timeBase + inputTimestamps[i][j].first, bufferItem.mTimestamp);
+            ASSERT_GE(bufferItem.mTimestamp, timeBase + inputTimestamps[i][j].first);
+
+            ASSERT_EQ(NO_ERROR, bufferConsumer->releaseBuffer(bufferItem));
+        }
+
+        // Check the output timestamp intervals are aligned with input intervals
+        const nsecs_t SHIFT_THRESHOLD = ms2ns(2);
+        for (size_t j = 0; j < BUFFER_COUNT - 1; j ++) {
+            nsecs_t interval_shift = outputTimestamps[j+1] - outputTimestamps[j] -
+                    (inputTimestamps[i][j+1].first - inputTimestamps[i][j].first);
+            ASSERT_LE(std::abs(interval_shift), SHIFT_THRESHOLD);
+        }
+
+        consumerListener->reset(BUFFER_COUNT);
+    }
+
+    // Disconnect the surface
+    ASSERT_EQ(NO_ERROR, surface->disconnect(NATIVE_WINDOW_API_CPU));
+}
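The assertion loop above compares consecutive output intervals against the input intervals. A minimal standalone sketch of that check (illustrative only, not part of the patch; the helper name and array layout are assumptions):

    #include <cstdint>
    #include <cstdlib>

    // Returns true if the re-timed output timestamps preserve the input frame
    // intervals to within |thresholdNs| (the test above uses ms2ns(2)).
    static bool intervalsAligned(const int64_t* inputNs, const int64_t* outputNs,
                                 size_t count, int64_t thresholdNs) {
        for (size_t i = 0; i + 1 < count; i++) {
            int64_t shift = (outputNs[i + 1] - outputNs[i]) -
                            (inputNs[i + 1] - inputNs[i]);
            if (std::abs(shift) > thresholdNs) {
                return false;
            }
        }
        return true;
    }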
diff --git a/services/camera/libcameraservice/tests/how_to_run.txt b/services/camera/libcameraservice/tests/how_to_run.txt
new file mode 100644
index 0000000..93239e3
--- /dev/null
+++ b/services/camera/libcameraservice/tests/how_to_run.txt
@@ -0,0 +1,5 @@
+adb root &&
+m cameraservice_test &&
+adb push $ANDROID_PRODUCT_OUT/data/nativetest/cameraservice_test/cameraservice_test \
+    /data/nativetest/cameraservice_test/arm64/cameraservice_test &&
+adb shell /data/nativetest/cameraservice_test/arm64/cameraservice_test
\ No newline at end of file
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 76927c0..8699543 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -120,13 +120,12 @@
     proxyBinder->pingForUserUpdate();
 }
 
-bool CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
-        String16 packageName, int sensorOrientation, int lensFacing) {
+int CameraServiceProxyWrapper::getRotateAndCropOverride(String16 packageName, int lensFacing,
+        int userId) {
     sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
     if (proxyBinder == nullptr) return true;
-    bool ret = true;
-    auto status = proxyBinder->isRotateAndCropOverrideNeeded(packageName, sensorOrientation,
-            lensFacing, &ret);
+    int ret = 0;
+    auto status = proxyBinder->getRotateAndCropOverride(packageName, lensFacing, userId, &ret);
     if (!status.isOk()) {
         ALOGE("%s: Failed during top activity orientation query: %s", __FUNCTION__,
                 status.exceptionMessage().c_str());
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index ad9db68..f701e94 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -91,9 +91,8 @@
     // Ping camera service proxy for user update
     static void pingCameraServiceProxy();
 
-    // Check whether the current top activity needs a rotate and crop override.
-    static bool isRotateAndCropOverrideNeeded(String16 packageName, int sensorOrientation,
-            int lensFacing);
+    // Return the current top activity rotate and crop override.
+    static int getRotateAndCropOverride(String16 packageName, int lensFacing, int userId);
 };
 
 } // android
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
index 0198690..0cd4f5d 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.cpp
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -64,7 +64,7 @@
     ATRACE_END();
 }
 
-status_t CameraTraces::dump(int fd, const Vector<String16> &args __attribute__((unused))) {
+status_t CameraTraces::dump(int fd) {
     ALOGV("%s: fd = %d", __FUNCTION__, fd);
     Mutex::Autolock al(sImpl.tracesLock);
     List<ProcessCallStack>& pcsList = sImpl.pcsList;
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
index 13ca16d..71fa334 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.h
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -42,7 +42,7 @@
      *
      * <p>Each line is indented by DUMP_INDENT spaces.</p>
      */
-    static status_t dump(int fd, const Vector<String16>& args);
+    static status_t dump(int fd);
 
 private:
     enum {
diff --git a/services/camera/libcameraservice/utils/ExifUtils.cpp b/services/camera/libcameraservice/utils/ExifUtils.cpp
index 485705c..21f02db 100644
--- a/services/camera/libcameraservice/utils/ExifUtils.cpp
+++ b/services/camera/libcameraservice/utils/ExifUtils.cpp
@@ -920,7 +920,7 @@
     camera_metadata_ro_entry sensorPixelModeEntry = metadata.find(ANDROID_SENSOR_PIXEL_MODE);
     if (sensorPixelModeEntry.count != 0) {
         sensorPixelMode = sensorPixelModeEntry.data.u8[0];
-        if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT ||
+        if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT &&
             sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) {
             ALOGE("%s: Request sensor pixel mode is not one of the valid values %d",
                       __FUNCTION__, sensorPixelMode);
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl b/services/camera/libcameraservice/utils/IPCTransport.h
similarity index 73%
rename from media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl
rename to services/camera/libcameraservice/utils/IPCTransport.h
index a2cbf62..b8e80ac 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl
+++ b/services/camera/libcameraservice/utils/IPCTransport.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,11 +14,13 @@
  * limitations under the License.
  */
 
-package android.media;
+#ifndef ANDROID_SERVERS_IPC_H_
+#define ANDROID_SERVERS_IPC_H_
 
-/**
- * {@hide}
- */
-parcelable AudioPortConfigSessionExt {
-    int session;
-}
+enum class IPCTransport : uint32_t {
+  HIDL = 0,
+  AIDL = 1,
+  INVALID = 2
+};
+
+#endif
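A minimal usage sketch for the relocated IPCTransport enum (illustrative only, not part of the patch; the helper name is hypothetical):

    #include <string>

    // Map the transport type to a log-friendly string.
    static std::string transportToString(IPCTransport transport) {
        switch (transport) {
            case IPCTransport::HIDL: return "HIDL";
            case IPCTransport::AIDL: return "AIDL";
            default:                 return "INVALID";
        }
    }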
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index a239c81..f826d83 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -13,71 +13,28 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include <cutils/properties.h>
 
 #include "SessionConfigurationUtils.h"
 #include "../api2/DepthCompositeStream.h"
 #include "../api2/HeicCompositeStream.h"
+#include "android/hardware/camera/metadata/3.8/types.h"
 #include "common/CameraDeviceBase.h"
 #include "../CameraService.h"
-#include "device3/Camera3Device.h"
+#include "device3/hidl/HidlCamera3Device.h"
 #include "device3/Camera3OutputStream.h"
+#include "system/graphics-base-v1.1.h"
 
 using android::camera3::OutputStreamInfo;
 using android::camera3::OutputStreamInfo;
 using android::hardware::camera2::ICameraDeviceUser;
 using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
+using android::hardware::camera::metadata::V3_8::CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap;
 
 namespace android {
 namespace camera3 {
 
-int32_t SessionConfigurationUtils::PERF_CLASS_LEVEL =
-        property_get_int32("ro.odm.build.media_performance_class", 0);
-
-bool SessionConfigurationUtils::IS_PERF_CLASS = (PERF_CLASS_LEVEL == SDK_VERSION_S);
-
-camera3::Size SessionConfigurationUtils::getMaxJpegResolution(const CameraMetadata &metadata,
-        bool ultraHighResolution) {
-    int32_t maxJpegWidth = 0, maxJpegHeight = 0;
-    const int STREAM_CONFIGURATION_SIZE = 4;
-    const int STREAM_FORMAT_OFFSET = 0;
-    const int STREAM_WIDTH_OFFSET = 1;
-    const int STREAM_HEIGHT_OFFSET = 2;
-    const int STREAM_IS_INPUT_OFFSET = 3;
-
-    int32_t scalerSizesTag = ultraHighResolution ?
-            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION :
-                    ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS;
-    camera_metadata_ro_entry_t availableStreamConfigs =
-            metadata.find(scalerSizesTag);
-    if (availableStreamConfigs.count == 0 ||
-            availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) {
-        return camera3::Size(0, 0);
-    }
-
-    // Get max jpeg size (area-wise).
-    for (size_t i= 0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
-        int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
-        int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
-        int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
-        int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
-        if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT
-                && format == HAL_PIXEL_FORMAT_BLOB &&
-                (width * height > maxJpegWidth * maxJpegHeight)) {
-            maxJpegWidth = width;
-            maxJpegHeight = height;
-        }
-    }
-
-    return camera3::Size(maxJpegWidth, maxJpegHeight);
-}
-
-size_t SessionConfigurationUtils::getUHRMaxJpegBufferSize(camera3::Size uhrMaxJpegSize,
-        camera3::Size defaultMaxJpegSize, size_t defaultMaxJpegBufferSize) {
-    return (uhrMaxJpegSize.width * uhrMaxJpegSize.height) /
-            (defaultMaxJpegSize.width * defaultMaxJpegSize.height) * defaultMaxJpegBufferSize;
-}
-
 void StreamConfiguration::getStreamConfigurations(
         const CameraMetadata &staticInfo, int configuration,
         std::unordered_map<int, std::vector<StreamConfiguration>> *scm) {
@@ -126,65 +83,57 @@
     getStreamConfigurations(staticInfo, heicKey, scm);
 }
 
-int32_t SessionConfigurationUtils::getAppropriateModeTag(int32_t defaultTag, bool maxResolution) {
-    if (!maxResolution) {
-        return defaultTag;
+namespace SessionConfigurationUtils {
+
+int32_t PERF_CLASS_LEVEL =
+        property_get_int32("ro.odm.build.media_performance_class", 0);
+
+bool IS_PERF_CLASS = (PERF_CLASS_LEVEL == SDK_VERSION_S);
+
+camera3::Size getMaxJpegResolution(const CameraMetadata &metadata,
+        bool ultraHighResolution) {
+    int32_t maxJpegWidth = 0, maxJpegHeight = 0;
+    const int STREAM_CONFIGURATION_SIZE = 4;
+    const int STREAM_FORMAT_OFFSET = 0;
+    const int STREAM_WIDTH_OFFSET = 1;
+    const int STREAM_HEIGHT_OFFSET = 2;
+    const int STREAM_IS_INPUT_OFFSET = 3;
+
+    int32_t scalerSizesTag = ultraHighResolution ?
+            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION :
+                    ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS;
+    camera_metadata_ro_entry_t availableStreamConfigs =
+            metadata.find(scalerSizesTag);
+    if (availableStreamConfigs.count == 0 ||
+            availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) {
+        return camera3::Size(0, 0);
     }
-    switch (defaultTag) {
-        case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS:
-            return ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS:
-            return ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_SCALER_AVAILABLE_STALL_DURATIONS:
-            return ANDROID_SCALER_AVAILABLE_STALL_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS:
-            return ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS:
-            return ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS:
-            return ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_SENSOR_OPAQUE_RAW_SIZE:
-            return ANDROID_SENSOR_OPAQUE_RAW_SIZE_MAXIMUM_RESOLUTION;
-        case ANDROID_LENS_INTRINSIC_CALIBRATION:
-            return ANDROID_LENS_INTRINSIC_CALIBRATION_MAXIMUM_RESOLUTION;
-        case ANDROID_LENS_DISTORTION:
-            return ANDROID_LENS_DISTORTION_MAXIMUM_RESOLUTION;
-        default:
-            ALOGE("%s: Tag %d doesn't have a maximum resolution counterpart", __FUNCTION__,
-                    defaultTag);
-            return -1;
+
+    // Get max jpeg size (area-wise).
+    for (size_t i= 0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
+        int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
+        int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
+        int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
+        int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
+        if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT
+                && format == HAL_PIXEL_FORMAT_BLOB &&
+                (width * height > maxJpegWidth * maxJpegHeight)) {
+            maxJpegWidth = width;
+            maxJpegHeight = height;
+        }
     }
-    return -1;
+
+    return camera3::Size(maxJpegWidth, maxJpegHeight);
 }
 
-bool SessionConfigurationUtils::getArrayWidthAndHeight(const CameraMetadata *deviceInfo,
-        int32_t arrayTag, int32_t *width, int32_t *height) {
-    if (width == nullptr || height == nullptr) {
-        ALOGE("%s: width / height nullptr", __FUNCTION__);
-        return false;
-    }
-    camera_metadata_ro_entry_t entry;
-    entry = deviceInfo->find(arrayTag);
-    if (entry.count != 4) return false;
-    *width = entry.data.i32[2];
-    *height = entry.data.i32[3];
-    return true;
+size_t getUHRMaxJpegBufferSize(camera3::Size uhrMaxJpegSize,
+        camera3::Size defaultMaxJpegSize, size_t defaultMaxJpegBufferSize) {
+    return (uhrMaxJpegSize.width * uhrMaxJpegSize.height) /
+            (defaultMaxJpegSize.width * defaultMaxJpegSize.height) * defaultMaxJpegBufferSize;
 }
 
 StreamConfigurationPair
-SessionConfigurationUtils::getStreamConfigurationPair(const CameraMetadata &staticInfo) {
+getStreamConfigurationPair(const CameraMetadata &staticInfo) {
     camera3::StreamConfigurationPair streamConfigurationPair;
     camera3::StreamConfiguration::getStreamConfigurations(staticInfo, false,
             &streamConfigurationPair.mDefaultStreamConfigurationMap);
@@ -193,13 +142,13 @@
     return streamConfigurationPair;
 }
 
-int64_t SessionConfigurationUtils::euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
+int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
     int64_t d0 = x0 - x1;
     int64_t d1 = y0 - y1;
     return d0 * d0 + d1 * d1;
 }
 
-bool SessionConfigurationUtils::roundBufferDimensionNearest(int32_t width, int32_t height,
+bool roundBufferDimensionNearest(int32_t width, int32_t height,
         int32_t format, android_dataspace dataSpace,
         const CameraMetadata& info, bool maxResolution, /*out*/int32_t* outWidth,
         /*out*/int32_t* outHeight) {
@@ -260,7 +209,81 @@
     return true;
 }
 
-bool SessionConfigurationUtils::isPublicFormat(int32_t format)
+// Check if format is 10-bit compatible
+bool is10bitCompatibleFormat(int32_t format) {
+    switch(format) {
+        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+        case HAL_PIXEL_FORMAT_YCBCR_P010:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool isDynamicRangeProfileSupported(int dynamicRangeProfile, const CameraMetadata& staticInfo) {
+    if (dynamicRangeProfile == ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+        // Supported by default
+        return true;
+    }
+
+    camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    bool is10bitDynamicRangeSupported = false;
+    for (size_t i = 0; i < entry.count; ++i) {
+        uint8_t capability = entry.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT) {
+            is10bitDynamicRangeSupported = true;
+            break;
+        }
+    }
+
+    if (!is10bitDynamicRangeSupported) {
+        return false;
+    }
+
+    switch (dynamicRangeProfile) {
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_OEM:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_OEM_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_REF:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_REF_PO:
+            entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP);
+            for (size_t i = 0; i < entry.count; i += 2) {
+                if (dynamicRangeProfile == entry.data.i32[i]) {
+                    return true;
+                }
+            }
+
+            return false;
+        default:
+            return false;
+    }
+
+    return false;
+}
+
+// Check if the dynamic range profile requires 10-bit output
+bool is10bitDynamicRangeProfile(int32_t dynamicRangeProfile) {
+    switch (dynamicRangeProfile) {
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF_PO:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool isPublicFormat(int32_t format)
 {
     switch(format) {
         case HAL_PIXEL_FORMAT_RGBA_8888:
@@ -287,11 +310,11 @@
     }
 }
 
-binder::Status SessionConfigurationUtils::createSurfaceFromGbp(
+binder::Status createSurfaceFromGbp(
         OutputStreamInfo& streamInfo, bool isStreamInfoValid,
         sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
         const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
-        const std::vector<int32_t> &sensorPixelModesUsed){
+        const std::vector<int32_t> &sensorPixelModesUsed, int dynamicRangeProfile){
     // bufferProducer must be non-null
     if (gbp == nullptr) {
         String8 msg = String8::format("Camera %s: Surface is NULL", logicalCameraId.string());
@@ -389,6 +412,21 @@
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
+    if (!SessionConfigurationUtils::isDynamicRangeProfileSupported(dynamicRangeProfile,
+                physicalCameraMetadata)) {
+        String8 msg = String8::format("Camera %s: Dynamic range profile 0x%x not supported,"
+                " failed to create output stream", logicalCameraId.string(), dynamicRangeProfile);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+    if (SessionConfigurationUtils::is10bitDynamicRangeProfile(dynamicRangeProfile) &&
+            !SessionConfigurationUtils::is10bitCompatibleFormat(format)) {
+        String8 msg = String8::format("Camera %s: Format %#x is not 10-bit compatible with "
+                "dynamic range profile %#x, failed to create output stream",
+                logicalCameraId.string(), format, dynamicRangeProfile);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
 
     if (!isStreamInfoValid) {
         streamInfo.width = width;
@@ -397,6 +435,7 @@
         streamInfo.dataSpace = dataSpace;
         streamInfo.consumerUsage = consumerUsage;
         streamInfo.sensorPixelModesUsed = overriddenSensorPixelModes;
+        streamInfo.dynamicRangeProfile = dynamicRangeProfile;
         return binder::Status::ok();
     }
     if (width != streamInfo.width) {
@@ -437,35 +476,39 @@
     return binder::Status::ok();
 }
 
-void SessionConfigurationUtils::mapStreamInfo(const OutputStreamInfo &streamInfo,
+void mapStreamInfo(const OutputStreamInfo &streamInfo,
             camera3::camera_stream_rotation_t rotation, String8 physicalId,
-            int32_t groupId, hardware::camera::device::V3_7::Stream *stream /*out*/) {
+            int32_t groupId, hardware::camera::device::V3_8::Stream *stream /*out*/) {
     if (stream == nullptr) {
         return;
     }
 
-    stream->v3_4.v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
-    stream->v3_4.v3_2.width = streamInfo.width;
-    stream->v3_4.v3_2.height = streamInfo.height;
-    stream->v3_4.v3_2.format = Camera3Device::mapToPixelFormat(streamInfo.format);
+    stream->v3_7.v3_4.v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
+    stream->v3_7.v3_4.v3_2.width = streamInfo.width;
+    stream->v3_7.v3_4.v3_2.height = streamInfo.height;
+    stream->v3_7.v3_4.v3_2.format = HidlCamera3Device::mapToPixelFormat(streamInfo.format);
     auto u = streamInfo.consumerUsage;
     camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
-    stream->v3_4.v3_2.usage = Camera3Device::mapToConsumerUsage(u);
-    stream->v3_4.v3_2.dataSpace = Camera3Device::mapToHidlDataspace(streamInfo.dataSpace);
-    stream->v3_4.v3_2.rotation = Camera3Device::mapToStreamRotation(rotation);
-    stream->v3_4.v3_2.id = -1; // Invalid stream id
-    stream->v3_4.physicalCameraId = std::string(physicalId.string());
-    stream->v3_4.bufferSize = 0;
-    stream->groupId = groupId;
-    stream->sensorPixelModesUsed.resize(streamInfo.sensorPixelModesUsed.size());
+    stream->v3_7.v3_4.v3_2.usage = HidlCamera3Device::mapToConsumerUsage(u);
+    stream->v3_7.v3_4.v3_2.dataSpace = HidlCamera3Device::mapToHidlDataspace(streamInfo.dataSpace);
+    stream->v3_7.v3_4.v3_2.rotation = HidlCamera3Device::mapToStreamRotation(rotation);
+    stream->v3_7.v3_4.v3_2.id = -1; // Invalid stream id
+    stream->v3_7.v3_4.physicalCameraId = std::string(physicalId.string());
+    stream->v3_7.v3_4.bufferSize = 0;
+    stream->v3_7.groupId = groupId;
+    stream->v3_7.sensorPixelModesUsed.resize(streamInfo.sensorPixelModesUsed.size());
+
     size_t idx = 0;
     for (auto mode : streamInfo.sensorPixelModesUsed) {
-        stream->sensorPixelModesUsed[idx++] =
+        stream->v3_7.sensorPixelModesUsed[idx++] =
                 static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
     }
+    stream->dynamicRangeProfile =
+        static_cast<CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap> (
+                streamInfo.dynamicRangeProfile);
 }
 
-binder::Status SessionConfigurationUtils::checkPhysicalCameraId(
+binder::Status checkPhysicalCameraId(
         const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
         const String8 &logicalCameraId) {
     if (physicalCameraId.size() == 0) {
@@ -481,7 +524,7 @@
     return binder::Status::ok();
 }
 
-binder::Status SessionConfigurationUtils::checkSurfaceType(size_t numBufferProducers,
+binder::Status checkSurfaceType(size_t numBufferProducers,
         bool deferredConsumer, int surfaceType)  {
     if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
         ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
@@ -503,7 +546,7 @@
     return binder::Status::ok();
 }
 
-binder::Status SessionConfigurationUtils::checkOperatingMode(int operatingMode,
+binder::Status checkOperatingMode(int operatingMode,
         const CameraMetadata &staticInfo, const String8 &cameraId) {
     if (operatingMode < 0) {
         String8 msg = String8::format(
@@ -538,11 +581,11 @@
 }
 
 binder::Status
-SessionConfigurationUtils::convertToHALStreamCombination(
+convertToHALStreamCombination(
         const SessionConfiguration& sessionConfiguration,
         const String8 &logicalCameraId, const CameraMetadata &deviceInfo,
         metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
-        hardware::camera::device::V3_7::StreamConfiguration &streamConfiguration,
+        hardware::camera::device::V3_8::StreamConfiguration &streamConfiguration,
         bool overrideForPerfClass, bool *earlyExit) {
 
     auto operatingMode = sessionConfiguration.getOperatingMode();
@@ -557,7 +600,7 @@
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
     *earlyExit = false;
-    auto ret = Camera3Device::mapToStreamConfigurationMode(
+    auto ret = HidlCamera3Device::mapToStreamConfigurationMode(
             static_cast<camera_stream_configuration_mode_t> (operatingMode),
             /*out*/ &streamConfiguration.operationMode);
     if (ret != OK) {
@@ -583,11 +626,11 @@
         defaultSensorPixelModes[0] =
                 static_cast<CameraMetadataEnumAndroidSensorPixelMode>(
                         ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
-        streamConfiguration.streams[streamIdx++] = {{{/*streamId*/0,
+        streamConfiguration.streams[streamIdx++].v3_7 = {{{/*streamId*/0,
                 hardware::camera::device::V3_2::StreamType::INPUT,
                 static_cast<uint32_t> (sessionConfiguration.getInputWidth()),
                 static_cast<uint32_t> (sessionConfiguration.getInputHeight()),
-                Camera3Device::mapToPixelFormat(sessionConfiguration.getInputFormat()),
+                HidlCamera3Device::mapToPixelFormat(sessionConfiguration.getInputFormat()),
                 /*usage*/ 0, HAL_DATASPACE_UNKNOWN,
                 hardware::camera::device::V3_2::StreamRotation::ROTATION_0},
                 /*physicalId*/ nullptr, /*bufferSize*/0}, /*groupId*/-1, defaultSensorPixelModes};
@@ -601,6 +644,7 @@
         bool deferredConsumer = it.isDeferred();
         String8 physicalCameraId = String8(it.getPhysicalCameraId());
 
+        int dynamicRangeProfile = it.getDynamicRangeProfile();
         std::vector<int32_t> sensorPixelModesUsed = it.getSensorPixelModesUsed();
         const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId,
                 overrideForPerfClass);
@@ -632,6 +676,7 @@
             if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
                 streamInfo.consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
             }
+            streamInfo.dynamicRangeProfile = it.getDynamicRangeProfile();
             if (checkAndOverrideSensorPixelModesUsed(sensorPixelModesUsed,
                     streamInfo.format, streamInfo.width,
                     streamInfo.height, metadataChosen, false /*flexibleConsumer*/,
@@ -653,7 +698,7 @@
         for (auto& bufferProducer : bufferProducers) {
             sp<Surface> surface;
             res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
-                    logicalCameraId, metadataChosen, sensorPixelModesUsed);
+                    logicalCameraId, metadataChosen, sensorPixelModesUsed, dynamicRangeProfile);
 
             if (!res.isOk())
                 return res;
@@ -729,7 +774,7 @@
     return std::unordered_set<int32_t>(sensorPixelModesUsed.begin(), sensorPixelModesUsed.end());
 }
 
-status_t SessionConfigurationUtils::checkAndOverrideSensorPixelModesUsed(
+status_t checkAndOverrideSensorPixelModesUsed(
         const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
         const CameraMetadata &staticInfo, bool flexibleConsumer,
         std::unordered_set<int32_t> *overriddenSensorPixelModesUsed) {
@@ -795,21 +840,26 @@
     return OK;
 }
 
-bool SessionConfigurationUtils::isUltraHighResolutionSensor(const CameraMetadata &deviceInfo) {
-    camera_metadata_ro_entry_t entryCap;
-    entryCap = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
-    // Go through the capabilities and check if it has
-    // ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR
-    for (size_t i = 0; i < entryCap.count; ++i) {
-        uint8_t capability = entryCap.data.u8[i];
-        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR) {
-            return true;
+bool convertHALStreamCombinationFromV38ToV37(
+        hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37,
+        const hardware::camera::device::V3_8::StreamConfiguration &streamConfigV38) {
+    streamConfigV37.streams.resize(streamConfigV38.streams.size());
+    for (size_t i = 0; i < streamConfigV38.streams.size(); i++) {
+        if (static_cast<int32_t>(streamConfigV38.streams[i].dynamicRangeProfile) !=
+                ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+            // ICameraDevice versions older than 3.8 don't support 10-bit dynamic
+            // range profiles
+            return false;
         }
+        streamConfigV37.streams[i] = streamConfigV38.streams[i].v3_7;
     }
-    return false;
+    streamConfigV37.operationMode = streamConfigV38.operationMode;
+    streamConfigV37.sessionParams = streamConfigV38.sessionParams;
+
+    return true;
 }
 
-bool SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
+bool convertHALStreamCombinationFromV37ToV34(
         hardware::camera::device::V3_4::StreamConfiguration &streamConfigV34,
         const hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37) {
     if (streamConfigV37.multiResolutionInputImage) {
@@ -832,7 +882,7 @@
     return true;
 }
 
-bool SessionConfigurationUtils::targetPerfClassPrimaryCamera(
+bool targetPerfClassPrimaryCamera(
         const std::set<std::string>& perfClassPrimaryCameraIds, const std::string& cameraId,
         int targetSdkVersion) {
     bool isPerfClassPrimaryCamera =
@@ -840,5 +890,6 @@
     return targetSdkVersion >= SDK_VERSION_S && isPerfClassPrimaryCamera;
 }
 
+} // namespace SessionConfigurationUtils
 } // namespace camera3
 } // namespace android
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 192e241..9a5dc2c 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -21,15 +21,18 @@
 #include <camera/camera2/OutputConfiguration.h>
 #include <camera/camera2/SessionConfiguration.h>
 #include <camera/camera2/SubmitInfo.h>
-#include <android/hardware/camera/device/3.7/types.h>
+#include <android/hardware/camera/device/3.8/types.h>
 #include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
 #include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
+#include <android/hardware/camera/device/3.8/ICameraDeviceSession.h>
 
 #include <device3/Camera3StreamInterface.h>
 
 #include <set>
 #include <stdint.h>
 
+#include "SessionConfigurationUtilsHost.h"
+
 // Convenience methods for constructing binder::Status objects for error returns
 
 #define STATUS_ERROR(errorCode, errorString) \
@@ -69,96 +72,105 @@
             mMaximumResolutionStreamConfigurationMap;
 };
 
-class SessionConfigurationUtils {
-public:
-    static camera3::Size getMaxJpegResolution(const CameraMetadata &metadata,
-            bool ultraHighResolution);
+namespace SessionConfigurationUtils {
 
-    static size_t getUHRMaxJpegBufferSize(camera3::Size uhrMaxJpegSize,
-            camera3::Size defaultMaxJpegSize, size_t defaultMaxJpegBufferSize);
+camera3::Size getMaxJpegResolution(const CameraMetadata &metadata,
+        bool ultraHighResolution);
 
-    static int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
+size_t getUHRMaxJpegBufferSize(camera3::Size uhrMaxJpegSize,
+        camera3::Size defaultMaxJpegSize, size_t defaultMaxJpegBufferSize);
 
-    // Find the closest dimensions for a given format in available stream configurations with
-    // a width <= ROUNDING_WIDTH_CAP
-    static bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
-            android_dataspace dataSpace, const CameraMetadata& info, bool maxResolution,
-            /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
+int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
 
-    static bool getArrayWidthAndHeight(const CameraMetadata *deviceInfo, int32_t arrayTag,
-            int32_t *width, int32_t *height);
+// Find the closest dimensions for a given format in available stream configurations with
+// a width <= ROUNDING_WIDTH_CAP
+bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
+        android_dataspace dataSpace, const CameraMetadata& info, bool maxResolution,
+        /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
 
-    //check if format is not custom format
-    static bool isPublicFormat(int32_t format);
+// Check that the format is not a custom format
+bool isPublicFormat(int32_t format);
 
-    // Create a Surface from an IGraphicBufferProducer. Returns error if
-    // IGraphicBufferProducer's property doesn't match with streamInfo
-    static binder::Status createSurfaceFromGbp(
-        camera3::OutputStreamInfo& streamInfo, bool isStreamInfoValid,
-        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
-        const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
-        const std::vector<int32_t> &sensorPixelModesUsed);
+// Create a Surface from an IGraphicBufferProducer. Returns error if
+// IGraphicBufferProducer's property doesn't match with streamInfo
+binder::Status createSurfaceFromGbp(
+camera3::OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
+const std::vector<int32_t> &sensorPixelModesUsed, int dynamicRangeProfile);
+void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
+        camera3::camera_stream_rotation_t rotation, String8 physicalId, int32_t groupId,
+        hardware::camera::device::V3_8::Stream *stream /*out*/);
 
-    static void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
-            camera3::camera_stream_rotation_t rotation, String8 physicalId, int32_t groupId,
-            hardware::camera::device::V3_7::Stream *stream /*out*/);
+// Check if format is 10-bit output compatible
+bool is10bitCompatibleFormat(int32_t format);
 
-    // Check that the physicalCameraId passed in is spported by the camera
-    // device.
-    static binder::Status checkPhysicalCameraId(
-        const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
-        const String8 &logicalCameraId);
+// check if the dynamic range requires 10-bit output
+bool is10bitDynamicRangeProfile(int32_t dynamicRangeProfile);
 
-    static binder::Status checkSurfaceType(size_t numBufferProducers,
-        bool deferredConsumer, int surfaceType);
+// Check if the device supports a given dynamicRangeProfile
+bool isDynamicRangeProfileSupported(int dynamicRangeProfile, const CameraMetadata& staticMeta);
 
-    static binder::Status checkOperatingMode(int operatingMode,
-        const CameraMetadata &staticInfo, const String8 &cameraId);
+// Check that the physicalCameraId passed in is supported by the camera
+// device.
+binder::Status checkPhysicalCameraId(
+const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
+const String8 &logicalCameraId);
 
-    // utility function to convert AIDL SessionConfiguration to HIDL
-    // streamConfiguration. Also checks for validity of SessionConfiguration and
-    // returns a non-ok binder::Status if the passed in session configuration
-    // isn't valid.
-    static binder::Status
-    convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
-            const String8 &cameraId, const CameraMetadata &deviceInfo,
-            metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
-            hardware::camera::device::V3_7::StreamConfiguration &streamConfiguration,
-            bool overrideForPerfClass, bool *earlyExit);
+binder::Status checkSurfaceType(size_t numBufferProducers,
+bool deferredConsumer, int surfaceType);
 
-    // Utility function to convert a V3_7::StreamConfiguration to
-    // V3_4::StreamConfiguration. Return false if the original V3_7 configuration cannot
-    // be used by older version HAL.
-    static bool convertHALStreamCombinationFromV37ToV34(
-            hardware::camera::device::V3_4::StreamConfiguration &streamConfigV34,
-            const hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37);
+binder::Status checkOperatingMode(int operatingMode,
+const CameraMetadata &staticInfo, const String8 &cameraId);
 
-    static StreamConfigurationPair getStreamConfigurationPair(const CameraMetadata &metadata);
+// utility function to convert AIDL SessionConfiguration to HIDL
+// streamConfiguration. Also checks for validity of SessionConfiguration and
+// returns a non-ok binder::Status if the passed in session configuration
+// isn't valid.
+binder::Status
+convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
+        const String8 &cameraId, const CameraMetadata &deviceInfo,
+        metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
+        hardware::camera::device::V3_8::StreamConfiguration &streamConfiguration,
+        bool overrideForPerfClass, bool *earlyExit);
 
-    static status_t checkAndOverrideSensorPixelModesUsed(
-            const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
-            const CameraMetadata &staticInfo, bool flexibleConsumer,
-            std::unordered_set<int32_t> *overriddenSensorPixelModesUsed);
+// Utility function to convert a V3_8::StreamConfiguration to
+// V3_7::StreamConfiguration. Return false if the original V3_8 configuration cannot
+// be used by older version HAL.
+bool convertHALStreamCombinationFromV38ToV37(
+        hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37,
+        const hardware::camera::device::V3_8::StreamConfiguration &streamConfigV38);
 
-    static bool isUltraHighResolutionSensor(const CameraMetadata &deviceInfo);
+// Utility function to convert a V3_7::StreamConfiguration to
+// V3_4::StreamConfiguration. Return false if the original V3_7 configuration cannot
+// be used by older version HAL.
+bool convertHALStreamCombinationFromV37ToV34(
+        hardware::camera::device::V3_4::StreamConfiguration &streamConfigV34,
+        const hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37);
 
-    static int32_t getAppropriateModeTag(int32_t defaultTag, bool maxResolution = false);
+StreamConfigurationPair getStreamConfigurationPair(const CameraMetadata &metadata);
 
-    static bool targetPerfClassPrimaryCamera(
-            const std::set<std::string>& perfClassPrimaryCameraIds, const std::string& cameraId,
-            int32_t targetSdkVersion);
+status_t checkAndOverrideSensorPixelModesUsed(
+        const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
+        const CameraMetadata &staticInfo, bool flexibleConsumer,
+        std::unordered_set<int32_t> *overriddenSensorPixelModesUsed);
 
-    static const int32_t MAX_SURFACES_PER_STREAM = 4;
+bool targetPerfClassPrimaryCamera(
+        const std::set<std::string>& perfClassPrimaryCameraIds, const std::string& cameraId,
+        int32_t targetSdkVersion);
 
-    static const int32_t ROUNDING_WIDTH_CAP = 1920;
+constexpr int32_t MAX_SURFACES_PER_STREAM = 4;
 
-    static const int32_t SDK_VERSION_S = 31;
-    static int32_t PERF_CLASS_LEVEL;
-    static bool IS_PERF_CLASS;
-    static const int32_t PERF_CLASS_JPEG_THRESH_W = 1920;
-    static const int32_t PERF_CLASS_JPEG_THRESH_H = 1080;
-};
+constexpr int32_t ROUNDING_WIDTH_CAP = 1920;
 
+constexpr int32_t SDK_VERSION_S = 31;
+extern int32_t PERF_CLASS_LEVEL;
+extern bool IS_PERF_CLASS;
+constexpr int32_t PERF_CLASS_JPEG_THRESH_W = 1920;
+constexpr int32_t PERF_CLASS_JPEG_THRESH_H = 1080;
+
+} // SessionConfigurationUtils
 } // camera3
 } // android
+
 #endif
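A side note on the refactor above: converting the SessionConfigurationUtils class of static methods into a namespace of free functions leaves call sites unchanged, since the qualified call syntax is identical. For example (illustrative only, not part of the patch):

    // Compiles the same way against both the old class and the new namespace.
    bool isPublic = android::camera3::SessionConfigurationUtils::isPublicFormat(
            HAL_PIXEL_FORMAT_BLOB);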
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
new file mode 100644
index 0000000..1efdc60
--- /dev/null
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SessionConfigurationUtilsHost.h"
+
+namespace android {
+namespace camera3 {
+namespace SessionConfigurationUtils {
+
+int32_t getAppropriateModeTag(int32_t defaultTag, bool maxResolution) {
+    if (!maxResolution) {
+        return defaultTag;
+    }
+    switch (defaultTag) {
+        case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS:
+            return ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS:
+            return ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_SCALER_AVAILABLE_STALL_DURATIONS:
+            return ANDROID_SCALER_AVAILABLE_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS:
+            return ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS:
+            return ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS:
+            return ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_SENSOR_OPAQUE_RAW_SIZE:
+            return ANDROID_SENSOR_OPAQUE_RAW_SIZE_MAXIMUM_RESOLUTION;
+        case ANDROID_LENS_INTRINSIC_CALIBRATION:
+            return ANDROID_LENS_INTRINSIC_CALIBRATION_MAXIMUM_RESOLUTION;
+        case ANDROID_LENS_DISTORTION:
+            return ANDROID_LENS_DISTORTION_MAXIMUM_RESOLUTION;
+        default:
+            ALOGE("%s: Tag %d doesn't have a maximum resolution counterpart", __FUNCTION__,
+                    defaultTag);
+            return -1;
+    }
+    return -1;
+}
+
+bool isUltraHighResolutionSensor(const CameraMetadata &deviceInfo) {
+    camera_metadata_ro_entry_t entryCap;
+    entryCap = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    // Go through the capabilities and check if it has
+    // ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR
+    for (size_t i = 0; i < entryCap.count; ++i) {
+        uint8_t capability = entryCap.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool getArrayWidthAndHeight(const CameraMetadata *deviceInfo,
+        int32_t arrayTag, int32_t *width, int32_t *height) {
+    if (width == nullptr || height == nullptr) {
+        ALOGE("%s: width / height nullptr", __FUNCTION__);
+        return false;
+    }
+    camera_metadata_ro_entry_t entry;
+    entry = deviceInfo->find(arrayTag);
+    if (entry.count != 4) return false;
+    *width = entry.data.i32[2];
+    *height = entry.data.i32[3];
+    return true;
+}
+
+} // namespace SessionConfigurationUtils
+} // namespace camera3
+} // namespace android
\ No newline at end of file
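A minimal sketch of how the host-compilable helpers compose (the wrapping function and the staticInfo variable are assumptions, not part of the patch):

    using namespace android;
    using namespace android::camera3;

    // Pick the stream configuration tag appropriate for the sensor pixel mode
    // and read the active array dimensions from the static metadata.
    static bool queryStreamConfigInfo(const CameraMetadata& staticInfo,
            int32_t* scalerTag, int32_t* width, int32_t* height) {
        bool maxRes = SessionConfigurationUtils::isUltraHighResolutionSensor(staticInfo);
        *scalerTag = SessionConfigurationUtils::getAppropriateModeTag(
                ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, maxRes);
        return SessionConfigurationUtils::getArrayWidthAndHeight(
                &staticInfo, ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, width, height);
    }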
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.h b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.h
new file mode 100644
index 0000000..45b1e91
--- /dev/null
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_SERVERS_CAMERA_SESSION_CONFIGURATION_UTILS_HOST_H
+#define ANDROID_SERVERS_CAMERA_SESSION_CONFIGURATION_UTILS_HOST_H
+
+#include "camera/CameraMetadata.h"
+
+namespace android {
+namespace camera3 {
+namespace SessionConfigurationUtils {
+
+bool isUltraHighResolutionSensor(const CameraMetadata &deviceInfo);
+
+int32_t getAppropriateModeTag(int32_t defaultTag, bool maxResolution = false);
+
+bool getArrayWidthAndHeight(const CameraMetadata *deviceInfo, int32_t arrayTag,
+        int32_t *width, int32_t *height);
+
+} // SessionConfigurationUtils
+} // camera3
+} // android
+
+#endif
\ No newline at end of file
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index 262f962..461f5e9 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -24,6 +24,7 @@
 #include <utils/Log.h>
 #include <camera/VendorTagDescriptor.h>
 #include <camera_metadata_hidden.h>
+#include <device3/Camera3Stream.h>
 
 namespace android {
 
@@ -112,11 +113,15 @@
     mLastMonitoredResultValues.clear();
     mLastMonitoredPhysicalRequestKeys.clear();
     mLastMonitoredPhysicalResultKeys.clear();
+    mLastStreamIds.clear();
+    mLastInputStreamId = -1;
 }
 
 void TagMonitor::monitorMetadata(eventSource source, int64_t frameNumber, nsecs_t timestamp,
         const CameraMetadata& metadata,
-        const std::unordered_map<std::string, CameraMetadata>& physicalMetadata) {
+        const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+        const camera3::camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+        int32_t inputStreamId) {
     if (!mMonitoringEnabled) return;
 
     std::lock_guard<std::mutex> lock(mMonitorMutex);
@@ -124,19 +129,27 @@
     if (timestamp == 0) {
         timestamp = systemTime(SYSTEM_TIME_BOOTTIME);
     }
-
+    std::unordered_set<int32_t> outputStreamIds;
+    for (size_t i = 0; i < numOutputBuffers; i++) {
+        const camera3::camera_stream_buffer_t *src = outputBuffers + i;
+        int32_t streamId = camera3::Camera3Stream::cast(src->stream)->getId();
+        outputStreamIds.emplace(streamId);
+    }
     std::string emptyId;
     for (auto tag : mMonitoredTagList) {
-        monitorSingleMetadata(source, frameNumber, timestamp, emptyId, tag, metadata);
+        monitorSingleMetadata(source, frameNumber, timestamp, emptyId, tag, metadata,
+                outputStreamIds, inputStreamId);
 
         for (auto& m : physicalMetadata) {
-            monitorSingleMetadata(source, frameNumber, timestamp, m.first, tag, m.second);
+            monitorSingleMetadata(source, frameNumber, timestamp, m.first, tag, m.second,
+                    outputStreamIds, inputStreamId);
         }
     }
 }
 
 void TagMonitor::monitorSingleMetadata(eventSource source, int64_t frameNumber, nsecs_t timestamp,
-        const std::string& cameraId, uint32_t tag, const CameraMetadata& metadata) {
+        const std::string& cameraId, uint32_t tag, const CameraMetadata& metadata,
+        const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId) {
 
     CameraMetadata &lastValues = (source == REQUEST) ?
             (cameraId.empty() ? mLastMonitoredRequestValues :
@@ -177,13 +190,22 @@
             // No last entry, so always consider to be different
             isDifferent = true;
         }
-
+        // Also monitor when the stream ids change; this helps visualize which
+        // monitored metadata values apply to capture requests with different
+        // stream ids.
+        if (source == REQUEST &&
+                (inputStreamId != mLastInputStreamId || outputStreamIds != mLastStreamIds)) {
+            mLastInputStreamId = inputStreamId;
+            mLastStreamIds = outputStreamIds;
+            isDifferent = true;
+        }
         if (isDifferent) {
             ALOGV("%s: Tag %s changed", __FUNCTION__,
                   get_local_camera_metadata_tag_name_vendor_id(
                           tag, mVendorTagId));
             lastValues.update(entry);
-            mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId);
+            mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId,
+                    outputStreamIds, inputStreamId);
         }
     } else if (lastEntry.count > 0) {
         // Value has been removed
@@ -195,7 +217,10 @@
         entry.type = get_local_camera_metadata_tag_type_vendor_id(tag,
                 mVendorTagId);
         entry.count = 0;
-        mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId);
+        mLastInputStreamId = inputStreamId;
+        mLastStreamIds = outputStreamIds;
+        mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId, outputStreamIds,
+                inputStreamId);
     }
 }
 
@@ -214,37 +239,59 @@
     } else {
         dprintf(fd, "     Tag monitoring disabled (enable with -m <name1,..,nameN>)\n");
     }
-    if (mMonitoringEvents.size() > 0) {
-        dprintf(fd, "     Monitored tag event log:\n");
-        for (const auto& event : mMonitoringEvents) {
-            int indentation = (event.source == REQUEST) ? 15 : 30;
-            dprintf(fd, "        f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
-                    event.frameNumber, event.timestamp,
-                    2, event.cameraId.c_str(),
-                    indentation,
-                    event.source == REQUEST ? "REQ:" : "RES:",
-                    get_local_camera_metadata_section_name_vendor_id(event.tag,
-                            mVendorTagId),
-                    get_local_camera_metadata_tag_name_vendor_id(event.tag,
-                            mVendorTagId));
-            if (event.newData.size() == 0) {
-                dprintf(fd, " (Removed)\n");
-            } else {
-                printData(fd, event.newData.data(), event.tag,
-                        event.type, event.newData.size() / camera_metadata_type_size[event.type],
-                        indentation + 18);
-            }
-        }
-    }
 
+    if (mMonitoringEvents.size() == 0) { return; }
+
+    dprintf(fd, "     Monitored tag event log:\n");
+
+    std::vector<std::string> eventStrs;
+    dumpMonitoredTagEventsToVectorLocked(eventStrs);
+    for (const std::string &eventStr : eventStrs) {
+        dprintf(fd, "        %s", eventStr.c_str());
+    }
 }
 
-// TODO: Consolidate with printData from camera_metadata.h
+void TagMonitor::getLatestMonitoredTagEvents(std::vector<std::string> &out) {
+    std::lock_guard<std::mutex> lock(mMonitorMutex);
+    dumpMonitoredTagEventsToVectorLocked(out);
+}
+
+void TagMonitor::dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &vec) {
+    if (mMonitoringEvents.size() == 0) { return; }
+
+    for (const auto& event : mMonitoringEvents) {
+        int indentation = (event.source == REQUEST) ? 15 : 30;
+        String8 eventString = String8::format("f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
+                event.frameNumber, event.timestamp,
+                2, event.cameraId.c_str(),
+                indentation,
+                event.source == REQUEST ? "REQ:" : "RES:",
+                get_local_camera_metadata_section_name_vendor_id(event.tag, mVendorTagId),
+                get_local_camera_metadata_tag_name_vendor_id(event.tag, mVendorTagId));
+        if (event.newData.size() == 0) {
+            eventString += " (Removed)";
+        } else {
+            eventString += getEventDataString(event.newData.data(),
+                                    event.tag,
+                                    event.type,
+                                    event.newData.size() / camera_metadata_type_size[event.type],
+                                    indentation + 18,
+                                    event.outputStreamIds,
+                                    event.inputStreamId);
+        }
+        vec.emplace_back(eventString.string());
+    }
+}
 
 #define CAMERA_METADATA_ENUM_STRING_MAX_SIZE 29
 
-void TagMonitor::printData(int fd, const uint8_t *data_ptr, uint32_t tag,
-        int type, int count, int indentation) {
+String8 TagMonitor::getEventDataString(const uint8_t* data_ptr,
+                                    uint32_t tag,
+                                    int type,
+                                    int count,
+                                    int indentation,
+                                    const std::unordered_set<int32_t>& outputStreamIds,
+                                    int32_t inputStreamId) {
     static int values_per_line[NUM_TYPES] = {
         [TYPE_BYTE]     = 16,
         [TYPE_INT32]    = 8,
@@ -253,6 +300,7 @@
         [TYPE_DOUBLE]   = 4,
         [TYPE_RATIONAL] = 4,
     };
+
     size_t type_size = camera_metadata_type_size[type];
     char value_string_tmp[CAMERA_METADATA_ENUM_STRING_MAX_SIZE];
     uint32_t value;
@@ -260,10 +308,11 @@
     int lines = count / values_per_line[type];
     if (count % values_per_line[type] != 0) lines++;
 
+    String8 returnStr = String8();
     int index = 0;
     int j, k;
     for (j = 0; j < lines; j++) {
-        dprintf(fd, "%*s[", (j != 0) ? indentation + 4 : 0, "");
+        returnStr.appendFormat("%*s[", (j != 0) ? indentation + 4 : 0, "");
         for (k = 0;
              k < values_per_line[type] && count > 0;
              k++, count--, index += type_size) {
@@ -276,10 +325,9 @@
                                                      value_string_tmp,
                                                      sizeof(value_string_tmp))
                         == OK) {
-                        dprintf(fd, "%s ", value_string_tmp);
+                        returnStr += value_string_tmp;
                     } else {
-                        dprintf(fd, "%hhu ",
-                                *(data_ptr + index));
+                        returnStr.appendFormat("%hhu", *(data_ptr + index));
                     }
                     break;
                 case TYPE_INT32:
@@ -290,49 +338,57 @@
                                                      value_string_tmp,
                                                      sizeof(value_string_tmp))
                         == OK) {
-                        dprintf(fd, "%s ", value_string_tmp);
+                        returnStr += value_string_tmp;
                     } else {
-                        dprintf(fd, "%" PRId32 " ",
-                                *(int32_t*)(data_ptr + index));
+                        returnStr.appendFormat("%" PRId32 " ", *(int32_t*)(data_ptr + index));
                     }
                     break;
                 case TYPE_FLOAT:
-                    dprintf(fd, "%0.8f ",
-                            *(float*)(data_ptr + index));
+                    returnStr.appendFormat("%0.8f", *(float*)(data_ptr + index));
                     break;
                 case TYPE_INT64:
-                    dprintf(fd, "%" PRId64 " ",
-                            *(int64_t*)(data_ptr + index));
+                    returnStr.appendFormat("%" PRId64 " ", *(int64_t*)(data_ptr + index));
                     break;
                 case TYPE_DOUBLE:
-                    dprintf(fd, "%0.8f ",
-                            *(double*)(data_ptr + index));
+                    returnStr.appendFormat("%0.8f ", *(double*)(data_ptr + index));
                     break;
                 case TYPE_RATIONAL: {
                     int32_t numerator = *(int32_t*)(data_ptr + index);
                     int32_t denominator = *(int32_t*)(data_ptr + index + 4);
-                    dprintf(fd, "(%d / %d) ",
-                            numerator, denominator);
+                    returnStr.appendFormat("(%d / %d) ", numerator, denominator);
                     break;
                 }
                 default:
-                    dprintf(fd, "??? ");
+                    returnStr += "??? ";
             }
         }
-        dprintf(fd, "]\n");
+        returnStr += "] ";
+        if (!outputStreamIds.empty()) {
+            returnStr += "output stream ids: ";
+            for (const auto &id : outputStreamIds) {
+                returnStr.appendFormat(" %d ", id);
+            }
+        }
+        if (inputStreamId != -1) {
+            returnStr.appendFormat("input stream id: %d", inputStreamId);
+        }
+        returnStr += "\n";
     }
+    return returnStr;
 }
 
 template<typename T>
 TagMonitor::MonitorEvent::MonitorEvent(eventSource src, uint32_t frameNumber, nsecs_t timestamp,
-        const T &value, const std::string& cameraId) :
+        const T &value, const std::string& cameraId,
+        const std::unordered_set<int32_t> &outputStreamIds,
+        int32_t inputStreamId) :
         source(src),
         frameNumber(frameNumber),
         timestamp(timestamp),
         tag(value.tag),
         type(value.type),
         newData(value.data.u8, value.data.u8 + camera_metadata_type_size[value.type] * value.count),
-        cameraId(cameraId) {
+        cameraId(cameraId), outputStreamIds(outputStreamIds), inputStreamId(inputStreamId) {
 }
 
 TagMonitor::MonitorEvent::~MonitorEvent() {
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index 413f502..088d6fe 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -30,6 +30,7 @@
 #include <system/camera_metadata.h>
 #include <system/camera_vendor_tags.h>
 #include <camera/CameraMetadata.h>
+#include <device3/InFlightRequest.h>
 
 namespace android {
 
@@ -66,19 +67,35 @@
     // Scan through the metadata and update the monitoring information
     void monitorMetadata(eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const CameraMetadata& metadata,
-            const std::unordered_map<std::string, CameraMetadata>& physicalMetadata);
+            const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+            const camera3::camera_stream_buffer_t *outputBuffers = nullptr,
+            uint32_t numOutputBuffers = 0, int32_t inputStreamId = -1);
 
     // Dump current event log to the provided fd
     void dumpMonitoredMetadata(int fd);
 
-  private:
+    // Dumps the latest monitored Tag events to the passed vector.
+    // NOTE: The events are appended to the vector in reverse chronological order
+    // (i.e. most recent first)
+    void getLatestMonitoredTagEvents(std::vector<std::string> &out);
 
-    static void printData(int fd, const uint8_t *data_ptr, uint32_t tag,
-            int type, int count, int indentation);
+  private:
+    // Dumps monitored tag events to the passed vector without acquiring
+    // mMonitorMutex. mMonitorMutex must be acquired before calling this
+    // function.
+    void dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &out);
+
+    static String8 getEventDataString(const uint8_t *data_ptr,
+                                       uint32_t tag, int type,
+                                       int count,
+                                       int indentation,
+                                       const std::unordered_set<int32_t> &outputStreamIds,
+                                       int32_t inputStreamId);
 
     void monitorSingleMetadata(TagMonitor::eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const std::string& cameraId, uint32_t tag,
-            const CameraMetadata& metadata);
+            const CameraMetadata& metadata, const std::unordered_set<int32_t> &outputStreamIds,
+            int32_t inputStreamId);
 
     std::atomic<bool> mMonitoringEnabled;
     std::mutex mMonitorMutex;
@@ -93,6 +110,9 @@
     std::unordered_map<std::string, CameraMetadata> mLastMonitoredPhysicalRequestKeys;
     std::unordered_map<std::string, CameraMetadata> mLastMonitoredPhysicalResultKeys;
 
+    int32_t mLastInputStreamId = -1;
+    std::unordered_set<int32_t> mLastStreamIds;
+
     /**
      * A monitoring event
      * Stores a new metadata field value and the timestamp at which it changed.
@@ -101,7 +121,8 @@
     struct MonitorEvent {
         template<typename T>
         MonitorEvent(eventSource src, uint32_t frameNumber, nsecs_t timestamp,
-                const T &newValue, const std::string& cameraId);
+                const T &newValue, const std::string& cameraId,
+                const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId);
         ~MonitorEvent();
 
         eventSource source;
@@ -111,6 +132,8 @@
         uint8_t type;
         std::vector<uint8_t> newData;
         std::string cameraId;
+        std::unordered_set<int32_t> outputStreamIds;
+        int32_t inputStreamId = -1;
     };
 
     // A ring buffer for tracking the last kMaxMonitorEvents metadata changes
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
index bf4d524..4488efb 100644
--- a/services/mediacodec/Android.bp
+++ b/services/mediacodec/Android.bp
@@ -19,6 +19,7 @@
     name: "mediaswcodec",
     vendor_available: true,
     min_sdk_version: "29",
+    apex_available: ["com.android.media.swcodec"],
 
     srcs: [
         "main_swcodecservice.cpp",
diff --git a/services/mediacodec/OWNERS b/services/mediacodec/OWNERS
index c716cce..3453a76 100644
--- a/services/mediacodec/OWNERS
+++ b/services/mediacodec/OWNERS
@@ -1,2 +1,3 @@
 jeffv@google.com
-marcone@google.com
+essick@google.com
+wonsik@google.com
diff --git a/services/mediacodec/android.hardware.media.omx@1.0-service.rc b/services/mediacodec/android.hardware.media.omx@1.0-service.rc
index 3ef9a85..845e5cc 100644
--- a/services/mediacodec/android.hardware.media.omx@1.0-service.rc
+++ b/services/mediacodec/android.hardware.media.omx@1.0-service.rc
@@ -3,4 +3,4 @@
     user mediacodec
     group camera drmrpc mediadrm
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 696b967..12cc32a 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -7,9 +7,15 @@
     default_applicable_licenses: ["frameworks_av_services_mediacodec_license"],
 }
 
-cc_library_shared {
+cc_library {
     name: "libmedia_codecserviceregistrant",
     vendor_available: true,
+    min_sdk_version: "29",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
+
     srcs: [
         "CodecServiceRegistrant.cpp",
     ],
diff --git a/services/mediacodec/registrant/fuzzer/Android.bp b/services/mediacodec/registrant/fuzzer/Android.bp
new file mode 100644
index 0000000..43afbf1
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/Android.bp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_services_mediacodec_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_services_mediacodec_license"],
+}
+
+cc_fuzz {
+    name: "codecServiceRegistrant_fuzzer",
+    srcs: [
+        "codecServiceRegistrant_fuzzer.cpp",
+    ],
+    static_libs: [
+        "libmedia_codecserviceregistrant",
+    ],
+    header_libs: [
+        "libmedia_headers",
+    ],
+    defaults: [
+        "libcodec2-hidl-defaults",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/services/mediacodec/registrant/fuzzer/README.md b/services/mediacodec/registrant/fuzzer/README.md
new file mode 100644
index 0000000..0ffa063
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/README.md
@@ -0,0 +1,56 @@
+# Fuzzer for libmedia_codecserviceregistrant
+
+## Plugin Design Considerations
+The fuzzer plugin for libmedia_codecserviceregistrant is designed based on the understanding of the library and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+libmedia_codecserviceregistrant supports the following parameters:
+1. C2String (parameter name: `c2String`)
+2. Width (parameter name: `width`)
+3. Height (parameter name: `height`)
+4. SamplingRate (parameter name: `samplingRate`)
+5. Channels (parameter name: `channels`)
+6. Stream (parameter name: `stream`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `c2String` |`String` | Value obtained from FuzzedDataProvider|
+| `width` |`0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `height` |`0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `samplingRate` |`0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `channels` |`0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `stream` |`0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+
+This also ensures that the plugin is always deterministic for any given input.
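+
+For illustration only, a minimal sketch (not part of the plugin) of how such
+parameters can be derived from the raw input with `FuzzedDataProvider`; the
+actual plugin wires the derived values into `H2C2ComponentStore` calls such as
+`config_sm()` and `query_sm()` (see `codecServiceRegistrant_fuzzer.cpp`):
+```
+#include <fuzzer/FuzzedDataProvider.h>
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp(data, size);
+  // Each parameter listed above is consumed from the fuzzer input.
+  std::string c2String = fdp.ConsumeRandomLengthString();
+  uint32_t width = fdp.ConsumeIntegral<uint32_t>();
+  uint32_t height = fdp.ConsumeIntegral<uint32_t>();
+  uint32_t samplingRate = fdp.ConsumeIntegral<uint32_t>();
+  uint32_t channels = fdp.ConsumeIntegral<uint32_t>();
+  uint32_t stream = fdp.ConsumeIntegral<uint32_t>();
+  (void)c2String; (void)width; (void)height;
+  (void)samplingRate; (void)channels; (void)stream;
+  return 0;
+}
+```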
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the libmedia_codecserviceregistrant module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the codecServiceRegistrant_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) codecServiceRegistrant_fuzzer
+```
+#### Steps to run
+
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/${TARGET_ARCH}/codecServiceRegistrant_fuzzer/codecServiceRegistrant_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp b/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp
new file mode 100644
index 0000000..e5983e4
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "../CodecServiceRegistrant.cpp"
+#include "fuzzer/FuzzedDataProvider.h"
+#include <C2Config.h>
+#include <C2Param.h>
+
+using namespace std;
+
+constexpr char kServiceName[] = "software";
+
+class CodecServiceRegistrantFuzzer {
+public:
+  void process(const uint8_t *data, size_t size);
+  ~CodecServiceRegistrantFuzzer() {
+    delete mH2C2;
+    if (mInputSize) {
+      delete mInputSize;
+    }
+    if (mSampleRateInfo) {
+      delete mSampleRateInfo;
+    }
+    if (mChannelCountInfo) {
+      delete mChannelCountInfo;
+    }
+  }
+
+private:
+  void initH2C2ComponentStore();
+  void invokeH2C2ComponentStore();
+  void invokeConfigSM();
+  void invokeQuerySM();
+  H2C2ComponentStore *mH2C2 = nullptr;
+  C2StreamPictureSizeInfo::input *mInputSize = nullptr;
+  C2StreamSampleRateInfo::output *mSampleRateInfo = nullptr;
+  C2StreamChannelCountInfo::output *mChannelCountInfo = nullptr;
+  C2Param::Index mIndex = C2StreamProfileLevelInfo::output::PARAM_TYPE;
+  C2StreamFrameRateInfo::output mFrameRate;
+  FuzzedDataProvider *mFDP = nullptr;
+};
+
+void CodecServiceRegistrantFuzzer::initH2C2ComponentStore() {
+  using namespace ::android::hardware::media::c2;
+  shared_ptr<C2ComponentStore> store =
+      android::GetCodec2PlatformComponentStore();
+  if (!store) {
+    return;
+  }
+  android::sp<V1_1::IComponentStore> storeV1_1 =
+      new V1_1::utils::ComponentStore(store);
+  if (storeV1_1->registerAsService(string(kServiceName)) != android::OK) {
+    return;
+  }
+  string const preferredStoreName = string(kServiceName);
+  sp<IComponentStore> preferredStore =
+      IComponentStore::getService(preferredStoreName.c_str());
+  mH2C2 = new H2C2ComponentStore(preferredStore);
+}
+
+void CodecServiceRegistrantFuzzer::invokeConfigSM() {
+  vector<C2Param *> configParams;
+  uint32_t width = mFDP->ConsumeIntegral<uint32_t>();
+  uint32_t height = mFDP->ConsumeIntegral<uint32_t>();
+  uint32_t samplingRate = mFDP->ConsumeIntegral<uint32_t>();
+  uint32_t channels = mFDP->ConsumeIntegral<uint32_t>();
+  if (mFDP->ConsumeBool()) {
+    mInputSize = new C2StreamPictureSizeInfo::input(0u, width, height);
+    configParams.push_back(mInputSize);
+  } else {
+    if (mFDP->ConsumeBool()) {
+      mSampleRateInfo = new C2StreamSampleRateInfo::output(0u, samplingRate);
+      configParams.push_back(mSampleRateInfo);
+    }
+    if (mFDP->ConsumeBool()) {
+      mChannelCountInfo = new C2StreamChannelCountInfo::output(0u, channels);
+      configParams.push_back(mChannelCountInfo);
+    }
+  }
+  vector<unique_ptr<C2SettingResult>> failures;
+  mH2C2->config_sm(configParams, &failures);
+}
+
+void CodecServiceRegistrantFuzzer::invokeQuerySM() {
+  vector<C2Param *> stackParams;
+  vector<C2Param::Index> heapParamIndices;
+  if (mFDP->ConsumeBool()) {
+    stackParams = {};
+    heapParamIndices = {};
+  } else {
+    uint32_t stream = mFDP->ConsumeIntegral<uint32_t>();
+    mFrameRate.setStream(stream);
+    stackParams.push_back(&mFrameRate);
+    heapParamIndices.push_back(mIndex);
+  }
+  vector<unique_ptr<C2Param>> heapParams;
+  mH2C2->query_sm(stackParams, heapParamIndices, &heapParams);
+}
+
+void CodecServiceRegistrantFuzzer::invokeH2C2ComponentStore() {
+  initH2C2ComponentStore();
+  shared_ptr<C2Component> component;
+  shared_ptr<C2ComponentInterface> interface;
+  string c2String = mFDP->ConsumeRandomLengthString();
+  mH2C2->createComponent(c2String, &component);
+  mH2C2->createInterface(c2String, &interface);
+  invokeConfigSM();
+  invokeQuerySM();
+
+  vector<shared_ptr<C2ParamDescriptor>> params;
+  mH2C2->querySupportedParams_nb(&params);
+
+  C2StoreIonUsageInfo usageInfo;
+  std::vector<C2FieldSupportedValuesQuery> query = {
+      C2FieldSupportedValuesQuery::Possible(
+          C2ParamField::Make(usageInfo, usageInfo.usage)),
+      C2FieldSupportedValuesQuery::Possible(
+          C2ParamField::Make(usageInfo, usageInfo.capacity)),
+  };
+  mH2C2->querySupportedValues_sm(query);
+
+  mH2C2->getName();
+  shared_ptr<C2ParamReflector> paramReflector = mH2C2->getParamReflector();
+  if (paramReflector) {
+    paramReflector->describe(C2ComponentDomainSetting::CORE_INDEX);
+  }
+  mH2C2->listComponents();
+  shared_ptr<C2GraphicBuffer> src;
+  shared_ptr<C2GraphicBuffer> dst;
+  mH2C2->copyBuffer(src, dst);
+}
+
+void CodecServiceRegistrantFuzzer::process(const uint8_t *data, size_t size) {
+  mFDP = new FuzzedDataProvider(data, size);
+  invokeH2C2ComponentStore();
+  /** RegisterCodecServices is called here to improve code coverage */
+  /** as currently it is not called by codecServiceRegistrant       */
+  RegisterCodecServices();
+  delete mFDP;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  CodecServiceRegistrantFuzzer codecServiceRegistrantFuzzer;
+  codecServiceRegistrantFuzzer.process(data, size);
+  return 0;
+}
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
index 9058f10..41efce0 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
@@ -84,5 +84,6 @@
 getgid32: 1
 getegid32: 1
 getgroups32: 1
+sysinfo: 1
 
 @include /apex/com.android.media.swcodec/etc/seccomp_policy/code_coverage.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
index 7ff858b..4317ccc 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
@@ -78,6 +78,7 @@
 getgid: 1
 getegid: 1
 getgroups: 1
+sysinfo: 1
 
 # Android profiler (heapprofd, traced_perf) additions, where not already
 # covered by the rest of the file, or by builtin minijail allow-listing of
diff --git a/services/mediaextractor/OWNERS b/services/mediaextractor/OWNERS
index c716cce..2a779c2 100644
--- a/services/mediaextractor/OWNERS
+++ b/services/mediaextractor/OWNERS
@@ -1,2 +1,3 @@
 jeffv@google.com
-marcone@google.com
+essick@google.com
+aquilescanta@google.com
diff --git a/services/mediaextractor/TEST_MAPPING b/services/mediaextractor/TEST_MAPPING
new file mode 100644
index 0000000..7a66eeb
--- /dev/null
+++ b/services/mediaextractor/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+    "presubmit": [
+        {
+            "name": "CtsMediaTranscodingTestCases"
+        }
+    ]
+}
diff --git a/services/mediaextractor/mediaextractor.rc b/services/mediaextractor/mediaextractor.rc
index 5fc2941..4fb50d0 100644
--- a/services/mediaextractor/mediaextractor.rc
+++ b/services/mediaextractor/mediaextractor.rc
@@ -3,4 +3,4 @@
     user mediaex
     group drmrpc mediadrm
     ioprio rt 4
-    writepid /dev/cpuset/foreground/tasks
+    task_profiles ProcessCapacityHigh
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index cfc4c40..8088ef0 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -26,6 +26,7 @@
         "libmediautils",
         "libnblog",
         "libutils",
+        "packagemanager_aidl-cpp",
     ],
 
     cflags: [
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 5989181..11534bb 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -114,6 +114,7 @@
         "libmediautils",
         "libutils",
         "mediametricsservice-aidl-cpp",
+        "packagemanager_aidl-cpp",
     ],
     header_libs: [
         "libaudioutils_headers",
@@ -148,7 +149,8 @@
         "statsd_mediaparser.cpp",
         "statsd_nuplayer.cpp",
         "statsd_recorder.cpp",
-        "StringUtils.cpp"
+        "StringUtils.cpp",
+        "ValidateId.cpp",
     ],
 
     proto: {
@@ -171,6 +173,7 @@
         "libstatspull",
         "libstatssocket",
         "libutils",
+        "packagemanager_aidl-cpp",
     ],
 
     export_shared_lib_headers: [
@@ -178,11 +181,22 @@
         "libstatssocket",
     ],
 
+    // within the library, we use "xxx.h"
+    local_include_dirs: [
+        "include/mediametricsservice",
+    ],
+
+    // external parties use <mediametricsservice/xxx.h>
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libplatformprotos",
     ],
 
-    include_dirs: [
-        "system/media/audio_utils/include",
+    header_libs: [
+        "libaaudio_headers",
+        "libaudioutils_headers",
     ],
 }
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 45c9f56..a936ac8 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -21,6 +21,7 @@
 
 #include "AudioAnalytics.h"
 
+#include <aaudio/AAudio.h>        // error codes
 #include <audio_utils/clock.h>    // clock conversions
 #include <cutils/properties.h>
 #include <statslog.h>             // statsd
@@ -29,6 +30,7 @@
 #include "AudioTypes.h"           // string to int conversions
 #include "MediaMetricsService.h"  // package info
 #include "StringUtils.h"
+#include "ValidateId.h"
 
 #define PROP_AUDIO_ANALYTICS_CLOUD_ENABLED "persist.audio.analytics.cloud.enabled"
 
@@ -63,6 +65,50 @@
     }
 }
 
+// The status variable contains status_t codes which are used by
+// the core audio framework. We also consider AAudio status codes.
+//
+// Compare with mediametrics::statusToStatusString
+//
+inline constexpr const char* extendedStatusToStatusString(status_t status) {
+    switch (status) {
+    case BAD_VALUE:           // status_t
+    case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
+    case AAUDIO_ERROR_INVALID_FORMAT:
+    case AAUDIO_ERROR_INVALID_RATE:
+    case AAUDIO_ERROR_NULL:
+    case AAUDIO_ERROR_OUT_OF_RANGE:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT;
+    case DEAD_OBJECT:         // status_t
+    case FAILED_TRANSACTION:  // status_t
+    case AAUDIO_ERROR_DISCONNECTED:
+    case AAUDIO_ERROR_INVALID_HANDLE:
+    case AAUDIO_ERROR_NO_SERVICE:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_IO;
+    case NO_MEMORY:           // status_t
+    case AAUDIO_ERROR_NO_FREE_HANDLES:
+    case AAUDIO_ERROR_NO_MEMORY:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY;
+    case PERMISSION_DENIED:   // status_t
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY;
+    case INVALID_OPERATION:   // status_t
+    case NO_INIT:             // status_t
+    case AAUDIO_ERROR_INVALID_STATE:
+    case AAUDIO_ERROR_UNAVAILABLE:
+    case AAUDIO_ERROR_UNIMPLEMENTED:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_STATE;
+    case WOULD_BLOCK:         // status_t
+    case AAUDIO_ERROR_TIMEOUT:
+    case AAUDIO_ERROR_WOULD_BLOCK:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT;
+    default:
+        if (status >= 0) return AMEDIAMETRICS_PROP_STATUS_VALUE_OK; // non-negative values "OK"
+        [[fallthrough]];            // negative values are errors.
+    case UNKNOWN_ERROR:       // status_t
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN;
+    }
+}
+
 static constexpr const auto LOG_LEVEL = android::base::VERBOSE;
 
 static constexpr int PREVIOUS_STATE_EXPIRE_SEC = 60 * 60; // 1 hour.
@@ -129,6 +175,24 @@
     "log_session_id",
 };
 
+static constexpr const char * const AudioTrackStatusFields[] {
+    "mediametrics_audiotrackstatus_reported",
+    "status",
+    "debug_message",
+    "status_subcode",
+    "uid",
+    "event",
+    "output_flags",
+    "content_type",
+    "usage",
+    "encoding",
+    "channel_mask",
+    "buffer_frame_count",
+    "sample_rate",
+    "speed",
+    "pitch",
+};
+
 static constexpr const char * const AudioDeviceConnectionFields[] = {
     "mediametrics_audiodeviceconnection_reported",
     "input_devices",
@@ -391,11 +455,15 @@
 {
     if (!startsWith(item->getKey(), AMEDIAMETRICS_KEY_PREFIX_AUDIO)) return BAD_VALUE;
     status_t status = mAnalyticsState->submit(item, isTrusted);
+
+    // Status is selectively authenticated.
+    processStatus(item);
+
     if (status != NO_ERROR) return status;  // may not be permitted.
 
     // Only if the item was successfully submitted (permission)
     // do we check triggered actions.
-    checkActions(item);
+    processActions(item);
     return NO_ERROR;
 }
 
@@ -429,7 +497,7 @@
     return { ss.str(), lines - ll };
 }
 
-void AudioAnalytics::checkActions(const std::shared_ptr<const mediametrics::Item>& item)
+void AudioAnalytics::processActions(const std::shared_ptr<const mediametrics::Item>& item)
 {
     auto actions = mActions.getActionsForItem(item); // internally locked.
     // Execute actions with no lock held.
@@ -438,6 +506,116 @@
     }
 }
 
+void AudioAnalytics::processStatus(const std::shared_ptr<const mediametrics::Item>& item)
+{
+    int32_t status;
+    if (!item->get(AMEDIAMETRICS_PROP_STATUS, &status)) return;
+
+    // Any record with a status will automatically be added to a heat map.
+    // Standard information.
+    const auto key = item->getKey();
+    const auto uid = item->getUid();
+
+    // from audio.track.10 ->  prefix = audio.track, suffix = 10
+    // from audio.track.error -> prefix = audio.track, suffix = error
+    const auto [prefixKey, suffixKey] = stringutils::splitPrefixKey(key);
+
+    std::string message;
+    item->get(AMEDIAMETRICS_PROP_STATUSMESSAGE, &message); // optional
+
+    int32_t subCode = 0; // not used
+    (void)item->get(AMEDIAMETRICS_PROP_STATUSSUBCODE, &subCode); // optional
+
+    std::string eventStr; // optional
+    item->get(AMEDIAMETRICS_PROP_EVENT, &eventStr);
+
+    const std::string statusString = extendedStatusToStatusString(status);
+
+    // Add to the heat map - we automatically track every item's status to see
+    // the types of errors and the frequency of errors.
+    mHeatMap.add(prefixKey, suffixKey, eventStr, statusString, uid, message, subCode);
+
+    // Certain keys/event pairs are sent to statsd.
+    // Note that the prefixes often end with a '.' so we use startsWith.
+    if (startsWith(key, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK)
+            && eventStr == AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE) {
+        const int atom_status = types::lookup<types::STATUS, int32_t>(statusString);
+
+        // currently we only send create status events.
+        const int32_t event = android::util::
+                MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__EVENT__AUDIO_TRACK_EVENT_CREATE;
+
+        // The following fields should all be present in a create event.
+        std::string flagsStr;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_ORIGINALFLAGS, &flagsStr),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_ORIGINALFLAGS);
+        const auto flags = types::lookup<types::OUTPUT_FLAG, int32_t>(flagsStr);
+
+        // AMEDIAMETRICS_PROP_SESSIONID omitted from atom
+
+        std::string contentTypeStr;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_CONTENTTYPE, &contentTypeStr),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_CONTENTTYPE);
+        const auto contentType = types::lookup<types::CONTENT_TYPE, int32_t>(contentTypeStr);
+
+        std::string usageStr;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_USAGE, &usageStr),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_USAGE);
+        const auto usage = types::lookup<types::USAGE, int32_t>(usageStr);
+
+        // AMEDIAMETRICS_PROP_SELECTEDDEVICEID omitted from atom
+
+        std::string encodingStr;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_ENCODING, &encodingStr),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_ENCODING);
+        const auto encoding = types::lookup<types::ENCODING, int32_t>(encodingStr);
+
+        int32_t channelMask = 0;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_CHANNELMASK, &channelMask),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_CHANNELMASK);
+        int32_t frameCount = 0;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_FRAMECOUNT, &frameCount),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_FRAMECOUNT);
+        int32_t sampleRate = 0;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_SAMPLERATE, &sampleRate),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_SAMPLERATE);
+        double speed = 0.f;  // default is 1.f
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, &speed),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_PLAYBACK_SPEED);
+        double pitch = 0.f;  // default is 1.f
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, &pitch),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_PLAYBACK_PITCH);
+        const auto [ result, str ] = sendToStatsd(AudioTrackStatusFields,
+                CONDITION(android::util::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED)
+                , atom_status
+                , message.c_str()
+                , subCode
+                , uid
+                , event
+                , flags
+                , contentType
+                , usage
+                , encoding
+                , (int64_t)channelMask
+                , frameCount
+                , sampleRate
+                , (float)speed
+                , (float)pitch
+                );
+        ALOGV("%s: statsd %s", __func__, str.c_str());
+        mStatsdLog->log(android::util::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED, str);
+    }
+}
+
 // HELPER METHODS
 
 std::string AudioAnalytics::getThreadFromTrack(const std::string& track) const
@@ -563,7 +741,7 @@
         const auto flagsForStats = types::lookup<types::INPUT_FLAG, short_enum_type_t>(flags);
         const auto sourceForStats = types::lookup<types::SOURCE_TYPE, short_enum_type_t>(source);
         // Android S
-        const auto logSessionIdForStats = stringutils::sanitizeLogSessionId(logSessionId);
+        const auto logSessionIdForStats = ValidateId::get()->validateId(logSessionId);
 
         LOG(LOG_LEVEL) << "key:" << key
               << " id:" << id
@@ -718,7 +896,7 @@
                  types::lookup<types::TRACK_TRAITS, short_enum_type_t>(traits);
         const auto usageForStats = types::lookup<types::USAGE, short_enum_type_t>(usage);
         // Android S
-        const auto logSessionIdForStats = stringutils::sanitizeLogSessionId(logSessionId);
+        const auto logSessionIdForStats = ValidateId::get()->validateId(logSessionId);
 
         LOG(LOG_LEVEL) << "key:" << key
               << " id:" << id
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
index ab74c8e..5787e9e 100644
--- a/services/mediametrics/AudioPowerUsage.cpp
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -45,6 +45,10 @@
 #define AUDIO_POWER_USAGE_PROP_DURATION_NS    "durationNs" // int64
 #define AUDIO_POWER_USAGE_PROP_TYPE           "type"       // int32
 #define AUDIO_POWER_USAGE_PROP_VOLUME         "volume"     // double
+#define AUDIO_POWER_USAGE_PROP_MIN_VOLUME_DURATION_NS "minVolumeDurationNs" // int64
+#define AUDIO_POWER_USAGE_PROP_MIN_VOLUME             "minVolume"           // double
+#define AUDIO_POWER_USAGE_PROP_MAX_VOLUME_DURATION_NS "maxVolumeDurationNs" // int64
+#define AUDIO_POWER_USAGE_PROP_MAX_VOLUME             "maxVolume"           // double
 
 namespace android::mediametrics {
 
@@ -141,13 +145,34 @@
     double volume;
     if (!item->getDouble(AUDIO_POWER_USAGE_PROP_VOLUME, &volume)) return;
 
+    int64_t min_volume_duration_ns;
+    if (!item->getInt64(AUDIO_POWER_USAGE_PROP_MIN_VOLUME_DURATION_NS, &min_volume_duration_ns)) {
+        return;
+    }
+
+    double min_volume;
+    if (!item->getDouble(AUDIO_POWER_USAGE_PROP_MIN_VOLUME, &min_volume)) return;
+
+    int64_t max_volume_duration_ns;
+    if (!item->getInt64(AUDIO_POWER_USAGE_PROP_MAX_VOLUME_DURATION_NS, &max_volume_duration_ns)) {
+        return;
+    }
+
+    double max_volume;
+    if (!item->getDouble(AUDIO_POWER_USAGE_PROP_MAX_VOLUME, &max_volume)) return;
+
     const int32_t duration_secs = (int32_t)(duration_ns / NANOS_PER_SECOND);
-    const float average_volume = (float)volume;
+    const int32_t min_volume_duration_secs = (int32_t)(min_volume_duration_ns / NANOS_PER_SECOND);
+    const int32_t max_volume_duration_secs = (int32_t)(max_volume_duration_ns / NANOS_PER_SECOND);
     const int result = android::util::stats_write(android::util::AUDIO_POWER_USAGE_DATA_REPORTED,
                                          audio_device,
                                          duration_secs,
-                                         average_volume,
-                                         type);
+                                         (float)volume,
+                                         type,
+                                         min_volume_duration_secs,
+                                         (float)min_volume,
+                                         max_volume_duration_secs,
+                                         (float)max_volume);
 
     std::stringstream log;
     log << "result:" << result << " {"
@@ -155,17 +180,43 @@
             << android::util::AUDIO_POWER_USAGE_DATA_REPORTED
             << " audio_device:" << audio_device
             << " duration_secs:" << duration_secs
-            << " average_volume:" << average_volume
+            << " average_volume:" << (float)volume
             << " type:" << type
+            << " min_volume_duration_secs:" << min_volume_duration_secs
+            << " min_volume:" << (float)min_volume
+            << " max_volume_duration_secs:" << max_volume_duration_secs
+            << " max_volume:" << (float)max_volume
             << " }";
     mStatsdLog->log(android::util::AUDIO_POWER_USAGE_DATA_REPORTED, log.str());
 }
 
+void AudioPowerUsage::updateMinMaxVolumeAndDuration(
+            const int64_t cur_max_volume_duration_ns, const double cur_max_volume,
+            const int64_t cur_min_volume_duration_ns, const double cur_min_volume,
+            int64_t& f_max_volume_duration_ns, double& f_max_volume,
+            int64_t& f_min_volume_duration_ns, double& f_min_volume)
+{
+    if (f_min_volume > cur_min_volume) {
+        f_min_volume = cur_min_volume;
+        f_min_volume_duration_ns = cur_min_volume_duration_ns;
+    } else if (f_min_volume == cur_min_volume) {
+        f_min_volume_duration_ns += cur_min_volume_duration_ns;
+    }
+    if (f_max_volume < cur_max_volume) {
+        f_max_volume = cur_max_volume;
+        f_max_volume_duration_ns = cur_max_volume_duration_ns;
+    } else if (f_max_volume == cur_max_volume) {
+        f_max_volume_duration_ns += cur_max_volume_duration_ns;
+    }
+}
+
 bool AudioPowerUsage::saveAsItem_l(
-        int32_t device, int64_t duration_ns, int32_t type, double average_vol)
+        int32_t device, int64_t duration_ns, int32_t type, double average_vol,
+        int64_t max_volume_duration_ns, double max_volume,
+        int64_t min_volume_duration_ns, double min_volume)
 {
     ALOGV("%s: (%#x, %d, %lld, %f)", __func__, device, type,
-                                   (long long)duration_ns, average_vol );
+                                   (long long)duration_ns, average_vol);
     if (duration_ns == 0) {
         return true; // skip duration 0 usage
     }
@@ -193,10 +244,36 @@
             item->setDouble(AUDIO_POWER_USAGE_PROP_VOLUME, final_volume);
             item->setTimestamp(systemTime(SYSTEM_TIME_REALTIME));
 
-            ALOGV("%s: update (%#x, %d, %lld, %f) --> (%lld, %f)", __func__,
+            // Update the max/min volume and duration
+            int64_t final_min_volume_duration_ns;
+            int64_t final_max_volume_duration_ns;
+            double final_min_volume;
+            double final_max_volume;
+
+            item->getInt64(AUDIO_POWER_USAGE_PROP_MIN_VOLUME_DURATION_NS,
+                           &final_min_volume_duration_ns);
+            item->getDouble(AUDIO_POWER_USAGE_PROP_MIN_VOLUME, &final_min_volume);
+            item->getInt64(AUDIO_POWER_USAGE_PROP_MAX_VOLUME_DURATION_NS,
+                           &final_max_volume_duration_ns);
+            item->getDouble(AUDIO_POWER_USAGE_PROP_MAX_VOLUME, &final_max_volume);
+            updateMinMaxVolumeAndDuration(max_volume_duration_ns, max_volume,
+                                          min_volume_duration_ns, min_volume,
+                                          final_max_volume_duration_ns, final_max_volume,
+                                          final_min_volume_duration_ns, final_min_volume);
+            item->setInt64(AUDIO_POWER_USAGE_PROP_MIN_VOLUME_DURATION_NS,
+                           final_min_volume_duration_ns);
+            item->setDouble(AUDIO_POWER_USAGE_PROP_MIN_VOLUME, final_min_volume);
+            item->setInt64(AUDIO_POWER_USAGE_PROP_MAX_VOLUME_DURATION_NS,
+                           final_max_volume_duration_ns);
+            item->setDouble(AUDIO_POWER_USAGE_PROP_MAX_VOLUME, final_max_volume);
+
+            ALOGV("%s: update (%#x, %d, %lld, %f) --> (%lld, %f) min(%lld, %f) max(%lld, %f)",
+                  __func__,
                   device, type,
                   (long long)item_duration_ns, item_volume,
-                  (long long)final_duration_ns, final_volume);
+                  (long long)final_duration_ns, final_volume,
+                  (long long)final_min_volume_duration_ns, final_min_volume,
+                  (long long)final_max_volume_duration_ns, final_max_volume);
 
             return true;
         }
@@ -208,12 +285,18 @@
     sitem->setInt64(AUDIO_POWER_USAGE_PROP_DURATION_NS, duration_ns);
     sitem->setInt32(AUDIO_POWER_USAGE_PROP_TYPE, type);
     sitem->setDouble(AUDIO_POWER_USAGE_PROP_VOLUME, average_vol);
+    sitem->setInt64(AUDIO_POWER_USAGE_PROP_MIN_VOLUME_DURATION_NS, min_volume_duration_ns);
+    sitem->setDouble(AUDIO_POWER_USAGE_PROP_MIN_VOLUME, min_volume);
+    sitem->setInt64(AUDIO_POWER_USAGE_PROP_MAX_VOLUME_DURATION_NS, max_volume_duration_ns);
+    sitem->setDouble(AUDIO_POWER_USAGE_PROP_MAX_VOLUME, max_volume);
     mItems.emplace_back(sitem);
     return true;
 }
 
 bool AudioPowerUsage::saveAsItems_l(
-        int32_t device, int64_t duration_ns, int32_t type, double average_vol)
+        int32_t device, int64_t duration_ns, int32_t type, double average_vol,
+        int64_t max_volume_duration, double max_volume,
+        int64_t min_volume_duration, double min_volume)
 {
     ALOGV("%s: (%#x, %d, %lld, %f)", __func__, device, type,
                                    (long long)duration_ns, average_vol );
@@ -232,7 +315,9 @@
         int32_t tmp_device = device_bits & -device_bits; // get lowest bit
         device_bits ^= tmp_device;  // clear lowest bit
         tmp_device |= input_bit;    // restore input bit
-        ret = saveAsItem_l(tmp_device, duration_ns, type, average_vol);
+        ret = saveAsItem_l(tmp_device, duration_ns, type, average_vol,
+                           max_volume_duration, max_volume,
+                           min_volume_duration, min_volume);
 
         ALOGV("%s: device %#x recorded, remaining device_bits = %#x", __func__,
             tmp_device, device_bits);
@@ -250,9 +335,28 @@
         return;
     }
     double deviceVolume = 1.;
-    if (isTrack && !item->getDouble(AMEDIAMETRICS_PROP_DEVICEVOLUME, &deviceVolume)) {
-        return;
+    int64_t maxVolumeDurationNs = 0;
+    double maxVolume = AMEDIAMETRICS_INITIAL_MAX_VOLUME;
+    int64_t minVolumeDurationNs = 0;
+    double minVolume = AMEDIAMETRICS_INITIAL_MIN_VOLUME;
+    if (isTrack) {
+        if (!item->getDouble(AMEDIAMETRICS_PROP_DEVICEVOLUME, &deviceVolume)) {
+            return;
+        }
+        if (!item->getInt64(AMEDIAMETRICS_PROP_DEVICEMAXVOLUMEDURATIONNS, &maxVolumeDurationNs)) {
+            return;
+        }
+        if (!item->getDouble(AMEDIAMETRICS_PROP_DEVICEMAXVOLUME, &maxVolume)) {
+            return;
+        }
+        if (!item->getInt64(AMEDIAMETRICS_PROP_DEVICEMINVOLUMEDURATIONNS, &minVolumeDurationNs)) {
+            return;
+        }
+        if (!item->getDouble(AMEDIAMETRICS_PROP_DEVICEMINVOLUME, &minVolume)) {
+            return;
+        }
     }
+
     int32_t type = 0;
     std::string type_string;
     if ((isTrack && mAudioAnalytics->mAnalyticsState->timeMachine().get(
@@ -285,7 +389,8 @@
         ALOGV("device = %s => %d", device_strings.c_str(), device);
     }
     std::lock_guard l(mLock);
-    saveAsItems_l(device, deviceTimeNs, type, deviceVolume);
+    saveAsItems_l(device, deviceTimeNs, type, deviceVolume,
+                  maxVolumeDurationNs, maxVolume, minVolumeDurationNs, minVolume);
 }
 
 void AudioPowerUsage::checkMode(const std::shared_ptr<const mediametrics::Item>& item)
@@ -299,10 +404,17 @@
     if (mMode == "AUDIO_MODE_IN_CALL") { // leaving call mode
         const int64_t endCallNs = item->getTimestamp();
         const int64_t durationNs = endCallNs - mDeviceTimeNs;
+        const int64_t volumeDurationNs = endCallNs - mVolumeTimeNs;
         if (durationNs > 0) {
             mDeviceVolume = (mDeviceVolume * double(mVolumeTimeNs - mDeviceTimeNs) +
-                    mVoiceVolume * double(endCallNs - mVolumeTimeNs)) / (double)durationNs;
-            saveAsItems_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume);
+                    mVoiceVolume * double(volumeDurationNs)) / (double)durationNs;
+            updateMinMaxVolumeAndDuration(volumeDurationNs, mVoiceVolume,
+                          volumeDurationNs, mVoiceVolume,
+                          mMaxVoiceVolumeDurationNs, mMaxVoiceVolume,
+                          mMinVoiceVolumeDurationNs, mMinVoiceVolume);
+            saveAsItems_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume,
+                          mMaxVoiceVolumeDurationNs, mMaxVoiceVolume,
+                          mMinVoiceVolumeDurationNs, mMinVoiceVolume);
         }
     } else if (mode == "AUDIO_MODE_IN_CALL") { // entering call mode
         mStartCallNs = item->getTimestamp(); // advisory only
@@ -327,10 +439,15 @@
     if (mMode == "AUDIO_MODE_IN_CALL") {
         const int64_t timeNs = item->getTimestamp();
         const int64_t durationNs = timeNs - mDeviceTimeNs;
+        const int64_t volumeDurationNs = timeNs - mVolumeTimeNs;
         if (durationNs > 0) {
             mDeviceVolume = (mDeviceVolume * double(mVolumeTimeNs - mDeviceTimeNs) +
-                    mVoiceVolume * double(timeNs - mVolumeTimeNs)) / (double)durationNs;
+                    mVoiceVolume * double(volumeDurationNs)) / (double)durationNs;
             mVolumeTimeNs = timeNs;
+            updateMinMaxVolumeAndDuration(volumeDurationNs, mVoiceVolume,
+                          volumeDurationNs, mVoiceVolume,
+                          mMaxVoiceVolumeDurationNs, mMaxVoiceVolume,
+                          mMinVoiceVolumeDurationNs, mMinVoiceVolume);
         }
     }
     ALOGV("%s: new voice volume:%lf  old voice volume:%lf", __func__, voiceVolume, mVoiceVolume);
@@ -358,15 +475,26 @@
         // Save statistics
         const int64_t endDeviceNs = item->getTimestamp();
         const int64_t durationNs = endDeviceNs - mDeviceTimeNs;
+        const int64_t volumeDurationNs = endDeviceNs - mVolumeTimeNs;
         if (durationNs > 0) {
             mDeviceVolume = (mDeviceVolume * double(mVolumeTimeNs - mDeviceTimeNs) +
-                    mVoiceVolume * double(endDeviceNs - mVolumeTimeNs)) / (double)durationNs;
-            saveAsItems_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume);
+                    mVoiceVolume * double(volumeDurationNs)) / (double)durationNs;
+            updateMinMaxVolumeAndDuration(volumeDurationNs, mVoiceVolume,
+                          volumeDurationNs, mVoiceVolume,
+                          mMaxVoiceVolumeDurationNs, mMaxVoiceVolume,
+                          mMinVoiceVolumeDurationNs, mMinVoiceVolume);
+            saveAsItems_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume,
+                          mMaxVoiceVolumeDurationNs, mMaxVoiceVolume,
+                          mMinVoiceVolumeDurationNs, mMinVoiceVolume);
         }
         // reset statistics
         mDeviceVolume = 0;
         mDeviceTimeNs = endDeviceNs;
         mVolumeTimeNs = endDeviceNs;
+        mMaxVoiceVolume = AMEDIAMETRICS_INITIAL_MAX_VOLUME;
+        mMinVoiceVolume = AMEDIAMETRICS_INITIAL_MIN_VOLUME;
+        mMaxVoiceVolumeDurationNs = 0;
+        mMinVoiceVolumeDurationNs = 0;
     }
     ALOGV("%s: new primary device:%#x  old primary device:%#x", __func__, device, mPrimaryDevice);
     mPrimaryDevice = device;
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index 838cdd5..594809c 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -15,8 +15,10 @@
  */
 
 #include "AudioTypes.h"
+#include "MediaMetricsConstants.h"
 #include "StringUtils.h"
 #include <media/TypeConverter.h> // requires libmedia_helper to get the Audio code.
+#include <statslog.h>            // statsd
 
 namespace android::mediametrics::types {
 
@@ -190,6 +192,31 @@
     return map;
 }
 
+const std::unordered_map<std::string, int32_t>& getStatusMap() {
+    // DO NOT MODIFY VALUES(OK to add new ones).
+    static std::unordered_map<std::string, int32_t> map {
+        {"",
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__NO_ERROR},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_OK,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__NO_ERROR},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_ARGUMENT},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_IO,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_IO},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_MEMORY},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_SECURITY},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_STATE,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_STATE},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_TIMEOUT},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN},
+    };
+    return map;
+}
+
 // Helper: Create the corresponding int32 from string flags split with '|'.
 template <typename Traits>
 int32_t int32FromFlags(const std::string &flags)
@@ -431,6 +458,17 @@
 }
 
 template <>
+int32_t lookup<STATUS>(const std::string &status)
+{
+    auto& map = getStatusMap();
+    auto it = map.find(status);
+    if (it == map.end()) {
+        return util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN;
+    }
+    return it->second;
+}
+
+template <>
 int32_t lookup<THREAD_TYPE>(const std::string &threadType)
 {
     auto& map = getAudioThreadTypeMap();
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index 1d64878..636b343 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include "MediaMetricsService.h"
+#include "ValidateId.h"
 #include "iface_statsd.h"
 
 #include <pwd.h> //getpwuid
@@ -204,6 +205,15 @@
     // now attach either the item or its dup to a const shared pointer
     std::shared_ptr<const mediametrics::Item> sitem(release ? item : item->dup());
 
+    // register log session ids with singleton.
+    if (startsWith(item->getKey(), "metrics.manager")) {
+        std::string logSessionId;
+        if (item->get("logSessionId", &logSessionId)
+                && mediametrics::stringutils::isLogSessionId(logSessionId.c_str())) {
+            mediametrics::ValidateId::get()->registerId(logSessionId);
+        }
+    }
+
     (void)mAudioAnalytics.submit(sitem, isTrusted);
 
     (void)dump2Statsd(sitem, mStatsdLog);  // failure should be logged in function.
@@ -309,8 +319,19 @@
                 result << "-- some lines may be truncated --\n";
             }
 
+            const int32_t heatLinesToDump = all ? INT32_MAX : 20;
+            const auto [ heatDumpString, heatLines] =
+                    mAudioAnalytics.dumpHeatMap(heatLinesToDump);
+            result << "\n" << heatDumpString;
+            if (heatLines == heatLinesToDump) {
+                result << "-- some lines may be truncated --\n";
+            }
+
+            result << "\nLogSessionId:\n"
+                   << mediametrics::ValidateId::get()->dump();
+
             // Dump the statsd atoms we sent out.
-            result << "Statsd atoms:\n"
+            result << "\nStatsd atoms:\n"
                    << mStatsdLog->dumpToString("  " /* prefix */,
                            all ? STATSD_LOG_LINES_MAX : STATSD_LOG_LINES_DUMP);
         }
diff --git a/services/mediametrics/ValidateId.cpp b/services/mediametrics/ValidateId.cpp
new file mode 100644
index 0000000..0cc8593
--- /dev/null
+++ b/services/mediametrics/ValidateId.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaMetricsService"  // not ValidateId
+#include <utils/Log.h>
+
+#include "ValidateId.h"
+
+namespace android::mediametrics {
+
+std::string ValidateId::dump() const
+{
+    std::stringstream ss;
+    ss << "Entries:" << mIdSet.size() << "  InvalidIds:" << mInvalidIds << "\n";
+    ss << mIdSet.dump(10);
+    return ss.str();
+}
+
+void ValidateId::registerId(const std::string& id)
+{
+    if (id.empty()) return;
+    if (!mediametrics::stringutils::isLogSessionId(id.c_str())) {
+        ALOGW("%s: rejecting malformed id %s", __func__, id.c_str());
+        return;
+    }
+    ALOGV("%s: registering %s", __func__, id.c_str());
+    mIdSet.add(id);
+}
+
+const std::string& ValidateId::validateId(const std::string& id)
+{
+    static const std::string empty{};
+    if (id.empty()) return empty;
+
+    // reject because the id is malformed
+    if (!mediametrics::stringutils::isLogSessionId(id.c_str())) {
+        ALOGW("%s: rejecting malformed id %s", __func__, id.c_str());
+        ++mInvalidIds;
+        return empty;
+    }
+
+    // reject because the id is unregistered
+    if (!mIdSet.check(id)) {
+        ALOGW("%s: rejecting unregistered id %s", __func__, id.c_str());
+        ++mInvalidIds;
+        return empty;
+    }
+    return id;
+}
+
+} // namespace android::mediametrics
diff --git a/services/mediametrics/fuzzer/Android.bp b/services/mediametrics/fuzzer/Android.bp
index b03e518..84d494e 100644
--- a/services/mediametrics/fuzzer/Android.bp
+++ b/services/mediametrics/fuzzer/Android.bp
@@ -56,11 +56,11 @@
         "libstatssocket",
         "libutils",
         "mediametricsservice-aidl-cpp",
+        "packagemanager_aidl-cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/services/mediametrics",
-        "system/media/audio_utils/include",
+    header_libs: [
+        "libaudioutils_headers",
     ],
 
     fuzz_config: {
diff --git a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
index 8b0b479..433332c 100644
--- a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
+++ b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
@@ -19,15 +19,14 @@
  */
 #include <fuzzer/FuzzedDataProvider.h>
 #include <media/MediaMetricsItem.h>
+#include <mediametricsservice/AudioTypes.h>
+#include <mediametricsservice/MediaMetricsService.h>
+#include <mediametricsservice/StringUtils.h>
 #include <stdio.h>
 #include <string.h>
 #include <utils/Log.h>
 #include <algorithm>
 
-#include "AudioTypes.h"
-#include "MediaMetricsService.h"
-#include "StringUtils.h"
-
 using namespace android;
 
 // low water mark
@@ -48,6 +47,7 @@
     void invokeAudioAnalytics(const uint8_t *data, size_t size);
     void invokeTimedAction(const uint8_t *data, size_t size);
     void process(const uint8_t *data, size_t size);
+    std::atomic_int mValue = 0;
 };
 
 void MediaMetricsServiceFuzzer::invokeStartsWith(const uint8_t *data, size_t size) {
@@ -342,11 +342,10 @@
 void MediaMetricsServiceFuzzer::invokeTimedAction(const uint8_t *data, size_t size) {
     FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
     android::mediametrics::TimedAction timedAction;
-    std::atomic_int value = 0;
 
     while (fdp.remaining_bytes()) {
         timedAction.postIn(std::chrono::seconds(fdp.ConsumeIntegral<int32_t>()),
-                           [&value] { ++value; });
+                           [this] { ++mValue; });
         timedAction.size();
     }
 }
diff --git a/services/mediametrics/AnalyticsActions.h b/services/mediametrics/include/mediametricsservice/AnalyticsActions.h
similarity index 100%
rename from services/mediametrics/AnalyticsActions.h
rename to services/mediametrics/include/mediametricsservice/AnalyticsActions.h
diff --git a/services/mediametrics/AnalyticsState.h b/services/mediametrics/include/mediametricsservice/AnalyticsState.h
similarity index 100%
rename from services/mediametrics/AnalyticsState.h
rename to services/mediametrics/include/mediametricsservice/AnalyticsState.h
diff --git a/services/mediametrics/AudioAnalytics.h b/services/mediametrics/include/mediametricsservice/AudioAnalytics.h
similarity index 90%
rename from services/mediametrics/AudioAnalytics.h
rename to services/mediametrics/include/mediametricsservice/AudioAnalytics.h
index 2b41a95..9b54cf3 100644
--- a/services/mediametrics/AudioAnalytics.h
+++ b/services/mediametrics/include/mediametricsservice/AudioAnalytics.h
@@ -20,6 +20,7 @@
 #include "AnalyticsActions.h"
 #include "AnalyticsState.h"
 #include "AudioPowerUsage.h"
+#include "HeatMap.h"
 #include "StatsdLog.h"
 #include "TimedAction.h"
 #include "Wrap.h"
@@ -73,11 +74,23 @@
     std::pair<std::string, int32_t> dump(
             int32_t lines = INT32_MAX, int64_t sinceNs = 0, const char *prefix = nullptr) const;
 
+    /**
+     * HeatMap dump.
+     *
+     * Returns a pair consisting of the dump string and the number of lines in the string.
+     */
+    std::pair<std::string, int32_t> dumpHeatMap(int32_t lines = INT32_MAX) const {
+        return mHeatMap.dump(lines);
+    }
+
     void clear() {
         // underlying state is locked.
         mPreviousAnalyticsState->clear();
         mAnalyticsState->clear();
 
+        // Clears the status map
+        mHeatMap.clear();
+
         // Clear power usage state.
         mAudioPowerUsage.clear();
     }
@@ -96,11 +109,18 @@
      */
 
     /**
-     * Checks for any pending actions for a particular item.
+     * Processes any pending actions for a particular item.
      *
      * \param item to check against the current AnalyticsActions.
      */
-    void checkActions(const std::shared_ptr<const mediametrics::Item>& item);
+    void processActions(const std::shared_ptr<const mediametrics::Item>& item);
+
+    /**
+     * Processes status information contained in the item.
+     *
+     * \param item to check against for status handling
+     */
+    void processStatus(const std::shared_ptr<const mediametrics::Item>& item);
 
     // HELPER METHODS
     /**
@@ -124,6 +144,9 @@
     TimedAction mTimedAction; // locked internally
     const std::shared_ptr<StatsdLog> mStatsdLog; // locked internally, ok for multiple threads.
 
+    static constexpr size_t kHeatEntries = 100;
+    HeatMap mHeatMap{kHeatEntries}; // locked internally, ok for multiple threads.
+
     // DeviceUse is a nested class which handles audio device usage accounting.
     // We define this class at the end to ensure prior variables all properly constructed.
     // TODO: Track / Thread interaction
diff --git a/services/mediametrics/AudioPowerUsage.h b/services/mediametrics/include/mediametricsservice/AudioPowerUsage.h
similarity index 80%
rename from services/mediametrics/AudioPowerUsage.h
rename to services/mediametrics/include/mediametricsservice/AudioPowerUsage.h
index 7021902..b7215e6 100644
--- a/services/mediametrics/AudioPowerUsage.h
+++ b/services/mediametrics/include/mediametricsservice/AudioPowerUsage.h
@@ -26,6 +26,7 @@
 
 namespace android::mediametrics {
 
+
 class AudioAnalytics;
 
 class AudioPowerUsage {
@@ -83,13 +84,21 @@
     static bool deviceFromString(const std::string& device_string, int32_t& device);
     static int32_t deviceFromStringPairs(const std::string& device_strings);
 private:
-    bool saveAsItem_l(int32_t device, int64_t duration, int32_t type, double average_vol)
-         REQUIRES(mLock);
+    bool saveAsItem_l(int32_t device, int64_t duration, int32_t type, double average_vol,
+                      int64_t max_volume_duration, double max_volume,
+                      int64_t min_volume_duration, double min_volume)
+                      REQUIRES(mLock);
     void sendItem(const std::shared_ptr<const mediametrics::Item>& item) const;
     void collect();
-    bool saveAsItems_l(int32_t device, int64_t duration, int32_t type, double average_vol)
-         REQUIRES(mLock);
-
+    bool saveAsItems_l(int32_t device, int64_t duration, int32_t type, double average_vol,
+                      int64_t max_volume_duration, double max_volume,
+                      int64_t min_volume_duration, double min_volume)
+                      REQUIRES(mLock);
+    void updateMinMaxVolumeAndDuration(
+            const int64_t cur_max_volume_duration_ns, const double cur_max_volume,
+            const int64_t cur_min_volume_duration_ns, const double cur_min_volume,
+            int64_t& f_max_volume_duration_ns, double& f_max_volume,
+            int64_t& f_min_volume_duration_ns, double& f_min_volume);
     AudioAnalytics * const mAudioAnalytics;
     const std::shared_ptr<StatsdLog> mStatsdLog;  // mStatsdLog is internally locked
     const bool mDisabled;
@@ -100,6 +109,10 @@
 
     double mVoiceVolume GUARDED_BY(mLock) = 0.;
     double mDeviceVolume GUARDED_BY(mLock) = 0.;
+    double mMaxVoiceVolume GUARDED_BY(mLock) = AMEDIAMETRICS_INITIAL_MAX_VOLUME;
+    double mMinVoiceVolume GUARDED_BY(mLock) = AMEDIAMETRICS_INITIAL_MIN_VOLUME;
+    int64_t mMaxVoiceVolumeDurationNs GUARDED_BY(mLock) = 0;
+    int64_t mMinVoiceVolumeDurationNs GUARDED_BY(mLock) = 0;
     int64_t mStartCallNs GUARDED_BY(mLock) = 0; // advisory only
     int64_t mVolumeTimeNs GUARDED_BY(mLock) = 0;
     int64_t mDeviceTimeNs GUARDED_BY(mLock) = 0;
diff --git a/services/mediametrics/AudioTypes.h b/services/mediametrics/include/mediametricsservice/AudioTypes.h
similarity index 92%
rename from services/mediametrics/AudioTypes.h
rename to services/mediametrics/include/mediametricsservice/AudioTypes.h
index 4394d79..5dbff9b 100644
--- a/services/mediametrics/AudioTypes.h
+++ b/services/mediametrics/include/mediametricsservice/AudioTypes.h
@@ -39,6 +39,10 @@
 };
 
 // Enumeration for all the string translations to integers (generally int32_t) unless noted.
+// This is used to index the template method below:
+// template <AudioEnumCategory C, typename T, typename S>  T lookup(const S &str);
+//
+// It is OK to keep AudioEnumCategory alphabetical and to insert new categories in the middle.
 enum AudioEnumCategory {
     AAUDIO_DIRECTION,
     AAUDIO_PERFORMANCE_MODE,
@@ -51,6 +55,7 @@
     OUTPUT_DEVICE, // int64_t
     OUTPUT_FLAG,
     SOURCE_TYPE,
+    STATUS,
     STREAM_TYPE,
     THREAD_TYPE,
     TRACK_TRAITS,
diff --git a/services/mediametrics/include/mediametricsservice/HeatMap.h b/services/mediametrics/include/mediametricsservice/HeatMap.h
new file mode 100644
index 0000000..950501a
--- /dev/null
+++ b/services/mediametrics/include/mediametricsservice/HeatMap.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <deque>
+#include <iomanip>
+#include <map>
+#include <mutex>
+#include <sstream>
+#include <string>
+#include <android-base/thread_annotations.h>
+#include "MediaMetricsConstants.h"
+
+namespace android::mediametrics {
+
+/**
+ * HeatData accumulates statistics on the status reported for a given key.
+ *
+ * HeatData is a helper class used by HeatMap to represent statistics.  We expose it
+ * here for testing purposes currently.
+ *
+ * Note: This class is not thread-safe, so mutual exclusion must be provided by the caller,
+ * which in this case is HeatMap.  HeatMap::getData() returns a local copy of HeatData, so use
+ * of that copy is thread-safe.
+ */
+class HeatData {
+    /* HeatData for a key is stored in a map keyed by the event (e.g. "create", "start", "pause")
+     * and then another map keyed by the status (e.g. "ok", "argument", "state").
+     */
+    std::map<std::string /* event */,
+             std::map<std::string /* status name */, size_t /* count, nonzero */>> mMap;
+
+public:
+    /**
+     * Add status data.
+     *
+     * \param suffix  (ignored) the suffix to the key that was stripped, if any.
+     * \param event             the event (e.g. create, start, pause, stop).
+     * \param status            the status (e.g. ok, argument, state).
+     * \param uid     (ignored) the uid associated with the error.
+     * \param message (ignored) the status message, if any.
+     * \param subCode (ignored) the status subcode, if any.
+     */
+    void add(const std::string& suffix, const std::string& event, const std::string& status,
+            uid_t uid, const std::string& message, int32_t subCode) {
+        // Perhaps there could be a more detailed print.
+        (void)suffix;
+        (void)uid;
+        (void)message;
+        (void)subCode;
+        ++mMap[event][status];
+    }
+
+    /** Returns the number of distinct events that have recorded statuses. */
+    size_t size() const {
+        return mMap.size();
+    }
+
+    /**
+     * Returns a deque of {ok, error} count pairs.
+     * The first pair is the total; the remaining pairs follow the order of mMap (by event).
+     *
+     * Example return value of {ok, error} pairs:
+     *     total    event1    event2
+     * { { 2, 1 }, { 1, 0 }, { 1, 1 } }
+     */
+    std::deque<std::pair<size_t /* oks */, size_t /* errors */>> heatCount() const {
+        size_t totalOk = 0;
+        size_t totalError = 0;
+        std::deque<std::pair<size_t /* oks */, size_t /* errors */>> heat;
+        for (const auto &eventPair : mMap) {
+            size_t ok = 0;
+            size_t error = 0;
+            for (const auto &[name, count] : eventPair.second) {
+                if (name == AMEDIAMETRICS_PROP_STATUS_VALUE_OK) {
+                    ok += count;
+                } else {
+                    error += count;
+                }
+            }
+            totalOk += ok;
+            totalError += error;
+            heat.emplace_back(ok, error);
+        }
+        heat.emplace_front(totalOk, totalError);
+        return heat;
+    }
+
+    /** Returns the error fraction from a pair <oks, errors>, a float between 0.f and 1.f. */
+    static float fraction(const std::pair<size_t, size_t>& count) {
+        return (float)count.second / (count.first + count.second);
+    }
+
+    /** Returns the HeatData information as a single-line string. */
+    std::string dump() const {
+        const auto heat = heatCount();
+        auto it = heat.begin();
+        std::stringstream ss;
+        ss << "{ ";
+        float errorFraction = fraction(*it++);
+        if (errorFraction > 0.f) {
+            ss << std::fixed << std::setprecision(2) << errorFraction << " ";
+        }
+        for (const auto &eventPair : mMap) {
+            ss << eventPair.first << ": { ";
+            errorFraction = fraction(*it++);
+            if (errorFraction > 0.f) {
+                ss << std::fixed << std::setprecision(2) << errorFraction << " ";
+            }
+            for (const auto &[name, count]: eventPair.second) {
+                ss << "[ " << name << " : " << count << " ] ";
+            }
+            ss << "} ";
+        }
+        ss << " }";
+        return ss.str();
+    }
+};
+
+/**
+ * HeatMap is a thread-safe collection that counts activity of status errors per key.
+ *
+ * The classic heat map is a 2D picture with intensity shown by color.
+ * Here we accumulate the status results from keys to see if there are consistent
+ * failures in the system.
+ *
+ * TODO(b/210855555): HeatMap improvements.
+ *   1) Heat should decay in intensity over time for past events; currently there is no decay.
+ */
+
+class HeatMap {
+    const size_t mMaxSize;
+    mutable std::mutex mLock;
+    size_t mRejected GUARDED_BY(mLock) = 0;
+    std::map<std::string, HeatData> mMap GUARDED_BY(mLock);
+
+public:
+    /**
+     * Constructs a HeatMap.
+     *
+     * \param maxSize the maximum number of elements that are tracked.
+     */
+    explicit HeatMap(size_t maxSize) : mMaxSize(maxSize) {
+    }
+
+    /** Returns the number of keys. */
+    size_t size() const {
+        std::lock_guard l(mLock);
+        return mMap.size();
+    }
+
+    /** Clears error history. */
+    void clear() {
+        std::lock_guard l(mLock);
+        mMap.clear();
+    }
+
+    /** Returns number of keys rejected due to space. */
+    size_t rejected() const {
+        std::lock_guard l(mLock);
+        return mRejected;
+    }
+
+    /** Returns a copy of the heat data associated with key. */
+    HeatData getData(const std::string& key) const {
+        std::lock_guard l(mLock);
+        return mMap.count(key) == 0 ? HeatData{} : mMap.at(key);
+    }
+
+    /**
+     * Adds a new entry.
+     * \param key               the key category (e.g. audio.track).
+     * \param suffix  (ignored) the suffix to the key that was stripped, if any.
+     * \param event             the event (e.g. create, start, pause, stop).
+     * \param status            the status (e.g. ok, argument, state).
+     * \param uid     (ignored) the uid associated with the error.
+     * \param message (ignored) the status message, if any.
+     * \param subCode (ignored) the status subcode, if any.
+     */
+    void add(const std::string& key, const std::string& suffix, const std::string& event,
+            const std::string& status, uid_t uid, const std::string& message, int32_t subCode) {
+        std::lock_guard l(mLock);
+
+        // Hard limit on heat map entries.
+        // TODO: have better GC.
+        if (mMap.size() == mMaxSize && mMap.count(key) == 0) {
+            ++mRejected;
+            return;
+        }
+        mMap[key].add(suffix, event, status, uid, message, subCode);
+    }
+
+    /**
+     * Returns a pair consisting of the dump string and the number of lines in the string.
+     */
+    std::pair<std::string, int32_t> dump(int32_t lines = INT32_MAX) const {
+        std::stringstream ss;
+        int32_t ll = lines;
+        std::lock_guard l(mLock);
+        if (ll > 0) {
+            ss << "Error Heat Map (rejected: " << mRejected << "):\n";
+            --ll;
+        }
+        // TODO: the line limit is applied alphabetically, not by priority.
+        for (const auto& [name, data] : mMap) {
+            if (ll <= 0) break;
+            ss << name << ": " << data.dump() << "\n";
+            --ll;
+        }
+        return { ss.str(), lines - ll };
+    }
+};
+
+} // namespace android::mediametrics
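For reference, a minimal standalone sketch of how the new HeatMap/HeatData API above could be
exercised (hypothetical usage, not part of this change; the key, event, and status values are
illustrative, and the include path follows the headers' new include/mediametricsservice location):

    #include <iostream>
    #include <mediametricsservice/HeatMap.h>

    int main() {
        android::mediametrics::HeatMap heatMap{100 /* maxSize */};

        // One success and one failure recorded against the same key.
        heatMap.add("audio.track.10", "10", "start",
                AMEDIAMETRICS_PROP_STATUS_VALUE_OK, 0 /* uid */, "" /* message */, 0 /* subCode */);
        heatMap.add("audio.track.10", "10", "start",
                AMEDIAMETRICS_PROP_STATUS_VALUE_STATE, 0 /* uid */, "bad state", 0 /* subCode */);

        // dump() emits one line per key with error fractions and per-status counts.
        const auto [text, lineCount] = heatMap.dump();
        std::cout << text << "(" << lineCount << " lines)\n";
        return 0;
    }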
diff --git a/services/mediametrics/include/mediametricsservice/LruSet.h b/services/mediametrics/include/mediametricsservice/LruSet.h
new file mode 100644
index 0000000..1f0ab60
--- /dev/null
+++ b/services/mediametrics/include/mediametricsservice/LruSet.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <list>
+#include <sstream>
+#include <unordered_map>
+
+namespace android::mediametrics {
+
+/**
+ * LruSet keeps a set of the last "Size" elements added or accessed.
+ *
+ * (Lru stands for least-recently-used eviction policy).
+ *
+ * Runs in O(1) time for add, remove, and check.  Internally implemented
+ * with an unordered_map and a list.  In order to remove elements,
+ * a list iterator is stored in the unordered_map
+ * (noting that std::list::erase() contractually
+ * does not affect iterators other than the one erased).
+ */
+
+template <typename T>
+class LruSet {
+    const size_t mMaxSize;
+    std::list<T> mAccessOrder;                 // front is the most recent, back is the oldest.
+    // item T with its access order iterator.
+    std::unordered_map<T, typename std::list<T>::iterator> mMap;
+
+public:
+    /**
+     * Constructs an LruSet that tracks whether an element was
+     * recently added or accessed.
+     *
+     * The parameter maxSize caps the growth of the LruSet;
+     * eviction is least recently used (LRU).
+     * If maxSize is zero, the LruSet contains no elements
+     * and check() always returns false.
+     *
+     * \param maxSize the maximum number of elements that are tracked.
+     */
+    explicit LruSet(size_t maxSize) : mMaxSize(maxSize) {}
+
+    /**
+     * Returns the number of entries in the LruSet.
+     *
+     * This is a number between 0 and maxSize.
+     */
+    size_t size() const {
+        return mMap.size();
+    }
+
+    /** Clears the container contents. */
+    void clear() {
+        mMap.clear();
+        mAccessOrder.clear();
+    }
+
+    /** Returns a string dump of the last n entries. */
+    std::string dump(size_t n) const {
+        std::stringstream ss;
+        auto it = mAccessOrder.cbegin();
+        for (size_t i = 0; i < n && it != mAccessOrder.cend(); ++i) {
+            ss << *it++ << "\n";
+        }
+        return ss.str();
+    }
+
+    /** Adds a new item to the set. */
+    void add(const T& t) {
+        if (mMaxSize == 0) return;
+        auto it = mMap.find(t);
+        if (it != mMap.end()) { // already exists.
+            mAccessOrder.erase(it->second);  // drop the old position; it is re-added at the front below.
+        } else if (mAccessOrder.size() >= mMaxSize) {
+            const T last = mAccessOrder.back();
+            mAccessOrder.pop_back();
+            mMap.erase(last);
+        }
+        mAccessOrder.push_front(t);
+        mMap[t] = mAccessOrder.begin();
+    }
+
+    /**
+     * Removes an item from the set.
+     *
+     * \param t item to be removed.
+     * \return false if the item doesn't exist.
+     */
+    bool remove(const T& t) {
+        auto it = mMap.find(t);
+        if (it == mMap.end()) return false;
+        mAccessOrder.erase(it->second);
+        mMap.erase(it);
+        return true;
+    }
+
+    /** Returns true if t is present (and moves the access order of t to the front). */
+    bool check(const T& t) { // not const, as it adjusts the least-recently-used order.
+        auto it = mMap.find(t);
+        if (it == mMap.end()) return false;
+        mAccessOrder.erase(it->second);
+        mAccessOrder.push_front(it->first);
+        it->second = mAccessOrder.begin();
+        return true;
+    }
+};
+
+} // namespace android::mediametrics
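As a quick illustration of the move-to-front behavior documented above (a hypothetical
standalone sketch, not part of this change), checking an element refreshes it, so a
different, older element is evicted first:

    #include <cassert>
    #include <string>
    #include <mediametricsservice/LruSet.h>

    int main() {
        android::mediametrics::LruSet<std::string> lru(2 /* maxSize */);
        lru.add("a");
        lru.add("b");
        (void)lru.check("a");  // refreshes "a", so "b" becomes the least recently used.
        lru.add("c");          // evicts "b", not "a".
        assert(lru.check("a"));
        assert(!lru.check("b"));
        assert(lru.check("c"));
        return 0;
    }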
diff --git a/services/mediametrics/MediaMetricsService.h b/services/mediametrics/include/mediametricsservice/MediaMetricsService.h
similarity index 100%
rename from services/mediametrics/MediaMetricsService.h
rename to services/mediametrics/include/mediametricsservice/MediaMetricsService.h
diff --git a/services/mediametrics/StatsdLog.h b/services/mediametrics/include/mediametricsservice/StatsdLog.h
similarity index 100%
rename from services/mediametrics/StatsdLog.h
rename to services/mediametrics/include/mediametricsservice/StatsdLog.h
diff --git a/services/mediametrics/StringUtils.h b/services/mediametrics/include/mediametricsservice/StringUtils.h
similarity index 83%
rename from services/mediametrics/StringUtils.h
rename to services/mediametrics/include/mediametricsservice/StringUtils.h
index 01034d9..a56f5b8 100644
--- a/services/mediametrics/StringUtils.h
+++ b/services/mediametrics/include/mediametricsservice/StringUtils.h
@@ -167,4 +167,41 @@
     return ss.str();
 }
 
+/**
+ * Returns true if the string is non-null, not empty, and contains only digits.
+ */
+inline constexpr bool isNumeric(const char *s)
+{
+    if (s == nullptr || *s == 0) return false;
+    do {
+        if (!isdigit(*s)) return false;
+    } while (*++s != 0);
+    return true;  // all digits
+}
+
+/**
+ * Extracts the prefix from the key, returning a {prefix, suffix} pair.
+ *
+ * Usually the key is of the form:
+ * Prefix.(ID)
+ *   where ID is an integer,
+ *            or "error" if no id was returned because of a failure,
+ *            or "status" for a general status key.
+ *
+ * Example: audio.track.10     -> prefix = audio.track, suffix = 10
+ *          audio.track.error  -> prefix = audio.track, suffix = error
+ *          audio.track.status -> prefix = audio.track, suffix = status
+ *          audio.mute         -> prefix = audio.mute,  suffix = ""
+ */
+inline std::pair<std::string /* prefix */,
+                 std::string /* suffix */> splitPrefixKey(const std::string &key)
+{
+    const size_t split = key.rfind('.');
+    const char* suffix = key.c_str() + split + 1;
+    if (*suffix && (!strcmp(suffix, "error") || !strcmp(suffix, "status") || isNumeric(suffix))) {
+        return { key.substr(0, split), suffix };
+    }
+    return { key, "" };
+}
+
 } // namespace android::mediametrics::stringutils
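A small hypothetical example (not part of this change) of the splitPrefixKey() behavior
described in the comment above, using the same keys as its examples:

    #include <iostream>
    #include <mediametricsservice/StringUtils.h>

    int main() {
        using android::mediametrics::stringutils::splitPrefixKey;
        // Expected: audio.track.10     -> prefix=audio.track suffix="10"
        //           audio.track.error  -> prefix=audio.track suffix="error"
        //           audio.mute         -> prefix=audio.mute  suffix=""
        for (const char* key : { "audio.track.10", "audio.track.error", "audio.mute" }) {
            const auto [prefix, suffix] = splitPrefixKey(key);
            std::cout << key << " -> prefix=" << prefix << " suffix=\"" << suffix << "\"\n";
        }
        return 0;
    }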
diff --git a/services/mediametrics/TimeMachine.h b/services/mediametrics/include/mediametricsservice/TimeMachine.h
similarity index 100%
rename from services/mediametrics/TimeMachine.h
rename to services/mediametrics/include/mediametricsservice/TimeMachine.h
diff --git a/services/mediametrics/TimedAction.h b/services/mediametrics/include/mediametricsservice/TimedAction.h
similarity index 100%
rename from services/mediametrics/TimedAction.h
rename to services/mediametrics/include/mediametricsservice/TimedAction.h
diff --git a/services/mediametrics/TransactionLog.h b/services/mediametrics/include/mediametricsservice/TransactionLog.h
similarity index 98%
rename from services/mediametrics/TransactionLog.h
rename to services/mediametrics/include/mediametricsservice/TransactionLog.h
index 0ca4639..fd42518 100644
--- a/services/mediametrics/TransactionLog.h
+++ b/services/mediametrics/include/mediametricsservice/TransactionLog.h
@@ -158,7 +158,7 @@
                 ++it) {
             if (ll <= 0) break;
             if (prefix != nullptr && !startsWith(it->first, prefix)) break;
-            auto [s, l] = dumpMapTimeItem(it->second, ll - 1, sinceNs, prefix);
+            std::tie(s, l) = dumpMapTimeItem(it->second, ll - 1, sinceNs, prefix);
             if (l == 0) continue; // don't show empty groups (due to sinceNs).
             ss << " " << it->first << "\n" << s;
             ll -= l + 1;
diff --git a/services/mediametrics/include/mediametricsservice/ValidateId.h b/services/mediametrics/include/mediametricsservice/ValidateId.h
new file mode 100644
index 0000000..166b39a
--- /dev/null
+++ b/services/mediametrics/include/mediametricsservice/ValidateId.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "LruSet.h"
+#include "StringUtils.h"
+#include "Wrap.h"
+
+namespace android::mediametrics {
+
+/*
+ * ValidateId is used to check whether the log session id is properly formed
+ * and has been registered (i.e. from the Java MediaMetricsManagerService).
+ *
+ * The default memory window to track registered ids is set to SINGLETON_LRU_SET_SIZE.
+ *
+ * This class is not thread-safe, but the singleton returned by get() uses LockWrap<>
+ * to ensure thread-safety.
+ */
+class ValidateId {
+    mediametrics::LruSet<std::string> mIdSet;
+    size_t mInvalidIds = 0;  // count invalid ids encountered.
+public:
+    /** Creates a ValidateId object with a memory window of the given size. */
+    explicit ValidateId(size_t size) : mIdSet{size} {}
+
+    /** Returns a string dump of recent contents and stats. */
+    std::string dump() const;
+
+    /**
+     * Registers the id string.
+     *
+     * If the id string is malformed (not 16 Base64Url chars), it is ignored.
+     * Once registered, validateId() will return the id (instead of the empty string).
+     * ValidateId may "forget" the id if it is not seen within the last N registered ids,
+     * where N is the size set in the constructor.
+     *
+     * \param id string (from MediaMetricsManagerService).
+     */
+    void registerId(const std::string& id);
+
+    /**
+     * Returns the empty string if the id string is malformed (not 16 Base64Url chars)
+     * or if the id string has not been seen among the recently registered ids;
+     * otherwise it returns the same id parameter.
+     *
+     * \param id string (to be sent to statsd).
+     */
+    const std::string& validateId(const std::string& id);
+
+    /** Singleton set size */
+    static inline constexpr size_t SINGLETON_LRU_SET_SIZE = 2000;
+
+    using LockedValidateId = mediametrics::LockWrap<ValidateId>;
+    /**
+     * Returns a singleton locked ValidateId object that is thread-safe using LockWrap<>.
+     *
+     * The singleton ValidateId object is created with size SINGLETON_LRU_SET_SIZE (on first call).
+     */
+    static inline LockedValidateId& get() {
+        static LockedValidateId privateSet{SINGLETON_LRU_SET_SIZE};
+        return privateSet;
+    }
+};
+
+} // namespace android::mediametrics
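A hypothetical end-to-end sketch (not part of this change) of the singleton flow described
above: MediaMetricsService registers incoming log session ids, and the statsd writers pass
outgoing ids through validateId(), which maps malformed or unregistered ids to the empty
string. The id values below are made-up 16-character Base64Url strings:

    #include <iostream>
    #include <string>
    #include <mediametricsservice/ValidateId.h>

    int main() {
        using android::mediametrics::ValidateId;
        const std::string registered = "0123456789abcdef";

        ValidateId::get()->registerId(registered);  // as done for "metrics.manager" items.

        std::cout << ValidateId::get()->validateId(registered) << "\n";  // prints the id itself.
        const std::string unknown = "ffffffffffffffff";
        std::cout << "[" << ValidateId::get()->validateId(unknown)       // prints "[]": unregistered -> "".
                  << "]\n";
        return 0;
    }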
diff --git a/services/mediametrics/Wrap.h b/services/mediametrics/include/mediametricsservice/Wrap.h
similarity index 100%
rename from services/mediametrics/Wrap.h
rename to services/mediametrics/include/mediametricsservice/Wrap.h
diff --git a/services/mediametrics/cleaner.h b/services/mediametrics/include/mediametricsservice/cleaner.h
similarity index 100%
rename from services/mediametrics/cleaner.h
rename to services/mediametrics/include/mediametricsservice/cleaner.h
diff --git a/services/mediametrics/iface_statsd.h b/services/mediametrics/include/mediametricsservice/iface_statsd.h
similarity index 100%
rename from services/mediametrics/iface_statsd.h
rename to services/mediametrics/include/mediametricsservice/iface_statsd.h
diff --git a/services/mediametrics/main_mediametrics.cpp b/services/mediametrics/main_mediametrics.cpp
index 3a66538..455d67a 100644
--- a/services/mediametrics/main_mediametrics.cpp
+++ b/services/mediametrics/main_mediametrics.cpp
@@ -18,11 +18,10 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include "MediaMetricsService.h"
-
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
+#include <mediametricsservice/MediaMetricsService.h>
 #include <mediautils/LimitProcessMemory.h>
 
 int main(int argc __unused, char **argv)
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index 41efcaa..a7b045e 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -32,7 +32,7 @@
 #include <statslog.h>
 
 #include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
 #include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
 #include "iface_statsd.h"
 
@@ -80,16 +80,20 @@
     }
 
     int64_t created_millis = -1;
+    // not currently sent from client.
     if (item->getInt64("android.media.audiorecord.createdMs", &created_millis)) {
         metrics_proto.set_created_millis(created_millis);
     }
 
     int64_t duration_millis = -1;
-    if (item->getInt64("android.media.audiorecord.durationMs", &duration_millis)) {
+    double durationMs = 0.;
+    if (item->getDouble("android.media.audiorecord.durationMs", &durationMs)) {
+        duration_millis = (int64_t)durationMs;
         metrics_proto.set_duration_millis(duration_millis);
     }
 
     int32_t count = -1;
+    // not currently sent from client.  (see start count instead).
     if (item->getInt32("android.media.audiorecord.n", &count)) {
         metrics_proto.set_count(count);
     }
@@ -129,7 +133,7 @@
     }
 
     int64_t start_count = -1;
-    if (item->getInt64("android.media.audiorecord.startcount", &start_count)) {
+    if (item->getInt64("android.media.audiorecord.startCount", &start_count)) {
         metrics_proto.set_start_count(start_count);
     }
 
@@ -143,8 +147,7 @@
     // log_session_id (string)
     std::string logSessionId;
     (void)item->getString("android.media.audiorecord.logSessionId", &logSessionId);
-    const auto log_session_id =
-            mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+    const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
 
     android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
     int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index 59627ae..67514e9 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -32,7 +32,7 @@
 #include <statslog.h>
 
 #include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
 #include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
 #include "iface_statsd.h"
 
@@ -56,52 +56,47 @@
     // flesh out the protobuf we'll hand off with our data
     //
 
-    // static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
+    // Do not change this without changing AudioTrack.cpp collection.
+
     // optional string streamType;
     std::string stream_type;
     if (item->getString("android.media.audiotrack.streamtype", &stream_type)) {
         metrics_proto.set_stream_type(stream_type);
     }
 
-    // static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
     // optional string contentType;
     std::string content_type;
     if (item->getString("android.media.audiotrack.type", &content_type)) {
         metrics_proto.set_content_type(content_type);
     }
 
-    // static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
     // optional string trackUsage;
     std::string track_usage;
     if (item->getString("android.media.audiotrack.usage", &track_usage)) {
         metrics_proto.set_track_usage(track_usage);
     }
 
-    // static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
-    // optional int32 samplerate;
+    // optional int32 sampleRate;
     int32_t sample_rate = -1;
-    if (item->getInt32("android.media.audiotrack.samplerate", &sample_rate)) {
+    if (item->getInt32("android.media.audiotrack.sampleRate", &sample_rate)) {
         metrics_proto.set_sample_rate(sample_rate);
     }
 
-    // static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
     // optional int64 channelMask;
     int64_t channel_mask = -1;
-    if (item->getInt64("android.media.audiotrack.channelmask", &channel_mask)) {
+    if (item->getInt64("android.media.audiotrack.channelMask", &channel_mask)) {
         metrics_proto.set_channel_mask(channel_mask);
     }
 
-    // NB: These are not yet exposed as public Java API constants.
-    // static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
-    // optional int32 underrunframes;
+    // optional int32 underrunFrames;
     int32_t underrun_frames = -1;
-    if (item->getInt32("android.media.audiotrack.underrunframes", &underrun_frames)) {
+    if (item->getInt32("android.media.audiotrack.underrunFrames", &underrun_frames)) {
         metrics_proto.set_underrun_frames(underrun_frames);
     }
 
-    // static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
-    // optional int32 startupglitch;
+    // optional int32 glitch.startup;
     int32_t startup_glitch = -1;
+    // Not currently sent from client.
     if (item->getInt32("android.media.audiotrack.glitch.startup", &startup_glitch)) {
         metrics_proto.set_startup_glitch(startup_glitch);
     }
@@ -137,8 +132,7 @@
     // log_session_id (string)
     std::string logSessionId;
     (void)item->getString("android.media.audiotrack.logSessionId", &logSessionId);
-    const auto log_session_id =
-            mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+    const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
 
     android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
     int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index 46cbdc8..17a3a5f 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -34,7 +34,7 @@
 
 #include "cleaner.h"
 #include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
 #include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
 #include "iface_statsd.h"
 
@@ -228,7 +228,7 @@
 
     std::string sessionId;
     if (item->getString("android.media.mediacodec.log-session-id", &sessionId)) {
-        sessionId = mediametrics::stringutils::sanitizeLogSessionId(sessionId);
+        sessionId = mediametrics::ValidateId::get()->validateId(sessionId);
         metrics_proto.set_log_session_id(sessionId);
     }
     AStatsEvent_writeString(event, codec.c_str());
@@ -390,6 +390,48 @@
     }
     AStatsEvent_writeInt32(event, qpBMaxOri);
 
+    // int32_t configColorStandard = -1;
+    // if (item->getInt32("android.media.mediacodec.config-color-standard", &configColorStandard)) {
+    //     metrics_proto.set_config_color_standard(configColorStandard);
+    // }
+    // AStatsEvent_writeInt32(event, configColorStandard);
+
+    // int32_t configColorRange = -1;
+    // if (item->getInt32("android.media.mediacodec.config-color-range", &configColorRange)) {
+    //     metrics_proto.set_config_color_range(configColorRange);
+    // }
+    // AStatsEvent_writeInt32(event, configColorRange);
+
+    // int32_t configColorTransfer = -1;
+    // if (item->getInt32("android.media.mediacodec.config-color-transfer", &configColorTransfer)) {
+    //     metrics_proto.set_config_color_transfer(configColorTransfer);
+    // }
+    // AStatsEvent_writeInt32(event, configColorTransfer);
+
+    // int32_t parsedColorStandard = -1;
+    // if (item->getInt32("android.media.mediacodec.parsed-color-standard", &parsedColorStandard)) {
+    //     metrics_proto.set_parsed_color_standard(parsedColorStandard);
+    // }
+    // AStatsEvent_writeInt32(event, parsedColorStandard);
+
+    // int32_t parsedColorRange = -1;
+    // if (item->getInt32("android.media.mediacodec.parsed-color-range", &parsedColorRange)) {
+    //     metrics_proto.set_parsed_color_range(parsedColorRange);
+    // }
+    // AStatsEvent_writeInt32(event, parsedColorRange);
+
+    // int32_t parsedColorTransfer = -1;
+    // if (item->getInt32("android.media.mediacodec.parsed-color-transfer", &parsedColorTransfer)) {
+    //     metrics_proto.set_parsed_color_transfer(parsedColorTransfer);
+    // }
+    // AStatsEvent_writeInt32(event, parsedColorTransfer);
+
+    // int32_t hdrMetadataFlags = -1;
+    // if (item->getInt32("android.media.mediacodec.hdr-metadata-flags", &hdrMetadataFlags)) {
+    //     metrics_proto.set_hdr_metadata_flags(hdrMetadataFlags);
+    // }
+    // AStatsEvent_writeInt32(event, hdrMetadataFlags);
+
     int err = AStatsEvent_write(event);
     if (err < 0) {
       ALOGE("Failed to write codec metrics to statsd (%d)", err);
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index bcf2e0a..a8bfeaa 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -32,7 +32,7 @@
 #include <statslog.h>
 
 #include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
 #include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
 #include "iface_statsd.h"
 
@@ -86,7 +86,7 @@
 
     std::string log_session_id;
     if (item->getString("android.media.mediaextractor.logSessionId", &log_session_id)) {
-        log_session_id = mediametrics::stringutils::sanitizeLogSessionId(log_session_id);
+        log_session_id = mediametrics::ValidateId::get()->validateId(log_session_id);
         metrics_proto.set_log_session_id(log_session_id);
     }
 
diff --git a/services/mediametrics/statsd_mediaparser.cpp b/services/mediametrics/statsd_mediaparser.cpp
index 921b320..67ca874b 100644
--- a/services/mediametrics/statsd_mediaparser.cpp
+++ b/services/mediametrics/statsd_mediaparser.cpp
@@ -31,7 +31,7 @@
 #include <statslog.h>
 
 #include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
 #include "frameworks/proto_logging/stats/enums/stats/mediametrics/mediametrics.pb.h"
 #include "iface_statsd.h"
 
@@ -81,7 +81,7 @@
 
     std::string logSessionId;
     item->getString("android.media.mediaparser.logSessionId", &logSessionId);
-    logSessionId = mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+    logSessionId = mediametrics::ValidateId::get()->validateId(logSessionId);
 
     int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
                                timestamp_nanos,
diff --git a/services/mediametrics/statsd_recorder.cpp b/services/mediametrics/statsd_recorder.cpp
index b29ad73..5f54a68 100644
--- a/services/mediametrics/statsd_recorder.cpp
+++ b/services/mediametrics/statsd_recorder.cpp
@@ -32,7 +32,7 @@
 #include <statslog.h>
 
 #include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
 #include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
 #include "iface_statsd.h"
 
@@ -59,7 +59,7 @@
     // string kRecorderLogSessionId = "android.media.mediarecorder.log-session-id";
     std::string log_session_id;
     if (item->getString("android.media.mediarecorder.log-session-id", &log_session_id)) {
-        log_session_id = mediametrics::stringutils::sanitizeLogSessionId(log_session_id);
+        log_session_id = mediametrics::ValidateId::get()->validateId(log_session_id);
         metrics_proto.set_log_session_id(log_session_id);
     }
     // string kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
diff --git a/services/mediametrics/tests/Android.bp b/services/mediametrics/tests/Android.bp
index 3baf739..f46fbad 100644
--- a/services/mediametrics/tests/Android.bp
+++ b/services/mediametrics/tests/Android.bp
@@ -33,6 +33,7 @@
         "libmediautils",
         "libutils",
         "mediametricsservice-aidl-cpp",
+        "packagemanager_aidl-cpp",
     ],
 
     header_libs: [
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index 2336d6f..bc7b47b 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -17,18 +17,18 @@
 #define LOG_TAG "mediametrics_tests"
 #include <utils/Log.h>
 
-#include "MediaMetricsService.h"
 
 #include <stdio.h>
 #include <unordered_set>
 
 #include <gtest/gtest.h>
 #include <media/MediaMetricsItem.h>
+#include <mediametricsservice/AudioTypes.h>
+#include <mediametricsservice/MediaMetricsService.h>
+#include <mediametricsservice/StringUtils.h>
+#include <mediametricsservice/ValidateId.h>
 #include <system/audio.h>
 
-#include "AudioTypes.h"
-#include "StringUtils.h"
-
 using namespace android;
 
 static size_t countNewlines(const char *s) {
@@ -1127,3 +1127,169 @@
    validId2[3] = '!';
    ASSERT_EQ("", mediametrics::stringutils::sanitizeLogSessionId(validId2));
 }
+
+TEST(mediametrics_tests, LruSet) {
+    constexpr size_t LRU_SET_SIZE = 2;
+    mediametrics::LruSet<std::string> lruSet(LRU_SET_SIZE);
+
+    // test adding a couple strings.
+    lruSet.add("abc");
+    ASSERT_EQ(1u, lruSet.size());
+    ASSERT_TRUE(lruSet.check("abc"));
+    lruSet.add("def");
+    ASSERT_EQ(2u, lruSet.size());
+
+    // now adding the third string causes eviction of the oldest.
+    lruSet.add("ghi");
+    ASSERT_FALSE(lruSet.check("abc"));
+    ASSERT_TRUE(lruSet.check("ghi"));
+    ASSERT_TRUE(lruSet.check("def"));  // "def" is most recent.
+    ASSERT_EQ(2u, lruSet.size());      // "abc" is correctly discarded.
+
+    // adding another string will evict the oldest.
+    lruSet.add("foo");
+    ASSERT_FALSE(lruSet.check("ghi")); // note: "ghi" discarded when "foo" added.
+    ASSERT_TRUE(lruSet.check("foo"));
+    ASSERT_TRUE(lruSet.check("def"));
+
+    // manual removing of a string works, too.
+    ASSERT_TRUE(lruSet.remove("def"));
+    ASSERT_FALSE(lruSet.check("def")); // we manually removed "def".
+    ASSERT_TRUE(lruSet.check("foo"));  // "foo" is still there.
+    ASSERT_EQ(1u, lruSet.size());
+
+    // you can't remove a string that has not been added.
+    ASSERT_FALSE(lruSet.remove("bar")); // Note: "bar" doesn't exist, so remove returns false.
+    ASSERT_EQ(1u, lruSet.size());
+
+    lruSet.add("foo");   // adding "foo" (which already exists) doesn't change size.
+    ASSERT_EQ(1u, lruSet.size());
+    lruSet.add("bar");   // add "bar"
+    ASSERT_EQ(2u, lruSet.size());
+    lruSet.add("glorp"); // add "glorp" evicts "foo".
+    ASSERT_EQ(2u, lruSet.size());
+    ASSERT_TRUE(lruSet.check("bar"));
+    ASSERT_TRUE(lruSet.check("glorp"));
+    ASSERT_FALSE(lruSet.check("foo"));
+}
+
+TEST(mediametrics_tests, LruSet0) {
+    constexpr size_t LRU_SET_SIZE = 0;
+    mediametrics::LruSet<std::string> lruSet(LRU_SET_SIZE);
+
+    lruSet.add("a");
+    ASSERT_EQ(0u, lruSet.size());
+    ASSERT_FALSE(lruSet.check("a"));
+    ASSERT_FALSE(lruSet.remove("a")); // never added.
+    ASSERT_EQ(0u, lruSet.size());
+}
+
+// Returns a 16-character Base64Url string representing the decimal representation of value
+// (with leading 0s), e.g. 0000000000000000, 0000000000000001, 0000000000000002, ...
+static std::string generateId(size_t value)
+{
+    char id[16 + 1]; // to be filled with 16 Base64Url chars (and zero termination)
+    char *sptr = id + 16; // start at the end.
+    *sptr-- = 0; // zero terminate.
+    // output the digits from least significant to most significant.
+    while (value) {
+        *sptr-- = '0' + (value % 10);
+        value /= 10;
+    }
+    // add leading 0's
+    while (sptr >= id) {
+        *sptr-- = '0';
+    }
+    return std::string(id);
+}
+
+TEST(mediametrics_tests, ValidateId) {
+    constexpr size_t LRU_SET_SIZE = 3;
+    constexpr size_t IDS = 10;
+    static_assert(IDS > LRU_SET_SIZE);  // IDS must be greater than LRU_SET_SIZE.
+    mediametrics::ValidateId validateId(LRU_SET_SIZE);
+
+
+    // register IDs as integer strings counting from 0.
+    for (size_t i = 0; i < IDS; ++i) {
+        validateId.registerId(generateId(i));
+    }
+
+    // only the last LRU_SET_SIZE exist.
+    for (size_t i = 0; i < IDS - LRU_SET_SIZE; ++i) {
+        ASSERT_EQ("", validateId.validateId(generateId(i)));
+    }
+    for (size_t i = IDS - LRU_SET_SIZE; i < IDS; ++i) {
+        const std::string id = generateId(i);
+        ASSERT_EQ(id, validateId.validateId(id));
+    }
+}
+
+TEST(mediametrics_tests, StatusConversion) {
+    constexpr status_t statuses[] = {
+        NO_ERROR,
+        BAD_VALUE,
+        DEAD_OBJECT,
+        NO_MEMORY,
+        PERMISSION_DENIED,
+        INVALID_OPERATION,
+        WOULD_BLOCK,
+        UNKNOWN_ERROR,
+    };
+
+    auto roundTrip = [](status_t status) {
+        return android::mediametrics::statusStringToStatus(
+                android::mediametrics::statusToStatusString(status));
+    };
+
+    // Primary status error categories.
+    for (const auto status : statuses) {
+        ASSERT_EQ(status, roundTrip(status));
+    }
+
+    // Status errors specially considered.
+    ASSERT_EQ(DEAD_OBJECT, roundTrip(FAILED_TRANSACTION));
+}
+
+TEST(mediametrics_tests, HeatMap) {
+    constexpr size_t SIZE = 2;
+    android::mediametrics::HeatMap heatMap{SIZE};
+    constexpr uid_t UID = 0;
+    constexpr int32_t SUBCODE = 1;
+
+    ASSERT_EQ((size_t)0, heatMap.size());
+    heatMap.add("someKey", "someSuffix", "someEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+    ASSERT_EQ((size_t)1, heatMap.size());
+    heatMap.add("someKey", "someSuffix", "someEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+    heatMap.add("someKey", "someSuffix", "anotherEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT, UID, "message", SUBCODE);
+    ASSERT_EQ((size_t)1, heatMap.size());
+    heatMap.add("anotherKey", "someSuffix", "someEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+    ASSERT_EQ((size_t)2, heatMap.size());
+    ASSERT_EQ((size_t)0, heatMap.rejected());
+
+    heatMap.add("thirdKey", "someSuffix", "someEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+    ASSERT_EQ((size_t)2, heatMap.size());
+    ASSERT_EQ((size_t)1, heatMap.rejected());
+
+    android::mediametrics::HeatData heatData = heatMap.getData("someKey");
+    ASSERT_EQ((size_t)2, heatData.size());
+    auto count = heatData.heatCount();
+    ASSERT_EQ((size_t)3, count.size()); // pairs in order { total, "anotherEvent", "someEvent" }
+    // check total value
+    ASSERT_EQ((size_t)2, count[0].first);  // OK
+    ASSERT_EQ((size_t)1, count[0].second); // ERROR;
+    // first key "anotherEvent"
+    ASSERT_EQ((size_t)0, count[1].first);  // OK
+    ASSERT_EQ((size_t)1, count[1].second); // ERROR;
+    // second key "someEvent"
+    ASSERT_EQ((size_t)2, count[2].first);  // OK
+    ASSERT_EQ((size_t)0, count[2].second); // ERROR;
+
+    heatMap.clear();
+    ASSERT_EQ((size_t)0, heatMap.size());
+}
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index f31202b..5d80744 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -90,7 +90,7 @@
     ],
 
     static_libs: [
-        "resourceobserver_aidl_interface-V1-ndk_platform",
+        "resourceobserver_aidl_interface-V1-ndk",
     ],
 
     include_dirs: ["frameworks/av/include"],
diff --git a/services/mediaresourcemanager/test/Android.bp b/services/mediaresourcemanager/test/Android.bp
index ec4ba58..618626f 100644
--- a/services/mediaresourcemanager/test/Android.bp
+++ b/services/mediaresourcemanager/test/Android.bp
@@ -56,7 +56,7 @@
     test_suites: ["device-tests"],
     static_libs: [
         "libresourcemanagerservice",
-        "resourceobserver_aidl_interface-V1-ndk_platform",
+        "resourceobserver_aidl_interface-V1-ndk",
     ],
     shared_libs: [
         "libbinder",
diff --git a/services/mediatranscoding/Android.bp b/services/mediatranscoding/Android.bp
index a9fd14f..fa5eb4e 100644
--- a/services/mediatranscoding/Android.bp
+++ b/services/mediatranscoding/Android.bp
@@ -47,7 +47,7 @@
     ],
 
     static_libs: [
-        "mediatranscoding_aidl_interface-ndk_platform",
+        "mediatranscoding_aidl_interface-ndk",
     ],
 
     cflags: [
@@ -80,7 +80,7 @@
     ],
 
     static_libs: [
-        "mediatranscoding_aidl_interface-ndk_platform",
+        "mediatranscoding_aidl_interface-ndk",
     ],
 
     cflags: [
diff --git a/services/mediatranscoding/tests/Android.bp b/services/mediatranscoding/tests/Android.bp
index cb180ec..ae13656 100644
--- a/services/mediatranscoding/tests/Android.bp
+++ b/services/mediatranscoding/tests/Android.bp
@@ -34,8 +34,8 @@
     ],
 
     static_libs: [
-        "mediatranscoding_aidl_interface-ndk_platform",
-        "resourcemanager_aidl_interface-ndk_platform",
+        "mediatranscoding_aidl_interface-ndk",
+        "resourcemanager_aidl_interface-ndk",
         "libmediatranscodingservice",
     ],
 
diff --git a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
index 0cb2fad..8e17f55 100644
--- a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
+++ b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
@@ -259,9 +259,7 @@
 static constexpr bool success = true;
 static constexpr bool fail = false;
 
-struct TestClientCallback : public BnTranscodingClientCallback,
-                            public EventTracker,
-                            public std::enable_shared_from_this<TestClientCallback> {
+struct TestClientCallback : public BnTranscodingClientCallback, public EventTracker {
     TestClientCallback(const char* packageName, int32_t id)
           : mClientId(id), mClientPid(PID(id)), mClientUid(UID(id)), mPackageName(packageName) {
         ALOGI("TestClientCallback %d created: pid %d, uid %d", id, PID(id), UID(id));
@@ -348,8 +346,8 @@
         ALOGD("registering %s with uid %d", packageName, mClientUid);
 
         std::shared_ptr<ITranscodingClient> client;
-        Status status =
-                service->registerClient(shared_from_this(), kClientName, packageName, &client);
+        Status status = service->registerClient(ref<TestClientCallback>(), kClientName, packageName,
+                                                &client);
 
         mClient = status.isOk() ? client : nullptr;
         return status;
diff --git a/services/minijail/Android.bp b/services/minijail/Android.bp
index 3a89e12..decc5fe 100644
--- a/services/minijail/Android.bp
+++ b/services/minijail/Android.bp
@@ -28,17 +28,11 @@
     defaults: ["libavservices_minijail_defaults"],
     vendor_available: true,
     min_sdk_version: "29",
-    export_include_dirs: ["."],
-}
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
 
-// By adding "vendor_available: true" to "libavservices_minijail", we don't
-// need to have "libavservices_minijail_vendor" any longer.
-// "libavservices_minijail_vendor" will be removed, once we replace it with
-// "libavservices_minijail" in all vendor modules. (b/146313710)
-cc_library_shared {
-    name: "libavservices_minijail_vendor",
-    vendor: true,
-    defaults: ["libavservices_minijail_defaults"],
     export_include_dirs: ["."],
 }
 
diff --git a/services/minijail/OWNERS b/services/minijail/OWNERS
index 19f4f9f..9ebf41e 100644
--- a/services/minijail/OWNERS
+++ b/services/minijail/OWNERS
@@ -1,2 +1,2 @@
 jorgelo@google.com
-marcone@google.com
+essick@google.com
diff --git a/services/oboeservice/AAudioCommandQueue.cpp b/services/oboeservice/AAudioCommandQueue.cpp
new file mode 100644
index 0000000..9bd18b3
--- /dev/null
+++ b/services/oboeservice/AAudioCommandQueue.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioCommandQueue"
+//#define LOG_NDEBUG 0
+
+#include <chrono>
+
+#include <utils/Log.h>
+
+#include "AAudioCommandQueue.h"
+
+namespace aaudio {
+
+aaudio_result_t AAudioCommandQueue::sendCommand(std::shared_ptr<AAudioCommand> command) {
+    {
+        std::scoped_lock<std::mutex> _l(mLock);
+        if (!mRunning) {
+            ALOGE("Tried to send command while it was not running");
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        mCommands.push(command);
+        mWaitWorkCond.notify_one();
+    }
+
+    std::unique_lock _cl(command->lock);
+    android::base::ScopedLockAssertion lockAssertion(command->lock);
+    ALOGV("Sending command %d, wait for reply(%d) with timeout %jd",
+           command->operationCode, command->isWaitingForReply, command->timeoutNanoseconds);
+    // `isWaitingForReply` is initialized when the command is constructed. It will be cleared
+    // when the command completes or times out.
+    auto timeoutExpire = std::chrono::steady_clock::now()
+            + std::chrono::nanoseconds(command->timeoutNanoseconds);
+    while (command->isWaitingForReply) {
+        if (command->conditionVariable.wait_until(_cl, timeoutExpire)
+                == std::cv_status::timeout) {
+            ALOGD("Command %d time out", command->operationCode);
+            command->result = AAUDIO_ERROR_TIMEOUT;
+            command->isWaitingForReply = false;
+        }
+    }
+    ALOGV("Command %d sent with result as %d", command->operationCode, command->result);
+    return command->result;
+}
+
+std::shared_ptr<AAudioCommand> AAudioCommandQueue::waitForCommand(int64_t timeoutNanos) {
+    std::shared_ptr<AAudioCommand> command;
+    {
+        std::unique_lock _l(mLock);
+        android::base::ScopedLockAssertion lockAssertion(mLock);
+        if (timeoutNanos >= 0) {
+            mWaitWorkCond.wait_for(_l, std::chrono::nanoseconds(timeoutNanos), [this]() {
+                android::base::ScopedLockAssertion lockAssertion(mLock);
+                return !mRunning || !mCommands.empty();
+            });
+        } else {
+            mWaitWorkCond.wait(_l, [this]() {
+                android::base::ScopedLockAssertion lockAssertion(mLock);
+                return !mRunning || !mCommands.empty();
+            });
+        }
+        if (!mCommands.empty() && mRunning) {
+            command = mCommands.front();
+            mCommands.pop();
+        }
+    }
+    return command;
+}
+
+void AAudioCommandQueue::startWaiting() {
+    std::scoped_lock<std::mutex> _l(mLock);
+    mRunning = true;
+}
+
+void AAudioCommandQueue::stopWaiting() {
+    std::scoped_lock<std::mutex> _l(mLock);
+    mRunning = false;
+    // Clear all commands in the queue as the command thread is stopped.
+    while (!mCommands.empty()) {
+        auto command = mCommands.front();
+        mCommands.pop();
+        std::scoped_lock<std::mutex> _cl(command->lock);
+        // If the command is waiting for a result, return AAUDIO_ERROR_INVALID_STATE
+        // as there is no longer a thread to process the command.
+        if (command->isWaitingForReply) {
+            command->result = AAUDIO_ERROR_INVALID_STATE;
+            command->isWaitingForReply = false;
+            command->conditionVariable.notify_one();
+        }
+    }
+    mWaitWorkCond.notify_one();
+}
+
+} // namespace aaudio
\ No newline at end of file
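Aside, a minimal standalone sketch (not part of this patch) of the reply-wait pattern used by sendCommand() above: the sender blocks on the command's condition variable until a worker clears the waiting flag or the timeout expires. The names Reply, waitForReply and the worker thread are illustrative only.

// Minimal sketch of the wait-for-reply pattern (illustrative, standalone).
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

struct Reply {
    std::mutex lock;
    std::condition_variable cv;
    bool waiting = true;  // cleared by the worker when the reply is ready
    int result = 0;
};

int waitForReply(Reply& reply, std::chrono::nanoseconds timeout) {
    std::unique_lock<std::mutex> cl(reply.lock);
    const auto expire = std::chrono::steady_clock::now() + timeout;
    while (reply.waiting) {
        if (reply.cv.wait_until(cl, expire) == std::cv_status::timeout) {
            reply.result = -1;      // stand-in for AAUDIO_ERROR_TIMEOUT
            reply.waiting = false;  // give up, as sendCommand() does on timeout
        }
    }
    return reply.result;
}

int main() {
    Reply reply;
    std::thread worker([&reply] {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        std::lock_guard<std::mutex> cl(reply.lock);
        reply.result = 42;
        reply.waiting = false;
        reply.cv.notify_one();
    });
    std::cout << "result = " << waitForReply(reply, std::chrono::seconds(1)) << std::endl;
    worker.join();
    return 0;
}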
diff --git a/services/oboeservice/AAudioCommandQueue.h b/services/oboeservice/AAudioCommandQueue.h
new file mode 100644
index 0000000..64442a3
--- /dev/null
+++ b/services/oboeservice/AAudioCommandQueue.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <queue>
+
+#include <aaudio/AAudio.h>
+#include <android-base/thread_annotations.h>
+
+namespace aaudio {
+
+typedef int32_t aaudio_command_opcode;
+
+class AAudioCommandParam {
+public:
+    AAudioCommandParam() = default;
+    virtual ~AAudioCommandParam() = default;
+};
+
+class AAudioCommand {
+public:
+    explicit AAudioCommand(
+            aaudio_command_opcode opCode, std::shared_ptr<AAudioCommandParam> param = nullptr,
+            bool waitForReply = false, int64_t timeoutNanos = 0)
+            : operationCode(opCode), parameter(param), isWaitingForReply(waitForReply),
+              timeoutNanoseconds(timeoutNanos) { }
+    virtual ~AAudioCommand() = default;
+
+    std::mutex lock;
+    std::condition_variable conditionVariable;
+
+    const aaudio_command_opcode operationCode;
+    std::shared_ptr<AAudioCommandParam> parameter;
+    bool isWaitingForReply GUARDED_BY(lock);
+    const int64_t timeoutNanoseconds;
+    aaudio_result_t result GUARDED_BY(lock) = AAUDIO_OK;
+};
+
+class AAudioCommandQueue {
+public:
+    AAudioCommandQueue() = default;
+    ~AAudioCommandQueue() = default;
+
+    /**
+     * Send a command to the command queue. If the command requires a reply, this call blocks
+     * until the reply arrives or the command's timeout expires.
+     *
+     * @param command the command to send to the command queue.
+     * @return the result of sending the command, or the result of executing the command if the
+     *         command waits for a reply, or AAUDIO_ERROR_TIMEOUT if the timeout expires.
+     */
+    aaudio_result_t sendCommand(std::shared_ptr<AAudioCommand> command);
+
+    /**
+     * Wait for the next available command, or until the timeout expires.
+     *
+     * @param timeoutNanos the maximum time to wait for the next command (0 means return
+     *                     immediately in any case), negative to wait forever.
+     * @return the next available command, or nullptr if there is none.
+     */
+    std::shared_ptr<AAudioCommand> waitForCommand(int64_t timeoutNanos = -1);
+
+    /**
+     * Start waiting for commands. Commands can only be pushed into the command queue after it
+     * starts waiting.
+     */
+    void startWaiting();
+
+    /**
+     * Force stop waiting for the next command.
+     */
+    void stopWaiting();
+
+private:
+    std::mutex mLock;
+    std::condition_variable mWaitWorkCond;
+
+    std::queue<std::shared_ptr<AAudioCommand>> mCommands GUARDED_BY(mLock);
+    bool mRunning GUARDED_BY(mLock) = false;
+};
+
+} // namespace aaudio
\ No newline at end of file
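A hypothetical usage sketch of the API declared above, assuming the AAudio headers and build environment; the DO_WORK opcode and the 1-second timeout are arbitrary values chosen for illustration.

// Hypothetical usage of AAudioCommandQueue (assumes AAudio headers and build setup).
#include <memory>
#include <thread>

#include "AAudioCommandQueue.h"

using namespace aaudio;

constexpr aaudio_command_opcode DO_WORK = 1;  // arbitrary opcode for illustration

void example() {
    AAudioCommandQueue queue;
    queue.startWaiting();  // commands can only be pushed after this

    // Worker: consume commands, fill in results, and wake the sender.
    std::thread worker([&queue] {
        while (auto command = queue.waitForCommand(/*timeoutNanos=*/1'000'000'000)) {
            std::scoped_lock<std::mutex> cl(command->lock);
            command->result = AAUDIO_OK;
            if (command->isWaitingForReply) {
                command->isWaitingForReply = false;
                command->conditionVariable.notify_one();
            }
        }
    });

    // Sender: blocks until the worker replies or the 1-second timeout expires.
    auto command = std::make_shared<AAudioCommand>(
            DO_WORK, /*param=*/nullptr, /*waitForReply=*/true, /*timeoutNanos=*/1'000'000'000);
    (void) queue.sendCommand(command);

    queue.stopWaiting();  // drains pending commands so the worker loop exits
    worker.join();
}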
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 40a664e..2679b2e 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -280,6 +280,22 @@
     AIDL_RETURN(serviceStream->unregisterAudioThread(clientThreadId));
 }
 
+Status AAudioService::exitStandby(int32_t streamHandle, Endpoint* endpoint, int32_t *_aidl_return) {
+    static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+    sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+    if (serviceStream.get() == nullptr) {
+        ALOGE("getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
+        AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+    }
+    AudioEndpointParcelable endpointParcelable;
+    aaudio_result_t result = serviceStream->exitStandby(&endpointParcelable);
+    if (result == AAUDIO_OK) {
+        *endpoint = std::move(endpointParcelable).parcelable();
+    }
+    AIDL_RETURN(result);
+}
+
 bool AAudioService::isCallerInService() {
     pid_t clientPid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mAudioClient.attributionSource.pid));
     uid_t clientUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAudioClient.attributionSource.uid));
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index 7c1b796..0a111fb 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -82,6 +82,9 @@
     binder::Status unregisterAudioThread(int32_t streamHandle, int32_t clientThreadId,
                                          int32_t* _aidl_return) override;
 
+    binder::Status exitStandby(int32_t streamHandle, ::aaudio::Endpoint* endpoint,
+                               int32_t* _aidl_return) override;
+
     aaudio_result_t startClient(aaudio::aaudio_handle_t streamHandle,
                                 const android::AudioClient& client,
                                 const audio_attributes_t *attr,
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 13dd3d3..390cd5c 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -59,6 +59,7 @@
     result << "    Device Id:            " << getDeviceId() << "\n";
     result << "    Sample Rate:          " << getSampleRate() << "\n";
     result << "    Channel Count:        " << getSamplesPerFrame() << "\n";
+    result << "    Channel Mask:         0x" << std::hex << getChannelMask() << std::dec << "\n";
     result << "    Format:               " << getFormat() << "\n";
     result << "    Frames Per Burst:     " << mFramesPerBurst << "\n";
     result << "    Usage:                " << getUsage() << "\n";
@@ -164,6 +165,10 @@
         configuration.getSamplesPerFrame() != getSamplesPerFrame()) {
         return false;
     }
+    if (configuration.getChannelMask() != AAUDIO_UNSPECIFIED &&
+        configuration.getChannelMask() != getChannelMask()) {
+        return false;
+    }
     return true;
 }
 
@@ -188,7 +193,9 @@
     if (direction == AAUDIO_DIRECTION_OUTPUT) {
         flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
                 | AAudioConvert_allowCapturePolicyToAudioFlagsMask(
-                        params->getAllowedCapturePolicy()));
+                        params->getAllowedCapturePolicy(),
+                        params->getSpatializationBehavior(),
+                        params->isContentSpatialized()));
     } else {
         flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
                 | AAudioConvert_privacySensitiveToAudioFlagsMask(params->isPrivacySensitive()));
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index a7f63d3..92004c5 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -77,6 +77,16 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
+    virtual aaudio_result_t standby() {
+        ALOGD("AAudioServiceEndpoint::standby() AAUDIO_ERROR_UNAVAILABLE");
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    virtual aaudio_result_t exitStandby(AudioEndpointParcelable* parcelable) {
+        ALOGD("AAudioServiceEndpoint::exitStandby() AAUDIO_ERROR_UNAVAILABLE");
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
     /**
      * @param positionFrames
      * @param timeNanos
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index bc769f0..95bd4bb 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -66,8 +66,7 @@
                 getFramesPerBurst(), timeoutNanos);
         if (result == AAUDIO_ERROR_DISCONNECTED) {
             ALOGD("%s() read() returned AAUDIO_ERROR_DISCONNECTED", __func__);
-            // We do not need the returned vector.
-            (void) AAudioServiceEndpointShared::disconnectRegisteredStreams();
+            AAudioServiceEndpointShared::handleDisconnectRegisteredStreamsAsync();
             break;
         } else if (result != getFramesPerBurst()) {
             ALOGW("callbackLoop() read %d / %d",
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index a08098c..a266d5b 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -80,16 +80,16 @@
 
     audio_format_t audioFormat = getFormat();
 
-    // FLOAT is not directly supported by the HAL so ask for a 32-bit.
-    if (audioFormat == AUDIO_FORMAT_PCM_FLOAT) {
-        // TODO remove these logs when finished debugging.
-        ALOGD("%s() change format from %d to 32_BIT", __func__, audioFormat);
-        audioFormat = AUDIO_FORMAT_PCM_32_BIT;
-    }
-
     result = openWithFormat(audioFormat);
     if (result == AAUDIO_OK) return result;
 
+    if (result == AAUDIO_ERROR_UNAVAILABLE && audioFormat == AUDIO_FORMAT_PCM_FLOAT) {
+        ALOGD("%s() FLOAT failed, perhaps due to format. Try again with 32_BIT", __func__);
+        audioFormat = AUDIO_FORMAT_PCM_32_BIT;
+        result = openWithFormat(audioFormat);
+    }
+    if (result == AAUDIO_OK) return result;
+
     if (result == AAUDIO_ERROR_UNAVAILABLE && audioFormat == AUDIO_FORMAT_PCM_32_BIT) {
         ALOGD("%s() 32_BIT failed, perhaps due to format. Try again with 24_BIT_PACKED", __func__);
         audioFormat = AUDIO_FORMAT_PCM_24_BIT_PACKED;
@@ -126,20 +126,15 @@
     }
     config.sample_rate = aaudioSampleRate;
 
-    int32_t aaudioSamplesPerFrame = getSamplesPerFrame();
-
     const aaudio_direction_t direction = getDirection();
 
+    config.channel_mask = AAudio_getChannelMaskForOpen(
+            getChannelMask(), getSamplesPerFrame(), direction == AAUDIO_DIRECTION_INPUT);
+
     if (direction == AAUDIO_DIRECTION_OUTPUT) {
-        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
-                              ? AUDIO_CHANNEL_OUT_STEREO
-                              : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
         mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
 
     } else if (direction == AAUDIO_DIRECTION_INPUT) {
-        config.channel_mask =  (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
-                               ? AUDIO_CHANNEL_IN_STEREO
-                               : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
         mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier
 
     } else {
@@ -191,65 +186,29 @@
     ALOGD("%s() deviceId = %d, sessionId = %d", __func__, getDeviceId(), getSessionId());
 
     // Create MMAP/NOIRQ buffer.
-    int32_t minSizeFrames = getBufferCapacity();
-    if (minSizeFrames <= 0) { // zero will get rejected
-        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
-    }
-    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
-    bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
-    if (status != OK) {
-        ALOGE("%s() - createMmapBuffer() failed with status %d %s",
-              __func__, status, strerror(-status));
-        result = AAUDIO_ERROR_UNAVAILABLE;
+    if (createMmapBuffer(&mAudioDataFileDescriptor) != AAUDIO_OK) {
         goto error;
-    } else {
-        ALOGD("%s() createMmapBuffer() buffer_size = %d fr, burst_size %d fr"
-                      ", Sharable FD: %s",
-              __func__,
-              mMmapBufferinfo.buffer_size_frames,
-              mMmapBufferinfo.burst_size_frames,
-              isBufferShareable ? "Yes" : "No");
-    }
-
-    setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
-    if (!isBufferShareable) {
-        // Exclusive mode can only be used by the service because the FD cannot be shared.
-        int32_t audioServiceUid =
-            VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
-        if ((mMmapClient.attributionSource.uid != audioServiceUid) &&
-            getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
-            ALOGW("%s() - exclusive FD cannot be used by client", __func__);
-            result = AAUDIO_ERROR_UNAVAILABLE;
-            goto error;
-        }
     }
 
     // Get information about the stream and pass it back to the caller.
-    setSamplesPerFrame((direction == AAUDIO_DIRECTION_OUTPUT)
-                       ? audio_channel_count_from_out_mask(config.channel_mask)
-                       : audio_channel_count_from_in_mask(config.channel_mask));
+    setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+            config.channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
+            AAudio_isChannelIndexMask(config.channel_mask)));
 
-    // AAudio creates a copy of this FD and retains ownership of the copy.
-    // Assume that AudioFlinger will close the original shared_memory_fd.
-    mAudioDataFileDescriptor.reset(dup(mMmapBufferinfo.shared_memory_fd));
-    if (mAudioDataFileDescriptor.get() == -1) {
-        ALOGE("%s() - could not dup shared_memory_fd", __func__);
-        result = AAUDIO_ERROR_INTERNAL;
-        goto error;
-    }
-    // Call to HAL to make sure the transport FD was able to be closed by binder.
-    // This is a tricky workaround for a problem in Binder.
-    // TODO:[b/192048842] When that problem is fixed we may be able to remove or change this code.
-    struct audio_mmap_position position;
-    mMmapStream->getMmapPosition(&position);
-
-    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
     setFormat(config.format);
     setSampleRate(config.sample_rate);
 
-    ALOGD("%s() actual rate = %d, channels = %d"
-          ", deviceId = %d, capacity = %d\n",
-          __func__, getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());
+    // If the position does not advance while the timestamp keeps advancing for more than a
+    // certain amount of time, the timestamp reported by the HAL may not be accurate. Here, a
+    // timestamp grace period is set to 5 bursts. We may want to update this value if OEMs
+    // report that it is too short.
+    static constexpr int kTimestampGraceBurstCount = 5;
+    mTimestampGracePeriodMs = ((int64_t) kTimestampGraceBurstCount * mFramesPerBurst
+            * AAUDIO_MILLIS_PER_SECOND) / getSampleRate();
+
+    ALOGD("%s() actual rate = %d, channels = %d channelMask = %#x, deviceId = %d, capacity = %d\n",
+          __func__, getSampleRate(), getSamplesPerFrame(), getChannelMask(),
+          deviceId, getBufferCapacity());
 
     ALOGD("%s() format = 0x%08x, frame size = %d, burst size = %d",
           __func__, getFormat(), calculateBytesPerFrame(), mFramesPerBurst);
@@ -320,6 +279,32 @@
     return result;
 }
 
+aaudio_result_t AAudioServiceEndpointMMAP::standby() {
+    if (mMmapStream == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    }
+    aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->standby());
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::exitStandby(AudioEndpointParcelable* parcelable) {
+    if (mMmapStream == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    }
+    mAudioDataFileDescriptor.reset();
+    aaudio_result_t result = createMmapBuffer(&mAudioDataFileDescriptor);
+    if (result == AAUDIO_OK) {
+        int32_t bytesPerFrame = calculateBytesPerFrame();
+        int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
+        int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+        parcelable->mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+        parcelable->mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+        parcelable->mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+        parcelable->mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
+    }
+    return result;
+}
+
 // Get free-running DSP or DMA hardware position from the HAL.
 aaudio_result_t AAudioServiceEndpointMMAP::getFreeRunningPosition(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
@@ -406,30 +391,150 @@
 /**
  * Get an immutable description of the data queue from the HAL.
  */
-aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(
+        AudioEndpointParcelable* parcelable)
 {
     // Gather information on the data queue based on HAL info.
     int32_t bytesPerFrame = calculateBytesPerFrame();
     int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
-    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
-    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
-    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
-    parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
+    int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+    parcelable->mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+    parcelable->mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+    parcelable->mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+    parcelable->mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
     return AAUDIO_OK;
 }
 
 aaudio_result_t AAudioServiceEndpointMMAP::getExternalPosition(uint64_t *positionFrames,
                                                                int64_t *timeNanos)
 {
-    if (!mExternalPositionSupported) {
-        return AAUDIO_ERROR_INVALID_STATE;
+    if (mHalExternalPositionStatus != AAUDIO_OK) {
+        return mHalExternalPositionStatus;
     }
-    status_t status = mMmapStream->getExternalPosition(positionFrames, timeNanos);
-    if (status == INVALID_OPERATION) {
-        // getExternalPosition is not supported. Set mExternalPositionSupported as false
+    uint64_t tempPositionFrames;
+    int64_t tempTimeNanos;
+    status_t status = mMmapStream->getExternalPosition(&tempPositionFrames, &tempTimeNanos);
+    if (status != OK) {
+        // getExternalPosition reports error. The HAL may not support the API. Cache the result
         // so that the call will not go to the HAL next time.
-        mExternalPositionSupported = false;
+        mHalExternalPositionStatus = AAudioConvert_androidToAAudioResult(status);
+        return mHalExternalPositionStatus;
     }
-    return AAudioConvert_androidToAAudioResult(status);
+
+    // If the HAL keeps reporting the same position or timestamp, the HAL may have trouble
+    // reporting a correct external position. In that case, we do not trust the values reported
+    // by the HAL. Ideally, we would stop querying the external position once the HAL fails to
+    // report a correct position within a period, but it may not be a good idea to query the
+    // system time too often. Instead, a maximum number of frozen external positions is defined:
+    // if the HAL keeps reporting the same timestamp or position that many times in a row, its
+    // values are no longer trusted.
+    static constexpr int kMaxFrozenCount = 20;
+    // If the HAL version is less than 7.0, getPresentationPosition is an optional API.
+    // If the HAL version is 7.0 or later, getPresentationPosition is a mandatory API.
+    // In that case, even when the returned status is NO_ERROR, it does not guarantee that
+    // the returned position is valid. Do a simple validation here, checking whether the
+    // position moves forward within half a second, so that this function can return an
+    // error if the validation fails. Note that this validation logic is not applied only
+    // to HAL versions older than 7.0, because there is a chance the HAL does not report
+    // the timestamp and position correctly.
+    if (mLastPositionFrames > tempPositionFrames) {
+        // If the position is going backwards, there must be something wrong with the HAL.
+        // In that case, we do not trust the values reported by the HAL.
+        ALOGW("%s position is going backwards, last position(%jd) current position(%jd)",
+              __func__, mLastPositionFrames, tempPositionFrames);
+        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
+        return mHalExternalPositionStatus;
+    } else if (mLastPositionFrames == tempPositionFrames) {
+        if (tempTimeNanos - mTimestampNanosForLastPosition >
+                AAUDIO_NANOS_PER_MILLISECOND * mTimestampGracePeriodMs) {
+            ALOGW("%s, the reported position is not changed within %d msec. "
+                  "Set the external position as not supported", __func__, mTimestampGracePeriodMs);
+            mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
+            return mHalExternalPositionStatus;
+        }
+        mFrozenPositionCount++;
+    } else {
+        mFrozenPositionCount = 0;
+    }
+
+    if (mTimestampNanosForLastPosition > tempTimeNanos) {
+        // If the timestamp is going backwards, there must be something wrong with the HAL.
+        // In that case, we do not trust the values reported by the HAL.
+        ALOGW("%s timestamp is going backwards, last timestamp(%jd), current timestamp(%jd)",
+              __func__, mTimestampNanosForLastPosition, tempTimeNanos);
+        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
+        return mHalExternalPositionStatus;
+    } else if (mTimestampNanosForLastPosition == tempTimeNanos) {
+        mFrozenTimestampCount++;
+    } else {
+        mFrozenTimestampCount = 0;
+    }
+
+    if (mFrozenTimestampCount + mFrozenPositionCount > kMaxFrozenCount) {
+        ALOGW("%s too many frozen external position from HAL.", __func__);
+        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
+        return mHalExternalPositionStatus;
+    }
+
+    mLastPositionFrames = tempPositionFrames;
+    mTimestampNanosForLastPosition = tempTimeNanos;
+
+    // Only update the timestamp and position when they look valid.
+    *positionFrames = tempPositionFrames;
+    *timeNanos = tempTimeNanos;
+    return mHalExternalPositionStatus;
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::createMmapBuffer(
+        android::base::unique_fd* fileDescriptor)
+{
+    memset(&mMmapBufferinfo, 0, sizeof(struct audio_mmap_buffer_info));
+    int32_t minSizeFrames = getBufferCapacity();
+    if (minSizeFrames <= 0) { // zero will get rejected
+        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
+    }
+    status_t status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+    bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
+    if (status != OK) {
+        ALOGE("%s() - createMmapBuffer() failed with status %d %s",
+              __func__, status, strerror(-status));
+        return AAUDIO_ERROR_UNAVAILABLE;
+    } else {
+        ALOGD("%s() createMmapBuffer() buffer_size = %d fr, burst_size %d fr"
+                      ", Sharable FD: %s",
+              __func__,
+              mMmapBufferinfo.buffer_size_frames,
+              mMmapBufferinfo.burst_size_frames,
+              isBufferShareable ? "Yes" : "No");
+    }
+
+    setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
+    if (!isBufferShareable) {
+        // Exclusive mode can only be used by the service because the FD cannot be shared.
+        int32_t audioServiceUid =
+            VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+        if ((mMmapClient.attributionSource.uid != audioServiceUid) &&
+            getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+            ALOGW("%s() - exclusive FD cannot be used by client", __func__);
+            return AAUDIO_ERROR_UNAVAILABLE;
+        }
+    }
+
+    // AAudio creates a copy of this FD and retains ownership of the copy.
+    // Assume that AudioFlinger will close the original shared_memory_fd.
+    fileDescriptor->reset(dup(mMmapBufferinfo.shared_memory_fd));
+    if (fileDescriptor->get() == -1) {
+        ALOGE("%s() - could not dup shared_memory_fd", __func__);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+
+    // Call to HAL to make sure the transport FD was able to be closed by binder.
+    // This is a tricky workaround for a problem in Binder.
+    // TODO:[b/192048842] When that problem is fixed we may be able to remove or change this code.
+    struct audio_mmap_position position;
+    mMmapStream->getMmapPosition(&position);
+
+    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
+
+    return AAUDIO_OK;
 }
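The frozen-position heuristic in getExternalPosition() above can be summarized by a small standalone sketch (illustrative only, with simplified names and a hard-coded grace period): positions or timestamps going backwards fail immediately, a frozen position fails once the grace period is exceeded, and too many identical reports in a row also fail.

// Illustrative sketch of the frozen-position heuristic (simplified, standalone).
#include <cstdint>
#include <cstdio>

struct PositionValidator {
    static constexpr int kMaxFrozenCount = 20;
    int64_t gracePeriodNanos;
    uint64_t lastPosition = 0;
    int64_t lastTimestampNanos = 0;
    int frozenPositionCount = 0;
    int frozenTimestampCount = 0;

    // Returns true if the (position, timestamp) pair still looks trustworthy.
    bool accept(uint64_t position, int64_t timestampNanos) {
        if (position < lastPosition || timestampNanos < lastTimestampNanos) {
            return false;  // going backwards: stop trusting the reported values
        }
        if (position == lastPosition) {
            if (timestampNanos - lastTimestampNanos > gracePeriodNanos) {
                return false;  // position frozen for longer than the grace period
            }
            frozenPositionCount++;
        } else {
            frozenPositionCount = 0;
        }
        if (timestampNanos == lastTimestampNanos) {
            frozenTimestampCount++;
        } else {
            frozenTimestampCount = 0;
        }
        if (frozenPositionCount + frozenTimestampCount > kMaxFrozenCount) {
            return false;  // too many identical reports in a row
        }
        lastPosition = position;
        lastTimestampNanos = timestampNanos;
        return true;
    }
};

int main() {
    PositionValidator v{50'000'000};           // 50 ms grace period (example value)
    printf("%d\n", v.accept(100, 1'000'000));  // 1 -> accepted
    printf("%d\n", v.accept(100, 2'000'000));  // 1 -> same position, still within grace period
    printf("%d\n", v.accept(90, 3'000'000));   // 0 -> position went backwards
    return 0;
}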
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 5a53885..3e7f2c7 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -64,6 +64,10 @@
 
     aaudio_result_t stopClient(audio_port_handle_t clientHandle)  override;
 
+    aaudio_result_t standby() override;
+
+    aaudio_result_t exitStandby(AudioEndpointParcelable* parcelable) override;
+
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
 
     aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
@@ -79,7 +83,7 @@
     void onRoutingChanged(audio_port_handle_t portHandle) override;
     // ------------------------------------------------------------------------------
 
-    aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable);
+    aaudio_result_t getDownDataDescription(AudioEndpointParcelable* parcelable);
 
     int64_t getHardwareTimeOffsetNanos() const {
         return mHardwareTimeOffsetNanos;
@@ -91,6 +95,8 @@
 
     aaudio_result_t openWithFormat(audio_format_t audioFormat);
 
+    aaudio_result_t createMmapBuffer(android::base::unique_fd* fileDescriptor);
+
     MonotonicCounter                          mFramesTransferred;
 
     // Interface to the AudioFlinger MMAP support.
@@ -106,7 +112,12 @@
 
     int64_t                                   mHardwareTimeOffsetNanos = 0; // TODO get from HAL
 
-    bool                                      mExternalPositionSupported = true;
+    aaudio_result_t                           mHalExternalPositionStatus = AAUDIO_OK;
+    uint64_t                                  mLastPositionFrames = 0;
+    int64_t                                   mTimestampNanosForLastPosition = 0;
+    int32_t                                   mTimestampGracePeriodMs;
+    int32_t                                   mFrozenPositionCount = 0;
+    int32_t                                   mFrozenTimestampCount = 0;
 
 };
 
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 4e46033..2a5939f 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -21,6 +21,7 @@
 #include <assert.h>
 #include <map>
 #include <mutex>
+#include <media/AudioSystem.h>
 #include <utils/Singleton.h>
 
 #include "AAudioEndpointManager.h"
@@ -51,7 +52,7 @@
         mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
                         getStreamInternal()->getFramesPerBurst());
 
-        int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
+        int32_t burstsPerBuffer = AudioSystem::getAAudioMixerBurstCount();
         if (burstsPerBuffer == 0) {
             mLatencyTuningEnabled = true;
             burstsPerBuffer = BURSTS_PER_BUFFER_DEFAULT;
@@ -146,8 +147,7 @@
                                             getFramesPerBurst(), timeoutNanos);
         if (result == AAUDIO_ERROR_DISCONNECTED) {
             ALOGD("%s() write() returned AAUDIO_ERROR_DISCONNECTED", __func__);
-            // We do not need the returned vector.
-            (void) AAudioServiceEndpointShared::disconnectRegisteredStreams();
+            AAudioServiceEndpointShared::handleDisconnectRegisteredStreamsAsync();
             break;
         } else if (result != getFramesPerBurst()) {
             ALOGW("callbackLoop() wrote %d / %d",
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 5fbcadb..dd421fe 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -78,7 +78,7 @@
     result = mStreamInternal->open(builder);
 
     setSampleRate(mStreamInternal->getSampleRate());
-    setSamplesPerFrame(mStreamInternal->getSamplesPerFrame());
+    setChannelMask(mStreamInternal->getChannelMask());
     setDeviceId(mStreamInternal->getDeviceId());
     setSessionId(mStreamInternal->getSessionId());
     setFormat(AUDIO_FORMAT_PCM_FLOAT); // force for mixer
@@ -214,3 +214,12 @@
     }
     return result;
 }
+
+void AAudioServiceEndpointShared::handleDisconnectRegisteredStreamsAsync() {
+    android::sp<AAudioServiceEndpointShared> holdEndpoint(this);
+    std::thread asyncTask([holdEndpoint]() {
+        // We do not need the returned vector.
+        holdEndpoint->disconnectRegisteredStreams();
+    });
+    asyncTask.detach();
+}
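handleDisconnectRegisteredStreamsAsync() above keeps the endpoint alive by capturing a strong reference inside a detached task. A standard-library analogue of that pattern, for illustration only (the real code uses android::sp and RefBase):

// Standard-library analogue of the detached-task keep-alive pattern (illustrative).
#include <chrono>
#include <cstdio>
#include <memory>
#include <thread>

class Endpoint : public std::enable_shared_from_this<Endpoint> {
public:
    void disconnectRegisteredStreams() { std::puts("disconnecting streams"); }

    void handleDisconnectAsync() {
        // Capture a strong reference so the object outlives the detached task.
        std::shared_ptr<Endpoint> holdEndpoint = shared_from_this();
        std::thread([holdEndpoint] {
            holdEndpoint->disconnectRegisteredStreams();
        }).detach();
    }
};

int main() {
    std::make_shared<Endpoint>()->handleDisconnectAsync();
    // Give the detached task a moment to run before the process exits (demo only).
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    return 0;
}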
diff --git a/services/oboeservice/AAudioServiceEndpointShared.h b/services/oboeservice/AAudioServiceEndpointShared.h
index 8357567..3e760c4 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.h
+++ b/services/oboeservice/AAudioServiceEndpointShared.h
@@ -69,6 +69,8 @@
 
     aaudio_result_t          stopSharingThread();
 
+    void                     handleDisconnectRegisteredStreamsAsync();
+
     // An MMAP stream that is shared by multiple clients.
     android::sp<AudioStreamInternal> mStreamInternal;
 
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 34ddd4d..9f48f80 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -34,20 +34,23 @@
 #include "AAudioService.h"
 #include "AAudioServiceEndpoint.h"
 #include "AAudioServiceStreamBase.h"
-#include "TimestampScheduler.h"
 
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
 
 using content::AttributionSourceState;
 
+static const int64_t TIMEOUT_NANOS = 3LL * 1000 * 1000 * 1000;
+// If the stream is idle for more than `IDLE_TIMEOUT_NANOS`, the stream will be put into standby.
+static const int64_t IDLE_TIMEOUT_NANOS = 3LL * 1000 * 1000 * 1000;
+
 /**
  * Base class for streams in the service.
  * @return
  */
 
 AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
-        : mTimestampThread("AATime")
+        : mCommandThread("AACommand")
         , mAtomicStreamTimestamp()
         , mAudioService(audioService) {
     mMmapClient.attributionSource = AttributionSourceState();
@@ -70,10 +73,18 @@
                         || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED),
                         "service stream %p still open, state = %d",
                         this, getState());
+
+    // Stop the command thread before destroying.
+    if (mThreadEnabled) {
+        mThreadEnabled = false;
+        mCommandQueue.stopWaiting();
+        mCommandThread.stop();
+    }
 }
 
 std::string AAudioServiceStreamBase::dumpHeader() {
-    return std::string("    T   Handle   UId   Port Run State Format Burst Chan Capacity");
+    return std::string(
+            "    T   Handle   UId   Port Run State Format Burst Chan Mask     Capacity");
 }
 
 std::string AAudioServiceStreamBase::dump() const {
@@ -88,6 +99,7 @@
     result << std::setw(7) << getFormat();
     result << std::setw(6) << mFramesPerBurst;
     result << std::setw(5) << getSamplesPerFrame();
+    result << std::setw(8) << std::hex << getChannelMask() << std::dec;
     result << std::setw(9) << getBufferCapacity();
 
     return result.str();
@@ -164,16 +176,36 @@
         mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
         copyFrom(*mServiceEndpoint);
     }
+
+    // Make sure this object does not get deleted before the run() method
+    // can protect it by making a strong pointer.
+    mCommandQueue.startWaiting();
+    mThreadEnabled = true;
+    incStrong(nullptr); // See run() method.
+    result = mCommandThread.start(this);
+    if (result != AAUDIO_OK) {
+        decStrong(nullptr); // run() can't do it so we have to do it here.
+        goto error;
+    }
     return result;
 
 error:
-    close();
+    closeAndClear();
+    mThreadEnabled = false;
+    mCommandQueue.stopWaiting();
+    mCommandThread.stop();
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return close_l();
+    aaudio_result_t result = sendCommand(CLOSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+
+    // Stop the command thread as the stream is closed.
+    mThreadEnabled = false;
+    mCommandQueue.stopWaiting();
+    mCommandThread.stop();
+
+    return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close_l() {
@@ -181,29 +213,10 @@
         return AAUDIO_OK;
     }
 
-    // This will call stopTimestampThread() and also stop the stream,
-    // just in case it was not already stopped.
+    // This will stop the stream, just in case it was not already stopped.
     stop_l();
 
-    aaudio_result_t result = AAUDIO_OK;
-    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
-    if (endpoint == nullptr) {
-        result = AAUDIO_ERROR_INVALID_STATE;
-    } else {
-        endpoint->unregisterStream(this);
-        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
-        endpointManager.closeEndpoint(endpoint);
-
-        // AAudioService::closeStream() prevents two threads from closing at the same time.
-        mServiceEndpoint.clear(); // endpoint will hold the pointer after this method returns.
-    }
-
-    setState(AAUDIO_STREAM_STATE_CLOSED);
-
-    mediametrics::LogItem(mMetricsId)
-        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CLOSE)
-        .record();
-    return result;
+    return closeAndClear();
 }
 
 aaudio_result_t AAudioServiceStreamBase::startDevice() {
@@ -222,8 +235,10 @@
  * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
  */
 aaudio_result_t AAudioServiceStreamBase::start() {
-    std::lock_guard<std::mutex> lock(mLock);
+    return sendCommand(START, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+}
 
+aaudio_result_t AAudioServiceStreamBase::start_l() {
     const int64_t beginNs = AudioClock::getNanoseconds();
     aaudio_result_t result = AAUDIO_OK;
 
@@ -234,6 +249,12 @@
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
+    if (mStandby) {
+        ALOGW("%s() the stream is standby, return ERROR_STANDBY, "
+              "expecting the client call exitStandby before start", __func__);
+        return AAUDIO_ERROR_STANDBY;
+    }
+
     mediametrics::Defer defer([&] {
         mediametrics::LogItem(mMetricsId)
             .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_START)
@@ -259,15 +280,6 @@
     // This should happen at the end of the start.
     sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
     setState(AAUDIO_STREAM_STATE_STARTED);
-    mThreadEnabled.store(true);
-    // Make sure this object does not get deleted before the run() method
-    // can protect it by making a strong pointer.
-    incStrong(nullptr); // See run() method.
-    result = mTimestampThread.start(this);
-    if (result != AAUDIO_OK) {
-        decStrong(nullptr); // run() can't do it so we have to do it here.
-        goto error;
-    }
 
     return result;
 
@@ -277,8 +289,7 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return pause_l();
+    return sendCommand(PAUSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause_l() {
@@ -296,12 +307,6 @@
             .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)result)
             .record(); });
 
-    result = stopTimestampThread();
-    if (result != AAUDIO_OK) {
-        disconnect_l();
-        return result;
-    }
-
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
         ALOGE("%s() has no endpoint", __func__);
@@ -320,8 +325,7 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::stop() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return stop_l();
+    return sendCommand(STOP, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
 aaudio_result_t AAudioServiceStreamBase::stop_l() {
@@ -341,12 +345,6 @@
 
     setState(AAUDIO_STREAM_STATE_STOPPING);
 
-    // Temporarily unlock because we are joining the timestamp thread and it may try
-    // to acquire mLock.
-    mLock.unlock();
-    result = stopTimestampThread();
-    mLock.lock();
-
     if (result != AAUDIO_OK) {
         disconnect_l();
         return result;
@@ -371,17 +369,11 @@
     return result;
 }
 
-aaudio_result_t AAudioServiceStreamBase::stopTimestampThread() {
-    aaudio_result_t result = AAUDIO_OK;
-    // clear flag that tells thread to loop
-    if (mThreadEnabled.exchange(false)) {
-        result = mTimestampThread.stop();
-    }
-    return result;
+aaudio_result_t AAudioServiceStreamBase::flush() {
+    return sendCommand(FLUSH, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
-aaudio_result_t AAudioServiceStreamBase::flush() {
-    std::lock_guard<std::mutex> lock(mLock);
+aaudio_result_t AAudioServiceStreamBase::flush_l() {
     aaudio_result_t result = AAudio_isFlushAllowed(getState());
     if (result != AAUDIO_OK) {
         return result;
@@ -402,48 +394,122 @@
     return AAUDIO_OK;
 }
 
-// implement Runnable, periodically send timestamps to client
+// implement Runnable, periodically send timestamps to client and process commands from queue.
 __attribute__((no_sanitize("integer")))
 void AAudioServiceStreamBase::run() {
-    ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
+    ALOGD("%s() %s entering >>>>>>>>>>>>>> COMMANDS", __func__, getTypeText());
     // Hold onto the ref counted stream until the end.
     android::sp<AAudioServiceStreamBase> holdStream(this);
     TimestampScheduler timestampScheduler;
+    int64_t nextTime;
+    int64_t standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
     // Balance the incStrong from when the thread was launched.
     holdStream->decStrong(nullptr);
 
-    timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
-    timestampScheduler.start(AudioClock::getNanoseconds());
-    int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+    // Take mLock for the lifetime of the command thread. All the operations below must be
+    // able to run while holding the lock.
+    std::scoped_lock<std::mutex> _l(mLock);
+
     int32_t loopCount = 0;
-    aaudio_result_t result = AAUDIO_OK;
-    while(mThreadEnabled.load()) {
+    while (mThreadEnabled.load()) {
         loopCount++;
-        if (AudioClock::getNanoseconds() >= nextTime) {
-            result = sendCurrentTimestamp();
-            if (result != AAUDIO_OK) {
-                ALOGE("%s() timestamp thread got result = %d", __func__, result);
-                break;
+        int64_t timeoutNanos = -1;
+        if (isRunning() || (isIdle_l() && !isStandby_l())) {
+            timeoutNanos = (isRunning() ? nextTime : standbyTime) - AudioClock::getNanoseconds();
+            timeoutNanos = std::max<int64_t>(0, timeoutNanos);
+        }
+
+        auto command = mCommandQueue.waitForCommand(timeoutNanos);
+        if (!mThreadEnabled) {
+            // Break the loop if the thread is disabled.
+            break;
+        }
+
+        if (isRunning() && AudioClock::getNanoseconds() >= nextTime) {
+            // It is time to update timestamp.
+            if (sendCurrentTimestamp_l() != AAUDIO_OK) {
+                ALOGE("Failed to send current timestamp, stop updating timestamp");
+                disconnect_l();
+            } else {
+                nextTime = timestampScheduler.nextAbsoluteTime();
             }
-            nextTime = timestampScheduler.nextAbsoluteTime();
-        } else  {
-            // Sleep until it is time to send the next timestamp.
-            // TODO Wait for a signal with a timeout so that we can stop more quickly.
-            AudioClock::sleepUntilNanoTime(nextTime);
+        }
+        if (isIdle_l() && AudioClock::getNanoseconds() >= standbyTime) {
+            standby_l();
+        }
+
+        if (command != nullptr) {
+            std::scoped_lock<std::mutex> _commandLock(command->lock);
+            switch (command->operationCode) {
+                case START:
+                    command->result = start_l();
+                    timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
+                    timestampScheduler.start(AudioClock::getNanoseconds());
+                    nextTime = timestampScheduler.nextAbsoluteTime();
+                    break;
+                case PAUSE:
+                    command->result = pause_l();
+                    standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
+                    break;
+                case STOP:
+                    command->result = stop_l();
+                    standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
+                    break;
+                case FLUSH:
+                    command->result = flush_l();
+                    break;
+                case CLOSE:
+                    command->result = close_l();
+                    break;
+                case DISCONNECT:
+                    disconnect_l();
+                    break;
+                case REGISTER_AUDIO_THREAD: {
+                    RegisterAudioThreadParam *param =
+                            (RegisterAudioThreadParam *) command->parameter.get();
+                    command->result =
+                            param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                             : registerAudioThread_l(param->mOwnerPid,
+                                                                     param->mClientThreadId,
+                                                                     param->mPriority);
+                }
+                    break;
+                case UNREGISTER_AUDIO_THREAD: {
+                    UnregisterAudioThreadParam *param =
+                            (UnregisterAudioThreadParam *) command->parameter.get();
+                    command->result =
+                            param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                             : unregisterAudioThread_l(param->mClientThreadId);
+                }
+                    break;
+                case GET_DESCRIPTION: {
+                    GetDescriptionParam *param = (GetDescriptionParam *) command->parameter.get();
+                    command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                                        : getDescription_l(param->mParcelable);
+                }
+                    break;
+                case EXIT_STANDBY: {
+                    ExitStandbyParam *param = (ExitStandbyParam *) command->parameter.get();
+                    command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                                       : exitStandby_l(param->mParcelable);
+                    standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
+                } break;
+                default:
+                    ALOGE("Invalid command op code: %d", command->operationCode);
+                    break;
+            }
+            if (command->isWaitingForReply) {
+                command->isWaitingForReply = false;
+                command->conditionVariable.notify_one();
+            }
         }
     }
-    // This was moved from the calls in stop_l() and pause_l(), which could cause a deadlock
-    // if it resulted in a call to disconnect.
-    if (result == AAUDIO_OK) {
-        (void) sendCurrentTimestamp();
-    }
-    ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< TIMESTAMPS",
+    ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< COMMANDS",
           __func__, getTypeText(), loopCount);
 }
 
 void AAudioServiceStreamBase::disconnect() {
-    std::lock_guard<std::mutex> lock(mLock);
-    disconnect_l();
+    sendCommand(DISCONNECT);
 }
 
 void AAudioServiceStreamBase::disconnect_l() {
@@ -459,15 +525,21 @@
     }
 }
 
-aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId,
-        int priority) {
-    std::lock_guard<std::mutex> lock(mLock);
+aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId, int priority) {
+    const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+    return sendCommand(REGISTER_AUDIO_THREAD,
+            std::make_shared<RegisterAudioThreadParam>(ownerPid, clientThreadId, priority),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+}
+
+aaudio_result_t AAudioServiceStreamBase::registerAudioThread_l(
+        pid_t ownerPid, pid_t clientThreadId, int priority) {
     aaudio_result_t result = AAUDIO_OK;
     if (getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
         ALOGE("AAudioService::registerAudioThread(), thread already registered");
         result = AAUDIO_ERROR_INVALID_STATE;
     } else {
-        const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
         setRegisteredThread(clientThreadId);
         int err = android::requestPriority(ownerPid, clientThreadId,
                                            priority, true /* isForApp */);
@@ -481,7 +553,13 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread(pid_t clientThreadId) {
-    std::lock_guard<std::mutex> lock(mLock);
+    return sendCommand(UNREGISTER_AUDIO_THREAD,
+            std::make_shared<UnregisterAudioThreadParam>(clientThreadId),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+}
+
+aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread_l(pid_t clientThreadId) {
     aaudio_result_t result = AAUDIO_OK;
     if (getRegisteredThread() != clientThreadId) {
         ALOGE("%s(), wrong thread", __func__);
@@ -550,7 +628,7 @@
     return sendServiceEvent(AAUDIO_SERVICE_EVENT_XRUN, (int64_t) xRunCount);
 }
 
-aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
+aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp_l() {
     AAudioServiceMessage command;
     // It is not worth filling up the queue with timestamps.
     // That can cause the stream to get suspended.
@@ -560,8 +638,8 @@
     }
 
     // Send a timestamp for the clock model.
-    aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
-                                                    &command.timestamp.timestamp);
+    aaudio_result_t result = getFreeRunningPosition_l(&command.timestamp.position,
+                                                      &command.timestamp.timestamp);
     if (result == AAUDIO_OK) {
         ALOGV("%s() SERVICE  %8lld at %lld", __func__,
               (long long) command.timestamp.position,
@@ -571,8 +649,8 @@
 
         if (result == AAUDIO_OK) {
             // Send a hardware timestamp for presentation time.
-            result = getHardwareTimestamp(&command.timestamp.position,
-                                          &command.timestamp.timestamp);
+            result = getHardwareTimestamp_l(&command.timestamp.position,
+                                            &command.timestamp.timestamp);
             if (result == AAUDIO_OK) {
                 ALOGV("%s() HARDWARE %8lld at %lld", __func__,
                       (long long) command.timestamp.position,
@@ -594,7 +672,14 @@
  * used to communicate with the underlying HAL or Service.
  */
 aaudio_result_t AAudioServiceStreamBase::getDescription(AudioEndpointParcelable &parcelable) {
-    std::lock_guard<std::mutex> lock(mLock);
+    return sendCommand(
+            GET_DESCRIPTION,
+            std::make_shared<GetDescriptionParam>(&parcelable),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+}
+
+aaudio_result_t AAudioServiceStreamBase::getDescription_l(AudioEndpointParcelable* parcelable) {
     {
         std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
         if (mUpMessageQueue == nullptr) {
@@ -603,11 +688,50 @@
         }
         // Gather information on the message queue.
         mUpMessageQueue->fillParcelable(parcelable,
-                                        parcelable.mUpMessageQueueParcelable);
+                                        parcelable->mUpMessageQueueParcelable);
     }
-    return getAudioDataDescription(parcelable);
+    return getAudioDataDescription_l(parcelable);
+}
+
+aaudio_result_t AAudioServiceStreamBase::exitStandby(AudioEndpointParcelable *parcelable) {
+    auto command = std::make_shared<AAudioCommand>(
+            EXIT_STANDBY,
+            std::make_shared<ExitStandbyParam>(parcelable),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
 }
 
 void AAudioServiceStreamBase::onVolumeChanged(float volume) {
     sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
 }
+
+aaudio_result_t AAudioServiceStreamBase::sendCommand(aaudio_command_opcode opCode,
+                                                     std::shared_ptr<AAudioCommandParam> param,
+                                                     bool waitForReply,
+                                                     int64_t timeoutNanos) {
+    return mCommandQueue.sendCommand(std::make_shared<AAudioCommand>(
+            opCode, param, waitForReply, timeoutNanos));
+}
+
+aaudio_result_t AAudioServiceStreamBase::closeAndClear() {
+    aaudio_result_t result = AAUDIO_OK;
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        result = AAUDIO_ERROR_INVALID_STATE;
+    } else {
+        endpoint->unregisterStream(this);
+        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+        endpointManager.closeEndpoint(endpoint);
+
+        // AAudioService::closeStream() prevents two threads from closing at the same time.
+        mServiceEndpoint.clear(); // endpoint will hold the pointer after this method returns.
+    }
+
+    setState(AAUDIO_STREAM_STATE_CLOSED);
+
+    mediametrics::LogItem(mMetricsId)
+        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CLOSE)
+        .record();
+    return result;
+}
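The command loop in run() above picks its wait timeout from whichever deadline is pending. A small standalone sketch of that selection logic, with hypothetical names and values:

// Illustrative sketch of the timeout selection in the command loop above:
// wait until the next timestamp is due while running, until the standby deadline
// while idle (and not yet in standby), otherwise wait forever (-1).
#include <algorithm>
#include <cstdint>
#include <cstdio>

int64_t commandWaitTimeoutNanos(bool running, bool idle, bool standby,
                                int64_t nowNanos, int64_t nextTimestampNanos,
                                int64_t standbyDeadlineNanos) {
    if (running) {
        return std::max<int64_t>(0, nextTimestampNanos - nowNanos);
    }
    if (idle && !standby) {
        return std::max<int64_t>(0, standbyDeadlineNanos - nowNanos);
    }
    return -1;  // no deadline pending: block until the next command arrives
}

int main() {
    // Running stream, next timestamp due in 5 ms.
    printf("%lld\n", (long long) commandWaitTimeoutNanos(true, false, false,
                                                         0, 5'000'000, 0));
    // Idle stream, standby deadline already passed: wake immediately.
    printf("%lld\n", (long long) commandWaitTimeoutNanos(false, true, false,
                                                         10, 0, 5));
    // Already in standby: wait indefinitely for the next command.
    printf("%lld\n", (long long) commandWaitTimeoutNanos(false, true, true,
                                                         0, 0, 0));
    return 0;
}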
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 976996d..b2ba725 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -33,8 +33,10 @@
 #include "utility/AAudioUtilities.h"
 #include "utility/AudioClock.h"
 
-#include "SharedRingBuffer.h"
+#include "AAudioCommandQueue.h"
 #include "AAudioThread.h"
+#include "SharedRingBuffer.h"
+#include "TimestampScheduler.h"
 
 namespace android {
     class AAudioService;
@@ -114,6 +116,11 @@
      */
     aaudio_result_t flush() EXCLUDES(mLock);
 
+    /**
+     * Exit standby mode. The MMAP buffer will be reallocated.
+     */
+    aaudio_result_t exitStandby(AudioEndpointParcelable *parcelable) EXCLUDES(mLock);
+
     virtual aaudio_result_t startClient(const android::AudioClient& client,
                                         const audio_attributes_t *attr __unused,
                                         audio_port_handle_t *clientHandle __unused) {
@@ -235,10 +242,46 @@
     aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
                          aaudio_sharing_mode_t sharingMode);
 
+    aaudio_result_t start_l() REQUIRES(mLock);
     virtual aaudio_result_t close_l() REQUIRES(mLock);
     virtual aaudio_result_t pause_l() REQUIRES(mLock);
     virtual aaudio_result_t stop_l() REQUIRES(mLock);
     void disconnect_l() REQUIRES(mLock);
+    aaudio_result_t flush_l() REQUIRES(mLock);
+
+    class RegisterAudioThreadParam : public AAudioCommandParam {
+    public:
+        RegisterAudioThreadParam(pid_t ownerPid, pid_t clientThreadId, int priority)
+                : AAudioCommandParam(), mOwnerPid(ownerPid),
+                  mClientThreadId(clientThreadId), mPriority(priority) { }
+        ~RegisterAudioThreadParam() = default;
+
+        pid_t mOwnerPid;
+        pid_t mClientThreadId;
+        int mPriority;
+    };
+    aaudio_result_t registerAudioThread_l(
+            pid_t ownerPid, pid_t clientThreadId, int priority) REQUIRES(mLock);
+
+    class UnregisterAudioThreadParam : public AAudioCommandParam {
+    public:
+        UnregisterAudioThreadParam(pid_t clientThreadId)
+                : AAudioCommandParam(), mClientThreadId(clientThreadId) { }
+        ~UnregisterAudioThreadParam() = default;
+
+        pid_t mClientThreadId;
+    };
+    aaudio_result_t unregisterAudioThread_l(pid_t clientThreadId) REQUIRES(mLock);
+
+    class GetDescriptionParam : public AAudioCommandParam {
+    public:
+        GetDescriptionParam(AudioEndpointParcelable* parcelable)
+                : AAudioCommandParam(), mParcelable(parcelable) { }
+        ~GetDescriptionParam() = default;
+
+        AudioEndpointParcelable* mParcelable;
+    };
+    aaudio_result_t getDescription_l(AudioEndpointParcelable* parcelable) REQUIRES(mLock);
 
     void setState(aaudio_stream_state_t state);
 
@@ -250,7 +293,7 @@
 
     aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
 
-    aaudio_result_t sendCurrentTimestamp() EXCLUDES(mLock);
+    aaudio_result_t sendCurrentTimestamp_l() REQUIRES(mLock);
 
     aaudio_result_t sendXRunCount(int32_t xRunCount);
 
@@ -259,11 +302,13 @@
      * @param timeNanos
      * @return AAUDIO_OK or AAUDIO_ERROR_UNAVAILABLE or other negative error
      */
-    virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
+    virtual aaudio_result_t getFreeRunningPosition_l(
+            int64_t *positionFrames, int64_t *timeNanos) = 0;
 
-    virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) = 0;
+    virtual aaudio_result_t getHardwareTimestamp_l(int64_t *positionFrames, int64_t *timeNanos) = 0;
 
-    virtual aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) = 0;
+    virtual aaudio_result_t getAudioDataDescription_l(AudioEndpointParcelable* parcelable) = 0;
+
 
     aaudio_stream_state_t   mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
@@ -274,14 +319,53 @@
         mDisconnected = flag;
     }
 
+    virtual aaudio_result_t standby_l() REQUIRES(mLock) {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+    class ExitStandbyParam : public AAudioCommandParam {
+    public:
+        ExitStandbyParam(AudioEndpointParcelable* parcelable)
+                : AAudioCommandParam(), mParcelable(parcelable) { }
+        ~ExitStandbyParam() = default;
+
+        AudioEndpointParcelable* mParcelable;
+    };
+    virtual aaudio_result_t exitStandby_l(
+            AudioEndpointParcelable* parcelable __unused) REQUIRES(mLock) {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+    bool isStandby_l() const REQUIRES(mLock) {
+        return mStandby;
+    }
+    void setStandby_l(bool standby) REQUIRES(mLock) {
+        mStandby = standby;
+    }
+
+    bool isIdle_l() const REQUIRES(mLock) {
+        return mState == AAUDIO_STREAM_STATE_OPEN || mState == AAUDIO_STREAM_STATE_PAUSED
+                || mState == AAUDIO_STREAM_STATE_STOPPED;
+    }
+
     pid_t                   mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
     std::mutex              mUpMessageQueueLock;
     std::shared_ptr<SharedRingBuffer> mUpMessageQueue;
 
-    AAudioThread            mTimestampThread;
-    // This is used by one thread to tell another thread to exit. So it must be atomic.
+    enum : int32_t {
+        START,
+        PAUSE,
+        STOP,
+        FLUSH,
+        CLOSE,
+        DISCONNECT,
+        REGISTER_AUDIO_THREAD,
+        UNREGISTER_AUDIO_THREAD,
+        GET_DESCRIPTION,
+        EXIT_STANDBY,
+    };
+    AAudioThread            mCommandThread;
     std::atomic<bool>       mThreadEnabled{false};
+    AAudioCommandQueue      mCommandQueue;
 
     int32_t                 mFramesPerBurst = 0;
     android::AudioClient    mMmapClient; // set in open, used in MMAP start()
@@ -315,6 +399,13 @@
     aaudio_result_t sendServiceEvent(aaudio_service_event_t event,
                                      double dataDouble);
 
+    aaudio_result_t sendCommand(aaudio_command_opcode opCode,
+                                std::shared_ptr<AAudioCommandParam> param = nullptr,
+                                bool waitForReply = false,
+                                int64_t timeoutNanos = 0);
+
+    aaudio_result_t closeAndClear();
+
     /**
      * @return true if the queue is getting full.
      */
@@ -333,9 +424,13 @@
 
     bool                    mDisconnected GUARDED_BY(mLock) {false};
 
+    bool                    mStandby GUARDED_BY(mLock) = false;
+
 protected:
     // Locking order is important.
     // Acquire mLock before acquiring AAudioServiceEndpoint::mLockStreams
+    // The lock will be held by the command thread. All operations needing the lock must run from
+    // the command thread.
     std::mutex              mLock; // Prevent start/stop/close etcetera from colliding
 };
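To make the threading model described in the comment above concrete, here is a heavily simplified sketch of the pattern the header introduces (all names are illustrative; this is not the AAudioCommandQueue implementation, and reply/timeout handling is omitted): public entry points only enqueue an opcode plus an optional parameter object, and a single command thread pops each command and runs the locked *_l work, so the stream lock is only ever taken on that thread.

    #include <condition_variable>
    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <queue>
    #include <thread>

    // Illustrative stand-ins for AAudioCommandQueue / AAudioCommandParam.
    struct CommandParam { virtual ~CommandParam() = default; };
    struct RegisterThreadParam : CommandParam { int clientThreadId = 0; };

    enum class OpCode { START, STOP, REGISTER_AUDIO_THREAD, EXIT };

    struct Command {
        OpCode op;
        std::shared_ptr<CommandParam> param;
    };

    class CommandStream {
    public:
        CommandStream() : mThread([this] { commandLoop(); }) {}
        ~CommandStream() { sendCommand({OpCode::EXIT, nullptr}); mThread.join(); }

        // Public API: never touches stream state directly, only enqueues work.
        void start() { sendCommand({OpCode::START, nullptr}); }
        void registerAudioThread(int tid) {
            auto p = std::make_shared<RegisterThreadParam>();
            p->clientThreadId = tid;
            sendCommand({OpCode::REGISTER_AUDIO_THREAD, p});
        }

    private:
        void sendCommand(Command cmd) {
            std::lock_guard<std::mutex> lk(mQueueLock);
            mQueue.push(std::move(cmd));
            mCond.notify_one();
        }

        void commandLoop() {
            for (;;) {
                Command cmd{OpCode::EXIT, nullptr};
                {
                    std::unique_lock<std::mutex> lk(mQueueLock);
                    mCond.wait(lk, [this] { return !mQueue.empty(); });
                    cmd = mQueue.front();
                    mQueue.pop();
                }
                if (cmd.op == OpCode::EXIT) return;
                // Only this thread ever takes mLock, so start_l()/stop_l() etc.
                // cannot race with each other.
                std::lock_guard<std::mutex> lk(mLock);
                switch (cmd.op) {
                    case OpCode::START: std::puts("start_l()"); break;
                    case OpCode::STOP: std::puts("stop_l()"); break;
                    case OpCode::REGISTER_AUDIO_THREAD: {
                        auto* p = static_cast<RegisterThreadParam*>(cmd.param.get());
                        std::printf("registerAudioThread_l(%d)\n", p->clientThreadId);
                        break;
                    }
                    default: break;
                }
            }
        }

        std::mutex mQueueLock;
        std::condition_variable mCond;
        std::queue<Command> mQueue;
        std::mutex mLock;  // stream state lock, held only on the command thread
        std::thread mThread;
    };

    int main() {
        CommandStream s;
        s.start();
        s.registerAudioThread(1234);
    }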
 
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 57dc1ab..ec9b2e2 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -117,6 +117,35 @@
     return result;
 }
 
+aaudio_result_t AAudioServiceStreamMMAP::standby_l() {
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_result_t result = endpoint->standby();
+    if (result == AAUDIO_OK) {
+        setStandby_l(true);
+    }
+    return result;
+}
+
+aaudio_result_t AAudioServiceStreamMMAP::exitStandby_l(AudioEndpointParcelable* parcelable) {
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_result_t result = endpoint->exitStandby(parcelable);
+    if (result == AAUDIO_OK) {
+        setStandby_l(false);
+    } else {
+        ALOGE("%s failed, result %d, disconnecting stream.", __func__, result);
+        disconnect_l();
+    }
+    return result;
+}
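A rough sketch of the standby round trip these MMAP overrides implement, under illustrative names only (the real code goes through AAudioServiceEndpointMMAP and an AudioEndpointParcelable): entering standby releases the shared buffer and sets the standby flag, and exiting standby re-creates the buffer description for the client, disconnecting the stream if that fails.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <vector>

    // Illustrative placeholder for the parcelable that describes the shared buffer.
    struct BufferDescription { size_t sizeBytes = 0; };

    class MmapStreamSketch {
    public:
        int standby() {
            if (mStandby) return 0;     // already in standby
            mBuffer.reset();            // release the shared buffer while idle
            mStandby = true;
            return 0;
        }

        int exitStandby(BufferDescription* outDescription) {
            if (!mStandby) return 0;
            if (!reallocateBuffer()) {  // mirror the pattern above: on failure, disconnect
                disconnect();
                return -1;
            }
            outDescription->sizeBytes = mBuffer->size();
            mStandby = false;
            return 0;
        }

    private:
        // Placeholder for re-creating the shared memory region backing the stream.
        bool reallocateBuffer() {
            mBuffer = std::make_unique<std::vector<uint8_t>>(kBufferBytes);
            return mBuffer != nullptr;
        }
        void disconnect() { std::puts("stream disconnected"); }

        static constexpr size_t kBufferBytes = 8192;
        std::unique_ptr<std::vector<uint8_t>> mBuffer =
                std::make_unique<std::vector<uint8_t>>(kBufferBytes);
        bool mStandby = false;
    };

    int main() {
        MmapStreamSketch stream;
        BufferDescription desc;
        stream.standby();
        stream.exitStandby(&desc);
        std::printf("re-mapped %zu bytes\n", desc.sizeBytes);
    }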
+
 aaudio_result_t AAudioServiceStreamMMAP::startClient(const android::AudioClient& client,
                                                      const audio_attributes_t *attr,
                                                      audio_port_handle_t *clientHandle) {
@@ -141,7 +170,7 @@
 }
 
 // Get free-running DSP or DMA hardware position from the HAL.
-aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
+aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition_l(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
@@ -158,16 +187,15 @@
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
     } else if (result != AAUDIO_ERROR_UNAVAILABLE) {
-        disconnect();
+        disconnect_l();
     }
     return result;
 }
 
 // Get timestamp from presentation position.
 // If it fails, get timestamp that was written by getFreeRunningPosition()
-aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
+aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp_l(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
-
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
         ALOGE("%s() has no endpoint", __func__);
@@ -176,17 +204,17 @@
     sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
             static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
 
-    // Disable this code temporarily because the HAL is not returning
-    // a useful result.
-#if 0
     uint64_t position;
-    if (serviceEndpointMMAP->getExternalPosition(&position, timeNanos) == AAUDIO_OK) {
-        ALOGD("%s() getExternalPosition() says pos = %" PRIi64 ", time = %" PRIi64,
+    aaudio_result_t result = serviceEndpointMMAP->getExternalPosition(&position, timeNanos);
+    if (result == AAUDIO_OK) {
+        ALOGV("%s() getExternalPosition() says pos = %" PRIi64 ", time = %" PRIi64,
                 __func__, position, *timeNanos);
         *positionFrames = (int64_t) position;
         return AAUDIO_OK;
-    } else
-#endif
+    } else {
+        ALOGV("%s() getExternalPosition() returns error %d", __func__, result);
+    }
+
     if (mAtomicStreamTimestamp.isValid()) {
         Timestamp timestamp = mAtomicStreamTimestamp.read();
         *positionFrames = timestamp.getPosition();
@@ -198,8 +226,8 @@
 }
 
 // Get an immutable description of the data queue from the HAL.
-aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription(
-        AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription_l(
+        AudioEndpointParcelable* parcelable)
 {
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index 667465a..cd8c91e 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -71,12 +71,18 @@
 
     aaudio_result_t stop_l() REQUIRES(mLock) override;
 
-    aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
+    aaudio_result_t standby_l() REQUIRES(mLock) override;
 
-    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames,
-            int64_t *timeNanos) EXCLUDES(mLock) override;
+    aaudio_result_t exitStandby_l(AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
 
-    aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getAudioDataDescription_l(
+            AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
+
+    aaudio_result_t getFreeRunningPosition_l(int64_t *positionFrames,
+            int64_t *timeNanos) REQUIRES(mLock) override;
+
+    aaudio_result_t getHardwareTimestamp_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
     /**
      * Device specific startup.
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index c665cda..04fcd6d 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -164,11 +164,11 @@
         goto error;
     }
 
-    setSamplesPerFrame(configurationInput.getSamplesPerFrame());
-    if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
-        setSamplesPerFrame(endpoint->getSamplesPerFrame());
+    setChannelMask(configurationInput.getChannelMask());
+    if (getChannelMask() == AAUDIO_UNSPECIFIED) {
+        setChannelMask(endpoint->getChannelMask());
     } else if (getSamplesPerFrame() != endpoint->getSamplesPerFrame()) {
-        ALOGD("%s() mSamplesPerFrame = %d, need %d",
+        ALOGD("%s() mSamplesPerFrame = %#x, need %#x",
               __func__, getSamplesPerFrame(), endpoint->getSamplesPerFrame());
         result = AAUDIO_ERROR_OUT_OF_RANGE;
         goto error;
@@ -211,8 +211,8 @@
 /**
  * Get an immutable description of the data queue created by this service.
  */
-aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription(
-        AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription_l(
+        AudioEndpointParcelable* parcelable)
 {
     std::lock_guard<std::mutex> lock(audioDataQueueLock);
     if (mAudioDataQueue == nullptr) {
@@ -221,8 +221,8 @@
     }
     // Gather information on the data queue.
     mAudioDataQueue->fillParcelable(parcelable,
-                                    parcelable.mDownDataQueueParcelable);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
+                                    parcelable->mDownDataQueueParcelable);
+    parcelable->mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
     return AAUDIO_OK;
 }
 
@@ -231,8 +231,8 @@
 }
 
 // Get timestamp that was written by mixer or distributor.
-aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
-                                                                  int64_t *timeNanos) {
+aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition_l(int64_t *positionFrames,
+                                                                    int64_t *timeNanos) {
     // TODO Get presentation timestamp from the HAL
     if (mAtomicStreamTimestamp.isValid()) {
         Timestamp timestamp = mAtomicStreamTimestamp.read();
@@ -245,8 +245,8 @@
 }
 
 // Get timestamp from lower level service.
-aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames,
-                                                                int64_t *timeNanos) {
+aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp_l(int64_t *positionFrames,
+                                                                  int64_t *timeNanos) {
 
     int64_t position = 0;
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index 4fae5b4..78f9787 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -88,11 +88,14 @@
 
 protected:
 
-    aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
+    aaudio_result_t getAudioDataDescription_l(
+            AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
 
-    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getFreeRunningPosition_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
-    aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getHardwareTimestamp_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
     /**
      * @param requestedCapacityFrames
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
index 68496ac..549fa59 100644
--- a/services/oboeservice/AAudioThread.cpp
+++ b/services/oboeservice/AAudioThread.cpp
@@ -16,9 +16,10 @@
 
 #define LOG_TAG "AAudioThread"
 //#define LOG_NDEBUG 0
-#include <utils/Log.h>
 
-#include <pthread.h>
+#include <system_error>
+
+#include <utils/Log.h>
 
 #include <aaudio/AAudio.h>
 #include <utility/AAudioUtilities.h>
@@ -38,7 +39,7 @@
 }
 
 AAudioThread::~AAudioThread() {
-    ALOGE_IF(pthread_equal(pthread_self(), mThread),
+    ALOGE_IF(mThread.get_id() == std::this_thread::get_id(),
             "%s() destructor running in thread", __func__);
     ALOGE_IF(mHasThread, "%s() thread never joined", __func__);
 }
@@ -60,32 +61,16 @@
     }
 }
 
-// This is the entry point for the new thread created by createThread_l().
-// It converts the 'C' function call to a C++ method call.
-static void * AAudioThread_internalThreadProc(void *arg) {
-    AAudioThread *aaudioThread = (AAudioThread *) arg;
-    aaudioThread->dispatch();
-    return nullptr;
-}
-
 aaudio_result_t AAudioThread::start(Runnable *runnable) {
     if (mHasThread) {
         ALOGE("start() - mHasThread already true");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // mRunnable will be read by the new thread when it starts.
-    // pthread_create() forces a memory synchronization so mRunnable does not need to be atomic.
+    // mRunnable will be read by the new thread when it starts.
+    // std::thread construction synchronizes memory, so mRunnable does not need to be atomic.
     mRunnable = runnable;
-    int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
-    if (err != 0) {
-        ALOGE("start() - pthread_create() returned %d %s", err, strerror(err));
-        return AAudioConvert_androidToAAudioResult(-err);
-    } else {
-        int err = pthread_setname_np(mThread, mName);
-        ALOGW_IF((err != 0), "Could not set name of AAudioThread. err = %d", err);
-        mHasThread = true;
-        return AAUDIO_OK;
-    }
+    mHasThread = true;
+    mThread = std::thread(&AAudioThread::dispatch, this);
+    return AAUDIO_OK;
 }
 
 aaudio_result_t AAudioThread::stop() {
@@ -93,18 +78,18 @@
         ALOGE("stop() but no thread running");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Check to see if the thread is trying to stop itself.
-    if (pthread_equal(pthread_self(), mThread)) {
-        ALOGE("%s() attempt to pthread_join() from launched thread!", __func__);
-        return AAUDIO_ERROR_INTERNAL;
-    }
 
-    int err = pthread_join(mThread, nullptr);
-    if (err != 0) {
-        ALOGE("stop() - pthread_join() returned %d %s", err, strerror(err));
-        return AAudioConvert_androidToAAudioResult(-err);
-    } else {
+    if (mThread.get_id() == std::this_thread::get_id()) {
+        // The thread must not be joined by itself.
+        ALOGE("%s() attempt to join() from launched thread!", __func__);
+        return AAUDIO_ERROR_INTERNAL;
+    } else if (mThread.joinable()) {
+        // Double-check that the thread is joinable to avoid an exception from join().
+        mThread.join();
         mHasThread = false;
         return AAUDIO_OK;
+    } else {
+        ALOGE("%s() the thread is not joinable", __func__);
+        return AAUDIO_ERROR_INTERNAL;
     }
 }
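The pthread-to-std::thread conversion above boils down to the small wrapper pattern sketched here (simplified and with illustrative names; the real class also manages a Runnable and a thread name): start() launches the thread once, and stop() refuses to join from the launched thread itself, since self-join would deadlock and std::thread::join() reports it as resource_deadlock_would_occur.

    #include <cstdio>
    #include <thread>

    class ThreadWrapper {
    public:
        ~ThreadWrapper() {
            // Matches the ALOGE_IF checks above: warn if the thread was never joined.
            if (mHasThread) std::fprintf(stderr, "thread never joined\n");
        }

        bool start() {
            if (mHasThread) return false;            // already running
            mHasThread = true;
            mThread = std::thread([] { std::puts("dispatch()"); });
            return true;
        }

        bool stop() {
            if (!mHasThread) return false;           // nothing to join
            if (mThread.get_id() == std::this_thread::get_id()) {
                // A thread must never join itself; std::thread::join() would
                // throw std::system_error(resource_deadlock_would_occur).
                return false;
            }
            if (!mThread.joinable()) return false;   // already joined or detached
            mThread.join();
            mHasThread = false;
            return true;
        }

    private:
        bool mHasThread = false;
        std::thread mThread;
    };

    int main() {
        ThreadWrapper t;
        t.start();
        return t.stop() ? 0 : 1;
    }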
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index 08a8a98..b2774e0 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -18,7 +18,7 @@
 #define AAUDIO_THREAD_H
 
 #include <atomic>
-#include <pthread.h>
+#include <thread>
 
 #include <aaudio/AAudio.h>
 
@@ -37,7 +37,6 @@
 
 /**
  * Abstraction for a host dependent thread.
- * TODO Consider using Android "Thread" class or std::thread instead.
  */
 class AAudioThread
 {
@@ -73,7 +72,7 @@
 
     Runnable    *mRunnable = nullptr;
     bool         mHasThread = false;
-    pthread_t    mThread = {};
+    std::thread  mThread;
 
     static std::atomic<uint32_t> mNextThreadIndex;
     char         mName[16]; // max length for a pthread_name
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 4c58040..80e4296 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -27,6 +27,7 @@
 
     srcs: [
         "AAudioClientTracker.cpp",
+        "AAudioCommandQueue.cpp",
         "AAudioEndpointManager.cpp",
         "AAudioMixer.cpp",
         "AAudioService.cpp",
@@ -68,6 +69,8 @@
         "aaudio-aidl-cpp",
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
+        "packagemanager_aidl-cpp",
+        "android.media.audio.common.types-V1-cpp",
     ],
 
     export_shared_lib_headers: [
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index c1d4e16..fd2a454 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -85,9 +85,9 @@
     return AAUDIO_OK;
 }
 
-void SharedRingBuffer::fillParcelable(AudioEndpointParcelable &endpointParcelable,
+void SharedRingBuffer::fillParcelable(AudioEndpointParcelable* endpointParcelable,
                     RingBufferParcelable &ringBufferParcelable) {
-    int fdIndex = endpointParcelable.addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
+    int fdIndex = endpointParcelable->addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
     ringBufferParcelable.setupMemory(fdIndex,
                                      SHARED_RINGBUFFER_DATA_OFFSET,
                                      mDataMemorySizeInBytes,
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index c3a9bb7..cff1261 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -45,7 +45,7 @@
 
     aaudio_result_t allocate(android::fifo_frames_t bytesPerFrame, android::fifo_frames_t capacityInFrames);
 
-    void fillParcelable(AudioEndpointParcelable &endpointParcelable,
+    void fillParcelable(AudioEndpointParcelable* endpointParcelable,
                         RingBufferParcelable &ringBufferParcelable);
 
     /**
diff --git a/services/oboeservice/fuzzer/README.md b/services/oboeservice/fuzzer/README.md
index 00b85df..ae7af3eb 100644
--- a/services/oboeservice/fuzzer/README.md
+++ b/services/oboeservice/fuzzer/README.md
@@ -15,7 +15,7 @@
 4. InService
 5. DeviceId
 6. SampleRate
-7. SamplesPerFrame
+7. ChannelMask
 8. Direction
 9. SharingMode
 10. Usage
@@ -31,7 +31,7 @@
 | `InService`   | `bool` | Value obtained from FuzzedDataProvider |
 | `DeviceId`   | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
 | `SampleRate`   | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
-| `SamplesPerFrame` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
+| `ChannelMask` | `AAUDIO_UNSPECIFIED`, `AAUDIO_CHANNEL_INDEX_MASK_1`, `AAUDIO_CHANNEL_INDEX_MASK_2`, `AAUDIO_CHANNEL_INDEX_MASK_3`, `AAUDIO_CHANNEL_INDEX_MASK_4`, `AAUDIO_CHANNEL_INDEX_MASK_5`, `AAUDIO_CHANNEL_INDEX_MASK_6`, `AAUDIO_CHANNEL_INDEX_MASK_7`, `AAUDIO_CHANNEL_INDEX_MASK_8`, `AAUDIO_CHANNEL_INDEX_MASK_9`, `AAUDIO_CHANNEL_INDEX_MASK_10`, `AAUDIO_CHANNEL_INDEX_MASK_11`, `AAUDIO_CHANNEL_INDEX_MASK_12`, `AAUDIO_CHANNEL_INDEX_MASK_13`, `AAUDIO_CHANNEL_INDEX_MASK_14`, `AAUDIO_CHANNEL_INDEX_MASK_15`, `AAUDIO_CHANNEL_INDEX_MASK_16`, `AAUDIO_CHANNEL_INDEX_MASK_17`, `AAUDIO_CHANNEL_INDEX_MASK_18`, `AAUDIO_CHANNEL_INDEX_MASK_19`, `AAUDIO_CHANNEL_INDEX_MASK_20`, `AAUDIO_CHANNEL_INDEX_MASK_21`, `AAUDIO_CHANNEL_INDEX_MASK_22`, `AAUDIO_CHANNEL_INDEX_MASK_23`, `AAUDIO_CHANNEL_INDEX_MASK_24`, `AAUDIO_CHANNEL_MONO`, `AAUDIO_CHANNEL_STEREO`, `AAUDIO_CHANNEL_FRONT_BACK`, `AAUDIO_CHANNEL_2POINT0POINT2`, `AAUDIO_CHANNEL_2POINT1POINT2`, `AAUDIO_CHANNEL_3POINT0POINT2`, `AAUDIO_CHANNEL_3POINT1POINT2`, `AAUDIO_CHANNEL_5POINT1`, `AAUDIO_CHANNEL_MONO`, `AAUDIO_CHANNEL_STEREO`, `AAUDIO_CHANNEL_2POINT1`, `AAUDIO_CHANNEL_TRI`, `AAUDIO_CHANNEL_TRI_BACK`, `AAUDIO_CHANNEL_3POINT1`, `AAUDIO_CHANNEL_2POINT0POINT2`, `AAUDIO_CHANNEL_2POINT1POINT2`, `AAUDIO_CHANNEL_3POINT0POINT2`, `AAUDIO_CHANNEL_3POINT1POINT2`, `AAUDIO_CHANNEL_QUAD`, `AAUDIO_CHANNEL_QUAD_SIDE`, `AAUDIO_CHANNEL_SURROUND`, `AAUDIO_CHANNEL_PENTA`, `AAUDIO_CHANNEL_5POINT1`, `AAUDIO_CHANNEL_5POINT1_SIDE`, `AAUDIO_CHANNEL_5POINT1POINT2`, `AAUDIO_CHANNEL_5POINT1POINT4`, `AAUDIO_CHANNEL_6POINT1`, `AAUDIO_CHANNEL_7POINT1`, `AAUDIO_CHANNEL_7POINT1POINT2`, `AAUDIO_CHANNEL_7POINT1POINT4`, `AAUDIO_CHANNEL_9POINT1POINT4`, `AAUDIO_CHANNEL_9POINT1POINT6` | Value obtained from FuzzedDataProvider |
 | `Direction` | `AAUDIO_DIRECTION_OUTPUT`, `AAUDIO_DIRECTION_INPUT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
 | `SharingMode` | `AAUDIO_SHARING_MODE_EXCLUSIVE`, `AAUDIO_SHARING_MODE_SHARED` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
 | `Usage` | `AAUDIO_USAGE_MEDIA`, `AAUDIO_USAGE_VOICE_COMMUNICATION`, `AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING`, `AAUDIO_USAGE_ALARM`, `AAUDIO_USAGE_NOTIFICATION`, `AAUDIO_USAGE_NOTIFICATION_RINGTONE`, `AAUDIO_USAGE_NOTIFICATION_EVENT`, `AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY`, `AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE`, `AAUDIO_USAGE_ASSISTANCE_SONIFICATION`, `AAUDIO_USAGE_GAME`, `AAUDIO_USAGE_ASSISTANT`, `AAUDIO_SYSTEM_USAGE_EMERGENCY`, `AAUDIO_SYSTEM_USAGE_SAFETY`, `AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS`, `AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
diff --git a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
index 4bc661c..5e48955 100644
--- a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
+++ b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
@@ -68,10 +68,71 @@
     AAUDIO_INPUT_PRESET_UNPROCESSED,       AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE,
 };
 
+aaudio_channel_mask_t kAAudioChannelMasks[] = {
+    AAUDIO_UNSPECIFIED,
+    AAUDIO_CHANNEL_INDEX_MASK_1,
+    AAUDIO_CHANNEL_INDEX_MASK_2,
+    AAUDIO_CHANNEL_INDEX_MASK_3,
+    AAUDIO_CHANNEL_INDEX_MASK_4,
+    AAUDIO_CHANNEL_INDEX_MASK_5,
+    AAUDIO_CHANNEL_INDEX_MASK_6,
+    AAUDIO_CHANNEL_INDEX_MASK_7,
+    AAUDIO_CHANNEL_INDEX_MASK_8,
+    AAUDIO_CHANNEL_INDEX_MASK_9,
+    AAUDIO_CHANNEL_INDEX_MASK_10,
+    AAUDIO_CHANNEL_INDEX_MASK_11,
+    AAUDIO_CHANNEL_INDEX_MASK_12,
+    AAUDIO_CHANNEL_INDEX_MASK_13,
+    AAUDIO_CHANNEL_INDEX_MASK_14,
+    AAUDIO_CHANNEL_INDEX_MASK_15,
+    AAUDIO_CHANNEL_INDEX_MASK_16,
+    AAUDIO_CHANNEL_INDEX_MASK_17,
+    AAUDIO_CHANNEL_INDEX_MASK_18,
+    AAUDIO_CHANNEL_INDEX_MASK_19,
+    AAUDIO_CHANNEL_INDEX_MASK_20,
+    AAUDIO_CHANNEL_INDEX_MASK_21,
+    AAUDIO_CHANNEL_INDEX_MASK_22,
+    AAUDIO_CHANNEL_INDEX_MASK_23,
+    AAUDIO_CHANNEL_INDEX_MASK_24,
+    AAUDIO_CHANNEL_MONO,
+    AAUDIO_CHANNEL_STEREO,
+    AAUDIO_CHANNEL_FRONT_BACK,
+    AAUDIO_CHANNEL_2POINT0POINT2,
+    AAUDIO_CHANNEL_2POINT1POINT2,
+    AAUDIO_CHANNEL_3POINT0POINT2,
+    AAUDIO_CHANNEL_3POINT1POINT2,
+    AAUDIO_CHANNEL_5POINT1,
+    AAUDIO_CHANNEL_MONO,
+    AAUDIO_CHANNEL_STEREO,
+    AAUDIO_CHANNEL_2POINT1,
+    AAUDIO_CHANNEL_TRI,
+    AAUDIO_CHANNEL_TRI_BACK,
+    AAUDIO_CHANNEL_3POINT1,
+    AAUDIO_CHANNEL_2POINT0POINT2,
+    AAUDIO_CHANNEL_2POINT1POINT2,
+    AAUDIO_CHANNEL_3POINT0POINT2,
+    AAUDIO_CHANNEL_3POINT1POINT2,
+    AAUDIO_CHANNEL_QUAD,
+    AAUDIO_CHANNEL_QUAD_SIDE,
+    AAUDIO_CHANNEL_SURROUND,
+    AAUDIO_CHANNEL_PENTA,
+    AAUDIO_CHANNEL_5POINT1,
+    AAUDIO_CHANNEL_5POINT1_SIDE,
+    AAUDIO_CHANNEL_5POINT1POINT2,
+    AAUDIO_CHANNEL_5POINT1POINT4,
+    AAUDIO_CHANNEL_6POINT1,
+    AAUDIO_CHANNEL_7POINT1,
+    AAUDIO_CHANNEL_7POINT1POINT2,
+    AAUDIO_CHANNEL_7POINT1POINT4,
+    AAUDIO_CHANNEL_9POINT1POINT4,
+    AAUDIO_CHANNEL_9POINT1POINT6,
+};
+
 const size_t kNumAAudioFormats = std::size(kAAudioFormats);
 const size_t kNumAAudioUsages = std::size(kAAudioUsages);
 const size_t kNumAAudioContentTypes = std::size(kAAudioContentTypes);
 const size_t kNumAAudioInputPresets = std::size(kAAudioInputPresets);
+const size_t kNumAAudioChannelMasks = std::size(kAAudioChannelMasks);
 
 class FuzzAAudioClient : public virtual RefBase, public AAudioServiceInterface {
    public:
@@ -119,6 +180,11 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
+    aaudio_result_t exitStandby(aaudio_handle_t streamHandle UNUSED_PARAM,
+                                AudioEndpointParcelable &parcelable UNUSED_PARAM) override {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
     void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) {}
 
     int getDeathCount() { return mDeathCount; }
@@ -305,7 +371,11 @@
 
     request.getConfiguration().setDeviceId(fdp.ConsumeIntegral<int32_t>());
     request.getConfiguration().setSampleRate(fdp.ConsumeIntegral<int32_t>());
-    request.getConfiguration().setSamplesPerFrame(fdp.ConsumeIntegral<int32_t>());
+    request.getConfiguration().setChannelMask((aaudio_channel_mask_t)(
+        fdp.ConsumeBool()
+            ? fdp.ConsumeIntegral<int32_t>()
+            : kAAudioChannelMasks[fdp.ConsumeIntegralInRange<int32_t>(
+                    0, kNumAAudioChannelMasks - 1)]));
     request.getConfiguration().setDirection(
         fdp.ConsumeBool() ? fdp.ConsumeIntegral<int32_t>()
                           : (fdp.ConsumeBool() ? AAUDIO_DIRECTION_OUTPUT : AAUDIO_DIRECTION_INPUT));
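The new channel-mask plumbing above follows a common FuzzedDataProvider idiom: half the time feed a completely arbitrary integer (to probe validation), half the time pick a known-valid value from a table (to get past validation and exercise deeper code). A standalone sketch of that idiom, with made-up values and a placeholder target function rather than the actual AAudio API, looks like this:

    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <fuzzer/FuzzedDataProvider.h>

    // Illustrative stand-in for a table of valid enum values (e.g. channel masks).
    static const int32_t kValidValues[] = {0, 1, 2, 4, 8};

    static void configureUnderTest(int32_t value) {
        // Placeholder for the API being exercised, e.g. setChannelMask(value).
        (void)value;
    }

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
        FuzzedDataProvider fdp(data, size);
        int32_t value = fdp.ConsumeBool()
                ? fdp.ConsumeIntegral<int32_t>()   // arbitrary, likely invalid
                : kValidValues[fdp.ConsumeIntegralInRange<size_t>(
                          0, std::size(kValidValues) - 1)];
        configureUnderTest(value);
        return 0;
    }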
diff --git a/services/tuner/.clang-format b/services/tuner/.clang-format
new file mode 100644
index 0000000..f14cc88
--- /dev/null
+++ b/services/tuner/.clang-format
@@ -0,0 +1,33 @@
+---
+BasedOnStyle: Google
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: true
+AllowShortLoopsOnASingleLine: true
+BinPackArguments: true
+BinPackParameters: true
+CommentPragmas: NOLINT:.*
+ContinuationIndentWidth: 8
+DerivePointerAlignment: false
+IndentWidth: 4
+PointerAlignment: Left
+TabWidth: 4
+
+# Deviations from the above file:
+# "Don't indent the section label"
+AccessModifierOffset: -4
+# "Each line of text in your code should be at most 100 columns long."
+ColumnLimit: 100
+# "Constructor initializer lists can be all on one line or with subsequent
+# lines indented eight spaces.". clang-format does not support having the colon
+# on the same line as the constructor function name, so this is the best
+# approximation of that rule, which makes all entries in the list (except the
+# first one) have an eight space indentation.
+ConstructorInitializerIndentWidth: 6
+# There is nothing in go/droidcppstyle about case labels, but there seems to be
+# more code that does not indent the case labels in frameworks/base.
+IndentCaseLabels: false
+# There have been some bugs in which subsequent formatting operations introduce
+# weird comment jumps.
+ReflowComments: false
+# Android does support C++11 now.
+Standard: Cpp11
\ No newline at end of file
diff --git a/services/tuner/Android.bp b/services/tuner/Android.bp
index cd11c88..ec62d4e 100644
--- a/services/tuner/Android.bp
+++ b/services/tuner/Android.bp
@@ -7,33 +7,15 @@
     default_applicable_licenses: ["frameworks_av_license"],
 }
 
-filegroup {
-    name: "tv_tuner_aidl",
-    srcs: [
-        "aidl/android/media/tv/tuner/*.aidl",
-    ],
-    path: "aidl",
-}
-
-filegroup {
-    name: "tv_tuner_frontend_info",
-    srcs: [
-        "aidl/android/media/tv/tuner/TunerFrontendInfo.aidl",
-        "aidl/android/media/tv/tuner/TunerFrontend*Capabilities.aidl",
-    ],
-    path: "aidl",
-}
-
 aidl_interface {
     name: "tv_tuner_aidl_interface",
     unstable: true,
     local_include_dir: "aidl",
-    srcs: [
-        ":tv_tuner_aidl",
-    ],
+    srcs: ["aidl/android/media/tv/tuner/*.aidl"],
     imports: [
         "android.hardware.common-V2",
         "android.hardware.common.fmq-V1",
+        "android.hardware.tv.tuner-V1",
     ],
 
     backend: {
@@ -49,37 +31,18 @@
     },
 }
 
-aidl_interface {
-    name: "tv_tuner_frontend_info_aidl_interface",
-    unstable: true,
-    local_include_dir: "aidl",
-    srcs: [
-        ":tv_tuner_frontend_info",
-    ],
-
-    backend: {
-        java: {
-            enabled: true,
-        },
-        cpp: {
-            enabled: true,
-        },
-        ndk: {
-            enabled: true,
-        },
-    },
-}
-
 cc_library {
     name: "libtunerservice",
 
     srcs: [
         "Tuner*.cpp",
+        "hidl/Tuner*.cpp",
     ],
 
     shared_libs: [
         "android.hardware.tv.tuner@1.0",
         "android.hardware.tv.tuner@1.1",
+        "android.hardware.tv.tuner-V1-ndk",
         "libbase",
         "libbinder",
         "libbinder_ndk",
@@ -89,13 +52,13 @@
         "liblog",
         "libmedia",
         "libutils",
-        "tv_tuner_aidl_interface-ndk_platform",
-        "tv_tuner_resource_manager_aidl_interface-ndk_platform",
-        "tv_tuner_resource_manager_aidl_interface-cpp",
+        "packagemanager_aidl-cpp",
+        "tv_tuner_aidl_interface-ndk",
+        "tv_tuner_resource_manager_aidl_interface-ndk",
     ],
 
     static_libs: [
-        "android.hardware.common.fmq-V1-ndk_platform",
+        "android.hardware.common.fmq-V1-ndk",
         "libaidlcommonsupport",
     ],
 
@@ -122,18 +85,18 @@
     shared_libs: [
         "android.hardware.tv.tuner@1.0",
         "android.hardware.tv.tuner@1.1",
+        "android.hardware.tv.tuner-V1-ndk",
         "libbase",
         "libbinder",
         "libfmq",
         "liblog",
         "libtunerservice",
         "libutils",
-        "tv_tuner_resource_manager_aidl_interface-ndk_platform",
-        "tv_tuner_resource_manager_aidl_interface-cpp",
+        "tv_tuner_resource_manager_aidl_interface-ndk",
     ],
 
     static_libs: [
-        "tv_tuner_aidl_interface-ndk_platform",
+        "tv_tuner_aidl_interface-ndk",
     ],
 
     init_rc: ["mediatuner.rc"],
diff --git a/services/tuner/OWNERS b/services/tuner/OWNERS
index 0ceb8e8..bf9fe34 100644
--- a/services/tuner/OWNERS
+++ b/services/tuner/OWNERS
@@ -1,2 +1,2 @@
-nchalko@google.com
+hgchen@google.com
 quxiangfang@google.com
diff --git a/services/tuner/TunerDemux.cpp b/services/tuner/TunerDemux.cpp
index 1122368..a6f3a2c 100644
--- a/services/tuner/TunerDemux.cpp
+++ b/services/tuner/TunerDemux.cpp
@@ -16,23 +16,32 @@
 
 #define LOG_TAG "TunerDemux"
 
-#include "TunerDvr.h"
 #include "TunerDemux.h"
+
+#include <aidl/android/hardware/tv/tuner/IDvr.h>
+#include <aidl/android/hardware/tv/tuner/IDvrCallback.h>
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
+#include <aidl/android/hardware/tv/tuner/IFilterCallback.h>
+#include <aidl/android/hardware/tv/tuner/ITimeFilter.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerDvr.h"
 #include "TunerTimeFilter.h"
 
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::DvrType;
-using ::android::hardware::tv::tuner::V1_0::Result;
+using ::aidl::android::hardware::tv::tuner::IDvr;
+using ::aidl::android::hardware::tv::tuner::IDvrCallback;
+using ::aidl::android::hardware::tv::tuner::IFilter;
+using ::aidl::android::hardware::tv::tuner::IFilterCallback;
+using ::aidl::android::hardware::tv::tuner::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::Result;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerDemux::TunerDemux(sp<IDemux> demux, int id) {
+TunerDemux::TunerDemux(shared_ptr<IDemux> demux, int id) {
     mDemux = demux;
     mDemuxId = id;
 }
@@ -41,192 +50,143 @@
     mDemux = nullptr;
 }
 
-Status TunerDemux::setFrontendDataSource(const std::shared_ptr<ITunerFrontend>& frontend) {
+::ndk::ScopedAStatus TunerDemux::setFrontendDataSource(
+        const shared_ptr<ITunerFrontend>& in_frontend) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
     int frontendId;
-    frontend->getFrontendId(&frontendId);
-    Result res = mDemux->setFrontendDataSource(frontendId);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    in_frontend->getFrontendId(&frontendId);
+
+    return mDemux->setFrontendDataSource(frontendId);
 }
 
-Status TunerDemux::openFilter(
-        int type, int subType, int bufferSize, const std::shared_ptr<ITunerFilterCallback>& cb,
-        std::shared_ptr<ITunerFilter>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::setFrontendDataSourceById(int frontendId) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mDemux->setFrontendDataSource(frontendId);
+}
+
+::ndk::ScopedAStatus TunerDemux::openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+                                            const shared_ptr<ITunerFilterCallback>& in_cb,
+                                            shared_ptr<ITunerFilter>* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    shared_ptr<IFilter> filter;
+    shared_ptr<TunerFilter::FilterCallback> filterCb =
+            ::ndk::SharedRefBase::make<TunerFilter::FilterCallback>(in_cb);
+    shared_ptr<IFilterCallback> cb = filterCb;
+    auto status = mDemux->openFilter(in_type, in_bufferSize, cb, &filter);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerFilter>(filter, filterCb, in_type);
+    }
+
+    return status;
+}
+
+::ndk::ScopedAStatus TunerDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    DemuxFilterMainType mainType = static_cast<DemuxFilterMainType>(type);
-    DemuxFilterType filterType {
-        .mainType = mainType,
-    };
-
-    switch(mainType) {
-        case DemuxFilterMainType::TS:
-            filterType.subType.tsFilterType(static_cast<DemuxTsFilterType>(subType));
-            break;
-        case DemuxFilterMainType::MMTP:
-            filterType.subType.mmtpFilterType(static_cast<DemuxMmtpFilterType>(subType));
-            break;
-        case DemuxFilterMainType::IP:
-            filterType.subType.ipFilterType(static_cast<DemuxIpFilterType>(subType));
-            break;
-        case DemuxFilterMainType::TLV:
-            filterType.subType.tlvFilterType(static_cast<DemuxTlvFilterType>(subType));
-            break;
-        case DemuxFilterMainType::ALP:
-            filterType.subType.alpFilterType(static_cast<DemuxAlpFilterType>(subType));
-            break;
-    }
-    Result status;
-    sp<IFilter> filterSp;
-    sp<IFilterCallback> cbSp = new TunerFilter::FilterCallback(cb);
-    mDemux->openFilter(filterType, bufferSize, cbSp,
-            [&](Result r, const sp<IFilter>& filter) {
-                filterSp = filter;
-                status = r;
-            });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    shared_ptr<ITimeFilter> filter;
+    auto status = mDemux->openTimeFilter(&filter);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerTimeFilter>(filter);
     }
 
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerFilter>(filterSp, type, subType);
-    return Status::ok();
+    return status;
 }
 
-Status TunerDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter,
+                                               int32_t* _aidl_return) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    sp<ITimeFilter> filterSp;
-    mDemux->openTimeFilter([&](Result r, const sp<ITimeFilter>& filter) {
-        filterSp = filter;
-        status = r;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerTimeFilter>(filterSp);
-    return Status::ok();
+    shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(tunerFilter.get()))->getHalFilter();
+    return mDemux->getAvSyncHwId(halFilter, _aidl_return);
 }
 
-Status TunerDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter, int* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::getAvSyncTime(int32_t avSyncHwId, int64_t* _aidl_return) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    uint32_t avSyncHwId;
-    Result res;
-    sp<IFilter> halFilter = static_cast<TunerFilter*>(tunerFilter.get())->getHalFilter();
-    mDemux->getAvSyncHwId(halFilter,
-            [&](Result r, uint32_t id) {
-                res = r;
-                avSyncHwId = id;
-            });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    *_aidl_return = (int)avSyncHwId;
-    return Status::ok();
+    return mDemux->getAvSyncTime(avSyncHwId, _aidl_return);
 }
 
-Status TunerDemux::getAvSyncTime(int avSyncHwId, int64_t* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+                                         const shared_ptr<ITunerDvrCallback>& in_cb,
+                                         shared_ptr<ITunerDvr>* _aidl_return) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    uint64_t time;
-    Result res;
-    mDemux->getAvSyncTime(static_cast<uint32_t>(avSyncHwId),
-            [&](Result r, uint64_t ts) {
-                res = r;
-                time = ts;
-            });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    shared_ptr<IDvrCallback> callback = ::ndk::SharedRefBase::make<TunerDvr::DvrCallback>(in_cb);
+    shared_ptr<IDvr> halDvr;
+    auto res = mDemux->openDvr(in_dvbType, in_bufferSize, callback, &halDvr);
+    if (res.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerDvr>(halDvr, in_dvbType);
     }
 
-    *_aidl_return = (int64_t)time;
-    return Status::ok();
+    return res;
 }
 
-Status TunerDemux::openDvr(int dvrType, int bufferSize, const shared_ptr<ITunerDvrCallback>& cb,
-        shared_ptr<ITunerDvr>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::connectCiCam(int32_t ciCamId) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    sp<IDvrCallback> callback = new TunerDvr::DvrCallback(cb);
-    sp<IDvr> hidlDvr;
-    mDemux->openDvr(static_cast<DvrType>(dvrType), bufferSize, callback,
-            [&](Result r, const sp<IDvr>& dvr) {
-                hidlDvr = dvr;
-                res = r;
-            });
-    if (res != Result::SUCCESS) {
-        *_aidl_return = NULL;
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerDvr>(hidlDvr, dvrType);
-    return Status::ok();
+    return mDemux->connectCiCam(ciCamId);
 }
 
-Status TunerDemux::connectCiCam(int ciCamId) {
+::ndk::ScopedAStatus TunerDemux::disconnectCiCam() {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDemux->connectCiCam(static_cast<uint32_t>(ciCamId));
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDemux->disconnectCiCam();
 }
 
-Status TunerDemux::disconnectCiCam() {
+::ndk::ScopedAStatus TunerDemux::close() {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDemux->disconnectCiCam();
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    auto res = mDemux->close();
+    mDemux = nullptr;
+
+    return res;
 }
 
-Status TunerDemux::close() {
-    if (mDemux == nullptr) {
-        ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result res = mDemux->close();
-    mDemux = NULL;
-
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
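The TunerDemux changes above repeat one conversion pattern: HIDL methods that reported results through synchronous lambdas become AIDL-style methods that fill an out-parameter and return the binder status, which the wrapper can forward unchanged after wrapping the returned HAL object. A self-contained sketch of that shape, using a stand-in Status type instead of ndk::ScopedAStatus and invented interface names, is:

    #include <memory>
    #include <utility>

    // Stand-ins for ndk::ScopedAStatus and the AIDL-generated HAL interfaces.
    struct Status {
        bool ok = true;
        bool isOk() const { return ok; }
        static Status fromError(int /*code*/) { return Status{false}; }
    };

    struct IHalFilter {};

    struct IHalDemux {
        // AIDL style: status return plus out-parameter, no result lambda.
        Status openFilter(int type, int bufferSize, std::shared_ptr<IHalFilter>* outFilter) {
            (void)type; (void)bufferSize;
            *outFilter = std::make_shared<IHalFilter>();
            return Status{};
        }
    };

    struct TunerFilterWrapper {
        explicit TunerFilterWrapper(std::shared_ptr<IHalFilter> f) : halFilter(std::move(f)) {}
        std::shared_ptr<IHalFilter> halFilter;
    };

    Status openFilter(const std::shared_ptr<IHalDemux>& demux, int type, int bufferSize,
                      std::shared_ptr<TunerFilterWrapper>* outWrapper) {
        if (demux == nullptr) {
            return Status::fromError(/*UNAVAILABLE*/ 1);
        }
        std::shared_ptr<IHalFilter> halFilter;
        Status status = demux->openFilter(type, bufferSize, &halFilter);
        if (status.isOk()) {
            // Wrap the HAL object only on success; otherwise forward the error as-is.
            *outWrapper = std::make_shared<TunerFilterWrapper>(halFilter);
        }
        return status;
    }

    int main() {
        auto demux = std::make_shared<IHalDemux>();
        std::shared_ptr<TunerFilterWrapper> wrapper;
        return openFilter(demux, /*type=*/0, /*bufferSize=*/4096, &wrapper).isOk() ? 0 : 1;
    }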
diff --git a/services/tuner/TunerDemux.h b/services/tuner/TunerDemux.h
index 2a9836b..cdb3aa0 100644
--- a/services/tuner/TunerDemux.h
+++ b/services/tuner/TunerDemux.h
@@ -17,52 +17,55 @@
 #ifndef ANDROID_MEDIA_TUNERDEMUX_H
 #define ANDROID_MEDIA_TUNERDEMUX_H
 
+#include <aidl/android/hardware/tv/tuner/IDemux.h>
 #include <aidl/android/media/tv/tuner/BnTunerDemux.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerDvr;
-using ::aidl::android::media::tv::tuner::ITunerDvrCallback;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
-using ::aidl::android::media::tv::tuner::ITunerFrontend;
-using ::aidl::android::media::tv::tuner::ITunerTimeFilter;
-using ::android::hardware::tv::tuner::V1_0::IDemux;
-using ::android::hardware::tv::tuner::V1_0::IDvr;
-using ::android::hardware::tv::tuner::V1_0::IDvrCallback;
-using ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::aidl::android::hardware::tv::tuner::IDemux;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerDemux : public BnTunerDemux {
 
 public:
-    TunerDemux(sp<IDemux> demux, int demuxId);
+    TunerDemux(shared_ptr<IDemux> demux, int demuxId);
     virtual ~TunerDemux();
-    Status setFrontendDataSource(const shared_ptr<ITunerFrontend>& frontend) override;
-    Status openFilter(
-        int mainType, int subtype, int bufferSize, const shared_ptr<ITunerFilterCallback>& cb,
-        shared_ptr<ITunerFilter>* _aidl_return) override;
-    Status openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
-    Status getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter, int* _aidl_return) override;
-    Status getAvSyncTime(int avSyncHwId, int64_t* _aidl_return) override;
-    Status openDvr(
-        int dvbType, int bufferSize, const shared_ptr<ITunerDvrCallback>& cb,
-        shared_ptr<ITunerDvr>* _aidl_return) override;
-    Status connectCiCam(int ciCamId) override;
-    Status disconnectCiCam() override;
-    Status close() override;
+
+    ::ndk::ScopedAStatus setFrontendDataSource(
+            const shared_ptr<ITunerFrontend>& in_frontend) override;
+    ::ndk::ScopedAStatus setFrontendDataSourceById(int frontendId) override;
+    ::ndk::ScopedAStatus openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+                                    const shared_ptr<ITunerFilterCallback>& in_cb,
+                                    shared_ptr<ITunerFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus getAvSyncHwId(const shared_ptr<ITunerFilter>& in_tunerFilter,
+                                       int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getAvSyncTime(int32_t in_avSyncHwId, int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+                                 const shared_ptr<ITunerDvrCallback>& in_cb,
+                                 shared_ptr<ITunerDvr>* _aidl_return) override;
+    ::ndk::ScopedAStatus connectCiCam(int32_t in_ciCamId) override;
+    ::ndk::ScopedAStatus disconnectCiCam() override;
+    ::ndk::ScopedAStatus close() override;
 
     int getId() { return mDemuxId; }
 
 private:
-    sp<IDemux> mDemux;
+    shared_ptr<IDemux> mDemux;
     int mDemuxId;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERDEMUX_H
diff --git a/services/tuner/TunerDescrambler.cpp b/services/tuner/TunerDescrambler.cpp
index b7ae167..70aee20 100644
--- a/services/tuner/TunerDescrambler.cpp
+++ b/services/tuner/TunerDescrambler.cpp
@@ -16,17 +16,27 @@
 
 #define LOG_TAG "TunerDescrambler"
 
-#include "TunerFilter.h"
-#include "TunerDemux.h"
 #include "TunerDescrambler.h"
 
-using ::android::hardware::tv::tuner::V1_0::Result;
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <utils/Log.h>
+
+#include "TunerDemux.h"
+#include "TunerFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::IFilter;
+using ::aidl::android::hardware::tv::tuner::Result;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerDescrambler::TunerDescrambler(sp<IDescrambler> descrambler) {
+TunerDescrambler::TunerDescrambler(shared_ptr<IDescrambler> descrambler) {
     mDescrambler = descrambler;
 }
 
@@ -34,91 +44,74 @@
     mDescrambler = nullptr;
 }
 
-Status TunerDescrambler::setDemuxSource(const std::shared_ptr<ITunerDemux>& demux) {
+::ndk::ScopedAStatus TunerDescrambler::setDemuxSource(
+        const shared_ptr<ITunerDemux>& in_tunerDemux) {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDescrambler->setDemuxSource(static_cast<TunerDemux*>(demux.get())->getId());
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDescrambler->setDemuxSource((static_cast<TunerDemux*>(in_tunerDemux.get()))->getId());
 }
 
-Status TunerDescrambler::setKeyToken(const vector<uint8_t>& keyToken) {
+::ndk::ScopedAStatus TunerDescrambler::setKeyToken(const vector<uint8_t>& in_keyToken) {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDescrambler->setKeyToken(keyToken);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDescrambler->setKeyToken(in_keyToken);
 }
 
-Status TunerDescrambler::addPid(const TunerDemuxPid& pid,
-        const shared_ptr<ITunerFilter>& optionalSourceFilter) {
+::ndk::ScopedAStatus TunerDescrambler::addPid(
+        const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    sp<IFilter> halFilter = (optionalSourceFilter == NULL)
-            ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
-    Result res = mDescrambler->addPid(getHidlDemuxPid(pid), halFilter);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    shared_ptr<IFilter> halFilter =
+            (in_optionalSourceFilter == nullptr)
+                    ? nullptr
+                    : static_cast<TunerFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+
+    return mDescrambler->addPid(in_pid, halFilter);
 }
 
-Status TunerDescrambler::removePid(const TunerDemuxPid& pid,
-        const shared_ptr<ITunerFilter>& optionalSourceFilter) {
+::ndk::ScopedAStatus TunerDescrambler::removePid(
+        const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    sp<IFilter> halFilter = (optionalSourceFilter == NULL)
-            ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
-    Result res = mDescrambler->removePid(getHidlDemuxPid(pid), halFilter);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    shared_ptr<IFilter> halFilter =
+            (in_optionalSourceFilter == nullptr)
+                    ? nullptr
+                    : static_cast<TunerFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+
+    return mDescrambler->removePid(in_pid, halFilter);
 }
 
-Status TunerDescrambler::close() {
+::ndk::ScopedAStatus TunerDescrambler::close() {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDescrambler->close();
-    mDescrambler = NULL;
+    auto res = mDescrambler->close();
+    mDescrambler = nullptr;
 
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return res;
 }
 
-DemuxPid TunerDescrambler::getHidlDemuxPid(const TunerDemuxPid& pid) {
-    DemuxPid hidlPid;
-    switch (pid.getTag()) {
-        case TunerDemuxPid::tPid: {
-            hidlPid.tPid((uint16_t)pid.get<TunerDemuxPid::tPid>());
-            break;
-        }
-        case TunerDemuxPid::mmtpPid: {
-            hidlPid.mmtpPid((uint16_t)pid.get<TunerDemuxPid::mmtpPid>());
-            break;
-        }
-    }
-    return hidlPid;
-}
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/TunerDescrambler.h b/services/tuner/TunerDescrambler.h
index 1970fb7..b1d5fb9 100644
--- a/services/tuner/TunerDescrambler.h
+++ b/services/tuner/TunerDescrambler.h
@@ -17,38 +17,43 @@
 #ifndef ANDROID_MEDIA_TUNERDESCRAMBLER_H
 #define ANDROID_MEDIA_TUNERDESCRAMBLER_H
 
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
 #include <aidl/android/media/tv/tuner/BnTunerDescrambler.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerDescrambler;
-using ::aidl::android::media::tv::tuner::ITunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::TunerDemuxPid;
-using ::android::hardware::tv::tuner::V1_0::DemuxPid;
-using ::android::hardware::tv::tuner::V1_0::IDescrambler;
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::aidl::android::hardware::tv::tuner::IDescrambler;
 
+using namespace std;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerDescrambler : public BnTunerDescrambler {
 
 public:
-    TunerDescrambler(sp<IDescrambler> descrambler);
+    TunerDescrambler(shared_ptr<IDescrambler> descrambler);
     virtual ~TunerDescrambler();
-    Status setDemuxSource(const shared_ptr<ITunerDemux>& demux) override;
-    Status setKeyToken(const vector<uint8_t>& keyToken) override;
-    Status addPid(const TunerDemuxPid& pid,
-            const shared_ptr<ITunerFilter>& optionalSourceFilter) override;
-    Status removePid(const TunerDemuxPid& pid,
-            const shared_ptr<ITunerFilter>& optionalSourceFilter) override;
-    Status close() override;
+
+    ::ndk::ScopedAStatus setDemuxSource(const shared_ptr<ITunerDemux>& in_tunerDemux) override;
+    ::ndk::ScopedAStatus setKeyToken(const vector<uint8_t>& in_keyToken) override;
+    ::ndk::ScopedAStatus addPid(const DemuxPid& in_pid,
+                                const shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+    ::ndk::ScopedAStatus removePid(
+            const DemuxPid& in_pid,
+            const shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+    ::ndk::ScopedAStatus close() override;
 
 private:
-    DemuxPid getHidlDemuxPid(const TunerDemuxPid& pid);
-
-    sp<IDescrambler> mDescrambler;
+    shared_ptr<IDescrambler> mDescrambler;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERDESCRAMBLER_H
diff --git a/services/tuner/TunerDvr.cpp b/services/tuner/TunerDvr.cpp
index db4e07b..8776f7e 100644
--- a/services/tuner/TunerDvr.cpp
+++ b/services/tuner/TunerDvr.cpp
@@ -16,194 +16,152 @@
 
 #define LOG_TAG "TunerDvr"
 
-#include <fmq/ConvertMQDescriptors.h>
 #include "TunerDvr.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <utils/Log.h>
+
 #include "TunerFilter.h"
 
-using ::android::hardware::tv::tuner::V1_0::DataFormat;
-using ::android::hardware::tv::tuner::V1_0::Result;
+using ::aidl::android::hardware::tv::tuner::Result;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerDvr::TunerDvr(sp<IDvr> dvr, int type) {
+TunerDvr::TunerDvr(shared_ptr<IDvr> dvr, DvrType type) {
     mDvr = dvr;
-    mType = static_cast<DvrType>(type);
+    mType = type;
 }
 
 TunerDvr::~TunerDvr() {
-    mDvr = NULL;
+    mDvr = nullptr;
 }
 
-Status TunerDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    MQDesc dvrMQDesc;
-    Result res;
-    mDvr->getQueueDesc([&](Result r, const MQDesc& desc) {
-        dvrMQDesc = desc;
-        res = r;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    AidlMQDesc aidlMQDesc;
-    unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
-                dvrMQDesc,  &aidlMQDesc);
-    *_aidl_return = move(aidlMQDesc);
-    return Status::ok();
+    return mDvr->getQueueDesc(_aidl_return);
 }
 
-Status TunerDvr::configure(const TunerDvrSettings& settings) {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::configure(const DvrSettings& in_settings) {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->configure(getHidlDvrSettingsFromAidl(settings));
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDvr->configure(in_settings);
 }
 
-Status TunerDvr::attachFilter(const shared_ptr<ITunerFilter>& filter) {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::attachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    ITunerFilter* tunerFilter = filter.get();
-    sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
-    if (hidlFilter == NULL) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    if (in_filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    Result res = mDvr->attachFilter(hidlFilter);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
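+    // Unwrap the Tuner-side filter to the underlying HAL IFilter before attaching it to the HAL DVR.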
+    shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(in_filter.get()))->getHalFilter();
+    if (halFilter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
-    return Status::ok();
+
+    return mDvr->attachFilter(halFilter);
 }
 
-Status TunerDvr::detachFilter(const shared_ptr<ITunerFilter>& filter) {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::detachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    ITunerFilter* tunerFilter = filter.get();
-    sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
-    if (hidlFilter == NULL) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    if (in_filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    Result res = mDvr->detachFilter(hidlFilter);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(in_filter.get()))->getHalFilter();
+    if (halFilter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
-    return Status::ok();
+
+    return mDvr->detachFilter(halFilter);
 }
 
-Status TunerDvr::start() {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::start() {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->start();
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDvr->start();
 }
 
-Status TunerDvr::stop() {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::stop() {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->stop();
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDvr->stop();
 }
 
-Status TunerDvr::flush() {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::flush() {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->flush();
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDvr->flush();
 }
 
-Status TunerDvr::close() {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::close() {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->close();
-    mDvr = NULL;
+    auto status = mDvr->close();
+    mDvr = nullptr;
 
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-DvrSettings TunerDvr::getHidlDvrSettingsFromAidl(TunerDvrSettings settings) {
-    DvrSettings s;
-    switch (mType) {
-        case DvrType::PLAYBACK: {
-            s.playback({
-                .statusMask = static_cast<uint8_t>(settings.statusMask),
-                .lowThreshold = static_cast<uint32_t>(settings.lowThreshold),
-                .highThreshold = static_cast<uint32_t>(settings.highThreshold),
-                .dataFormat = static_cast<DataFormat>(settings.dataFormat),
-                .packetSize = static_cast<uint8_t>(settings.packetSize),
-            });
-            return s;
-        }
-        case DvrType::RECORD: {
-            s.record({
-                .statusMask = static_cast<uint8_t>(settings.statusMask),
-                .lowThreshold = static_cast<uint32_t>(settings.lowThreshold),
-                .highThreshold = static_cast<uint32_t>(settings.highThreshold),
-                .dataFormat = static_cast<DataFormat>(settings.dataFormat),
-                .packetSize = static_cast<uint8_t>(settings.packetSize),
-            });
-            return s;
-        }
-        default:
-            break;
-    }
-    return s;
+    return status;
 }
 
 /////////////// IDvrCallback ///////////////////////
-
-Return<void> TunerDvr::DvrCallback::onRecordStatus(const RecordStatus status) {
-    if (mTunerDvrCallback != NULL) {
-        mTunerDvrCallback->onRecordStatus(static_cast<int>(status));
+::ndk::ScopedAStatus TunerDvr::DvrCallback::onRecordStatus(const RecordStatus status) {
+    if (mTunerDvrCallback != nullptr) {
+        mTunerDvrCallback->onRecordStatus(status);
     }
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerDvr::DvrCallback::onPlaybackStatus(const PlaybackStatus status) {
-    if (mTunerDvrCallback != NULL) {
-        mTunerDvrCallback->onPlaybackStatus(static_cast<int>(status));
+::ndk::ScopedAStatus TunerDvr::DvrCallback::onPlaybackStatus(const PlaybackStatus status) {
+    if (mTunerDvrCallback != nullptr) {
+        mTunerDvrCallback->onPlaybackStatus(status);
     }
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/TunerDvr.h b/services/tuner/TunerDvr.h
index a508e99..1854d08 100644
--- a/services/tuner/TunerDvr.h
+++ b/services/tuner/TunerDvr.h
@@ -17,81 +17,71 @@
 #ifndef ANDROID_MEDIA_TUNERDVR_H
 #define ANDROID_MEDIA_TUNERDVR_H
 
+#include <aidl/android/hardware/tv/tuner/BnDvrCallback.h>
+#include <aidl/android/hardware/tv/tuner/DvrSettings.h>
+#include <aidl/android/hardware/tv/tuner/DvrType.h>
+#include <aidl/android/hardware/tv/tuner/IDvr.h>
+#include <aidl/android/hardware/tv/tuner/PlaybackStatus.h>
+#include <aidl/android/hardware/tv/tuner/RecordStatus.h>
 #include <aidl/android/media/tv/tuner/BnTunerDvr.h>
 #include <aidl/android/media/tv/tuner/ITunerDvrCallback.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <fmq/MessageQueue.h>
 
-#include <TunerFilter.h>
+#include "TunerFilter.h"
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::hardware::common::fmq::GrantorDescriptor;
 using ::aidl::android::hardware::common::fmq::MQDescriptor;
 using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
-using ::aidl::android::media::tv::tuner::BnTunerDvr;
-using ::aidl::android::media::tv::tuner::ITunerDvrCallback;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::TunerDvrSettings;
-
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-
-using ::android::hardware::tv::tuner::V1_0::DvrSettings;
-using ::android::hardware::tv::tuner::V1_0::DvrType;
-using ::android::hardware::tv::tuner::V1_0::IDvr;
-using ::android::hardware::tv::tuner::V1_0::IDvrCallback;
-using ::android::hardware::tv::tuner::V1_0::PlaybackStatus;
-using ::android::hardware::tv::tuner::V1_0::RecordStatus;
+using ::aidl::android::hardware::tv::tuner::BnDvrCallback;
+using ::aidl::android::hardware::tv::tuner::DvrSettings;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::aidl::android::hardware::tv::tuner::IDvr;
+using ::aidl::android::hardware::tv::tuner::PlaybackStatus;
+using ::aidl::android::hardware::tv::tuner::RecordStatus;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-using MQDesc = MQDescriptorSync<uint8_t>;
 using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
 
 class TunerDvr : public BnTunerDvr {
 
 public:
-    TunerDvr(sp<IDvr> dvr, int type);
+    TunerDvr(shared_ptr<IDvr> dvr, DvrType type);
     ~TunerDvr();
 
-    Status getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus configure(const DvrSettings& in_settings) override;
+    ::ndk::ScopedAStatus attachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus detachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus start() override;
+    ::ndk::ScopedAStatus stop() override;
+    ::ndk::ScopedAStatus flush() override;
+    ::ndk::ScopedAStatus close() override;
 
-    Status configure(const TunerDvrSettings& settings) override;
-
-    Status attachFilter(const shared_ptr<ITunerFilter>& filter) override;
-
-    Status detachFilter(const shared_ptr<ITunerFilter>& filter) override;
-
-    Status start() override;
-
-    Status stop() override;
-
-    Status flush() override;
-
-    Status close() override;
-
-    struct DvrCallback : public IDvrCallback {
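+    // Forwards HAL DVR record/playback status callbacks to the client-side ITunerDvrCallback.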
+    struct DvrCallback : public BnDvrCallback {
         DvrCallback(const shared_ptr<ITunerDvrCallback> tunerDvrCallback)
-                : mTunerDvrCallback(tunerDvrCallback) {};
+              : mTunerDvrCallback(tunerDvrCallback) {}
 
-        virtual Return<void> onRecordStatus(const RecordStatus status);
-        virtual Return<void> onPlaybackStatus(const PlaybackStatus status);
+        ::ndk::ScopedAStatus onRecordStatus(const RecordStatus status) override;
+        ::ndk::ScopedAStatus onPlaybackStatus(const PlaybackStatus status) override;
 
-        private:
-            shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
+    private:
+        shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
     };
 
 private:
-    DvrSettings getHidlDvrSettingsFromAidl(TunerDvrSettings settings);
-
-    sp<IDvr> mDvr;
+    shared_ptr<IDvr> mDvr;
     DvrType mType;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERDVR_H
diff --git a/services/tuner/TunerFilter.cpp b/services/tuner/TunerFilter.cpp
index 039fd31..fb5bfa3 100644
--- a/services/tuner/TunerFilter.cpp
+++ b/services/tuner/TunerFilter.cpp
@@ -18,893 +18,460 @@
 
 #include "TunerFilter.h"
 
-using ::aidl::android::media::tv::tuner::TunerFilterSectionCondition;
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <binder/IPCThreadState.h>
 
-using ::android::hardware::hidl_handle;
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpLengthType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpAddress;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpPid;
-using ::android::hardware::tv::tuner::V1_0::DemuxRecordScIndexType;
-using ::android::hardware::tv::tuner::V1_0::DemuxStreamId;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::AudioStreamType;
-using ::android::hardware::tv::tuner::V1_1::Constant;
-using ::android::hardware::tv::tuner::V1_1::VideoStreamType;
+#include "TunerHelper.h"
+#include "TunerService.h"
 
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using ::android::IPCThreadState;
 
 using namespace std;
 
-TunerFilter::TunerFilter(
-        sp<IFilter> filter, int mainType, int subType) {
-    mFilter = filter;
-    mFilter_1_1 = ::android::hardware::tv::tuner::V1_1::IFilter::castFrom(filter);
-    mMainType = mainType;
-    mSubType = subType;
-}
+TunerFilter::TunerFilter(shared_ptr<IFilter> filter, shared_ptr<FilterCallback> cb,
+                         DemuxFilterType type)
+      : mFilter(filter),
+        mType(type),
+        mStarted(false),
+        mShared(false),
+        mClientPid(-1),
+        mFilterCallback(cb) {}
 
 TunerFilter::~TunerFilter() {
+    Mutex::Autolock _l(mLock);
     mFilter = nullptr;
-    mFilter_1_1 = nullptr;
 }
 
-Status TunerFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
-    if (mFilter == NULL) {
-        ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    MQDesc filterMQDesc;
-    Result res;
-    mFilter->getQueueDesc([&](Result r, const MQDesc& desc) {
-        filterMQDesc = desc;
-        res = r;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    AidlMQDesc aidlMQDesc;
-    unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
-                filterMQDesc,  &aidlMQDesc);
-    *_aidl_return = move(aidlMQDesc);
-    return Status::ok();
-}
-
-Status TunerFilter::getId(int32_t* _aidl_return) {
+::ndk::ScopedAStatus TunerFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    mFilter->getId([&](Result r, uint32_t filterId) {
-        res = r;
-        mId = filterId;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
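+    // Once shared, the owning client (mClientPid) may no longer read the queue; only the borrowing client can.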
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
     }
-    *_aidl_return = mId;
-    return Status::ok();
+
+    return mFilter->getQueueDesc(_aidl_return);
 }
 
-Status TunerFilter::getId64Bit(int64_t* _aidl_return) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result res;
-    mFilter_1_1->getId64Bit([&](Result r, uint64_t filterId) {
-        res = r;
-        mId64Bit = filterId;
-    });
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    *_aidl_return = mId64Bit;
-    return Status::ok();
-}
-
-Status TunerFilter::configure(const TunerFilterConfiguration& config) {
+::ndk::ScopedAStatus TunerFilter::getId(int32_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    DemuxFilterSettings settings;
-    switch (config.getTag()) {
-        case TunerFilterConfiguration::ts: {
-            getHidlTsSettings(config, settings);
-            break;
-        }
-        case TunerFilterConfiguration::mmtp: {
-            getHidlMmtpSettings(config, settings);
-            break;
-        }
-        case TunerFilterConfiguration::ip: {
-            getHidlIpSettings(config, settings);
-            break;
-        }
-        case TunerFilterConfiguration::tlv: {
-            getHidlTlvSettings(config, settings);
-            break;
-        }
-        case TunerFilterConfiguration::alp: {
-            getHidlAlpSettings(config, settings);
-            break;
-        }
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
     }
 
-    Result res = mFilter->configure(settings);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    auto status = mFilter->getId(&mId);
+    if (status.isOk()) {
+        *_aidl_return = mId;
     }
-    return Status::ok();
+    return status;
 }
 
-Status TunerFilter::configureMonitorEvent(int monitorEventType) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result res = mFilter_1_1->configureMonitorEvent(monitorEventType);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-Status TunerFilter::configureIpFilterContextId(int cid) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result res = mFilter_1_1->configureIpCid(cid);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-Status TunerFilter::configureAvStreamType(int avStreamType) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    AvStreamType type;
-    if (!getHidlAvStreamType(avStreamType, type)) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_STATE));
-    }
-
-    Result res = mFilter_1_1->configureAvStreamType(type);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-Status TunerFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+::ndk::ScopedAStatus TunerFilter::getId64Bit(int64_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    ITunerFilter* tunerFilter = filter.get();
-    sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
-    Result res = mFilter->setDataSource(hidlFilter);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-void TunerFilter::getHidlTsSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto tsConf = config.get<TunerFilterConfiguration::ts>();
-    DemuxTsFilterSettings ts{
-        .tpid = static_cast<uint16_t>(tsConf.tpid),
-    };
-
-    TunerFilterSettings tunerSettings = tsConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::av: {
-            ts.filterSettings.av(getAvSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::section: {
-            ts.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::pesData: {
-            ts.filterSettings.pesData(getPesDataSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::record: {
-            ts.filterSettings.record(getRecordSettings(tunerSettings));
-            break;
-        }
-        default: {
-            ts.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.ts(ts);
-}
-
-void TunerFilter::getHidlMmtpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto mmtpConf = config.get<TunerFilterConfiguration::mmtp>();
-    DemuxMmtpFilterSettings mmtp{
-        .mmtpPid = static_cast<DemuxMmtpPid>(mmtpConf.mmtpPid),
-    };
-
-    TunerFilterSettings tunerSettings = mmtpConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::av: {
-            mmtp.filterSettings.av(getAvSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::section: {
-            mmtp.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::pesData: {
-            mmtp.filterSettings.pesData(getPesDataSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::record: {
-            mmtp.filterSettings.record(getRecordSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::download: {
-            mmtp.filterSettings.download(getDownloadSettings(tunerSettings));
-            break;
-        }
-        default: {
-            mmtp.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.mmtp(mmtp);
-}
-
-void TunerFilter::getHidlIpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto ipConf = config.get<TunerFilterConfiguration::ip>();
-    DemuxIpAddress ipAddr{
-        .srcPort = static_cast<uint16_t>(ipConf.ipAddr.srcPort),
-        .dstPort = static_cast<uint16_t>(ipConf.ipAddr.dstPort),
-    };
-
-    ipConf.ipAddr.srcIpAddress.isIpV6
-            ? ipAddr.srcIpAddress.v6(getIpV6Address(ipConf.ipAddr.srcIpAddress))
-            : ipAddr.srcIpAddress.v4(getIpV4Address(ipConf.ipAddr.srcIpAddress));
-    ipConf.ipAddr.dstIpAddress.isIpV6
-            ? ipAddr.dstIpAddress.v6(getIpV6Address(ipConf.ipAddr.dstIpAddress))
-            : ipAddr.dstIpAddress.v4(getIpV4Address(ipConf.ipAddr.dstIpAddress));
-    DemuxIpFilterSettings ip{
-        .ipAddr = ipAddr,
-    };
-
-    TunerFilterSettings tunerSettings = ipConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::section: {
-            ip.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::isPassthrough: {
-            ip.filterSettings.bPassthrough(tunerSettings.isPassthrough);
-            break;
-        }
-        default: {
-            ip.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.ip(ip);
-}
-
-hidl_array<uint8_t, IP_V6_LENGTH> TunerFilter::getIpV6Address(TunerDemuxIpAddress addr) {
-    hidl_array<uint8_t, IP_V6_LENGTH> ip;
-    if (addr.addr.size() != IP_V6_LENGTH) {
-        return ip;
-    }
-    copy(addr.addr.begin(), addr.addr.end(), ip.data());
-    return ip;
-}
-
-hidl_array<uint8_t, IP_V4_LENGTH> TunerFilter::getIpV4Address(TunerDemuxIpAddress addr) {
-    hidl_array<uint8_t, IP_V4_LENGTH> ip;
-    if (addr.addr.size() != IP_V4_LENGTH) {
-        return ip;
-    }
-    copy(addr.addr.begin(), addr.addr.end(), ip.data());
-    return ip;
-}
-
-void TunerFilter::getHidlTlvSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto tlvConf = config.get<TunerFilterConfiguration::tlv>();
-    DemuxTlvFilterSettings tlv{
-        .packetType = static_cast<uint8_t>(tlvConf.packetType),
-        .isCompressedIpPacket = tlvConf.isCompressedIpPacket,
-    };
-
-    TunerFilterSettings tunerSettings = tlvConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::section: {
-            tlv.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::isPassthrough: {
-            tlv.filterSettings.bPassthrough(tunerSettings.isPassthrough);
-            break;
-        }
-        default: {
-            tlv.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.tlv(tlv);
-}
-
-void TunerFilter::getHidlAlpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto alpConf = config.get<TunerFilterConfiguration::alp>();
-    DemuxAlpFilterSettings alp{
-        .packetType = static_cast<uint8_t>(alpConf.packetType),
-        .lengthType = static_cast<DemuxAlpLengthType>(alpConf.lengthType),
-    };
-
-    TunerFilterSettings tunerSettings = alpConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::section: {
-            alp.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        default: {
-            alp.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.alp(alp);
-}
-
-DemuxFilterAvSettings TunerFilter::getAvSettings(const TunerFilterSettings& settings) {
-    DemuxFilterAvSettings av {
-        .isPassthrough = settings.get<TunerFilterSettings::av>().isPassthrough,
-    };
-    return av;
-}
-
-DemuxFilterSectionSettings TunerFilter::getSectionSettings(const TunerFilterSettings& settings) {
-    auto s = settings.get<TunerFilterSettings::section>();
-    DemuxFilterSectionSettings section{
-        .isCheckCrc = s.isCheckCrc,
-        .isRepeat = s.isRepeat,
-        .isRaw = s.isRaw,
-    };
-
-    switch (s.condition.getTag()) {
-        case TunerFilterSectionCondition::sectionBits: {
-            auto sectionBits = s.condition.get<TunerFilterSectionCondition::sectionBits>();
-            vector<uint8_t> filter(sectionBits.filter.begin(), sectionBits.filter.end());
-            vector<uint8_t> mask(sectionBits.mask.begin(), sectionBits.mask.end());
-            vector<uint8_t> mode(sectionBits.mode.begin(), sectionBits.mode.end());
-            section.condition.sectionBits({
-                .filter = filter,
-                .mask = mask,
-                .mode = mode,
-            });
-            break;
-        }
-        case TunerFilterSectionCondition::tableInfo: {
-            auto tableInfo = s.condition.get<TunerFilterSectionCondition::tableInfo>();
-            section.condition.tableInfo({
-                .tableId = static_cast<uint16_t>(tableInfo.tableId),
-                .version = static_cast<uint16_t>(tableInfo.version),
-            });
-            break;
-        }
-        default: {
-            break;
-        }
-    }
-    return section;
-}
-
-DemuxFilterPesDataSettings TunerFilter::getPesDataSettings(const TunerFilterSettings& settings) {
-    DemuxFilterPesDataSettings pes{
-        .streamId = static_cast<DemuxStreamId>(
-                settings.get<TunerFilterSettings::pesData>().streamId),
-        .isRaw = settings.get<TunerFilterSettings::pesData>().isRaw,
-    };
-    return pes;
-}
-
-DemuxFilterRecordSettings TunerFilter::getRecordSettings(const TunerFilterSettings& settings) {
-    auto r = settings.get<TunerFilterSettings::record>();
-    DemuxFilterRecordSettings record{
-        .tsIndexMask = static_cast<uint32_t>(r.tsIndexMask),
-        .scIndexType = static_cast<DemuxRecordScIndexType>(r.scIndexType),
-    };
-
-    switch (r.scIndexMask.getTag()) {
-        case TunerFilterScIndexMask::sc: {
-            record.scIndexMask.sc(static_cast<uint32_t>(
-                    r.scIndexMask.get<TunerFilterScIndexMask::sc>()));
-            break;
-        }
-        case TunerFilterScIndexMask::scHevc: {
-            record.scIndexMask.scHevc(static_cast<uint32_t>(
-                    r.scIndexMask.get<TunerFilterScIndexMask::scHevc>()));
-            break;
-        }
-    }
-    return record;
-}
-
-DemuxFilterDownloadSettings TunerFilter::getDownloadSettings(const TunerFilterSettings& settings) {
-    DemuxFilterDownloadSettings download {
-        .downloadId = static_cast<uint32_t>(
-                settings.get<TunerFilterSettings::download>().downloadId),
-    };
-    return download;
-}
-
-Status TunerFilter::getAvSharedHandleInfo(TunerFilterSharedHandleInfo* _aidl_return) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
     }
 
-    Result res;
-    mFilter_1_1->getAvSharedHandle([&](Result r, hidl_handle avMemory, uint64_t avMemSize) {
-        res = r;
-        if (res == Result::SUCCESS) {
-            TunerFilterSharedHandleInfo info{
-                .handle = dupToAidl(avMemory),
-                .size = static_cast<int64_t>(avMemSize),
-            };
-            *_aidl_return = move(info);
+    auto status = mFilter->getId64Bit(&mId64Bit);
+    if (status.isOk()) {
+        *_aidl_return = mId64Bit;
+    }
+    return status;
+}
+
+::ndk::ScopedAStatus TunerFilter::configure(const DemuxFilterSettings& in_settings) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->configure(in_settings);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureMonitorEvent(int32_t monitorEventType) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->configureMonitorEvent(monitorEventType);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureIpFilterContextId(int32_t cid) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->configureIpCid(cid);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureAvStreamType(const AvStreamType& in_avStreamType) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->configureAvStreamType(in_avStreamType);
+}
+
+::ndk::ScopedAStatus TunerFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    shared_ptr<IFilter> halFilter = static_cast<TunerFilter*>(filter.get())->getHalFilter();
+    return mFilter->setDataSource(halFilter);
+}
+
+::ndk::ScopedAStatus TunerFilter::getAvSharedHandle(NativeHandle* out_avMemory,
+                                                    int64_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->getAvSharedHandle(out_avMemory, _aidl_return);
+}
+
+::ndk::ScopedAStatus TunerFilter::releaseAvHandle(const NativeHandle& in_handle,
+                                                  int64_t in_avDataId) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->releaseAvHandle(in_handle, in_avDataId);
+}
+
+::ndk::ScopedAStatus TunerFilter::start() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
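+    // Track the running state; acquireSharedFilterToken() refuses to share a filter that has already been started.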
+    auto res = mFilter->start();
+    if (res.isOk()) {
+        mStarted = true;
+    }
+    return res;
+}
+
+::ndk::ScopedAStatus TunerFilter::stop() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    auto res = mFilter->stop();
+    mStarted = false;
+
+    return res;
+}
+
+::ndk::ScopedAStatus TunerFilter::flush() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    return mFilter->flush();
+}
+
+::ndk::ScopedAStatus TunerFilter::close() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
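+            // Owner is closing a shared filter: notify the borrower it is now inaccessible, restore the
+            // original callback, and unregister the shared entry before closing the HAL filter below.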
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            TunerService::getTunerService()->removeSharedFilter(this->ref<TunerFilter>());
         } else {
-            _aidl_return = NULL;
+            // Called from the borrowing (shared) process; do not actually close the underlying filter.
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            mStarted = false;
+            return ::ndk::ScopedAStatus::ok();
         }
-    });
-
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
     }
-    return Status::ok();
+
+    auto res = mFilter->close();
+    mFilter = nullptr;
+    mStarted = false;
+    mShared = false;
+    mClientPid = -1;
+
+    return res;
 }
 
-Status TunerFilter::releaseAvHandle(
-        const ::aidl::android::hardware::common::NativeHandle& handle, int64_t avDataId) {
+::ndk::ScopedAStatus TunerFilter::acquireSharedFilterToken(string* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mFilter->releaseAvHandle(hidl_handle(makeFromAidl(handle)), avDataId);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    if (mShared || mStarted) {
+        ALOGD("create SharedFilter in wrong state");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
     }
-    return Status::ok();
+
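+    // Record the owner's pid; while shared, start/stop/flush/getQueueDesc from this pid are rejected
+    // so only the borrowing client drives the filter.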
+    IPCThreadState* ipc = IPCThreadState::self();
+    mClientPid = ipc->getCallingPid();
+    string token = TunerService::getTunerService()->addFilterToShared(this->ref<TunerFilter>());
+    _aidl_return->assign(token);
+    mShared = true;
+
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Status TunerFilter::start() {
+::ndk::ScopedAStatus TunerFilter::freeSharedFilterToken(const string& /* in_filterToken */) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    Result res = mFilter->start();
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+
+    if (!mShared) {
+        // The filter is not shared or the shared filter has been closed.
+        return ::ndk::ScopedAStatus::ok();
     }
-    return Status::ok();
+
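+    // Notify the borrower that the filter is no longer accessible and restore the owner's callback
+    // before clearing the shared state.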
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+        mFilterCallback->detachSharedFilterCallback();
+    }
+
+    TunerService::getTunerService()->removeSharedFilter(this->ref<TunerFilter>());
+    mShared = false;
+
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Status TunerFilter::stop() {
+::ndk::ScopedAStatus TunerFilter::getFilterType(DemuxFilterType* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    Result res = mFilter->stop();
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+
+    *_aidl_return = mType;
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Status TunerFilter::flush() {
+::ndk::ScopedAStatus TunerFilter::setDelayHint(const FilterDelayHint& in_hint) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    Result res = mFilter->flush();
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+
+    return mFilter->setDelayHint(in_hint);
 }
 
-Status TunerFilter::close() {
-    if (mFilter == nullptr) {
-        ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-    Result res = mFilter->close();
-    mFilter = NULL;
-    mFilter_1_1 = NULL;
-
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+bool TunerFilter::isSharedFilterAllowed(int callingPid) {
+    return mShared && mClientPid != callingPid;
 }
 
-sp<IFilter> TunerFilter::getHalFilter() {
+void TunerFilter::attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb) {
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->attachSharedFilterCallback(in_cb);
+    }
+}
+
+shared_ptr<IFilter> TunerFilter::getHalFilter() {
     return mFilter;
 }
 
-bool TunerFilter::isAudioFilter() {
-    return (mMainType == (int)DemuxFilterMainType::TS
-                    && mSubType == (int)DemuxTsFilterType::AUDIO)
-            || (mMainType == (int)DemuxFilterMainType::MMTP
-                    && mSubType == (int)DemuxMmtpFilterType::AUDIO);
-}
-
-bool TunerFilter::isVideoFilter() {
-    return (mMainType == (int)DemuxFilterMainType::TS
-                    && mSubType == (int)DemuxTsFilterType::VIDEO)
-            || (mMainType == (int)DemuxFilterMainType::MMTP
-                    && mSubType == (int)DemuxMmtpFilterType::VIDEO);
-}
-
-bool TunerFilter::getHidlAvStreamType(int avStreamType, AvStreamType& type) {
-    if (isAudioFilter()) {
-        type.audio(static_cast<AudioStreamType>(avStreamType));
-        return true;
-    }
-
-    if (isVideoFilter()) {
-        type.video(static_cast<VideoStreamType>(avStreamType));
-        return true;
-    }
-
-    return false;
-}
-
 /////////////// FilterCallback ///////////////////////
-
-Return<void> TunerFilter::FilterCallback::onFilterStatus(DemuxFilterStatus status) {
-    if (mTunerFilterCallback != NULL) {
-        mTunerFilterCallback->onFilterStatus((int)status);
+::ndk::ScopedAStatus TunerFilter::FilterCallback::onFilterStatus(DemuxFilterStatus status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(status);
     }
-    return Void();
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerFilter::FilterCallback::onFilterEvent(const DemuxFilterEvent& filterEvent) {
-    vector<DemuxFilterEventExt::Event> emptyEventsExt;
-    DemuxFilterEventExt emptyFilterEventExt {
-            .events = emptyEventsExt,
-    };
-    onFilterEvent_1_1(filterEvent, emptyFilterEventExt);
-    return Void();
+::ndk::ScopedAStatus TunerFilter::FilterCallback::onFilterEvent(
+        const vector<DemuxFilterEvent>& events) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        mTunerFilterCallback->onFilterEvent(events);
+    }
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerFilter::FilterCallback::onFilterEvent_1_1(const DemuxFilterEvent& filterEvent,
-        const DemuxFilterEventExt& filterEventExt) {
-    if (mTunerFilterCallback != NULL) {
-        vector<DemuxFilterEvent::Event> events = filterEvent.events;
-        vector<DemuxFilterEventExt::Event> eventsExt = filterEventExt.events;
-        vector<TunerFilterEvent> tunerEvent;
-
-        getAidlFilterEvent(events, eventsExt, tunerEvent);
-        mTunerFilterCallback->onFilterEvent(tunerEvent);
-    }
-    return Void();
-}
-
-/////////////// FilterCallback Helper Methods ///////////////////////
-
-void TunerFilter::FilterCallback::getAidlFilterEvent(vector<DemuxFilterEvent::Event>& events,
-        vector<DemuxFilterEventExt::Event>& eventsExt,
-        vector<TunerFilterEvent>& tunerEvent) {
-    if (events.empty() && !eventsExt.empty()) {
-        auto eventExt = eventsExt[0];
-        switch (eventExt.getDiscriminator()) {
-            case DemuxFilterEventExt::Event::hidl_discriminator::monitorEvent: {
-                getMonitorEvent(eventsExt, tunerEvent);
-                return;
-            }
-            case DemuxFilterEventExt::Event::hidl_discriminator::startId: {
-                getRestartEvent(eventsExt, tunerEvent);
-                return;
-            }
-            default: {
-                break;
-            }
-        }
-        return;
-    }
-
-    if (!events.empty()) {
-        auto event = events[0];
-        switch (event.getDiscriminator()) {
-            case DemuxFilterEvent::Event::hidl_discriminator::media: {
-                getMediaEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::section: {
-                getSectionEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::pes: {
-                getPesEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::tsRecord: {
-                getTsRecordEvent(events, eventsExt, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::mmtpRecord: {
-                getMmtpRecordEvent(events, eventsExt, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::download: {
-                getDownloadEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::ipPayload: {
-                getIpPayloadEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::temi: {
-                getTemiEvent(events, tunerEvent);
-                break;
-            }
-            default: {
-                break;
-            }
-        }
+void TunerFilter::FilterCallback::sendSharedFilterStatus(int32_t status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
     }
 }
 
-void TunerFilter::FilterCallback::getMediaEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterMediaEvent mediaEvent = e.media();
-        TunerFilterMediaEvent tunerMedia;
+void TunerFilter::FilterCallback::attachSharedFilterCallback(
+        const shared_ptr<ITunerFilterCallback>& in_cb) {
+    Mutex::Autolock _l(mCallbackLock);
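+    // Stash the owner's callback so detachSharedFilterCallback() can restore it when sharing ends.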
+    mOriginalCallback = mTunerFilterCallback;
+    mTunerFilterCallback = in_cb;
+}
 
-        tunerMedia.streamId = static_cast<char16_t>(mediaEvent.streamId);
-        tunerMedia.isPtsPresent = mediaEvent.isPtsPresent;
-        tunerMedia.pts = static_cast<long>(mediaEvent.pts);
-        tunerMedia.dataLength = static_cast<int>(mediaEvent.dataLength);
-        tunerMedia.offset = static_cast<int>(mediaEvent.offset);
-        tunerMedia.isSecureMemory = mediaEvent.isSecureMemory;
-        tunerMedia.avDataId = static_cast<long>(mediaEvent.avDataId);
-        tunerMedia.mpuSequenceNumber = static_cast<int>(mediaEvent.mpuSequenceNumber);
-        tunerMedia.isPesPrivateData = mediaEvent.isPesPrivateData;
-
-        if (mediaEvent.extraMetaData.getDiscriminator() ==
-                DemuxFilterMediaEvent::ExtraMetaData::hidl_discriminator::audio) {
-            tunerMedia.isAudioExtraMetaData = true;
-            tunerMedia.audio = {
-                .adFade = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adFade),
-                .adPan = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adPan),
-                .versionTextTag = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().versionTextTag),
-                .adGainCenter = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adGainCenter),
-                .adGainFront = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adGainFront),
-                .adGainSurround = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adGainSurround),
-            };
-        } else {
-            tunerMedia.isAudioExtraMetaData = false;
-        }
-
-        if (mediaEvent.avMemory.getNativeHandle() != nullptr) {
-            tunerMedia.avMemory = dupToAidl(mediaEvent.avMemory.getNativeHandle());
-        }
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::media>(move(tunerMedia));
-        res.push_back(move(tunerEvent));
+void TunerFilter::FilterCallback::detachSharedFilterCallback() {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback = mOriginalCallback;
+        mOriginalCallback = nullptr;
     }
 }
 
-void TunerFilter::FilterCallback::getSectionEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterSectionEvent sectionEvent = e.section();
-        TunerFilterSectionEvent tunerSection;
-
-        tunerSection.tableId = static_cast<char16_t>(sectionEvent.tableId);
-        tunerSection.version = static_cast<char16_t>(sectionEvent.version);
-        tunerSection.sectionNum = static_cast<char16_t>(sectionEvent.sectionNum);
-        tunerSection.dataLength = static_cast<char16_t>(sectionEvent.dataLength);
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::section>(move(tunerSection));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getPesEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterPesEvent pesEvent = e.pes();
-        TunerFilterPesEvent tunerPes;
-
-        tunerPes.streamId = static_cast<char16_t>(pesEvent.streamId);
-        tunerPes.dataLength = static_cast<char16_t>(pesEvent.dataLength);
-        tunerPes.mpuSequenceNumber = static_cast<int>(pesEvent.mpuSequenceNumber);
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::pes>(move(tunerPes));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getTsRecordEvent(vector<DemuxFilterEvent::Event>& events,
-        vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
-    for (int i = 0; i < events.size(); i++) {
-        TunerFilterTsRecordEvent tunerTsRecord;
-        DemuxFilterTsRecordEvent tsRecordEvent = events[i].tsRecord();
-
-        TunerFilterScIndexMask scIndexMask;
-        if (tsRecordEvent.scIndexMask.getDiscriminator()
-                == DemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::sc) {
-            scIndexMask.set<TunerFilterScIndexMask::sc>(
-                    static_cast<int>(tsRecordEvent.scIndexMask.sc()));
-        } else if (tsRecordEvent.scIndexMask.getDiscriminator()
-                == DemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::scHevc) {
-            scIndexMask.set<TunerFilterScIndexMask::scHevc>(
-                    static_cast<int>(tsRecordEvent.scIndexMask.scHevc()));
-        }
-
-        if (tsRecordEvent.pid.getDiscriminator() == DemuxPid::hidl_discriminator::tPid) {
-            tunerTsRecord.pid = static_cast<char16_t>(tsRecordEvent.pid.tPid());
-        } else {
-            tunerTsRecord.pid = static_cast<char16_t>(Constant::INVALID_TS_PID);
-        }
-
-        tunerTsRecord.scIndexMask = scIndexMask;
-        tunerTsRecord.tsIndexMask = static_cast<int>(tsRecordEvent.tsIndexMask);
-        tunerTsRecord.byteNumber = static_cast<long>(tsRecordEvent.byteNumber);
-
-        if (eventsExt.size() > i && eventsExt[i].getDiscriminator() ==
-                    DemuxFilterEventExt::Event::hidl_discriminator::tsRecord) {
-            tunerTsRecord.isExtended = true;
-            tunerTsRecord.pts = static_cast<long>(eventsExt[i].tsRecord().pts);
-            tunerTsRecord.firstMbInSlice = static_cast<int>(eventsExt[i].tsRecord().firstMbInSlice);
-        } else {
-            tunerTsRecord.isExtended = false;
-        }
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::tsRecord>(move(tunerTsRecord));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getMmtpRecordEvent(vector<DemuxFilterEvent::Event>& events,
-        vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
-    for (int i = 0; i < events.size(); i++) {
-        TunerFilterMmtpRecordEvent tunerMmtpRecord;
-        DemuxFilterMmtpRecordEvent mmtpRecordEvent = events[i].mmtpRecord();
-
-        tunerMmtpRecord.scHevcIndexMask = static_cast<int>(mmtpRecordEvent.scHevcIndexMask);
-        tunerMmtpRecord.byteNumber = static_cast<long>(mmtpRecordEvent.byteNumber);
-
-        if (eventsExt.size() > i && eventsExt[i].getDiscriminator() ==
-                    DemuxFilterEventExt::Event::hidl_discriminator::mmtpRecord) {
-            tunerMmtpRecord.isExtended = true;
-            tunerMmtpRecord.pts = static_cast<long>(eventsExt[i].mmtpRecord().pts);
-            tunerMmtpRecord.mpuSequenceNumber =
-                    static_cast<int>(eventsExt[i].mmtpRecord().mpuSequenceNumber);
-            tunerMmtpRecord.firstMbInSlice =
-                    static_cast<int>(eventsExt[i].mmtpRecord().firstMbInSlice);
-            tunerMmtpRecord.tsIndexMask = static_cast<int>(eventsExt[i].mmtpRecord().tsIndexMask);
-        } else {
-            tunerMmtpRecord.isExtended = false;
-        }
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::mmtpRecord>(move(tunerMmtpRecord));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getDownloadEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterDownloadEvent downloadEvent = e.download();
-        TunerFilterDownloadEvent tunerDownload;
-
-        tunerDownload.itemId = static_cast<int>(downloadEvent.itemId);
-        tunerDownload.itemFragmentIndex = static_cast<int>(downloadEvent.itemFragmentIndex);
-        tunerDownload.mpuSequenceNumber = static_cast<int>(downloadEvent.mpuSequenceNumber);
-        tunerDownload.lastItemFragmentIndex = static_cast<int>(downloadEvent.lastItemFragmentIndex);
-        tunerDownload.dataLength = static_cast<char16_t>(downloadEvent.dataLength);
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::download>(move(tunerDownload));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getIpPayloadEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterIpPayloadEvent ipPayloadEvent = e.ipPayload();
-        TunerFilterIpPayloadEvent tunerIpPayload;
-
-        tunerIpPayload.dataLength = static_cast<char16_t>(ipPayloadEvent.dataLength);
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::ipPayload>(move(tunerIpPayload));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getTemiEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterTemiEvent temiEvent = e.temi();
-        TunerFilterTemiEvent tunerTemi;
-
-        tunerTemi.pts = static_cast<long>(temiEvent.pts);
-        tunerTemi.descrTag = static_cast<int8_t>(temiEvent.descrTag);
-        vector<uint8_t> descrData = temiEvent.descrData;
-        tunerTemi.descrData.resize(descrData.size());
-        copy(descrData.begin(), descrData.end(), tunerTemi.descrData.begin());
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::temi>(move(tunerTemi));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getMonitorEvent(
-        vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
-    DemuxFilterMonitorEvent monitorEvent = eventsExt[0].monitorEvent();
-    TunerFilterMonitorEvent tunerMonitor;
-
-    switch (monitorEvent.getDiscriminator()) {
-        case DemuxFilterMonitorEvent::hidl_discriminator::scramblingStatus: {
-            tunerMonitor.set<TunerFilterMonitorEvent::scramblingStatus>(
-                    static_cast<int>(monitorEvent.scramblingStatus()));
-            break;
-        }
-        case DemuxFilterMonitorEvent::hidl_discriminator::cid: {
-            tunerMonitor.set<TunerFilterMonitorEvent::cid>(static_cast<int>(monitorEvent.cid()));
-            break;
-        }
-    }
-
-    TunerFilterEvent tunerEvent;
-    tunerEvent.set<TunerFilterEvent::monitor>(move(tunerMonitor));
-    res.push_back(move(tunerEvent));
-}
-
-void TunerFilter::FilterCallback::getRestartEvent(
-        vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
-    TunerFilterEvent tunerEvent;
-    tunerEvent.set<TunerFilterEvent::startId>(static_cast<int>(eventsExt[0].startId()));
-    res.push_back(move(tunerEvent));
-}
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
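
[Editor's note] The TunerFilter.cpp hunk above drops the old HIDL event-conversion helpers and keeps only a FilterCallback that can temporarily hand its events to a shared-filter client and later restore the owning client, guarded by a mutex. The following standalone sketch illustrates that attach/detach pattern; it is not part of the patch, and "Callback"/"SwappableCallback" are placeholder names.

    // Minimal sketch (not from the patch) of a callback proxy whose delegate can be
    // swapped for a shared-filter client and restored later, under a mutex.
    #include <memory>
    #include <mutex>
    #include <utility>

    struct Callback {
        virtual ~Callback() = default;
    };

    class SwappableCallback {
    public:
        explicit SwappableCallback(std::shared_ptr<Callback> owner)
              : mCurrent(std::move(owner)), mOriginal(nullptr) {}

        // Analogous to attachSharedFilterCallback: remember the owner,
        // then route events to the shared client.
        void attach(std::shared_ptr<Callback> sharedClient) {
            std::lock_guard<std::mutex> lock(mLock);
            mOriginal = mCurrent;
            mCurrent = std::move(sharedClient);
        }

        // Analogous to detachSharedFilterCallback: restore the owner's callback.
        void detach() {
            std::lock_guard<std::mutex> lock(mLock);
            if (mCurrent != nullptr && mOriginal != nullptr) {
                mCurrent = mOriginal;
                mOriginal = nullptr;
            }
        }

    private:
        std::shared_ptr<Callback> mCurrent;
        std::shared_ptr<Callback> mOriginal;
        std::mutex mLock;
    };

Holding both pointers lets detach() be a no-op if nothing was ever attached, which matches the null checks in the patch.
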
diff --git a/services/tuner/TunerFilter.h b/services/tuner/TunerFilter.h
index ff4728c..529c191 100644
--- a/services/tuner/TunerFilter.h
+++ b/services/tuner/TunerFilter.h
@@ -17,176 +17,107 @@
 #ifndef ANDROID_MEDIA_TUNERFILTER_H
 #define ANDROID_MEDIA_TUNERFILTER_H
 
+#include <aidl/android/hardware/tv/tuner/AvStreamType.h>
+#include <aidl/android/hardware/tv/tuner/BnFilterCallback.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterType.h>
+#include <aidl/android/hardware/tv/tuner/FilterDelayHint.h>
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
 #include <aidl/android/media/tv/tuner/BnTunerFilter.h>
 #include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
-#include <aidlcommonsupport/NativeHandle.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <android/hardware/tv/tuner/1.1/IFilter.h>
-#include <android/hardware/tv/tuner/1.1/IFilterCallback.h>
-#include <android/hardware/tv/tuner/1.1/types.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <fmq/ConvertMQDescriptors.h>
-#include <fmq/MessageQueue.h>
+#include <utils/Mutex.h>
 
-using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::hardware::common::NativeHandle;
 using ::aidl::android::hardware::common::fmq::MQDescriptor;
 using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::AvStreamType;
+using ::aidl::android::hardware::tv::tuner::BnFilterCallback;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::FilterDelayHint;
+using ::aidl::android::hardware::tv::tuner::IFilter;
 using ::aidl::android::media::tv::tuner::BnTunerFilter;
-using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
-using ::aidl::android::media::tv::tuner::TunerDemuxIpAddress;
-using ::aidl::android::media::tv::tuner::TunerFilterConfiguration;
-using ::aidl::android::media::tv::tuner::TunerFilterDownloadEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterIpPayloadEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMediaEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMmtpRecordEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMonitorEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterPesEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterScIndexMask;
-using ::aidl::android::media::tv::tuner::TunerFilterSectionEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterSharedHandleInfo;
-using ::aidl::android::media::tv::tuner::TunerFilterSettings;
-using ::aidl::android::media::tv::tuner::TunerFilterTemiEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterTsRecordEvent;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_array;
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterIpPayloadEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMediaEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMmtpRecordEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterPesDataSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterPesEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterRecordSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterTemiEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterTsRecordEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxPid;
-using ::android::hardware::tv::tuner::V1_0::IFilter;
-using ::android::hardware::tv::tuner::V1_1::AvStreamType;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterEventExt;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEvent;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterTsRecordEventExt;
-using ::android::hardware::tv::tuner::V1_1::IFilterCallback;
+using ::android::Mutex;
 
+using namespace std;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-using MQDesc = MQDescriptorSync<uint8_t>;
 using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
 
-const static int IP_V4_LENGTH = 4;
-const static int IP_V6_LENGTH = 16;
-
 class TunerFilter : public BnTunerFilter {
 
 public:
-    TunerFilter(sp<IFilter> filter, int mainType, int subTyp);
-    virtual ~TunerFilter();
-    Status getId(int32_t* _aidl_return) override;
-    Status getId64Bit(int64_t* _aidl_return) override;
-    Status getQueueDesc(AidlMQDesc* _aidl_return) override;
-    Status configure(const TunerFilterConfiguration& config) override;
-    Status configureMonitorEvent(int monitorEventType) override;
-    Status configureIpFilterContextId(int cid) override;
-    Status configureAvStreamType(int avStreamType) override;
-    Status getAvSharedHandleInfo(TunerFilterSharedHandleInfo* _aidl_return) override;
-    Status releaseAvHandle(const ::aidl::android::hardware::common::NativeHandle& handle,
-            int64_t avDataId) override;
-    Status setDataSource(const std::shared_ptr<ITunerFilter>& filter) override;
-    Status start() override;
-    Status stop() override;
-    Status flush() override;
-    Status close() override;
-    sp<IFilter> getHalFilter();
+    class FilterCallback : public BnFilterCallback {
+    public:
+        FilterCallback(const shared_ptr<ITunerFilterCallback>& tunerFilterCallback)
+              : mTunerFilterCallback(tunerFilterCallback), mOriginalCallback(nullptr){};
 
-    struct FilterCallback : public IFilterCallback {
-        FilterCallback(const std::shared_ptr<ITunerFilterCallback> tunerFilterCallback)
-                : mTunerFilterCallback(tunerFilterCallback) {};
+        ::ndk::ScopedAStatus onFilterEvent(const vector<DemuxFilterEvent>& events) override;
+        ::ndk::ScopedAStatus onFilterStatus(DemuxFilterStatus status) override;
 
-        virtual Return<void> onFilterEvent(const DemuxFilterEvent& filterEvent);
-        virtual Return<void> onFilterEvent_1_1(const DemuxFilterEvent& filterEvent,
-                const DemuxFilterEventExt& filterEventExt);
-        virtual Return<void> onFilterStatus(DemuxFilterStatus status);
+        void sendSharedFilterStatus(int32_t status);
+        void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+        void detachSharedFilterCallback();
 
-        void getAidlFilterEvent(std::vector<DemuxFilterEvent::Event>& events,
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& tunerEvent);
-
-        void getMediaEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getSectionEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getPesEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getTsRecordEvent(
-                std::vector<DemuxFilterEvent::Event>& events,
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& res);
-        void getMmtpRecordEvent(
-                std::vector<DemuxFilterEvent::Event>& events,
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& res);
-        void getDownloadEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getIpPayloadEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getTemiEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getMonitorEvent(
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& res);
-        void getRestartEvent(
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& res);
-
-        std::shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
+    private:
+        shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
+        shared_ptr<ITunerFilterCallback> mOriginalCallback;
+        Mutex mCallbackLock;
     };
 
+    TunerFilter(shared_ptr<IFilter> filter, shared_ptr<FilterCallback> cb, DemuxFilterType type);
+    virtual ~TunerFilter();
+
+    ::ndk::ScopedAStatus getId(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getId64Bit(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus configure(const DemuxFilterSettings& in_settings) override;
+    ::ndk::ScopedAStatus configureMonitorEvent(int32_t in_monitorEventTypes) override;
+    ::ndk::ScopedAStatus configureIpFilterContextId(int32_t in_cid) override;
+    ::ndk::ScopedAStatus configureAvStreamType(const AvStreamType& in_avStreamType) override;
+    ::ndk::ScopedAStatus getAvSharedHandle(NativeHandle* out_avMemory,
+                                           int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus releaseAvHandle(const NativeHandle& in_handle,
+                                         int64_t in_avDataId) override;
+    ::ndk::ScopedAStatus setDataSource(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus start() override;
+    ::ndk::ScopedAStatus stop() override;
+    ::ndk::ScopedAStatus flush() override;
+    ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus acquireSharedFilterToken(string* _aidl_return) override;
+    ::ndk::ScopedAStatus freeSharedFilterToken(const string& in_filterToken) override;
+    ::ndk::ScopedAStatus getFilterType(DemuxFilterType* _aidl_return) override;
+    ::ndk::ScopedAStatus setDelayHint(const FilterDelayHint& in_hint) override;
+
+    bool isSharedFilterAllowed(int32_t pid);
+    void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+    shared_ptr<IFilter> getHalFilter();
+
 private:
-    DemuxFilterAvSettings getAvSettings(const TunerFilterSettings& settings);
-    DemuxFilterSectionSettings getSectionSettings(const TunerFilterSettings& settings);
-    DemuxFilterPesDataSettings getPesDataSettings(const TunerFilterSettings& settings);
-    DemuxFilterRecordSettings getRecordSettings(const TunerFilterSettings& settings);
-    DemuxFilterDownloadSettings getDownloadSettings(const TunerFilterSettings& settings);
-
-    bool isAudioFilter();
-    bool isVideoFilter();
-    bool getHidlAvStreamType(int avStreamType, AvStreamType& type);
-
-    void getHidlTsSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-    void getHidlMmtpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-    void getHidlIpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-    void getHidlTlvSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-    void getHidlAlpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-
-    hidl_array<uint8_t, IP_V4_LENGTH> getIpV4Address(TunerDemuxIpAddress addr);
-    hidl_array<uint8_t, IP_V6_LENGTH> getIpV6Address(TunerDemuxIpAddress addr);
-
-    sp<IFilter> mFilter;
-    sp<::android::hardware::tv::tuner::V1_1::IFilter> mFilter_1_1;
+    shared_ptr<IFilter> mFilter;
     int32_t mId;
     int64_t mId64Bit;
-    int mMainType;
-    int mSubType;
+    DemuxFilterType mType;
+    bool mStarted;
+    bool mShared;
+    int32_t mClientPid;
+    shared_ptr<FilterCallback> mFilterCallback;
+    Mutex mLock;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERFILTER_H
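
[Editor's note] In the TunerFrontend.cpp hunk that follows, each wrapper method shrinks to a null-guard plus a direct passthrough of the HAL's ndk::ScopedAStatus, replacing the HIDL pattern of capturing a Result and converting it. Below is a minimal sketch of that guard-and-forward shape; IHalFrontend and FrontendWrapper are hypothetical stand-ins for the real AIDL proxy and service class, and the error code 1 merely stands in for Result::UNAVAILABLE.

    // Sketch only, not from the patch: guard on a missing HAL, otherwise forward its status.
    #include <memory>
    #include <android/binder_auto_utils.h>  // ndk::ScopedAStatus

    using ::ndk::ScopedAStatus;

    struct IHalFrontend {
        virtual ~IHalFrontend() = default;
        virtual ScopedAStatus stopTune() = 0;
    };

    class FrontendWrapper {
    public:
        explicit FrontendWrapper(std::shared_ptr<IHalFrontend> hal) : mHal(std::move(hal)) {}

        ScopedAStatus stopTune() {
            if (mHal == nullptr) {
                // The real service returns Result::UNAVAILABLE here.
                return ScopedAStatus::fromServiceSpecificError(1 /* UNAVAILABLE */);
            }
            return mHal->stopTune();  // forward the HAL status unchanged
        }

    private:
        std::shared_ptr<IHalFrontend> mHal;
    };
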
diff --git a/services/tuner/TunerFrontend.cpp b/services/tuner/TunerFrontend.cpp
index 74b5519..5116305 100644
--- a/services/tuner/TunerFrontend.cpp
+++ b/services/tuner/TunerFrontend.cpp
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,1081 +14,208 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
 #define LOG_TAG "TunerFrontend"
 
 #include "TunerFrontend.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
 #include "TunerLnb.h"
 
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3PlpSettings;
-using ::aidl::android::media::tv::tuner::TunerFrontendScanAtsc3PlpInfo;
-using ::aidl::android::media::tv::tuner::TunerFrontendStatusAtsc3PlpInfo;
-using ::aidl::android::media::tv::tuner::TunerFrontendUnionSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendAnalogSifStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendAnalogType;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtscModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Bandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3CodeRate;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3DemodOutputFormat;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Fec;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Modulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3TimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcAnnex;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcOuterFec;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcSpectralInversion;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsPilot;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsRolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsVcmMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtBandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtConstellation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtGuardInterval;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtHierarchy;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtPlpMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtTransmissionMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendInnerFec;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Coderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Modulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Rolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Settings;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsRolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsStreamIdType;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtBandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtGuardInterval;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendModulationStatus;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanAtsc3PlpInfo;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanType;
-using ::android::hardware::tv::tuner::V1_0::FrontendStatusType;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::FrontendAnalogAftFlag;
-using ::android::hardware::tv::tuner::V1_1::FrontendBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendCableTimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDvbcBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbCodeRate;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbGuardInterval;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbModulation;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbTimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbTransmissionMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDvbsScanType;
-using ::android::hardware::tv::tuner::V1_1::FrontendGuardInterval;
-using ::android::hardware::tv::tuner::V1_1::FrontendInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendModulation;
-using ::android::hardware::tv::tuner::V1_1::FrontendRollOff;
-using ::android::hardware::tv::tuner::V1_1::FrontendTransmissionMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendSpectralInversion;
-using ::android::hardware::tv::tuner::V1_1::FrontendStatusTypeExt1_1;
+using ::aidl::android::hardware::tv::tuner::Result;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerFrontend::TunerFrontend(sp<IFrontend> frontend, int id) {
+TunerFrontend::TunerFrontend(shared_ptr<IFrontend> frontend, int id) {
     mFrontend = frontend;
-    mFrontend_1_1 = ::android::hardware::tv::tuner::V1_1::IFrontend::castFrom(mFrontend);
     mId = id;
 }
 
 TunerFrontend::~TunerFrontend() {
-    mFrontend = NULL;
-    mFrontend_1_1 = NULL;
+    mFrontend = nullptr;
     mId = -1;
 }
 
-Status TunerFrontend::setCallback(
+::ndk::ScopedAStatus TunerFrontend::setCallback(
         const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) {
-    if (mFrontend == NULL) {
+    if (mFrontend == nullptr) {
         ALOGE("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    if (tunerFrontendCallback == NULL) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    if (tunerFrontendCallback == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    sp<IFrontendCallback> frontendCallback = new FrontendCallback(tunerFrontendCallback);
-    Result status = mFrontend->setCallback(frontendCallback);
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    shared_ptr<IFrontendCallback> frontendCallback =
+            ::ndk::SharedRefBase::make<FrontendCallback>(tunerFrontendCallback);
+    return mFrontend->setCallback(frontendCallback);
 }
 
-Status TunerFrontend::tune(const TunerFrontendSettings& settings) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::tune(const FrontendSettings& settings) {
+    if (mFrontend == nullptr) {
         ALOGE("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    FrontendSettings frontendSettings = getHidlFrontendSettings(settings);
-    if (settings.isExtended) {
-        if (mFrontend_1_1 == NULL) {
-            ALOGE("IFrontend_1_1 is not initialized");
-            return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-        }
-        FrontendSettingsExt1_1 frontendSettingsExt = getHidlFrontendSettingsExt(settings);
-        status = mFrontend_1_1->tune_1_1(frontendSettings, frontendSettingsExt);
-    } else {
-        status = mFrontend->tune(frontendSettings);
-    }
-
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->tune(settings);
 }
 
-Status TunerFrontend::stopTune() {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::stopTune() {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->stopTune();
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->stopTune();
 }
 
-Status TunerFrontend::scan(const TunerFrontendSettings& settings, int frontendScanType) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::scan(const FrontendSettings& settings,
+                                         FrontendScanType frontendScanType) {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    FrontendSettings frontendSettings = getHidlFrontendSettings(settings);
-    if (settings.isExtended) {
-        if (mFrontend_1_1 == NULL) {
-            ALOGE("IFrontend_1_1 is not initialized");
-            return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-        }
-        FrontendSettingsExt1_1 frontendSettingsExt = getHidlFrontendSettingsExt(settings);
-        status = mFrontend_1_1->scan_1_1(frontendSettings,
-                static_cast<FrontendScanType>(frontendScanType), frontendSettingsExt);
-    } else {
-        status = mFrontend->scan(
-                frontendSettings, static_cast<FrontendScanType>(frontendScanType));
-    }
-
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->scan(settings, frontendScanType);
 }
 
-Status TunerFrontend::stopScan() {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::stopScan() {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->stopScan();
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->stopScan();
 }
 
-Status TunerFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->setLnb(static_cast<TunerLnb*>(lnb.get())->getId());
-    if (status == Result::SUCCESS) {
-        return Status::ok();
+    if (lnb == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->setLnb(static_cast<TunerLnb*>(lnb.get())->getId());
 }
 
-Status TunerFrontend::setLna(bool bEnable) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::linkCiCamToFrontend(int32_t ciCamId, int32_t* _aidl_return) {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->setLna(bEnable);
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->linkCiCam(ciCamId, _aidl_return);
 }
 
-Status TunerFrontend::linkCiCamToFrontend(int ciCamId, int32_t* _aidl_return) {
-    if (mFrontend_1_1 == NULL) {
-        ALOGD("IFrontend_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    int ltsId;
-    Result status;
-    mFrontend_1_1->linkCiCam(static_cast<uint32_t>(ciCamId),
-            [&](Result r, uint32_t id) {
-                status = r;
-                ltsId = id;
-            });
-
-    if (status == Result::SUCCESS) {
-        *_aidl_return = ltsId;
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-}
-
-Status TunerFrontend::unlinkCiCamToFrontend(int ciCamId) {
-    if (mFrontend_1_1 == NULL) {
-        ALOGD("IFrontend_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result status = mFrontend_1_1->unlinkCiCam(ciCamId);
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-}
-
-Status TunerFrontend::close() {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::unlinkCiCamToFrontend(int32_t ciCamId) {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->close();
-    mFrontend = NULL;
-    mFrontend_1_1 = NULL;
-
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mFrontend->unlinkCiCam(ciCamId);
 }
 
-Status TunerFrontend::getStatus(const vector<int32_t>& statusTypes,
-        vector<TunerFrontendStatus>* _aidl_return) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::close() {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    vector<FrontendStatus> status;
-    vector<FrontendStatusType> types;
-    for (auto s : statusTypes) {
-        types.push_back(static_cast<FrontendStatusType>(s));
-    }
+    auto res = mFrontend->close();
+    mFrontend = nullptr;
 
-    mFrontend->getStatus(types, [&](Result r, const hidl_vec<FrontendStatus>& s) {
-        res = r;
-        status = s;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    getAidlFrontendStatus(status, *_aidl_return);
-    return Status::ok();
+    return res;
 }
 
-Status TunerFrontend::getStatusExtended_1_1(const vector<int32_t>& statusTypes,
-        vector<TunerFrontendStatus>* _aidl_return) {
-    if (mFrontend_1_1 == NULL) {
-        ALOGD("IFrontend_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+::ndk::ScopedAStatus TunerFrontend::getStatus(const vector<FrontendStatusType>& in_statusTypes,
+                                              vector<FrontendStatus>* _aidl_return) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    vector<FrontendStatusExt1_1> status;
-    vector<FrontendStatusTypeExt1_1> types;
-    for (auto s : statusTypes) {
-        types.push_back(static_cast<FrontendStatusTypeExt1_1>(s));
-    }
-
-    mFrontend_1_1->getStatusExt1_1(types, [&](Result r, const hidl_vec<FrontendStatusExt1_1>& s) {
-        res = r;
-        status = s;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    getAidlFrontendStatusExt(status, *_aidl_return);
-    return Status::ok();
+    return mFrontend->getStatus(in_statusTypes, _aidl_return);
 }
 
-Status TunerFrontend::getFrontendId(int* _aidl_return) {
+::ndk::ScopedAStatus TunerFrontend::getFrontendId(int32_t* _aidl_return) {
     *_aidl_return = mId;
-    return Status::ok();
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerFrontend::getHardwareInfo(std::string* _aidl_return) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mFrontend->getHardwareInfo(_aidl_return);
+}
+
+::ndk::ScopedAStatus TunerFrontend::removeOutputPid(int32_t in_pid) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mFrontend->removeOutputPid(in_pid);
+}
+
+::ndk::ScopedAStatus TunerFrontend::getFrontendStatusReadiness(
+        const std::vector<FrontendStatusType>& in_statusTypes,
+        std::vector<FrontendStatusReadiness>* _aidl_return) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mFrontend->getFrontendStatusReadiness(in_statusTypes, _aidl_return);
 }
 
 /////////////// FrontendCallback ///////////////////////
-
-Return<void> TunerFrontend::FrontendCallback::onEvent(FrontendEventType frontendEventType) {
-    ALOGD("FrontendCallback::onEvent, type=%d", frontendEventType);
-    mTunerFrontendCallback->onEvent((int)frontendEventType);
-    return Void();
+::ndk::ScopedAStatus TunerFrontend::FrontendCallback::onEvent(FrontendEventType frontendEventType) {
+    ALOGV("FrontendCallback::onEvent, type=%d", frontendEventType);
+    if (mTunerFrontendCallback != nullptr) {
+        mTunerFrontendCallback->onEvent(frontendEventType);
+    }
+    return ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerFrontend::FrontendCallback::onScanMessage(
+::ndk::ScopedAStatus TunerFrontend::FrontendCallback::onScanMessage(
         FrontendScanMessageType type, const FrontendScanMessage& message) {
-    ALOGD("FrontendCallback::onScanMessage, type=%d", type);
-    TunerFrontendScanMessage scanMessage;
-    switch(type) {
-        case FrontendScanMessageType::LOCKED: {
-            scanMessage.set<TunerFrontendScanMessage::isLocked>(message.isLocked());
-            break;
-        }
-        case FrontendScanMessageType::END: {
-            scanMessage.set<TunerFrontendScanMessage::isEnd>(message.isEnd());
-            break;
-        }
-        case FrontendScanMessageType::PROGRESS_PERCENT: {
-            scanMessage.set<TunerFrontendScanMessage::progressPercent>(message.progressPercent());
-            break;
-        }
-        case FrontendScanMessageType::FREQUENCY: {
-            auto f = message.frequencies();
-            vector<int> frequencies(begin(f), end(f));
-            scanMessage.set<TunerFrontendScanMessage::frequencies>(frequencies);
-            break;
-        }
-        case FrontendScanMessageType::SYMBOL_RATE: {
-            auto s = message.symbolRates();
-            vector<int> symbolRates(begin(s), end(s));
-            scanMessage.set<TunerFrontendScanMessage::symbolRates>(symbolRates);
-            break;
-        }
-        case FrontendScanMessageType::HIERARCHY: {
-            scanMessage.set<TunerFrontendScanMessage::hierarchy>((int)message.hierarchy());
-            break;
-        }
-        case FrontendScanMessageType::ANALOG_TYPE: {
-            scanMessage.set<TunerFrontendScanMessage::analogType>((int)message.analogType());
-            break;
-        }
-        case FrontendScanMessageType::PLP_IDS: {
-            auto p = message.plpIds();
-            vector<uint8_t> plpIds(begin(p), end(p));
-            scanMessage.set<TunerFrontendScanMessage::plpIds>(plpIds);
-            break;
-        }
-        case FrontendScanMessageType::GROUP_IDS: {
-            auto g = message.groupIds();
-            vector<uint8_t> groupIds(begin(g), end(g));
-            scanMessage.set<TunerFrontendScanMessage::groupIds>(groupIds);
-            break;
-        }
-        case FrontendScanMessageType::INPUT_STREAM_IDS: {
-            auto i = message.inputStreamIds();
-            vector<char16_t> streamIds(begin(i), end(i));
-            scanMessage.set<TunerFrontendScanMessage::inputStreamIds>(streamIds);
-            break;
-        }
-        case FrontendScanMessageType::STANDARD: {
-            FrontendScanMessage::Standard std = message.std();
-            int standard;
-            if (std.getDiscriminator() == FrontendScanMessage::Standard::hidl_discriminator::sStd) {
-                standard = (int) std.sStd();
-            } else if (std.getDiscriminator() ==
-                    FrontendScanMessage::Standard::hidl_discriminator::tStd) {
-                standard = (int) std.tStd();
-            } else if (std.getDiscriminator() ==
-                    FrontendScanMessage::Standard::hidl_discriminator::sifStd) {
-                standard = (int) std.sifStd();
-            }
-            scanMessage.set<TunerFrontendScanMessage::std>(standard);
-            break;
-        }
-        case FrontendScanMessageType::ATSC3_PLP_INFO: {
-            vector<FrontendScanAtsc3PlpInfo> plpInfos = message.atsc3PlpInfos();
-            vector<TunerFrontendScanAtsc3PlpInfo> tunerPlpInfos;
-            for (int i = 0; i < plpInfos.size(); i++) {
-                auto info = plpInfos[i];
-                int8_t plpId = (int8_t) info.plpId;
-                bool lls = (bool) info.bLlsFlag;
-                TunerFrontendScanAtsc3PlpInfo plpInfo{
-                    .plpId = plpId,
-                    .llsFlag = lls,
-                };
-                tunerPlpInfos.push_back(plpInfo);
-            }
-            scanMessage.set<TunerFrontendScanMessage::atsc3PlpInfos>(tunerPlpInfos);
-            break;
-        }
-        default:
-            break;
+    ALOGV("FrontendCallback::onScanMessage, type=%d", type);
+    if (mTunerFrontendCallback != nullptr) {
+        mTunerFrontendCallback->onScanMessage(type, message);
     }
-    mTunerFrontendCallback->onScanMessage((int)type, scanMessage);
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerFrontend::FrontendCallback::onScanMessageExt1_1(
-        FrontendScanMessageTypeExt1_1 type, const FrontendScanMessageExt1_1& message) {
-    ALOGD("onScanMessageExt1_1::onScanMessage, type=%d", type);
-    TunerFrontendScanMessage scanMessage;
-    switch(type) {
-        case FrontendScanMessageTypeExt1_1::MODULATION: {
-            FrontendModulation m = message.modulation();
-            int modulation;
-            switch (m.getDiscriminator()) {
-                case FrontendModulation::hidl_discriminator::dvbc:
-                    modulation = (int) m.dvbc();
-                    break;
-                case FrontendModulation::hidl_discriminator::dvbt:
-                    modulation = (int) m.dvbt();
-                    break;
-                case FrontendModulation::hidl_discriminator::dvbs:
-                    modulation = (int) m.dvbs();
-                    break;
-                case FrontendModulation::hidl_discriminator::isdbs:
-                    modulation = (int) m.isdbs();
-                    break;
-                case FrontendModulation::hidl_discriminator::isdbs3:
-                    modulation = (int) m.isdbs3();
-                    break;
-                case FrontendModulation::hidl_discriminator::isdbt:
-                    modulation = (int) m.isdbt();
-                    break;
-                case FrontendModulation::hidl_discriminator::atsc:
-                    modulation = (int) m.atsc();
-                    break;
-                case FrontendModulation::hidl_discriminator::atsc3:
-                    modulation = (int) m.atsc3();
-                    break;
-                case FrontendModulation::hidl_discriminator::dtmb:
-                    modulation = (int) m.dtmb();
-                    break;
-            }
-            scanMessage.set<TunerFrontendScanMessage::modulation>(modulation);
-            break;
-        }
-        case FrontendScanMessageTypeExt1_1::DVBC_ANNEX: {
-            scanMessage.set<TunerFrontendScanMessage::annex>((int)message.annex());
-            break;
-        }
-        case FrontendScanMessageTypeExt1_1::HIGH_PRIORITY: {
-            scanMessage.set<TunerFrontendScanMessage::isHighPriority>(message.isHighPriority());
-            break;
-        }
-        default:
-            break;
-    }
-    mTunerFrontendCallback->onScanMessage((int)type, scanMessage);
-    return Void();
-}
-
-/////////////// TunerFrontend Helper Methods ///////////////////////
-
-void TunerFrontend::getAidlFrontendStatus(
-        vector<FrontendStatus>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus) {
-    for (FrontendStatus s : hidlStatus) {
-        TunerFrontendStatus status;
-        switch (s.getDiscriminator()) {
-            case FrontendStatus::hidl_discriminator::isDemodLocked: {
-                status.set<TunerFrontendStatus::isDemodLocked>(s.isDemodLocked());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::snr: {
-                status.set<TunerFrontendStatus::snr>((int)s.snr());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::ber: {
-                status.set<TunerFrontendStatus::ber>((int)s.ber());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::per: {
-                status.set<TunerFrontendStatus::per>((int)s.per());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::preBer: {
-                status.set<TunerFrontendStatus::preBer>((int)s.preBer());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::signalQuality: {
-                status.set<TunerFrontendStatus::signalQuality>((int)s.signalQuality());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::signalStrength: {
-                status.set<TunerFrontendStatus::signalStrength>((int)s.signalStrength());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::symbolRate: {
-                status.set<TunerFrontendStatus::symbolRate>((int)s.symbolRate());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::innerFec: {
-                status.set<TunerFrontendStatus::innerFec>((long)s.innerFec());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::modulation: {
-                switch (s.modulation().getDiscriminator()) {
-                    case FrontendModulationStatus::hidl_discriminator::dvbc:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().dvbc());
-                        aidlStatus.push_back(status);
-                        break;
-                    case FrontendModulationStatus::hidl_discriminator::dvbs:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().dvbs());
-                        aidlStatus.push_back(status);
-                        break;
-                    case FrontendModulationStatus::hidl_discriminator::isdbs:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbs());
-                        aidlStatus.push_back(status);
-                        break;
-                    case FrontendModulationStatus::hidl_discriminator::isdbs3:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbs3());
-                        aidlStatus.push_back(status);
-                        break;
-                    case FrontendModulationStatus::hidl_discriminator::isdbt:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbt());
-                        aidlStatus.push_back(status);
-                        break;
-                }
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::inversion: {
-                status.set<TunerFrontendStatus::inversion>((int)s.inversion());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::lnbVoltage: {
-                status.set<TunerFrontendStatus::lnbVoltage>((int)s.lnbVoltage());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::plpId: {
-                status.set<TunerFrontendStatus::plpId>((int8_t)s.plpId());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::isEWBS: {
-                status.set<TunerFrontendStatus::isEWBS>(s.isEWBS());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::agc: {
-                status.set<TunerFrontendStatus::agc>((int8_t)s.agc());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::isLnaOn: {
-                status.set<TunerFrontendStatus::isLnaOn>(s.isLnaOn());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::isLayerError: {
-                vector<bool> e(s.isLayerError().begin(), s.isLayerError().end());
-                status.set<TunerFrontendStatus::isLayerError>(e);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::mer: {
-                status.set<TunerFrontendStatus::mer>((int)s.mer());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::freqOffset: {
-                status.set<TunerFrontendStatus::freqOffset>((int)s.freqOffset());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::hierarchy: {
-                status.set<TunerFrontendStatus::hierarchy>((int)s.hierarchy());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::isRfLocked: {
-                status.set<TunerFrontendStatus::isRfLocked>(s.isRfLocked());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::plpInfo: {
-                vector<TunerFrontendStatusAtsc3PlpInfo> info;
-                for (auto i : s.plpInfo()) {
-                    info.push_back({
-                        .plpId = (int8_t)i.plpId,
-                        .isLocked = i.isLocked,
-                        .uec = (int)i.uec,
-                    });
-                }
-                status.set<TunerFrontendStatus::plpInfo>(info);
-                aidlStatus.push_back(status);
-                break;
-            }
-        }
-    }
-}
-
-void TunerFrontend::getAidlFrontendStatusExt(
-        vector<FrontendStatusExt1_1>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus) {
-    for (FrontendStatusExt1_1 s : hidlStatus) {
-        TunerFrontendStatus status;
-        switch (s.getDiscriminator()) {
-            case FrontendStatusExt1_1::hidl_discriminator::modulations: {
-                vector<int> aidlMod;
-                for (auto m : s.modulations()) {
-                    switch (m.getDiscriminator()) {
-                        case FrontendModulation::hidl_discriminator::dvbc:
-                            aidlMod.push_back((int)m.dvbc());
-                            break;
-                        case FrontendModulation::hidl_discriminator::dvbs:
-                            aidlMod.push_back((int)m.dvbs());
-                            break;
-                        case FrontendModulation::hidl_discriminator::dvbt:
-                            aidlMod.push_back((int)m.dvbt());
-                            break;
-                        case FrontendModulation::hidl_discriminator::isdbs:
-                            aidlMod.push_back((int)m.isdbs());
-                            break;
-                        case FrontendModulation::hidl_discriminator::isdbs3:
-                            aidlMod.push_back((int)m.isdbs3());
-                            break;
-                        case FrontendModulation::hidl_discriminator::isdbt:
-                            aidlMod.push_back((int)m.isdbt());
-                            break;
-                        case FrontendModulation::hidl_discriminator::atsc:
-                            aidlMod.push_back((int)m.atsc());
-                            break;
-                        case FrontendModulation::hidl_discriminator::atsc3:
-                            aidlMod.push_back((int)m.atsc3());
-                            break;
-                        case FrontendModulation::hidl_discriminator::dtmb:
-                            aidlMod.push_back((int)m.dtmb());
-                            break;
-                    }
-                }
-                status.set<TunerFrontendStatus::modulations>(aidlMod);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::bers: {
-                vector<int> b(s.bers().begin(), s.bers().end());
-                status.set<TunerFrontendStatus::bers>(b);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::codeRates: {
-                vector<int64_t> codeRates;
-                for (auto c : s.codeRates()) {
-                    codeRates.push_back((long)c);
-                }
-                status.set<TunerFrontendStatus::codeRates>(codeRates);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::bandwidth: {
-                switch (s.bandwidth().getDiscriminator()) {
-                    case FrontendBandwidth::hidl_discriminator::atsc3:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().atsc3());
-                        break;
-                    case FrontendBandwidth::hidl_discriminator::dvbc:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dvbc());
-                        break;
-                    case FrontendBandwidth::hidl_discriminator::dvbt:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dvbt());
-                        break;
-                    case FrontendBandwidth::hidl_discriminator::isdbt:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().isdbt());
-                        break;
-                    case FrontendBandwidth::hidl_discriminator::dtmb:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dtmb());
-                        break;
-                }
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::interval: {
-                switch (s.interval().getDiscriminator()) {
-                    case FrontendGuardInterval::hidl_discriminator::dvbt:
-                        status.set<TunerFrontendStatus::interval>((int)s.interval().dvbt());
-                        break;
-                    case FrontendGuardInterval::hidl_discriminator::isdbt:
-                        status.set<TunerFrontendStatus::interval>((int)s.interval().isdbt());
-                        break;
-                    case FrontendGuardInterval::hidl_discriminator::dtmb:
-                        status.set<TunerFrontendStatus::interval>((int)s.interval().dtmb());
-                        break;
-                }
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::transmissionMode: {
-                switch (s.transmissionMode().getDiscriminator()) {
-                    case FrontendTransmissionMode::hidl_discriminator::dvbt:
-                        status.set<TunerFrontendStatus::transmissionMode>(
-                                (int)s.transmissionMode().dvbt());
-                        break;
-                    case FrontendTransmissionMode::hidl_discriminator::isdbt:
-                        status.set<TunerFrontendStatus::transmissionMode>(
-                                (int)s.transmissionMode().isdbt());
-                        break;
-                    case FrontendTransmissionMode::hidl_discriminator::dtmb:
-                        status.set<TunerFrontendStatus::transmissionMode>(
-                                (int)s.transmissionMode().dtmb());
-                        break;
-                }
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::uec: {
-                status.set<TunerFrontendStatus::uec>((int)s.uec());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::systemId: {
-                status.set<TunerFrontendStatus::systemId>((char16_t)s.systemId());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::interleaving: {
-                vector<int> aidlInter;
-                for (auto i : s.interleaving()) {
-                    switch (i.getDiscriminator()) {
-                        case FrontendInterleaveMode::hidl_discriminator::atsc3:
-                            aidlInter.push_back((int)i.atsc3());
-                            break;
-                        case FrontendInterleaveMode::hidl_discriminator::dvbc:
-                            aidlInter.push_back((int)i.dvbc());
-                            break;
-                        case FrontendInterleaveMode::hidl_discriminator::dtmb:
-                            aidlInter.push_back((int)i.dtmb());
-                            break;
-                    }
-                }
-                status.set<TunerFrontendStatus::interleaving>(aidlInter);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::isdbtSegment: {
-                auto seg = s.isdbtSegment();
-                vector<uint8_t> i(seg.begin(), seg.end());
-                status.set<TunerFrontendStatus::isdbtSegment>(i);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::tsDataRate: {
-                vector<int> ts(s.tsDataRate().begin(), s.tsDataRate().end());
-                status.set<TunerFrontendStatus::tsDataRate>(ts);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::rollOff: {
-                switch (s.rollOff().getDiscriminator()) {
-                    case FrontendRollOff::hidl_discriminator::dvbs:
-                        status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().dvbs());
-                        break;
-                    case FrontendRollOff::hidl_discriminator::isdbs:
-                        status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().isdbs());
-                        break;
-                    case FrontendRollOff::hidl_discriminator::isdbs3:
-                        status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().isdbs3());
-                        break;
-                }
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::isMiso: {
-                status.set<TunerFrontendStatus::isMiso>(s.isMiso());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::isLinear: {
-                status.set<TunerFrontendStatus::isLinear>(s.isLinear());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::isShortFrames: {
-                status.set<TunerFrontendStatus::isShortFrames>(s.isShortFrames());
-                aidlStatus.push_back(status);
-                break;
-            }
-        }
-    }
-}
-
-hidl_vec<FrontendAtsc3PlpSettings> TunerFrontend::getAtsc3PlpSettings(
-        const TunerFrontendAtsc3Settings& settings) {
-    int len = settings.plpSettings.size();
-    hidl_vec<FrontendAtsc3PlpSettings> plps = hidl_vec<FrontendAtsc3PlpSettings>(len);
-    // parse PLP settings
-    for (int i = 0; i < len; i++) {
-        uint8_t plpId = static_cast<uint8_t>(settings.plpSettings[i].plpId);
-        FrontendAtsc3Modulation modulation =
-                static_cast<FrontendAtsc3Modulation>(settings.plpSettings[i].modulation);
-        FrontendAtsc3TimeInterleaveMode interleaveMode =
-                static_cast<FrontendAtsc3TimeInterleaveMode>(
-                        settings.plpSettings[i].interleaveMode);
-        FrontendAtsc3CodeRate codeRate =
-                static_cast<FrontendAtsc3CodeRate>(settings.plpSettings[i].codeRate);
-        FrontendAtsc3Fec fec =
-                static_cast<FrontendAtsc3Fec>(settings.plpSettings[i].fec);
-        FrontendAtsc3PlpSettings frontendAtsc3PlpSettings {
-                .plpId = plpId,
-                .modulation = modulation,
-                .interleaveMode = interleaveMode,
-                .codeRate = codeRate,
-                .fec = fec,
-        };
-        plps[i] = frontendAtsc3PlpSettings;
-    }
-    return plps;
-}
-
-FrontendDvbsCodeRate TunerFrontend::getDvbsCodeRate(const TunerFrontendDvbsCodeRate& codeRate) {
-    FrontendInnerFec innerFec = static_cast<FrontendInnerFec>(codeRate.fec);
-    bool isLinear = codeRate.isLinear;
-    bool isShortFrames = codeRate.isShortFrames;
-    uint32_t bitsPer1000Symbol = static_cast<uint32_t>(codeRate.bitsPer1000Symbol);
-    FrontendDvbsCodeRate coderate {
-            .fec = innerFec,
-            .isLinear = isLinear,
-            .isShortFrames = isShortFrames,
-            .bitsPer1000Symbol = bitsPer1000Symbol,
-    };
-    return coderate;
-}
-
-FrontendSettings TunerFrontend::getHidlFrontendSettings(const TunerFrontendSettings& aidlSettings) {
-    auto settings = aidlSettings.settings;
-    FrontendSettings frontendSettings;
-
-    switch (settings.getTag()) {
-        case TunerFrontendUnionSettings::analog: {
-            auto analog = settings.get<TunerFrontendUnionSettings::analog>();
-            frontendSettings.analog({
-                .frequency = static_cast<uint32_t>(analog.frequency),
-                .type = static_cast<FrontendAnalogType>(analog.signalType),
-                .sifStandard = static_cast<FrontendAnalogSifStandard>(analog.sifStandard),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::atsc: {
-            auto atsc = settings.get<TunerFrontendUnionSettings::atsc>();
-            frontendSettings.atsc({
-                .frequency = static_cast<uint32_t>(atsc.frequency),
-                .modulation = static_cast<FrontendAtscModulation>(atsc.modulation),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::atsc3: {
-            auto atsc3 = settings.get<TunerFrontendUnionSettings::atsc3>();
-            frontendSettings.atsc3({
-                .frequency = static_cast<uint32_t>(atsc3.frequency),
-                .bandwidth = static_cast<FrontendAtsc3Bandwidth>(atsc3.bandwidth),
-                .demodOutputFormat = static_cast<FrontendAtsc3DemodOutputFormat>(
-                        atsc3.demodOutputFormat),
-                .plpSettings = getAtsc3PlpSettings(atsc3),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::cable: {
-            auto dvbc = settings.get<TunerFrontendUnionSettings::cable>();
-            frontendSettings.dvbc({
-                .frequency = static_cast<uint32_t>(dvbc.frequency),
-                .modulation = static_cast<FrontendDvbcModulation>(dvbc.modulation),
-                .fec = static_cast<FrontendInnerFec>(dvbc.innerFec),
-                .symbolRate = static_cast<uint32_t>(dvbc.symbolRate),
-                .outerFec = static_cast<FrontendDvbcOuterFec>(dvbc.outerFec),
-                .annex = static_cast<FrontendDvbcAnnex>(dvbc.annex),
-                .spectralInversion = static_cast<FrontendDvbcSpectralInversion>(
-                        dvbc.spectralInversion),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::dvbs: {
-            auto dvbs = settings.get<TunerFrontendUnionSettings::dvbs>();
-            frontendSettings.dvbs({
-                .frequency = static_cast<uint32_t>(dvbs.frequency),
-                .modulation = static_cast<FrontendDvbsModulation>(dvbs.modulation),
-                .coderate = getDvbsCodeRate(dvbs.codeRate),
-                .symbolRate = static_cast<uint32_t>(dvbs.symbolRate),
-                .rolloff = static_cast<FrontendDvbsRolloff>(dvbs.rolloff),
-                .pilot = static_cast<FrontendDvbsPilot>(dvbs.pilot),
-                .inputStreamId = static_cast<uint32_t>(dvbs.inputStreamId),
-                .standard = static_cast<FrontendDvbsStandard>(dvbs.standard),
-                .vcmMode = static_cast<FrontendDvbsVcmMode>(dvbs.vcm),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::dvbt: {
-            auto dvbt = settings.get<TunerFrontendUnionSettings::dvbt>();
-            frontendSettings.dvbt({
-                .frequency = static_cast<uint32_t>(dvbt.frequency),
-                .transmissionMode = static_cast<FrontendDvbtTransmissionMode>(
-                        dvbt.transmissionMode),
-                .bandwidth = static_cast<FrontendDvbtBandwidth>(dvbt.bandwidth),
-                .constellation = static_cast<FrontendDvbtConstellation>(dvbt.constellation),
-                .hierarchy = static_cast<FrontendDvbtHierarchy>(dvbt.hierarchy),
-                .hpCoderate = static_cast<FrontendDvbtCoderate>(dvbt.hpCodeRate),
-                .lpCoderate = static_cast<FrontendDvbtCoderate>(dvbt.lpCodeRate),
-                .guardInterval = static_cast<FrontendDvbtGuardInterval>(dvbt.guardInterval),
-                .isHighPriority = dvbt.isHighPriority,
-                .standard = static_cast<FrontendDvbtStandard>(dvbt.standard),
-                .isMiso = dvbt.isMiso,
-                .plpMode = static_cast<FrontendDvbtPlpMode>(dvbt.plpMode),
-                .plpId = static_cast<uint8_t>(dvbt.plpId),
-                .plpGroupId = static_cast<uint8_t>(dvbt.plpGroupId),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::isdbs: {
-            auto isdbs = settings.get<TunerFrontendUnionSettings::isdbs>();
-            frontendSettings.isdbs({
-                .frequency = static_cast<uint32_t>(isdbs.frequency),
-                .streamId = static_cast<uint16_t>(isdbs.streamId),
-                .streamIdType = static_cast<FrontendIsdbsStreamIdType>(isdbs.streamIdType),
-                .modulation = static_cast<FrontendIsdbsModulation>(isdbs.modulation),
-                .coderate = static_cast<FrontendIsdbsCoderate>(isdbs.codeRate),
-                .symbolRate = static_cast<uint32_t>(isdbs.symbolRate),
-                .rolloff = static_cast<FrontendIsdbsRolloff>(isdbs.rolloff),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::isdbs3: {
-            auto isdbs3 = settings.get<TunerFrontendUnionSettings::isdbs3>();
-            frontendSettings.isdbs3({
-                .frequency = static_cast<uint32_t>(isdbs3.frequency),
-                .streamId = static_cast<uint16_t>(isdbs3.streamId),
-                .streamIdType = static_cast<FrontendIsdbsStreamIdType>(isdbs3.streamIdType),
-                .modulation = static_cast<FrontendIsdbs3Modulation>(isdbs3.modulation),
-                .coderate = static_cast<FrontendIsdbs3Coderate>(isdbs3.codeRate),
-                .symbolRate = static_cast<uint32_t>(isdbs3.symbolRate),
-                .rolloff = static_cast<FrontendIsdbs3Rolloff>(isdbs3.rolloff),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::isdbt: {
-            auto isdbt = settings.get<TunerFrontendUnionSettings::isdbt>();
-            frontendSettings.isdbt({
-                .frequency = static_cast<uint32_t>(isdbt.frequency),
-                .modulation = static_cast<FrontendIsdbtModulation>(isdbt.modulation),
-                .bandwidth = static_cast<FrontendIsdbtBandwidth>(isdbt.bandwidth),
-                .mode = static_cast<FrontendIsdbtMode>(isdbt.mode),
-                .coderate = static_cast<FrontendIsdbtCoderate>(isdbt.codeRate),
-                .guardInterval = static_cast<FrontendIsdbtGuardInterval>(isdbt.guardInterval),
-                .serviceAreaId = static_cast<uint32_t>(isdbt.serviceAreaId),
-            });
-            break;
-        }
-        default:
-            break;
-    }
-
-    return frontendSettings;
-}
-
-FrontendSettingsExt1_1 TunerFrontend::getHidlFrontendSettingsExt(
-        const TunerFrontendSettings& aidlSettings) {
-    FrontendSettingsExt1_1 frontendSettingsExt{
-        .endFrequency = static_cast<uint32_t>(aidlSettings.endFrequency),
-        .inversion = static_cast<FrontendSpectralInversion>(aidlSettings.inversion),
-    };
-
-    auto settings = aidlSettings.settings;
-    switch (settings.getTag()) {
-        case TunerFrontendUnionSettings::analog: {
-            auto analog = settings.get<TunerFrontendUnionSettings::analog>();
-            if (analog.isExtended) {
-                frontendSettingsExt.settingExt.analog({
-                    .aftFlag = static_cast<FrontendAnalogAftFlag>(analog.aftFlag),
-                });
-            } else {
-                frontendSettingsExt.settingExt.noinit();
-            }
-            break;
-        }
-        case TunerFrontendUnionSettings::cable: {
-            auto dvbc = settings.get<TunerFrontendUnionSettings::cable>();
-            if (dvbc.isExtended) {
-                frontendSettingsExt.settingExt.dvbc({
-                    .interleaveMode = static_cast<FrontendCableTimeInterleaveMode>(
-                            dvbc.interleaveMode),
-                    .bandwidth = static_cast<FrontendDvbcBandwidth>(
-                            dvbc.bandwidth),
-                });
-            } else {
-                frontendSettingsExt.settingExt.noinit();
-            }
-            break;
-        }
-        case TunerFrontendUnionSettings::dvbs: {
-            auto dvbs = settings.get<TunerFrontendUnionSettings::dvbs>();
-            if (dvbs.isExtended) {
-                frontendSettingsExt.settingExt.dvbs({
-                    .scanType = static_cast<FrontendDvbsScanType>(dvbs.scanType),
-                    .isDiseqcRxMessage = dvbs.isDiseqcRxMessage,
-                });
-            } else {
-                frontendSettingsExt.settingExt.noinit();
-            }
-            break;
-        }
-        case TunerFrontendUnionSettings::dvbt: {
-            auto dvbt = settings.get<TunerFrontendUnionSettings::dvbt>();
-            if (dvbt.isExtended) {
-                frontendSettingsExt.settingExt.dvbt({
-                    .constellation =
-                            static_cast<hardware::tv::tuner::V1_1::FrontendDvbtConstellation>(
-                                    dvbt.constellation),
-                    .transmissionMode =
-                            static_cast<hardware::tv::tuner::V1_1::FrontendDvbtTransmissionMode>(
-                                    dvbt.transmissionMode),
-                });
-            } else {
-                frontendSettingsExt.settingExt.noinit();
-            }
-            break;
-        }
-        case TunerFrontendUnionSettings::dtmb: {
-            auto dtmb = settings.get<TunerFrontendUnionSettings::dtmb>();
-            frontendSettingsExt.settingExt.dtmb({
-                .frequency = static_cast<uint32_t>(dtmb.frequency),
-                .transmissionMode = static_cast<FrontendDtmbTransmissionMode>(
-                        dtmb.transmissionMode),
-                .bandwidth = static_cast<FrontendDtmbBandwidth>(dtmb.bandwidth),
-                .modulation = static_cast<FrontendDtmbModulation>(dtmb.modulation),
-                .codeRate = static_cast<FrontendDtmbCodeRate>(dtmb.codeRate),
-                .guardInterval = static_cast<FrontendDtmbGuardInterval>(dtmb.guardInterval),
-                .interleaveMode = static_cast<FrontendDtmbTimeInterleaveMode>(dtmb.interleaveMode),
-            });
-            break;
-        }
-        default:
-            frontendSettingsExt.settingExt.noinit();
-            break;
-    }
-
-    return frontendSettingsExt;
-}
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/TunerFrontend.h b/services/tuner/TunerFrontend.h
index 22fd509..da471fb 100644
--- a/services/tuner/TunerFrontend.h
+++ b/services/tuner/TunerFrontend.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,95 +17,78 @@
 #ifndef ANDROID_MEDIA_TUNERFRONTEND_H
 #define ANDROID_MEDIA_TUNERFRONTEND_H
 
+#include <aidl/android/hardware/tv/tuner/BnFrontendCallback.h>
+#include <aidl/android/hardware/tv/tuner/IFrontend.h>
+#include <aidl/android/hardware/tv/tuner/IFrontendCallback.h>
 #include <aidl/android/media/tv/tuner/BnTunerFrontend.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <android/hardware/tv/tuner/1.1/IFrontend.h>
-#include <android/hardware/tv/tuner/1.1/IFrontendCallback.h>
-#include <media/stagefright/foundation/ADebug.h>
 #include <utils/Log.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerFrontend;
-using ::aidl::android::media::tv::tuner::ITunerFrontendCallback;
-using ::aidl::android::media::tv::tuner::ITunerLnb;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3Settings;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbsCodeRate;
-using ::aidl::android::media::tv::tuner::TunerFrontendScanMessage;
-using ::aidl::android::media::tv::tuner::TunerFrontendSettings;
-using ::aidl::android::media::tv::tuner::TunerFrontendStatus;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3PlpSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsCodeRate;
-using ::android::hardware::tv::tuner::V1_0::FrontendEventType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanMessage;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanMessageType;
-using ::android::hardware::tv::tuner::V1_0::FrontendSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendStatus;
-using ::android::hardware::tv::tuner::V1_0::IFrontend;
-using ::android::hardware::tv::tuner::V1_1::IFrontendCallback;
-using ::android::hardware::tv::tuner::V1_1::FrontendScanMessageExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendScanMessageTypeExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendSettingsExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendStatusExt1_1;
+using ::aidl::android::hardware::tv::tuner::BnFrontendCallback;
+using ::aidl::android::hardware::tv::tuner::FrontendEventType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessage;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanType;
+using ::aidl::android::hardware::tv::tuner::FrontendSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusReadiness;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusType;
+using ::aidl::android::hardware::tv::tuner::IFrontend;
+using ::aidl::android::hardware::tv::tuner::IFrontendCallback;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerFrontend : public BnTunerFrontend {
 
 public:
-    TunerFrontend(sp<IFrontend> frontend, int id);
+    TunerFrontend(shared_ptr<IFrontend> frontend, int id);
     virtual ~TunerFrontend();
-    Status setCallback(
-            const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) override;
-    Status tune(const TunerFrontendSettings& settings) override;
-    Status stopTune() override;
-    Status scan(const TunerFrontendSettings& settings, int frontendScanType) override;
-    Status stopScan() override;
-    Status setLnb(const shared_ptr<ITunerLnb>& lnb) override;
-    Status setLna(bool bEnable) override;
-    Status linkCiCamToFrontend(int ciCamId, int32_t* _aidl_return) override;
-    Status unlinkCiCamToFrontend(int ciCamId) override;
-    Status close() override;
-    Status getStatus(const vector<int32_t>& statusTypes,
-            vector<TunerFrontendStatus>* _aidl_return) override;
-    Status getStatusExtended_1_1(const vector<int32_t>& statusTypes,
-            vector<TunerFrontendStatus>* _aidl_return) override;
-    Status getFrontendId(int* _aidl_return) override;
 
-    struct FrontendCallback : public IFrontendCallback {
+    ::ndk::ScopedAStatus setCallback(
+            const shared_ptr<ITunerFrontendCallback>& in_tunerFrontendCallback) override;
+    ::ndk::ScopedAStatus tune(const FrontendSettings& in_settings) override;
+    ::ndk::ScopedAStatus stopTune() override;
+    ::ndk::ScopedAStatus scan(const FrontendSettings& in_settings,
+                              FrontendScanType in_frontendScanType) override;
+    ::ndk::ScopedAStatus stopScan() override;
+    ::ndk::ScopedAStatus setLnb(const shared_ptr<ITunerLnb>& in_lnb) override;
+    ::ndk::ScopedAStatus linkCiCamToFrontend(int32_t in_ciCamId, int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus unlinkCiCamToFrontend(int32_t in_ciCamId) override;
+    ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus getStatus(const vector<FrontendStatusType>& in_statusTypes,
+                                   vector<FrontendStatus>* _aidl_return) override;
+    ::ndk::ScopedAStatus getFrontendId(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getHardwareInfo(std::string* _aidl_return) override;
+    ::ndk::ScopedAStatus removeOutputPid(int32_t in_pid) override;
+    ::ndk::ScopedAStatus getFrontendStatusReadiness(
+            const std::vector<FrontendStatusType>& in_statusTypes,
+            std::vector<FrontendStatusReadiness>* _aidl_return) override;
+
+    struct FrontendCallback : public BnFrontendCallback {
         FrontendCallback(const shared_ptr<ITunerFrontendCallback> tunerFrontendCallback)
-                : mTunerFrontendCallback(tunerFrontendCallback) {};
+              : mTunerFrontendCallback(tunerFrontendCallback){};
 
-        virtual Return<void> onEvent(FrontendEventType frontendEventType);
-        virtual Return<void> onScanMessage(
-                FrontendScanMessageType type, const FrontendScanMessage& message);
-        virtual Return<void> onScanMessageExt1_1(
-                FrontendScanMessageTypeExt1_1 type, const FrontendScanMessageExt1_1& message);
+        ::ndk::ScopedAStatus onEvent(FrontendEventType frontendEventType) override;
+        ::ndk::ScopedAStatus onScanMessage(FrontendScanMessageType type,
+                                           const FrontendScanMessage& message) override;
 
         shared_ptr<ITunerFrontendCallback> mTunerFrontendCallback;
     };
 
 private:
-    hidl_vec<FrontendAtsc3PlpSettings> getAtsc3PlpSettings(
-            const TunerFrontendAtsc3Settings& settings);
-    FrontendDvbsCodeRate getDvbsCodeRate(const TunerFrontendDvbsCodeRate& codeRate);
-    FrontendSettings getHidlFrontendSettings(const TunerFrontendSettings& aidlSettings);
-    FrontendSettingsExt1_1 getHidlFrontendSettingsExt(const TunerFrontendSettings& aidlSettings);
-    void getAidlFrontendStatus(
-            vector<FrontendStatus>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus);
-    void getAidlFrontendStatusExt(
-            vector<FrontendStatusExt1_1>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus);
-
     int mId;
-    sp<IFrontend> mFrontend;
-    sp<::android::hardware::tv::tuner::V1_1::IFrontend> mFrontend_1_1;
+    shared_ptr<IFrontend> mFrontend;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERFRONTEND_H
diff --git a/services/tuner/TunerHelper.cpp b/services/tuner/TunerHelper.cpp
new file mode 100644
index 0000000..dc67110
--- /dev/null
+++ b/services/tuner/TunerHelper.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TunerHelper.h"
+
+#include <aidl/android/media/tv/tunerresourcemanager/ITunerResourceManager.h>
+#include <android/binder_manager.h>
+#include <android/content/pm/IPackageManagerNative.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+using ::aidl::android::media::tv::tunerresourcemanager::ITunerResourceManager;
+using ::android::defaultServiceManager;
+using ::android::IBinder;
+using ::android::interface_cast;
+using ::android::IServiceManager;
+using ::android::sp;
+using ::android::binder::Status;
+using ::android::content::pm::IPackageManagerNative;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+// System Feature defined in PackageManager
+static const ::android::String16 FEATURE_TUNER(::android::String16("android.hardware.tv.tuner"));
+
+int32_t TunerHelper::sResourceRequestCount = 0;
+
+bool TunerHelper::checkTunerFeature() {
+    sp<IServiceManager> serviceMgr = defaultServiceManager();
+    sp<IPackageManagerNative> packageMgr;
+    if (serviceMgr.get() == nullptr) {
+        ALOGE("%s: Cannot find service manager", __func__);
+        return false;
+    }
+
+    sp<IBinder> binder = serviceMgr->waitForService(String16("package_native"));
+    packageMgr = interface_cast<IPackageManagerNative>(binder);
+    if (packageMgr != nullptr) {
+        bool hasFeature = false;
+        Status status = packageMgr->hasSystemFeature(FEATURE_TUNER, 0, &hasFeature);
+        if (!status.isOk()) {
+            ALOGE("%s: hasSystemFeature failed: %s", __func__, status.exceptionMessage().c_str());
+            return false;
+        }
+        if (!hasFeature) {
+            ALOGD("Current device does not support tuner feaure.");
+            return false;
+        }
+    } else {
+        ALOGD("%s: Cannot find package manager.", __func__);
+        return false;
+    }
+
+    return true;
+}
+
+// TODO: update Demux, Descrambler.
+void TunerHelper::updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
+                                       const vector<int32_t>& lnbHandles) {
+    ::ndk::SpAIBinder binder(AServiceManager_waitForService("tv_tuner_resource_mgr"));
+    shared_ptr<ITunerResourceManager> tunerRM = ITunerResourceManager::fromBinder(binder);
+    if (tunerRM == nullptr) {
+        return;
+    }
+
+    tunerRM->setFrontendInfoList(feInfos);
+    tunerRM->setLnbInfoList(lnbHandles);
+}
+
+// TODO: create a map between resource id and handles.
+int TunerHelper::getResourceIdFromHandle(int resourceHandle, int /*type*/) {
+    return (resourceHandle & 0x00ff0000) >> 16;
+}
+
+int TunerHelper::getResourceHandleFromId(int id, int resourceType) {
+    // TODO: build up randomly generated id to handle mapping
+    return (resourceType & 0x000000ff) << 24 | (id << 16) | (sResourceRequestCount++ & 0xffff);
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
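The handle helpers above pack a Tuner resource handle as (type << 24) | (id << 16) | requestCount, and getResourceIdFromHandle() recovers the id from bits 23..16. A minimal standalone sketch of that bit layout, with illustrative names that are not part of this patch:

    #include <cstdint>
    #include <cstdio>

    // Illustrative restatement of the TunerHelper handle layout:
    //   bits 31..24 = resource type, bits 23..16 = resource id, bits 15..0 = request counter
    static int32_t gRequestCount = 0;

    int makeHandle(int id, int resourceType) {
        return (resourceType & 0x000000ff) << 24 | (id << 16) | (gRequestCount++ & 0xffff);
    }

    int idFromHandle(int handle) {
        return (handle & 0x00ff0000) >> 16;
    }

    int main() {
        int handle = makeHandle(3 /* id */, 0 /* FRONTEND */);
        printf("handle = 0x%08x, id = %d\n", handle, idFromHandle(handle));  // prints id = 3
        return 0;
    }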
diff --git a/services/tuner/TunerHelper.h b/services/tuner/TunerHelper.h
new file mode 100644
index 0000000..755df57
--- /dev/null
+++ b/services/tuner/TunerHelper.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERDVRHELPER_H
+#define ANDROID_MEDIA_TUNERDVRHELPER_H
+
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <utils/String16.h>
+
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::String16;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+const static int TUNER_HAL_VERSION_UNKNOWN = 0;
+const static int TUNER_HAL_VERSION_1_0 = 1 << 16;
+const static int TUNER_HAL_VERSION_1_1 = (1 << 16) | 1;
+const static int TUNER_HAL_VERSION_2_0 = 2 << 16;
+
+// Keep in sync with ShareFilter.java
+const static int STATUS_INACCESSIBLE = 1 << 7;
+
+const static String16 sSharedFilterPermission("android.permission.ACCESS_TV_SHARED_FILTER");
+
+typedef enum {
+    FRONTEND,
+    DEMUX,
+    DESCRAMBLER,
+    LNB
+} TunerResourceType;
+
+class TunerHelper {
+public:
+    static bool checkTunerFeature();
+
+    // TODO: update Demux, Descrambler.
+    static void updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
+                                     const vector<int32_t>& lnbHandles);
+    // TODO: create a map between resource id and handles.
+    static int getResourceIdFromHandle(int resourceHandle, int type);
+    static int getResourceHandleFromId(int id, int resourceType);
+
+private:
+    static int32_t sResourceRequestCount;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERDVRHELPER_H
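The TUNER_HAL_VERSION_* constants above pack the HAL version as (major << 16) | minor; the commented-out conversion in TunerService.cpp relies on the same packing when it maps AIDL interface version N to Tuner HAL (N + 1).0. A small sketch of that packing, assuming nothing beyond the constants in this header:

    #include <cstdio>

    // Mirrors the (major << 16) | minor packing used by TUNER_HAL_VERSION_*.
    constexpr int packHalVersion(int major, int minor) { return (major << 16) | minor; }
    constexpr int halMajor(int version) { return version >> 16; }
    constexpr int halMinor(int version) { return version & 0xffff; }

    int main() {
        static_assert(packHalVersion(1, 1) == ((1 << 16) | 1), "TUNER_HAL_VERSION_1_1");
        static_assert(packHalVersion(2, 0) == (2 << 16), "TUNER_HAL_VERSION_2_0");
        printf("%d.%d\n", halMajor(packHalVersion(2, 0)), halMinor(packHalVersion(2, 0)));  // 2.0
        return 0;
    }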
diff --git a/services/tuner/TunerLnb.cpp b/services/tuner/TunerLnb.cpp
index 77248d4..1e143c3 100644
--- a/services/tuner/TunerLnb.cpp
+++ b/services/tuner/TunerLnb.cpp
@@ -18,123 +18,116 @@
 
 #include "TunerLnb.h"
 
-using ::android::hardware::tv::tuner::V1_0::LnbPosition;
-using ::android::hardware::tv::tuner::V1_0::LnbTone;
-using ::android::hardware::tv::tuner::V1_0::LnbVoltage;
-using ::android::hardware::tv::tuner::V1_0::Result;
+#include <aidl/android/hardware/tv/tuner/ILnbCallback.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
 
+using ::aidl::android::hardware::tv::tuner::ILnbCallback;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerLnb::TunerLnb(sp<ILnb> lnb, int id) {
+TunerLnb::TunerLnb(shared_ptr<ILnb> lnb, int id) {
     mLnb = lnb;
     mId = id;
 }
 
 TunerLnb::~TunerLnb() {
-    mLnb = NULL;
+    mLnb = nullptr;
     mId = -1;
 }
 
-Status TunerLnb::setCallback(
-        const shared_ptr<ITunerLnbCallback>& tunerLnbCallback) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setCallback(
+        const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    if (tunerLnbCallback == NULL) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    if (in_tunerLnbCallback == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    sp<ILnbCallback> lnbCallback = new LnbCallback(tunerLnbCallback);
-    Result status = mLnb->setCallback(lnbCallback);
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    shared_ptr<ILnbCallback> lnbCallback =
+            ::ndk::SharedRefBase::make<LnbCallback>(in_tunerLnbCallback);
+    return mLnb->setCallback(lnbCallback);
 }
 
-Status TunerLnb::setVoltage(int voltage) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setVoltage(LnbVoltage in_voltage) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mLnb->setVoltage(static_cast<LnbVoltage>(voltage));
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mLnb->setVoltage(in_voltage);
 }
 
-Status TunerLnb::setTone(int tone) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setTone(LnbTone in_tone) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mLnb->setTone(static_cast<LnbTone>(tone));
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mLnb->setTone(in_tone);
 }
 
-Status TunerLnb::setSatellitePosition(int position) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setSatellitePosition(LnbPosition in_position) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mLnb->setSatellitePosition(static_cast<LnbPosition>(position));
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mLnb->setSatellitePosition(in_position);
 }
 
-Status TunerLnb::sendDiseqcMessage(const vector<uint8_t>& diseqcMessage) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mLnb->sendDiseqcMessage(diseqcMessage);
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mLnb->sendDiseqcMessage(in_diseqcMessage);
 }
 
-Status TunerLnb::close() {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::close() {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mLnb->close();
-    mLnb = NULL;
+    auto res = mLnb->close();
+    mLnb = nullptr;
 
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return res;
 }
 
 /////////////// ILnbCallback ///////////////////////
-
-Return<void> TunerLnb::LnbCallback::onEvent(const LnbEventType lnbEventType) {
-    if (mTunerLnbCallback != NULL) {
-        mTunerLnbCallback->onEvent((int)lnbEventType);
+::ndk::ScopedAStatus TunerLnb::LnbCallback::onEvent(const LnbEventType lnbEventType) {
+    if (mTunerLnbCallback != nullptr) {
+        mTunerLnbCallback->onEvent(lnbEventType);
     }
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerLnb::LnbCallback::onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) {
-    if (mTunerLnbCallback != NULL && diseqcMessage != NULL) {
-        vector<uint8_t> msg(begin(diseqcMessage), end(diseqcMessage));
-        mTunerLnbCallback->onDiseqcMessage(msg);
+::ndk::ScopedAStatus TunerLnb::LnbCallback::onDiseqcMessage(const vector<uint8_t>& diseqcMessage) {
+    if (mTunerLnbCallback != nullptr) {
+        mTunerLnbCallback->onDiseqcMessage(diseqcMessage);
     }
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
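The LnbCallback conversion above follows the usual NDK-AIDL callback pattern: derive from the generated Bn* stub, return ::ndk::ScopedAStatus from every method, and create the object through ::ndk::SharedRefBase::make() so it can be passed to the HAL as a shared_ptr. A hedged sketch of that pattern; the class name is illustrative, and the interface types come from the AIDL-generated headers already included above:

    #include <aidl/android/hardware/tv/tuner/BnLnbCallback.h>
    #include <android/binder_auto_utils.h>

    #include <cstdint>
    #include <vector>

    using ::aidl::android::hardware::tv::tuner::BnLnbCallback;
    using ::aidl::android::hardware::tv::tuner::LnbEventType;

    // Minimal callback in the style of TunerLnb::LnbCallback; it simply acknowledges events.
    struct NoopLnbCallback : public BnLnbCallback {
        ::ndk::ScopedAStatus onEvent(LnbEventType /* lnbEventType */) override {
            return ::ndk::ScopedAStatus::ok();
        }
        ::ndk::ScopedAStatus onDiseqcMessage(
                const std::vector<uint8_t>& /* diseqcMessage */) override {
            return ::ndk::ScopedAStatus::ok();
        }
    };

    // Registration mirrors TunerLnb::setCallback():
    //   auto cb = ::ndk::SharedRefBase::make<NoopLnbCallback>();
    //   lnb->setCallback(cb);  // lnb: shared_ptr<ILnb> obtained from the Tuner HAL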
diff --git a/services/tuner/TunerLnb.h b/services/tuner/TunerLnb.h
index 500d072..72988a6 100644
--- a/services/tuner/TunerLnb.h
+++ b/services/tuner/TunerLnb.h
@@ -17,55 +17,61 @@
 #ifndef ANDROID_MEDIA_TUNERFLNB_H
 #define ANDROID_MEDIA_TUNERFLNB_H
 
+#include <aidl/android/hardware/tv/tuner/BnLnbCallback.h>
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
 #include <aidl/android/media/tv/tuner/BnTunerLnb.h>
-#include <android/hardware/tv/tuner/1.0/ILnb.h>
-#include <android/hardware/tv/tuner/1.0/ILnbCallback.h>
-#include <media/stagefright/foundation/ADebug.h>
 #include <utils/Log.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerLnb;
-using ::aidl::android::media::tv::tuner::ITunerLnbCallback;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::ILnb;
-using ::android::hardware::tv::tuner::V1_0::ILnbCallback;
-using ::android::hardware::tv::tuner::V1_0::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::BnLnbCallback;
+using ::aidl::android::hardware::tv::tuner::ILnb;
+using ::aidl::android::hardware::tv::tuner::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::LnbPosition;
+using ::aidl::android::hardware::tv::tuner::LnbTone;
+using ::aidl::android::hardware::tv::tuner::LnbVoltage;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerLnb : public BnTunerLnb {
 
 public:
-    TunerLnb(sp<ILnb> lnb, int id);
+    TunerLnb(shared_ptr<ILnb> lnb, int id);
     virtual ~TunerLnb();
-    Status setCallback(const shared_ptr<ITunerLnbCallback>& tunerLnbCallback) override;
-    Status setVoltage(int voltage) override;
-    Status setTone(int tone) override;
-    Status setSatellitePosition(int position) override;
-    Status sendDiseqcMessage(const vector<uint8_t>& diseqcMessage) override;
-    Status close() override;
+
+    ::ndk::ScopedAStatus setCallback(
+            const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) override;
+    ::ndk::ScopedAStatus setVoltage(LnbVoltage in_voltage) override;
+    ::ndk::ScopedAStatus setTone(LnbTone in_tone) override;
+    ::ndk::ScopedAStatus setSatellitePosition(LnbPosition in_position) override;
+    ::ndk::ScopedAStatus sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) override;
+    ::ndk::ScopedAStatus close() override;
 
     int getId() { return mId; }
 
-    struct LnbCallback : public ILnbCallback {
+    struct LnbCallback : public BnLnbCallback {
         LnbCallback(const shared_ptr<ITunerLnbCallback> tunerLnbCallback)
-                : mTunerLnbCallback(tunerLnbCallback) {};
+              : mTunerLnbCallback(tunerLnbCallback){};
 
-        virtual Return<void> onEvent(const LnbEventType lnbEventType);
-        virtual Return<void> onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage);
+        ::ndk::ScopedAStatus onEvent(const LnbEventType lnbEventType) override;
+        ::ndk::ScopedAStatus onDiseqcMessage(const vector<uint8_t>& diseqcMessage) override;
 
         shared_ptr<ITunerLnbCallback> mTunerLnbCallback;
     };
 
 private:
     int mId;
-    sp<ILnb> mLnb;
+    shared_ptr<ILnb> mLnb;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERFLNB_H
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index 5b4129a..4833aaf 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -1,5 +1,5 @@
 /**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,369 +14,342 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
 #define LOG_TAG "TunerService"
 
-#include <android/binder_manager.h>
-#include <android/content/pm/IPackageManagerNative.h>
-#include <binder/IServiceManager.h>
-#include <utils/Log.h>
 #include "TunerService.h"
-#include "TunerFrontend.h"
-#include "TunerLnb.h"
+
+#include <aidl/android/hardware/tv/tuner/IDemux.h>
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
+#include <aidl/android/hardware/tv/tuner/IFrontend.h>
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <android/binder_manager.h>
+#include <binder/IPCThreadState.h>
+#include <binder/PermissionCache.h>
+#include <utils/Log.h>
+
+#include <string>
+
 #include "TunerDemux.h"
 #include "TunerDescrambler.h"
+#include "TunerFrontend.h"
+#include "TunerHelper.h"
+#include "TunerLnb.h"
 
-using ::aidl::android::media::tv::tuner::TunerFrontendAnalogCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3Capabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtscCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendCableCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbsCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbtCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbs3Capabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbsCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbtCapabilities;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendType;
-using ::android::hardware::tv::tuner::V1_0::IFrontend;
-using ::android::hardware::tv::tuner::V1_0::ILnb;
-using ::android::hardware::tv::tuner::V1_0::LnbId;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbCapabilities;
+using ::aidl::android::hardware::tv::tuner::IDemux;
+using ::aidl::android::hardware::tv::tuner::IDescrambler;
+using ::aidl::android::hardware::tv::tuner::IFrontend;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::android::IPCThreadState;
+using ::android::PermissionCache;
+using ::android::sp;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+shared_ptr<TunerService> TunerService::sTunerService = nullptr;
 
 TunerService::TunerService() {
-    sp<IServiceManager> serviceMgr = defaultServiceManager();
-    sp<content::pm::IPackageManagerNative> packageMgr;
-    if (serviceMgr.get() == nullptr) {
-        ALOGE("%s: Cannot find service manager", __func__);
-        return;
-    } else {
-        sp<IBinder> binder = serviceMgr->waitForService(String16("package_native"));
-        packageMgr = interface_cast<content::pm::IPackageManagerNative>(binder);
-    }
-
-    bool hasFeature = false;
-    if (packageMgr != nullptr) {
-        binder::Status status = packageMgr->hasSystemFeature(FEATURE_TUNER, 0, &hasFeature);
-        if (!status.isOk()) {
-            ALOGE("%s: hasSystemFeature failed: %s",
-                    __func__, status.exceptionMessage().c_str());
-            return;
-        }
-        if (!hasFeature) {
-            ALOGD("Current device does not support tuner feaure.");
-            return;
-        }
-    } else {
-        ALOGD("%s: Cannot find package manager.", __func__);
+    if (!TunerHelper::checkTunerFeature()) {
+        ALOGD("Device doesn't have tuner hardware.");
         return;
     }
 
-    ::ndk::SpAIBinder binder(AServiceManager_waitForService("tv_tuner_resource_mgr"));
-    mTunerResourceManager = ITunerResourceManager::fromBinder(binder);
     updateTunerResources();
 }
 
 TunerService::~TunerService() {}
 
 binder_status_t TunerService::instantiate() {
-    shared_ptr<TunerService> service =
-            ::ndk::SharedRefBase::make<TunerService>();
-    return AServiceManager_addService(service->asBinder().get(), getServiceName());
+    sTunerService = ::ndk::SharedRefBase::make<TunerService>();
+    return AServiceManager_addService(sTunerService->asBinder().get(), getServiceName());
+}
+
+shared_ptr<TunerService> TunerService::getTunerService() {
+    return sTunerService;
 }
 
 bool TunerService::hasITuner() {
-    ALOGD("hasITuner");
+    ALOGV("hasITuner");
     if (mTuner != nullptr) {
         return true;
     }
-    mTuner = ITuner::getService();
-    if (mTuner == nullptr) {
-        ALOGE("Failed to get ITuner service");
+    const string statsServiceName = string() + ITuner::descriptor + "/default";
+    if (AServiceManager_isDeclared(statsServiceName.c_str())) {
+        ::ndk::SpAIBinder binder(AServiceManager_waitForService(statsServiceName.c_str()));
+        mTuner = ITuner::fromBinder(binder);
+    } else {
+        mTuner = nullptr;
+        ALOGE("Failed to get Tuner HAL Service");
         return false;
     }
-    mTunerVersion = TUNER_HAL_VERSION_1_0;
-    mTuner_1_1 = ::android::hardware::tv::tuner::V1_1::ITuner::castFrom(mTuner);
-    if (mTuner_1_1 != nullptr) {
-        mTunerVersion = TUNER_HAL_VERSION_1_1;
-    } else {
-        ALOGE("Failed to get ITuner_1_1 service");
-    }
+
+    mTunerVersion = TUNER_HAL_VERSION_2_0;
+    // TODO: Enable this after Tuner HAL is frozen.
+    // if (mTuner->getInterfaceVersion(&mTunerVersion).isOk()) {
+    //  // Tuner AIDL HAL version 1 will be Tuner HAL 2.0
+    //  mTunerVersion = (mTunerVersion + 1) << 16;
+    //}
+
     return true;
 }
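// Sketch (not part of the patch): the hasITuner() rewrite above replaces HIDL's
// ITuner::getService() with the NDK-AIDL service lookup. The steps are to build the instance
// name from the interface descriptor, check that it is declared in the VINTF manifest, then
// wait for it and wrap the returned binder. A generic, hedged form of that pattern, with an
// illustrative helper name and default instance:
//
//     #include <android/binder_auto_utils.h>
//     #include <android/binder_manager.h>
//     #include <memory>
//     #include <string>
//
//     template <typename Interface>
//     std::shared_ptr<Interface> waitForDeclaredService(const char* instance = "default") {
//         const std::string name = std::string() + Interface::descriptor + "/" + instance;
//         if (!AServiceManager_isDeclared(name.c_str())) {
//             return nullptr;  // HAL not declared on this device
//         }
//         ::ndk::SpAIBinder binder(AServiceManager_waitForService(name.c_str()));
//         return Interface::fromBinder(binder);
//     }
//
//     // e.g. mTuner = waitForDeclaredService<ITuner>();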
 
-bool TunerService::hasITuner_1_1() {
-    ALOGD("hasITuner_1_1");
-    hasITuner();
-    return (mTunerVersion == TUNER_HAL_VERSION_1_1);
-}
-
-Status TunerService::openDemux(
-        int /* demuxHandle */, std::shared_ptr<ITunerDemux>* _aidl_return) {
-    ALOGD("openDemux");
+::ndk::ScopedAStatus TunerService::openDemux(int32_t /* in_demuxHandle */,
+                                             shared_ptr<ITunerDemux>* _aidl_return) {
+    ALOGV("openDemux");
     if (!hasITuner()) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::NOT_INITIALIZED));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    Result res;
-    uint32_t id;
-    sp<IDemux> demuxSp = nullptr;
-    shared_ptr<ITunerDemux> tunerDemux = nullptr;
-    mTuner->openDemux([&](Result r, uint32_t demuxId, const sp<IDemux>& demux) {
-        demuxSp = demux;
-        id = demuxId;
-        res = r;
-        ALOGD("open demux, id = %d", demuxId);
-    });
-    if (res == Result::SUCCESS) {
-        tunerDemux = ::ndk::SharedRefBase::make<TunerDemux>(demuxSp, id);
-        *_aidl_return = tunerDemux->ref<ITunerDemux>();
-        return Status::ok();
+    vector<int32_t> id;
+    shared_ptr<IDemux> demux;
+    auto status = mTuner->openDemux(&id, &demux);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerDemux>(demux, id[0]);
     }
 
-    ALOGW("open demux failed, res = %d", res);
-    return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    return status;
 }
 
-Status TunerService::getDemuxCaps(TunerDemuxCapabilities* _aidl_return) {
-    ALOGD("getDemuxCaps");
+::ndk::ScopedAStatus TunerService::getDemuxCaps(DemuxCapabilities* _aidl_return) {
+    ALOGV("getDemuxCaps");
     if (!hasITuner()) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::NOT_INITIALIZED));
-    }
-    Result res;
-    DemuxCapabilities caps;
-    mTuner->getDemuxCaps([&](Result r, const DemuxCapabilities& demuxCaps) {
-        caps = demuxCaps;
-        res = r;
-    });
-    if (res == Result::SUCCESS) {
-        *_aidl_return = getAidlDemuxCaps(caps);
-        return Status::ok();
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    ALOGW("Get demux caps failed, res = %d", res);
-    return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    return mTuner->getDemuxCaps(_aidl_return);
 }
 
-Status TunerService::getFrontendIds(vector<int32_t>* ids) {
+::ndk::ScopedAStatus TunerService::getFrontendIds(vector<int32_t>* ids) {
     if (!hasITuner()) {
-        return Status::fromServiceSpecificError(
-                static_cast<int32_t>(Result::NOT_INITIALIZED));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    hidl_vec<FrontendId> feIds;
-    Result res = getHidlFrontendIds(feIds);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    ids->resize(feIds.size());
-    copy(feIds.begin(), feIds.end(), ids->begin());
 
-    return Status::ok();
+    return mTuner->getFrontendIds(ids);
 }
 
-Status TunerService::getFrontendInfo(int32_t id, TunerFrontendInfo* _aidl_return) {
+::ndk::ScopedAStatus TunerService::getFrontendInfo(int32_t id, FrontendInfo* _aidl_return) {
     if (!hasITuner()) {
         ALOGE("ITuner service is not init.");
         return ::ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    FrontendInfo info;
-    Result res = getHidlFrontendInfo(id, info);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    TunerFrontendInfo tunerInfo = convertToAidlFrontendInfo(info);
-    *_aidl_return = tunerInfo;
-    return Status::ok();
+    return mTuner->getFrontendInfo(id, _aidl_return);
 }
 
-Status TunerService::getFrontendDtmbCapabilities(
-        int32_t id, TunerFrontendDtmbCapabilities* _aidl_return) {
-    if (!hasITuner_1_1()) {
-        ALOGE("ITuner_1_1 service is not init.");
+::ndk::ScopedAStatus TunerService::openFrontend(int32_t frontendHandle,
+                                                shared_ptr<ITunerFrontend>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("ITuner service is not init.");
         return ::ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    FrontendDtmbCapabilities dtmbCaps;
-    mTuner_1_1->getFrontendDtmbCapabilities(id,
-            [&](Result r, const FrontendDtmbCapabilities& caps) {
-        dtmbCaps = caps;
-        res = r;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    int id = TunerHelper::getResourceIdFromHandle(frontendHandle, FRONTEND);
+    shared_ptr<IFrontend> frontend;
+    auto status = mTuner->openFrontendById(id, &frontend);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerFrontend>(frontend, id);
     }
 
-    TunerFrontendDtmbCapabilities aidlDtmbCaps{
-        .transmissionModeCap = (int)dtmbCaps.transmissionModeCap,
-        .bandwidthCap = (int)dtmbCaps.bandwidthCap,
-        .modulationCap = (int)dtmbCaps.modulationCap,
-        .codeRateCap = (int)dtmbCaps.codeRateCap,
-        .guardIntervalCap = (int)dtmbCaps.guardIntervalCap,
-        .interleaveModeCap = (int)dtmbCaps.interleaveModeCap,
-    };
-
-    *_aidl_return = aidlDtmbCaps;
-    return Status::ok();
+    return status;
 }
 
-Status TunerService::openFrontend(
-        int32_t frontendHandle, shared_ptr<ITunerFrontend>* _aidl_return) {
-    if (!hasITuner()) {
-        ALOGE("ITuner service is not init.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result status;
-    sp<IFrontend> frontend;
-    int id = getResourceIdFromHandle(frontendHandle, FRONTEND);
-    mTuner->openFrontendById(id, [&](Result result, const sp<IFrontend>& fe) {
-        frontend = fe;
-        status = result;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerFrontend>(frontend, id);
-    return Status::ok();
-}
-
-Status TunerService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
     if (!hasITuner()) {
         ALOGD("get ITuner failed");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    sp<ILnb> lnb;
-    int id = getResourceIdFromHandle(lnbHandle, LNB);
-    mTuner->openLnbById(id, [&](Result result, const sp<ILnb>& lnbSp){
-        lnb = lnbSp;
-        status = result;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    shared_ptr<ILnb> lnb;
+    int id = TunerHelper::getResourceIdFromHandle(lnbHandle, LNB);
+    auto status = mTuner->openLnbById(id, &lnb);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id);
     }
 
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id);
-    return Status::ok();
+    return status;
 }
 
-Status TunerService::openLnbByName(const string& lnbName, shared_ptr<ITunerLnb>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openLnbByName(const string& lnbName,
+                                                 shared_ptr<ITunerLnb>* _aidl_return) {
     if (!hasITuner()) {
         ALOGE("get ITuner failed");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    int lnbId;
-    Result status;
-    sp<ILnb> lnb;
-    mTuner->openLnbByName(lnbName, [&](Result r, LnbId id, const sp<ILnb>& lnbSp) {
-        status = r;
-        lnb = lnbSp;
-        lnbId = (int)id;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    vector<int32_t> id;
+    shared_ptr<ILnb> lnb;
+    auto status = mTuner->openLnbByName(lnbName, &id, &lnb);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id[0]);
     }
 
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, lnbId);
-    return Status::ok();
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Status TunerService::openDescrambler(int32_t /*descramblerHandle*/,
-            std::shared_ptr<ITunerDescrambler>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openDescrambler(int32_t /*descramblerHandle*/,
+                                                   shared_ptr<ITunerDescrambler>* _aidl_return) {
     if (!hasITuner()) {
         ALOGD("get ITuner failed");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    sp<IDescrambler> descrambler;
-    //int id = getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
-    mTuner->openDescrambler([&](Result r, const sp<IDescrambler>& descramblerSp) {
-        status = r;
-        descrambler = descramblerSp;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    shared_ptr<IDescrambler> descrambler;
+    // int id = TunerHelper::getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
+    auto status = mTuner->openDescrambler(&descrambler);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerDescrambler>(descrambler);
     }
 
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerDescrambler>(descrambler);
-    return Status::ok();
+    return status;
+}
+
+::ndk::ScopedAStatus TunerService::getTunerHalVersion(int* _aidl_return) {
+    hasITuner();
+    *_aidl_return = mTunerVersion;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerService::openSharedFilter(const string& in_filterToken,
+                                                    const shared_ptr<ITunerFilterCallback>& in_cb,
+                                                    shared_ptr<ITunerFilter>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (!PermissionCache::checkCallingPermission(sSharedFilterPermission)) {
+        ALOGE("Request requires android.permission.ACCESS_TV_SHARED_FILTER");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    Mutex::Autolock _l(mSharedFiltersLock);
+    if (mSharedFilters.find(in_filterToken) == mSharedFilters.end()) {
+        *_aidl_return = nullptr;
+        ALOGD("fail to find %s", in_filterToken.c_str());
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    shared_ptr<TunerFilter> filter = mSharedFilters.at(in_filterToken);
+    IPCThreadState* ipc = IPCThreadState::self();
+    const int pid = ipc->getCallingPid();
+    if (!filter->isSharedFilterAllowed(pid)) {
+        *_aidl_return = nullptr;
+        ALOGD("shared filter %s is opened in the same process", in_filterToken.c_str());
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    filter->attachSharedFilterCallback(in_cb);
+
+    *_aidl_return = filter;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerService::setLna(bool bEnable) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mTuner->setLna(bEnable);
+}
+
+::ndk::ScopedAStatus TunerService::setMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                           int32_t in_maxNumber) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mTuner->setMaxNumberOfFrontends(in_frontendType, in_maxNumber);
+}
+
+::ndk::ScopedAStatus TunerService::getMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                           int32_t* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mTuner->getMaxNumberOfFrontends(in_frontendType, _aidl_return);
+}
+
+string TunerService::addFilterToShared(const shared_ptr<TunerFilter>& sharedFilter) {
+    Mutex::Autolock _l(mSharedFiltersLock);
+
+    // Use sharedFilter address as token.
+    string token = to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get()));
+    mSharedFilters[token] = sharedFilter;
+    return token;
+}
+
+void TunerService::removeSharedFilter(const shared_ptr<TunerFilter>& sharedFilter) {
+    Mutex::Autolock _l(mSharedFiltersLock);
+
+    // Use sharedFilter address as token.
+    mSharedFilters.erase(to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get())));
 }
 
 void TunerService::updateTunerResources() {
-    if (!hasITuner() || mTunerResourceManager == NULL) {
+    if (!hasITuner()) {
         ALOGE("Failed to updateTunerResources");
         return;
     }
 
-    updateFrontendResources();
-    updateLnbResources();
-    // TODO: update Demux, Descrambler.
+    TunerHelper::updateTunerResources(getTRMFrontendInfos(), getTRMLnbHandles());
 }
 
-Status TunerService::getTunerHalVersion(int* _aidl_return) {
-    hasITuner();
-    *_aidl_return = mTunerVersion;
-    return Status::ok();
-}
-
-void TunerService::updateFrontendResources() {
-    hidl_vec<FrontendId> ids;
-    Result res = getHidlFrontendIds(ids);
-    if (res != Result::SUCCESS) {
-        return;
-    }
+vector<TunerFrontendInfo> TunerService::getTRMFrontendInfos() {
     vector<TunerFrontendInfo> infos;
+    vector<int32_t> ids;
+    auto status = mTuner->getFrontendIds(&ids);
+    if (!status.isOk()) {
+        return infos;
+    }
+
     for (int i = 0; i < ids.size(); i++) {
         FrontendInfo frontendInfo;
-        Result res = getHidlFrontendInfo((int)ids[i], frontendInfo);
-        if (res != Result::SUCCESS) {
+        auto res = mTuner->getFrontendInfo(ids[i], &frontendInfo);
+        if (!res.isOk()) {
             continue;
         }
         TunerFrontendInfo tunerFrontendInfo{
-            .handle = getResourceHandleFromId((int)ids[i], FRONTEND),
-            .type = static_cast<int>(frontendInfo.type),
-            .exclusiveGroupId = static_cast<int>(frontendInfo.exclusiveGroupId),
+                .handle = TunerHelper::getResourceHandleFromId((int)ids[i], FRONTEND),
+                .type = static_cast<int>(frontendInfo.type),
+                .exclusiveGroupId = frontendInfo.exclusiveGroupId,
         };
         infos.push_back(tunerFrontendInfo);
     }
-    mTunerResourceManager->setFrontendInfoList(infos);
+
+    return infos;
 }
 
-void TunerService::updateLnbResources() {
-    vector<int> handles = getLnbHandles();
-    if (handles.size() == 0) {
-        return;
-    }
-    mTunerResourceManager->setLnbInfoList(handles);
-}
-
-vector<int> TunerService::getLnbHandles() {
-    vector<int> lnbHandles;
-    if (mTuner != NULL) {
-        Result res;
-        vector<LnbId> lnbIds;
-        mTuner->getLnbIds([&](Result r, const hardware::hidl_vec<LnbId>& ids) {
-            lnbIds = ids;
-            res = r;
-        });
-        if (res != Result::SUCCESS || lnbIds.size() == 0) {
-        } else {
+vector<int32_t> TunerService::getTRMLnbHandles() {
+    vector<int32_t> lnbHandles;
+    if (mTuner != nullptr) {
+        vector<int32_t> lnbIds;
+        auto res = mTuner->getLnbIds(&lnbIds);
+        if (res.isOk()) {
             for (int i = 0; i < lnbIds.size(); i++) {
-                lnbHandles.push_back(getResourceHandleFromId((int)lnbIds[i], LNB));
+                lnbHandles.push_back(TunerHelper::getResourceHandleFromId(lnbIds[i], LNB));
             }
         }
     }
@@ -384,186 +357,8 @@
     return lnbHandles;
 }
 
-Result TunerService::getHidlFrontendIds(hidl_vec<FrontendId>& ids) {
-    if (mTuner == NULL) {
-        return Result::NOT_INITIALIZED;
-    }
-    Result res;
-    mTuner->getFrontendIds([&](Result r, const hidl_vec<FrontendId>& frontendIds) {
-        ids = frontendIds;
-        res = r;
-    });
-    return res;
-}
-
-Result TunerService::getHidlFrontendInfo(int id, FrontendInfo& info) {
-    if (mTuner == NULL) {
-        return Result::NOT_INITIALIZED;
-    }
-    Result res;
-    mTuner->getFrontendInfo(id, [&](Result r, const FrontendInfo& feInfo) {
-        info = feInfo;
-        res = r;
-    });
-    return res;
-}
-
-TunerDemuxCapabilities TunerService::getAidlDemuxCaps(DemuxCapabilities caps) {
-    TunerDemuxCapabilities aidlCaps{
-        .numDemux = (int)caps.numDemux,
-        .numRecord = (int)caps.numRecord,
-        .numPlayback = (int)caps.numPlayback,
-        .numTsFilter = (int)caps.numTsFilter,
-        .numSectionFilter = (int)caps.numSectionFilter,
-        .numAudioFilter = (int)caps.numAudioFilter,
-        .numVideoFilter = (int)caps.numVideoFilter,
-        .numPesFilter = (int)caps.numPesFilter,
-        .numPcrFilter = (int)caps.numPcrFilter,
-        .numBytesInSectionFilter = (int)caps.numBytesInSectionFilter,
-        .filterCaps = (int)caps.filterCaps,
-        .bTimeFilter = caps.bTimeFilter,
-    };
-    aidlCaps.linkCaps.resize(caps.linkCaps.size());
-    copy(caps.linkCaps.begin(), caps.linkCaps.end(), aidlCaps.linkCaps.begin());
-    return aidlCaps;
-}
-
-TunerFrontendInfo TunerService::convertToAidlFrontendInfo(FrontendInfo halInfo) {
-    TunerFrontendInfo info{
-        .type = (int)halInfo.type,
-        .minFrequency = (int)halInfo.minFrequency,
-        .maxFrequency = (int)halInfo.maxFrequency,
-        .minSymbolRate = (int)halInfo.minSymbolRate,
-        .maxSymbolRate = (int)halInfo.maxSymbolRate,
-        .acquireRange = (int)halInfo.acquireRange,
-        .exclusiveGroupId = (int)halInfo.exclusiveGroupId,
-    };
-    for (int i = 0; i < halInfo.statusCaps.size(); i++) {
-        info.statusCaps.push_back((int)halInfo.statusCaps[i]);
-    }
-
-    TunerFrontendCapabilities caps;
-    switch (halInfo.type) {
-        case FrontendType::ANALOG: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendAnalogCapabilities analogCaps{
-                    .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
-                    .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
-                };
-                caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
-            }
-            break;
-        }
-        case FrontendType::ATSC: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendAtscCapabilities atscCaps{
-                    .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
-                };
-                caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
-            }
-            break;
-        }
-        case FrontendType::ATSC3: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendAtsc3Capabilities atsc3Caps{
-                    .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
-                    .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
-                    .timeInterleaveModeCap =
-                            (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
-                    .demodOutputFormatCap
-                        = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
-                    .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
-                };
-                caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
-            }
-            break;
-        }
-        case FrontendType::DVBC: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendCableCapabilities cableCaps{
-                    .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
-                    .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
-                    .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
-                };
-                caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
-            }
-            break;
-        }
-        case FrontendType::DVBS: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendDvbsCapabilities dvbsCaps{
-                    .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
-                    .codeRateCap = (long)halInfo.frontendCaps.dvbsCaps().innerfecCap,
-                    .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
-                };
-                caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
-            }
-            break;
-        }
-        case FrontendType::DVBT: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendDvbtCapabilities dvbtCaps{
-                    .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
-                    .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
-                    .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
-                    .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
-                    .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
-                    .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
-                    .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
-                };
-                caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
-            }
-            break;
-        }
-        case FrontendType::ISDBS: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendIsdbsCapabilities isdbsCaps{
-                    .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
-                };
-                caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
-            }
-            break;
-        }
-        case FrontendType::ISDBS3: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendIsdbs3Capabilities isdbs3Caps{
-                    .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
-                };
-                caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
-            }
-            break;
-        }
-        case FrontendType::ISDBT: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendIsdbtCapabilities isdbtCaps{
-                    .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
-                    .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
-                    .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
-                    .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
-                };
-                caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
-            }
-            break;
-        }
-        default:
-            break;
-    }
-
-    info.caps = caps;
-    return info;
-}
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
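
With the AIDL Tuner HAL in place, TunerService.cpp above now forwards the HAL's ::ndk::ScopedAStatus directly instead of translating HIDL Result codes out of synchronous callbacks. The sketch below shows, under stated assumptions, what a native client of the reworked service could look like: it assumes the service is registered under the "media.tuner" name from getServiceName() and that the NDK-generated ITunerService proxy exposes getTunerHalVersion() and getFrontendIds() with the signatures declared in TunerService.h; treat the binder plumbing as illustrative rather than authoritative.

    // Sketch only: NDK AIDL client for the reworked media.tuner service.
    #include <aidl/android/media/tv/tuner/ITunerService.h>
    #include <android/binder_manager.h>

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <vector>

    using ::aidl::android::media::tv::tuner::ITunerService;

    int printFrontendIds() {
        // "media.tuner" matches TunerService::getServiceName().
        ::ndk::SpAIBinder binder(AServiceManager_getService("media.tuner"));
        std::shared_ptr<ITunerService> tuner = ITunerService::fromBinder(binder);
        if (tuner == nullptr) {
            return -1;  // service not registered (yet)
        }

        int32_t halVersion = 0;
        tuner->getTunerHalVersion(&halVersion);

        std::vector<int32_t> ids;
        ::ndk::ScopedAStatus status = tuner->getFrontendIds(&ids);
        if (!status.isOk()) {
            // Service-specific errors carry a tuner Result value, mirroring the HAL.
            return status.getServiceSpecificError();
        }
        std::printf("Tuner HAL 0x%x, %zu frontends\n",
                    static_cast<unsigned>(halVersion), ids.size());
        return 0;
    }
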
diff --git a/services/tuner/TunerService.h b/services/tuner/TunerService.h
index f8e2ee6..7fc2aa4 100644
--- a/services/tuner/TunerService.h
+++ b/services/tuner/TunerService.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,141 +17,95 @@
 #ifndef ANDROID_MEDIA_TUNERSERVICE_H
 #define ANDROID_MEDIA_TUNERSERVICE_H
 
-#include <aidl/android/media/tv/tunerresourcemanager/ITunerResourceManager.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/ITuner.h>
 #include <aidl/android/media/tv/tuner/BnTunerService.h>
-#include <android/hardware/tv/tuner/1.1/ITuner.h>
-#include <fmq/AidlMessageQueue.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <utils/Mutex.h>
 
-using ::aidl::android::hardware::common::fmq::GrantorDescriptor;
-using ::aidl::android::hardware::common::fmq::MQDescriptor;
-using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+#include <map>
+
+#include "TunerFilter.h"
+#include "TunerHelper.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxCapabilities;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendType;
+using ::aidl::android::hardware::tv::tuner::ITuner;
 using ::aidl::android::media::tv::tuner::BnTunerService;
 using ::aidl::android::media::tv::tuner::ITunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerDescrambler;
+using ::aidl::android::media::tv::tuner::ITunerFilter;
+using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
 using ::aidl::android::media::tv::tuner::ITunerFrontend;
 using ::aidl::android::media::tv::tuner::ITunerLnb;
-using ::aidl::android::media::tv::tuner::TunerDemuxCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDtmbCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendInfo;
-using ::aidl::android::media::tv::tunerresourcemanager::ITunerResourceManager;
-
-using ::android::hardware::details::logError;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::kSynchronizedReadWrite;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::tv::tuner::V1_0::DemuxCapabilities;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendInfo;
-using ::android::hardware::tv::tuner::V1_0::IDemux;
-using ::android::hardware::tv::tuner::V1_0::IDescrambler;
-using ::android::hardware::tv::tuner::V1_0::IFilter;
-using ::android::hardware::tv::tuner::V1_0::IFilterCallback;
-using ::android::hardware::tv::tuner::V1_0::ITuner;
-using ::android::hardware::tv::tuner::V1_0::Result;
-
-using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::Mutex;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
-
-const static int TUNER_HAL_VERSION_UNKNOWN = 0;
-const static int TUNER_HAL_VERSION_1_0 = 1 << 16;
-const static int TUNER_HAL_VERSION_1_1 = (1 << 16) | 1;
-// System Feature defined in PackageManager
-static const ::android::String16 FEATURE_TUNER(::android::String16("android.hardware.tv.tuner"));
-
-typedef enum {
-    FRONTEND,
-    LNB,
-    DEMUX,
-    DESCRAMBLER,
-} TunerResourceType;
-
-struct FilterCallback : public IFilterCallback {
-    ~FilterCallback() {}
-    Return<void> onFilterEvent(const DemuxFilterEvent&) {
-        return Void();
-    }
-    Return<void> onFilterStatus(const DemuxFilterStatus) {
-        return Void();
-    }
-};
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerService : public BnTunerService {
-    typedef AidlMessageQueue<int8_t, SynchronizedReadWrite> AidlMessageQueue;
-    typedef MessageQueue<uint8_t, kSynchronizedReadWrite> HidlMessageQueue;
-    typedef MQDescriptor<int8_t, SynchronizedReadWrite> AidlMQDesc;
-
 public:
     static char const *getServiceName() { return "media.tuner"; }
     static binder_status_t instantiate();
     TunerService();
     virtual ~TunerService();
 
-    Status getFrontendIds(vector<int32_t>* ids) override;
-    Status getFrontendInfo(int32_t id, TunerFrontendInfo* _aidl_return) override;
-    Status getFrontendDtmbCapabilities(
-            int32_t id, TunerFrontendDtmbCapabilities* _aidl_return) override;
-    Status openFrontend(
-            int32_t frontendHandle, shared_ptr<ITunerFrontend>* _aidl_return) override;
-    Status openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) override;
-    Status openLnbByName(const string& lnbName, shared_ptr<ITunerLnb>* _aidl_return) override;
-    Status openDemux(int32_t demuxHandle, std::shared_ptr<ITunerDemux>* _aidl_return) override;
-    Status getDemuxCaps(TunerDemuxCapabilities* _aidl_return) override;
-    Status openDescrambler(int32_t descramblerHandle,
-            std::shared_ptr<ITunerDescrambler>* _aidl_return) override;
-    Status getTunerHalVersion(int* _aidl_return) override;
+    ::ndk::ScopedAStatus getFrontendIds(vector<int32_t>* out_ids) override;
+    ::ndk::ScopedAStatus getFrontendInfo(int32_t in_frontendHandle,
+                                         FrontendInfo* _aidl_return) override;
+    ::ndk::ScopedAStatus openFrontend(int32_t in_frontendHandle,
+                                      shared_ptr<ITunerFrontend>* _aidl_return) override;
+    ::ndk::ScopedAStatus openLnb(int32_t in_lnbHandle,
+                                 shared_ptr<ITunerLnb>* _aidl_return) override;
+    ::ndk::ScopedAStatus openLnbByName(const string& in_lnbName,
+                                       shared_ptr<ITunerLnb>* _aidl_return) override;
+    ::ndk::ScopedAStatus openDemux(int32_t in_demuxHandle,
+                                   shared_ptr<ITunerDemux>* _aidl_return) override;
+    ::ndk::ScopedAStatus getDemuxCaps(DemuxCapabilities* _aidl_return) override;
+    ::ndk::ScopedAStatus openDescrambler(int32_t in_descramblerHandle,
+                                         shared_ptr<ITunerDescrambler>* _aidl_return) override;
+    ::ndk::ScopedAStatus getTunerHalVersion(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus openSharedFilter(const string& in_filterToken,
+                                          const shared_ptr<ITunerFilterCallback>& in_cb,
+                                          shared_ptr<ITunerFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus setLna(bool in_bEnable) override;
+    ::ndk::ScopedAStatus setMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                 int32_t in_maxNumber) override;
+    ::ndk::ScopedAStatus getMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                 int32_t* _aidl_return) override;
 
-    // TODO: create a map between resource id and handles.
-    static int getResourceIdFromHandle(int resourceHandle, int /*type*/) {
-        return (resourceHandle & 0x00ff0000) >> 16;
-    }
+    string addFilterToShared(const shared_ptr<TunerFilter>& sharedFilter);
+    void removeSharedFilter(const shared_ptr<TunerFilter>& sharedFilter);
 
-    int getResourceHandleFromId(int id, int resourceType) {
-        // TODO: build up randomly generated id to handle mapping
-        return (resourceType & 0x000000ff) << 24
-                | (id << 16)
-                | (mResourceRequestCount++ & 0xffff);
-    }
+    static shared_ptr<TunerService> getTunerService();
 
 private:
     bool hasITuner();
-    bool hasITuner_1_1();
     void updateTunerResources();
+    vector<TunerFrontendInfo> getTRMFrontendInfos();
+    vector<int32_t> getTRMLnbHandles();
 
-    void updateFrontendResources();
-    void updateLnbResources();
-    Result getHidlFrontendIds(hidl_vec<FrontendId>& ids);
-    Result getHidlFrontendInfo(int id, FrontendInfo& info);
-    vector<int> getLnbHandles();
-
-    TunerDemuxCapabilities getAidlDemuxCaps(DemuxCapabilities caps);
-    TunerFrontendInfo convertToAidlFrontendInfo(FrontendInfo halInfo);
-
-    sp<ITuner> mTuner;
-    sp<::android::hardware::tv::tuner::V1_1::ITuner> mTuner_1_1;
-
-    shared_ptr<ITunerResourceManager> mTunerResourceManager;
-    int mResourceRequestCount = 0;
-
+    shared_ptr<ITuner> mTuner;
     int mTunerVersion = TUNER_HAL_VERSION_UNKNOWN;
+    Mutex mSharedFiltersLock;
+    map<string, shared_ptr<TunerFilter>> mSharedFilters;
+
+    static shared_ptr<TunerService> sTunerService;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERSERVICE_H
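
The getResourceIdFromHandle()/getResourceHandleFromId() helpers removed from this header packed the resource type into bits 31..24, the resource id into bits 23..16 and a running request count into the low 16 bits. A small worked example of that layout follows; whether TunerHelper keeps exactly this encoding is an assumption, so take it as an illustration of the removed code rather than a statement about the new helper.

    // Illustration of the handle layout from the helpers removed above.
    #include <cstdint>
    #include <cstdio>

    static int32_t makeHandle(int32_t resourceType, int32_t id, int32_t requestCount) {
        return (resourceType & 0x000000ff) << 24 | (id << 16) | (requestCount & 0xffff);
    }

    static int32_t idFromHandle(int32_t handle) {
        return (handle & 0x00ff0000) >> 16;
    }

    int main() {
        // FRONTEND was 0 in the removed TunerResourceType enum; frontend id 2 on the
        // third request gives handle 0x00020002, and the id round-trips back out.
        int32_t handle = makeHandle(0 /* FRONTEND */, 2, 2);
        std::printf("handle=0x%08x id=%d\n", static_cast<uint32_t>(handle), idFromHandle(handle));
        return 0;
    }
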
diff --git a/services/tuner/TunerTimeFilter.cpp b/services/tuner/TunerTimeFilter.cpp
index ea9da30..73cd6b4 100644
--- a/services/tuner/TunerTimeFilter.cpp
+++ b/services/tuner/TunerTimeFilter.cpp
@@ -18,97 +18,91 @@
 
 #include "TunerTimeFilter.h"
 
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::Constant64Bit;
+#include <aidl/android/hardware/tv/tuner/Constant64Bit.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
 
+using ::aidl::android::hardware::tv::tuner::Constant64Bit;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerTimeFilter::TunerTimeFilter(sp<ITimeFilter> timeFilter) {
+TunerTimeFilter::TunerTimeFilter(shared_ptr<ITimeFilter> timeFilter) {
     mTimeFilter = timeFilter;
 }
 
 TunerTimeFilter::~TunerTimeFilter() {
-    mTimeFilter = NULL;
+    mTimeFilter = nullptr;
 }
 
-Status TunerTimeFilter::setTimeStamp(int64_t timeStamp) {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::setTimeStamp(int64_t timeStamp) {
+    if (mTimeFilter == nullptr) {
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mTimeFilter->setTimeStamp(timeStamp);
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mTimeFilter->setTimeStamp(timeStamp);
 }
 
-Status TunerTimeFilter::clearTimeStamp() {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::clearTimeStamp() {
+    if (mTimeFilter == nullptr) {
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mTimeFilter->clearTimeStamp();
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mTimeFilter->clearTimeStamp();
 }
 
-Status TunerTimeFilter::getSourceTime(int64_t* _aidl_return) {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::getSourceTime(int64_t* _aidl_return) {
+    if (mTimeFilter == nullptr) {
         *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    mTimeFilter->getSourceTime(
-            [&](Result r, uint64_t t) {
-                status = r;
-                *_aidl_return = t;
-            });
-    if (status != Result::SUCCESS) {
+    auto status = mTimeFilter->getSourceTime(_aidl_return);
+    if (!status.isOk()) {
         *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
     }
-    return Status::ok();
+    return status;
 }
 
-Status TunerTimeFilter::getTimeStamp(int64_t* _aidl_return) {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::getTimeStamp(int64_t* _aidl_return) {
+    if (mTimeFilter == nullptr) {
         *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    mTimeFilter->getTimeStamp(
-            [&](Result r, uint64_t t) {
-                status = r;
-                *_aidl_return = t;
-            });
-    if (status != Result::SUCCESS) {
+    auto status = mTimeFilter->getTimeStamp(_aidl_return);
+    if (!status.isOk()) {
         *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
     }
-    return Status::ok();
+    return status;
 }
 
-Status TunerTimeFilter::close() {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::close() {
+    if (mTimeFilter == nullptr) {
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mTimeFilter->close();
-    mTimeFilter = NULL;
+    auto status = mTimeFilter->close();
+    mTimeFilter = nullptr;
 
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return status;
 }
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
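
TunerTimeFilter now wraps the AIDL ITimeFilter and passes the HAL status straight through, substituting INVALID_PRESENTATION_TIME_STAMP when a call fails. A minimal caller-side sketch is below; it assumes an ITunerTimeFilter proxy already obtained elsewhere (e.g. via ITunerDemux::openTimeFilter()) and only illustrates the sentinel check.

    // Sketch only: reading a timestamp through the AIDL ITunerTimeFilter proxy.
    #include <aidl/android/hardware/tv/tuner/Constant64Bit.h>
    #include <aidl/android/media/tv/tuner/ITunerTimeFilter.h>

    #include <cstdint>
    #include <memory>

    using ::aidl::android::hardware::tv::tuner::Constant64Bit;
    using ::aidl::android::media::tv::tuner::ITunerTimeFilter;

    bool readTimeStamp(const std::shared_ptr<ITunerTimeFilter>& timeFilter, int64_t* outTs) {
        ::ndk::ScopedAStatus status = timeFilter->getTimeStamp(outTs);
        // On failure the service also reports the invalid-timestamp sentinel, so check both.
        return status.isOk() &&
               *outTs != static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
    }
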
diff --git a/services/tuner/TunerTimeFilter.h b/services/tuner/TunerTimeFilter.h
index d675319..31a47cd 100644
--- a/services/tuner/TunerTimeFilter.h
+++ b/services/tuner/TunerTimeFilter.h
@@ -17,38 +17,40 @@
 #ifndef ANDROID_MEDIA_TUNERFTIMEFILTER_H
 #define ANDROID_MEDIA_TUNERFTIMEFILTER_H
 
+#include <aidl/android/hardware/tv/tuner/ITimeFilter.h>
 #include <aidl/android/media/tv/tuner/BnTunerTimeFilter.h>
-#include <android/hardware/tv/tuner/1.0/ITimeFilter.h>
-#include <android/hardware/tv/tuner/1.1/types.h>
-#include <media/stagefright/foundation/ADebug.h>
 #include <utils/Log.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerTimeFilter;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::ITimeFilter;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerTimeFilter : public BnTunerTimeFilter {
 
 public:
-    TunerTimeFilter(sp<ITimeFilter> timeFilter);
+    TunerTimeFilter(shared_ptr<ITimeFilter> timeFilter);
     virtual ~TunerTimeFilter();
-    Status setTimeStamp(int64_t timeStamp) override;
-    Status clearTimeStamp() override;
-    Status getSourceTime(int64_t* _aidl_return) override;
-    Status getTimeStamp(int64_t* _aidl_return) override;
-    Status close() override;
+
+    ::ndk::ScopedAStatus setTimeStamp(int64_t in_timeStamp) override;
+    ::ndk::ScopedAStatus clearTimeStamp() override;
+    ::ndk::ScopedAStatus getSourceTime(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getTimeStamp(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus close() override;
 
 private:
-    sp<ITimeFilter> mTimeFilter;
+    shared_ptr<ITimeFilter> mTimeFilter;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERFTIMEFILTER_H
diff --git a/services/tuner/aidl/android/media/tv/OWNERS b/services/tuner/aidl/android/media/tv/OWNERS
index 0ceb8e8..bf9fe34 100644
--- a/services/tuner/aidl/android/media/tv/OWNERS
+++ b/services/tuner/aidl/android/media/tv/OWNERS
@@ -1,2 +1,2 @@
-nchalko@google.com
+hgchen@google.com
 quxiangfang@google.com
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
index 73b00ae..fa326b2 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
@@ -16,6 +16,8 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.DemuxFilterType;
+import android.hardware.tv.tuner.DvrType;
 import android.media.tv.tuner.ITunerDvr;
 import android.media.tv.tuner.ITunerDvrCallback;
 import android.media.tv.tuner.ITunerFilter;
@@ -36,10 +38,15 @@
     void setFrontendDataSource(in ITunerFrontend frontend);
 
     /**
+     * Set a frontend resource by ID as data input of the demux
+     */
+    void setFrontendDataSourceById(in int frontendId);
+
+    /**
      * Open a new filter in the demux
      */
-    ITunerFilter openFilter(
-        in int mainType, in int subtype, in int bufferSize, in ITunerFilterCallback cb);
+    ITunerFilter openFilter(in DemuxFilterType type, in int bufferSize,
+        in ITunerFilterCallback cb);
 
     /**
      * Open time filter of the demux.
@@ -59,7 +66,7 @@
     /**
      * Open a DVR (Digital Video Record) instance in the demux.
      */
-    ITunerDvr openDvr(in int dvbType, in int bufferSize, in ITunerDvrCallback cb);
+    ITunerDvr openDvr(in DvrType dvbType, in int bufferSize, in ITunerDvrCallback cb);
 
     /**
      * Connect Conditional Access Modules (CAM) through Common Interface (CI).
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
index 7370eee..39d193c 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
@@ -16,9 +16,9 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.DemuxPid;
 import android.media.tv.tuner.ITunerDemux;
 import android.media.tv.tuner.ITunerFilter;
-import android.media.tv.tuner.TunerDemuxPid;
 
 /**
  * Tuner Demux interface handles tuner related operations.
@@ -39,12 +39,12 @@
     /**
      * Add packets' PID to the descrambler for descrambling.
      */
-    void addPid(in TunerDemuxPid pid, in ITunerFilter optionalSourceFilter);
+    void addPid(in DemuxPid pid, in ITunerFilter optionalSourceFilter);
 
     /**
      * Remove packets' PID from the descrambler.
      */
-    void removePid(in TunerDemuxPid pid, in ITunerFilter optionalSourceFilter);
+    void removePid(in DemuxPid pid, in ITunerFilter optionalSourceFilter);
 
     /**
      * Close a new interface of ITunerDescrambler.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
index 8f1601b..2c01c4e 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
@@ -18,8 +18,8 @@
 
 import android.hardware.common.fmq.MQDescriptor;
 import android.hardware.common.fmq.SynchronizedReadWrite;
+import android.hardware.tv.tuner.DvrSettings;
 import android.media.tv.tuner.ITunerFilter;
-import android.media.tv.tuner.TunerDvrSettings;
 
 /**
  * Tuner Dvr interface handles tuner related operations.
@@ -35,7 +35,7 @@
     /**
      * Configure the DVR.
      */
-    void configure(in TunerDvrSettings settings);
+    void configure(in DvrSettings settings);
 
     /**
      * Attach one filter to DVR interface for recording.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
index e234fe5..3043d24 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
@@ -16,6 +16,9 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.PlaybackStatus;
+import android.hardware.tv.tuner.RecordStatus;
+
 /**
  * TunerDvrCallback interface handles tuner dvr related callbacks.
  *
@@ -25,10 +28,10 @@
     /**
      * Notify the client a new status of the demux's record.
      */
-    void onRecordStatus(in int status);
+    void onRecordStatus(in RecordStatus status);
 
     /**
      * Notify the client a new status of the demux's playback.
      */
-    void onPlaybackStatus(in int status);
+    void onPlaybackStatus(in PlaybackStatus status);
 }
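
Because onRecordStatus()/onPlaybackStatus() now take the HAL's typed RecordStatus and PlaybackStatus instead of raw ints, a client callback can work with the enums directly. A minimal sketch follows; the BnTunerDvrCallback base class and the exact generated signatures are assumptions based on the usual NDK AIDL backend conventions.

    // Sketch only: a logging-only DVR callback built on the generated stub class.
    #define LOG_TAG "SampleTunerDvrCallback"

    #include <aidl/android/hardware/tv/tuner/PlaybackStatus.h>
    #include <aidl/android/hardware/tv/tuner/RecordStatus.h>
    #include <aidl/android/media/tv/tuner/BnTunerDvrCallback.h>
    #include <utils/Log.h>

    using ::aidl::android::hardware::tv::tuner::PlaybackStatus;
    using ::aidl::android::hardware::tv::tuner::RecordStatus;
    using ::aidl::android::media::tv::tuner::BnTunerDvrCallback;

    class SampleDvrCallback : public BnTunerDvrCallback {
    public:
        ::ndk::ScopedAStatus onRecordStatus(RecordStatus status) override {
            ALOGD("record status: %d", static_cast<int>(status));
            return ::ndk::ScopedAStatus::ok();
        }
        ::ndk::ScopedAStatus onPlaybackStatus(PlaybackStatus status) override {
            ALOGD("playback status: %d", static_cast<int>(status));
            return ::ndk::ScopedAStatus::ok();
        }
    };

An instance would be created with ::ndk::SharedRefBase::make<SampleDvrCallback>() and handed to ITunerDemux::openDvr(), matching the signature shown in ITunerDemux.aidl above.
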
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
index 10d4c3b..dc40f03 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
@@ -19,8 +19,11 @@
 import android.hardware.common.fmq.MQDescriptor;
 import android.hardware.common.fmq.SynchronizedReadWrite;
 import android.hardware.common.NativeHandle;
-import android.media.tv.tuner.TunerFilterConfiguration;
-import android.media.tv.tuner.TunerFilterSharedHandleInfo;
+import android.hardware.tv.tuner.DemuxFilterSettings;
+import android.hardware.tv.tuner.DemuxFilterType;
+import android.hardware.tv.tuner.AvStreamType;
+import android.hardware.tv.tuner.DemuxFilterMonitorEventType;
+import android.hardware.tv.tuner.FilterDelayHint;
 
 /**
  * Tuner Filter interface handles tuner related operations.
@@ -46,12 +49,12 @@
     /**
      * Configure the filter.
      */
-    void configure(in TunerFilterConfiguration config);
+    void configure(in DemuxFilterSettings settings);
 
     /**
      * Configure the monitor event of the Filter.
      */
-    void configureMonitorEvent(in int monitorEventType);
+    void configureMonitorEvent(in int monitorEventTypes);
 
     /**
      * Configure the context id of the IP Filter.
@@ -61,12 +64,12 @@
     /**
      * Configure the stream type of the media Filter.
      */
-    void configureAvStreamType(in int avStreamType);
+    void configureAvStreamType(in AvStreamType avStreamType);
 
     /**
      * Get the a/v shared memory handle
      */
-    TunerFilterSharedHandleInfo getAvSharedHandleInfo();
+    long getAvSharedHandle(out NativeHandle avMemory);
 
     /**
      * Release the handle reported by the HAL for AV memory.
@@ -97,4 +100,28 @@
      * Close the filter.
      */
     void close();
+
+    /**
+     * Acquire a new SharedFilter token.
+     *
+     * @return a token of the newly created SharedFilter instance.
+     */
+    String acquireSharedFilterToken();
+
+    /**
+     * Free a SharedFilter token.
+     *
+     * @param filterToken the SharedFilter token to be released.
+     */
+    void freeSharedFilterToken(in String filterToken);
+
+    /**
+     * Get filter type.
+     *
+     * @return filter type.
+     */
+    DemuxFilterType getFilterType();
+
+    void setDelayHint(in FilterDelayHint hint);
 }
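
acquireSharedFilterToken()/freeSharedFilterToken() pair up with ITunerService::openSharedFilter() from earlier in this change: the owning process exports a token, and a second process, holding android.permission.ACCESS_TV_SHARED_FILTER, redeems it for its own ITunerFilter proxy. The two-process sketch below uses the NDK-generated proxy types; how the token string travels between the processes is up to the app and not shown, and the helper names are made up for illustration.

    // Sketch only: the shared-filter handshake across two processes.
    #include <aidl/android/media/tv/tuner/ITunerFilter.h>
    #include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
    #include <aidl/android/media/tv/tuner/ITunerService.h>

    #include <memory>
    #include <string>

    using ::aidl::android::media::tv::tuner::ITunerFilter;
    using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
    using ::aidl::android::media::tv::tuner::ITunerService;

    // Process A: export an already configured filter and return the token to share.
    std::string exportFilter(const std::shared_ptr<ITunerFilter>& filter) {
        std::string token;
        filter->acquireSharedFilterToken(&token);
        return token;  // hand this string to the other process out of band
    }

    // Process B: redeem the token (requires android.permission.ACCESS_TV_SHARED_FILTER).
    std::shared_ptr<ITunerFilter> importFilter(const std::shared_ptr<ITunerService>& service,
                                               const std::string& token,
                                               const std::shared_ptr<ITunerFilterCallback>& cb) {
        std::shared_ptr<ITunerFilter> shared;
        ::ndk::ScopedAStatus status = service->openSharedFilter(token, cb, &shared);
        // INVALID_STATE: unknown token or same-process caller; UNAVAILABLE: missing permission.
        return status.isOk() ? shared : nullptr;
    }

Process A revokes the sharing later with filter->freeSharedFilterToken(token).
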
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
index e7a52a7..6c53042 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
@@ -16,7 +16,8 @@
 
 package android.media.tv.tuner;
 
-import android.media.tv.tuner.TunerFilterEvent;
+import android.hardware.tv.tuner.DemuxFilterEvent;
+import android.hardware.tv.tuner.DemuxFilterStatus;
 
 /**
  * TunerFilterCallback interface handles tuner filter related callbacks.
@@ -27,10 +28,10 @@
     /**
      * Notify the client a new status of a filter.
      */
-    void onFilterStatus(int status);
+    void onFilterStatus(in DemuxFilterStatus status);
 
     /**
      * Notify the client that a new filter event happened.
      */
-    void onFilterEvent(in TunerFilterEvent[] filterEvent);
+    void onFilterEvent(in DemuxFilterEvent[] events);
 }
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
index ef0255a..0493f05 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,10 +16,13 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.FrontendScanType;
+import android.hardware.tv.tuner.FrontendSettings;
+import android.hardware.tv.tuner.FrontendStatus;
+import android.hardware.tv.tuner.FrontendStatusReadiness;
+import android.hardware.tv.tuner.FrontendStatusType;
 import android.media.tv.tuner.ITunerFrontendCallback;
 import android.media.tv.tuner.ITunerLnb;
-import android.media.tv.tuner.TunerFrontendSettings;
-import android.media.tv.tuner.TunerFrontendStatus;
 
 /**
  * Tuner Frontend interface handles frontend related operations.
@@ -39,7 +42,7 @@
      *
      * @param settings the settings to tune with.
      */
-    void tune(in TunerFrontendSettings settings);
+    void tune(in FrontendSettings settings);
 
     /**
      * Stop the previous tuning.
@@ -52,7 +55,7 @@
      * @param settings the settings to scan with.
      * @param frontendScanType scan with given type.
      */
-    void scan(in TunerFrontendSettings settings, in int frontendScanType);
+    void scan(in FrontendSettings settings, in FrontendScanType frontendScanType);
 
     /**
      * Stop the previous scanning.
@@ -67,13 +70,6 @@
     void setLnb(in ITunerLnb lnb);
 
     /**
-     * Enable or Disable Low Noise Amplifier (LNA).
-     *
-     * @param bEnable enable Lna or not.
-     */
-    void setLna(in boolean bEnable);
-
-    /**
      * Link Frontend to the cicam with given id.
      *
      * @return lts id
@@ -93,15 +89,25 @@
     /**
      * Gets the statuses of the frontend.
      */
-    TunerFrontendStatus[] getStatus(in int[] statusTypes);
-
-    /**
-     * Gets the 1.1 extended statuses of the frontend.
-     */
-    TunerFrontendStatus[] getStatusExtended_1_1(in int[] statusTypes);
+    FrontendStatus[] getStatus(in FrontendStatusType[] statusTypes);
 
     /**
      * Gets the id of the frontend.
      */
     int getFrontendId();
+
+    /**
+     * Request hardware information about the frontend.
+     */
+    String getHardwareInfo();
+
+    /**
+     * Filter out unnecessary PID from frontend output.
+     */
+    void removeOutputPid(int pid);
+
+    /**
+     * Gets FrontendStatus’ readiness statuses for given status types.
+     */
+    FrontendStatusReadiness[] getFrontendStatusReadiness(in FrontendStatusType[] statusTypes);
 }
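
Of the frontend methods added here, getHardwareInfo() and removeOutputPid() need no extra parcelables, so a short proxy-side sketch is enough to show the call shapes; it assumes an ITunerFrontend proxy obtained from ITunerService::openFrontend(), and the PID value is purely illustrative.

    // Sketch only: exercising the new ITunerFrontend methods.
    #define LOG_TAG "SampleTunerFrontendUser"

    #include <aidl/android/media/tv/tuner/ITunerFrontend.h>
    #include <utils/Log.h>

    #include <cstdint>
    #include <memory>
    #include <string>

    using ::aidl::android::media::tv::tuner::ITunerFrontend;

    void dumpFrontend(const std::shared_ptr<ITunerFrontend>& frontend) {
        int32_t id = -1;
        frontend->getFrontendId(&id);

        std::string hwInfo;
        if (frontend->getHardwareInfo(&hwInfo).isOk()) {
            ALOGD("frontend %d hardware info: %s", id, hwInfo.c_str());
        }

        // Drop a PID the app no longer needs from the frontend output (value made up).
        frontend->removeOutputPid(0x1FFF);
    }
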
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
index c92f5ee..d0ab11d 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,7 +16,9 @@
 
 package android.media.tv.tuner;
 
-import android.media.tv.tuner.TunerFrontendScanMessage;
+import android.hardware.tv.tuner.FrontendEventType;
+import android.hardware.tv.tuner.FrontendScanMessage;
+import android.hardware.tv.tuner.FrontendScanMessageType;
 
 /**
  * TunerFrontendCallback interface handles tuner frontend related callbacks.
@@ -24,13 +26,14 @@
  * {@hide}
  */
 interface ITunerFrontendCallback {
-        /**
+    /**
      * Notify the client that a new event happened on the frontend.
      */
-    void onEvent(in int frontendEventType);
+    void onEvent(in FrontendEventType frontendEventType);
 
     /**
      * notify the client of scan messages.
      */
-    void onScanMessage(in int messageType, in TunerFrontendScanMessage message);
+    void onScanMessage(in FrontendScanMessageType messageType,
+        in FrontendScanMessage message);
 }
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
index d62145e..79f0761 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
@@ -16,6 +16,9 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.LnbPosition;
+import android.hardware.tv.tuner.LnbTone;
+import android.hardware.tv.tuner.LnbVoltage;
 import android.media.tv.tuner.ITunerLnbCallback;
 
 /**
@@ -32,17 +35,17 @@
     /**
      * Set the lnb's power voltage.
      */
-    void setVoltage(in int voltage);
+    void setVoltage(in LnbVoltage voltage);
 
     /**
      * Set the lnb's tone mode.
      */
-    void setTone(in int tone);
+    void setTone(in LnbTone tone);
 
     /**
      * Select the lnb's position.
      */
-    void setSatellitePosition(in int position);
+    void setSatellitePosition(in LnbPosition position);
 
     /**
      * Sends DiSEqC (Digital Satellite Equipment Control) message.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
index 117352f..2b6eb5f 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
@@ -16,6 +16,8 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.LnbEventType;
+
 /**
  * TuneLnbCallback interface handles tuner lnb related callbacks.
  *
@@ -25,7 +27,7 @@
     /**
      * Notify the client that a new event happened on the Lnb.
      */
-    void onEvent(in int lnbEventType);
+    void onEvent(in LnbEventType lnbEventType);
 
     /**
      * notify the client of new DiSEqC message.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
index 755b152..b8084ab 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
@@ -1,5 +1,5 @@
 /**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,16 +16,15 @@
 
 package android.media.tv.tuner;
 
-import android.hardware.common.fmq.MQDescriptor;
-import android.hardware.common.fmq.SynchronizedReadWrite;
-import android.hardware.common.fmq.UnsynchronizedWrite;
+import android.hardware.tv.tuner.DemuxCapabilities;
+import android.hardware.tv.tuner.FrontendInfo;
+import android.hardware.tv.tuner.FrontendType;
 import android.media.tv.tuner.ITunerDemux;
 import android.media.tv.tuner.ITunerDescrambler;
+import android.media.tv.tuner.ITunerFilter;
+import android.media.tv.tuner.ITunerFilterCallback;
 import android.media.tv.tuner.ITunerFrontend;
 import android.media.tv.tuner.ITunerLnb;
-import android.media.tv.tuner.TunerDemuxCapabilities;
-import android.media.tv.tuner.TunerFrontendDtmbCapabilities;
-import android.media.tv.tuner.TunerFrontendInfo;
 
 /**
  * TunerService interface handles tuner related operations.
@@ -33,8 +32,8 @@
  * {@hide}
  */
 //@VintfStability
+@SuppressWarnings(value={"out-array"})
 interface ITunerService {
-
     /**
      * Gets frontend IDs.
      */
@@ -43,15 +42,10 @@
     /**
      * Retrieve the frontend's information.
      *
-     * @param frontendHandle the handle of the frontend granted by TRM.
+     * @param frontendId the ID of the frontend.
      * @return the information of the frontend.
      */
-    TunerFrontendInfo getFrontendInfo(in int frontendHandle);
-
-    /**
-     * Get Dtmb Frontend Capabilities.
-     */
-    TunerFrontendDtmbCapabilities getFrontendDtmbCapabilities(in int id);
+    FrontendInfo getFrontendInfo(in int frontendId);
 
     /**
      * Open a Tuner Frontend interface.
@@ -87,7 +81,7 @@
      *
      * @return the demux’s capabilities.
      */
-    TunerDemuxCapabilities getDemuxCaps();
+    DemuxCapabilities getDemuxCaps();
 
     /* Open a new interface of ITunerDescrambler given a descramblerHandle.
      *
@@ -102,4 +96,38 @@
      * value is unknown version 0.
      */
     int getTunerHalVersion();
+
+    /**
+     * Open a new SharedFilter instance of ITunerFilter.
+     *
+     * @param filterToken the SharedFilter token created by ITunerFilter.
+     * @param cb the ITunerFilterCallback used to receive callback events.
+     * @return a newly created ITunerFilter interface.
+     */
+    ITunerFilter openSharedFilter(in String filterToken, in ITunerFilterCallback cb);
+
+    /**
+     * Enable or Disable Low Noise Amplifier (LNA).
+     *
+     * @param bEnable true to enable the LNA; false to disable it.
+     */
+    void setLna(in boolean bEnable);
+
+    /**
+     * Set the maximum number of usable frontends of a given frontend type. The client uses
+     * this to enable or disable frontends when the user changes the cable connection status.
+     *
+     * @param frontendType the frontend type for which the maximum usable number will be set.
+     * @param maxNumber the new maximum usable number.
+     */
+    void setMaxNumberOfFrontends(in FrontendType frontendType, in int maxNumber);
+
+    /**
+     * Get the maximum number of usable frontends of a given frontend type.
+     *
+     * @param frontendType the frontend type for which the maximum usable number will be queried.
+     *
+     * @return the maximum usable number of the queried frontend type.
+     */
+    int getMaxNumberOfFrontends(in FrontendType frontendType);
 }
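
setMaxNumberOfFrontends()/getMaxNumberOfFrontends() and the relocated setLna() are simple pass-throughs in TunerService.cpp above, so client use is just a matter of calling the proxy. The sketch below assumes the same NDK-generated ITunerService client as in the earlier sketch; FrontendType::DVBT and the count are illustrative values.

    // Sketch only: disabling DVB-T frontends while the cable is unplugged.
    #include <aidl/android/hardware/tv/tuner/FrontendType.h>
    #include <aidl/android/media/tv/tuner/ITunerService.h>

    #include <cstdint>
    #include <memory>

    using ::aidl::android::hardware::tv::tuner::FrontendType;
    using ::aidl::android::media::tv::tuner::ITunerService;

    void onCableUnplugged(const std::shared_ptr<ITunerService>& tuner) {
        tuner->setMaxNumberOfFrontends(FrontendType::DVBT, 0);
        tuner->setLna(false);

        int32_t maxDvbt = -1;
        tuner->getMaxNumberOfFrontends(FrontendType::DVBT, &maxDvbt);  // expected to read back 0
    }
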
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl
deleted file mode 100644
index df3374a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Extra Meta Data from AD (Audio Descriptor) according to ETSI TS 101 154 V2.1.1.
- *
- * {@hide}
- */
-parcelable TunerAudioExtraMetaData {
-    byte adFade;
-
-    byte adPan;
-
-    byte versionTextTag;
-
-    byte adGainCenter;
-
-    byte adGainFront;
-
-    byte adGainSurround;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl
deleted file mode 100644
index 71ab151..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Tuner Demux capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerDemuxCapabilities {
-    int numDemux;
-
-    int numRecord;
-
-    int numPlayback;
-
-    int numTsFilter;
-
-    int numSectionFilter;
-
-    int numAudioFilter;
-
-    int numVideoFilter;
-
-    int numPesFilter;
-
-    int numPcrFilter;
-
-    int numBytesInSectionFilter;
-
-    int filterCaps;
-
-    int[] linkCaps;
-
-    boolean bTimeFilter;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl
deleted file mode 100644
index b65f404..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Demux IP address configuration.
- *
- * {@hide}
- */
-parcelable TunerDemuxIpAddress {
-    boolean isIpV6;
-
-    byte[] addr;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl
deleted file mode 100644
index b244388..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerDemuxIpAddress;
-
-/**
- * Filter Settings for an IP filter.
- *
- * {@hide}
- */
-parcelable TunerDemuxIpAddressSettings {
-    TunerDemuxIpAddress srcIpAddress;
-
-    TunerDemuxIpAddress dstIpAddress;
-
-    char srcPort;
-
-    char dstPort;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
deleted file mode 100644
index 8b238b6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Demux PID interface.
- *
- * {@hide}
- */
-union TunerDemuxPid {
-    char tPid;
-
-    char mmtpPid;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl
deleted file mode 100644
index 4ec4d75..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvr Settings interface.
- *
- * {@hide}
- */
-parcelable TunerDvrSettings {
-    int statusMask;
-
-    int lowThreshold;
-
-    int highThreshold;
-
-    int dataFormat;
-
-    int packetSize;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl
deleted file mode 100644
index 4c9e3af..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for an ALP filter.
- *
- * {@hide}
- */
-parcelable TunerFilterAlpConfiguration {
-    byte packetType;
-
-    byte lengthType;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl
deleted file mode 100644
index 6bf88f0..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for Video and Audio.
- *
- * {@hide}
- */
-parcelable TunerFilterAvSettings {
-    /**
-     * true if the filter output goes to decoder directly in pass through mode.
-     */
-    boolean isPassthrough;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
deleted file mode 100644
index 808cfd1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterAlpConfiguration;
-import android.media.tv.tuner.TunerFilterIpConfiguration;
-import android.media.tv.tuner.TunerFilterMmtpConfiguration;
-import android.media.tv.tuner.TunerFilterTlvConfiguration;
-import android.media.tv.tuner.TunerFilterTsConfiguration;
-
-/**
- * Filter configuration.
- *
- * {@hide}
- */
-union TunerFilterConfiguration {
-    TunerFilterTsConfiguration ts;
-
-    TunerFilterMmtpConfiguration mmtp;
-
-    TunerFilterIpConfiguration ip;
-
-    TunerFilterTlvConfiguration tlv;
-
-    TunerFilterAlpConfiguration alp;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl
deleted file mode 100644
index b971dd3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Download data.
- *
- * {@hide}
- */
-parcelable TunerFilterDownloadEvent {
-    int itemId;
-
-    /**
-     * MPU sequence number of filtered data (only for MMTP)
-     */
-    int mpuSequenceNumber;
-
-    int itemFragmentIndex;
-
-    int lastItemFragmentIndex;
-
-    /**
-     * Data size in bytes of filtered data
-     */
-    char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl
deleted file mode 100644
index 417a5fe..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for downloading.
- *
- * {@hide}
- */
-parcelable TunerFilterDownloadSettings {
-    int downloadId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
deleted file mode 100644
index 1305510..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterDownloadEvent;
-import android.media.tv.tuner.TunerFilterIpPayloadEvent;
-import android.media.tv.tuner.TunerFilterMediaEvent;
-import android.media.tv.tuner.TunerFilterMmtpRecordEvent;
-import android.media.tv.tuner.TunerFilterMonitorEvent;
-import android.media.tv.tuner.TunerFilterPesEvent;
-import android.media.tv.tuner.TunerFilterSectionEvent;
-import android.media.tv.tuner.TunerFilterTemiEvent;
-import android.media.tv.tuner.TunerFilterTsRecordEvent;
-
-/**
- * Filter events.
- *
- * {@hide}
- */
-union TunerFilterEvent {
-    TunerFilterMediaEvent media;
-
-    TunerFilterSectionEvent section;
-
-    TunerFilterPesEvent pes;
-
-    TunerFilterTsRecordEvent tsRecord;
-
-    TunerFilterMmtpRecordEvent mmtpRecord;
-
-    TunerFilterDownloadEvent download;
-
-    TunerFilterIpPayloadEvent ipPayload;
-
-    TunerFilterTemiEvent temi;
-
-    TunerFilterMonitorEvent monitor;
-
-    int startId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl
deleted file mode 100644
index 8b4d889..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerDemuxIpAddressSettings;
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for an IP filter.
- *
- * {@hide}
- */
-parcelable TunerFilterIpConfiguration {
-    TunerDemuxIpAddressSettings ipAddr;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl
deleted file mode 100644
index d5bda93..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for IP payload data.
- *
- * {@hide}
- */
-parcelable TunerFilterIpPayloadEvent {
-    /**
-     * Data size in bytes of ip data
-     */
-    char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
deleted file mode 100644
index c3dbce9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.hardware.common.NativeHandle;
-import android.media.tv.tuner.TunerAudioExtraMetaData;
-
-/**
- * Filter Event for Audio or Video Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMediaEvent {
-    char streamId;
-
-    /**
-     * true if PTS is present in PES header.
-     */
-    boolean isPtsPresent;
-
-    /**
-     * Presentation Time Stamp for the audio or video frame. It is based on a 90KHz
-     * clock and has the same format as the PTS (Presentation Time Stamp).
-     */
-    long pts;
-
-    /**
-     * Data size in bytes of audio or video frame
-     */
-    int dataLength;
-
-    /**
-     *  The offset in the memory block which is shared among multiple
-     *  MediaEvents.
-     */
-    int offset;
-
-    /**
-     * A handle associated to the memory where audio or video data stays.
-     */
-    NativeHandle avMemory;
-
-    /**
-     * True if the avMemory is in secure area, and isn't mappable.
-     */
-    boolean isSecureMemory;
-
-    /**
-     * An Id is used by HAL to provide additional information for AV data.
-     * For secure audio, it's the audio handle used by Audio Track.
-     */
-    long avDataId;
-
-    /**
-     * MPU sequence number of filtered data (only for MMTP)
-     */
-    int mpuSequenceNumber;
-
-    boolean isPesPrivateData;
-
-    /**
-     * If TunerAudioExtraMetaData field is valid or not
-     */
-    boolean isAudioExtraMetaData;
-
-    /**
-     * Only valid when isAudioExtraMetaData is true
-     */
-    TunerAudioExtraMetaData audio;
-}
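To make the memory model documented by the removed TunerFilterMediaEvent concrete: a frame of dataLength bytes starts at offset inside the memory block behind avMemory. The following minimal sketch assumes that block is a single, non-secure, mmap-able file descriptor; all names in it are illustrative only and are not part of the tuner service API.

```cpp
// Minimal sketch, assuming the removed TunerFilterMediaEvent layout: the AV
// payload sits at `offset` inside the block behind `avMemory`, which here is
// taken to be one non-secure, mmap-able fd. Names are illustrative only.
#include <sys/mman.h>

#include <cstdint>
#include <vector>

// Fields copied out of a media event; mirrors the parcelable removed above.
struct MediaEventView {
    int fd;              // first fd carried by avMemory
    int32_t offset;      // offset of this frame inside the shared block
    int32_t dataLength;  // size of the frame in bytes
};

// Copies one filtered AV frame out of the shared memory block.
std::vector<uint8_t> readAvFrame(const MediaEventView& event) {
    size_t mapSize = static_cast<size_t>(event.offset) + event.dataLength;
    void* base = mmap(nullptr, mapSize, PROT_READ, MAP_SHARED, event.fd, 0);
    if (base == MAP_FAILED) {
        return {};
    }
    const uint8_t* frame = static_cast<const uint8_t*>(base) + event.offset;
    std::vector<uint8_t> copy(frame, frame + event.dataLength);
    munmap(base, mapSize);
    return copy;
}
```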
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl
deleted file mode 100644
index 162ca8e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for an mmtp filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMmtpConfiguration {
-    char mmtpPid;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl
deleted file mode 100644
index b8871cf..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for an MMTP Record Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMmtpRecordEvent {
-    int scHevcIndexMask;
-
-    /**
-     * Byte number from beginning of the filter's output
-     */
-    long byteNumber;
-
-    /**
-     * If the current event contains extended information or not
-     */
-    boolean isExtended;
-
-    /**
-     * The Presentation Time Stamp(PTS) for the audio or video frame. It is based on 90KHz
-     * and has the same format as the PTS in ISO/IEC 13818-1.
-     */
-    long pts;
-
-    /**
-     * MPU sequence number of the filtered data. This is only used for MMTP.
-     */
-    int mpuSequenceNumber;
-
-    /**
-     * Specifies the address of the first macroblock in the slice defined in ITU-T Rec. H.264.
-     */
-    int firstMbInSlice;
-
-    /**
-     * TS index mask.
-     */
-    int tsIndexMask;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl
deleted file mode 100644
index 31ab5e6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter monitor events.
- *
- * {@hide}
- */
-union TunerFilterMonitorEvent {
-    /**
-     * New scrambling status.
-     */
-    int scramblingStatus;
-
-    /**
-     * New cid for the IP filter.
-     */
-    int cid;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl
deleted file mode 100644
index 312f314..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for Pes Data.
- *
- * {@hide}
- */
-parcelable TunerFilterPesDataSettings {
-    char streamId;
-
-    boolean isRaw;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
deleted file mode 100644
index dc1ecc6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for PES Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterPesEvent {
-    char streamId;
-
-    /**
-     * Data size in bytes of PES data
-     */
-    char dataLength;
-
-    /**
-     * MPU sequence number of filtered data
-     */
-    int mpuSequenceNumber;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl
deleted file mode 100644
index 29be624..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterScIndexMask;
-
-/**
- * Filter Settings for recording.
- *
- * {@hide}
- */
-parcelable TunerFilterRecordSettings {
-    int tsIndexMask;
-
-    int scIndexType;
-
-    TunerFilterScIndexMask scIndexMask;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl
deleted file mode 100644
index ed37fce..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter SC Index Mask
- *
- * {@hide}
- */
-union TunerFilterScIndexMask {
-    int sc;
-
-    int scHevc;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl
deleted file mode 100644
index dd4f842..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Bits settings of a section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionBits {
-    byte[] filter;
-
-    byte[] mask;
-
-    byte[] mode;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl
deleted file mode 100644
index 00aabe4..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSectionBits;
-import android.media.tv.tuner.TunerFilterSectionTableInfo;
-
-/**
- * Section filter condition settings.
- *
- * {@hide}
- */
-union TunerFilterSectionCondition {
-    TunerFilterSectionBits sectionBits;
-
-    TunerFilterSectionTableInfo tableInfo;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl
deleted file mode 100644
index 5f20926..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionEvent {
-    /**
-     * Table ID of filtered data
-     */
-    char tableId;
-
-    /**
-     * Version number of filtered data
-     */
-    char version;
-
-    /**
-     * Section number of filtered data
-     */
-    char sectionNum;
-
-    /**
-     * Data size in bytes of filtered data
-     */
-    char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl
deleted file mode 100644
index 22129b6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSectionCondition;
-
-/**
- * Filter Settings for a section filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionSettings {
-    TunerFilterSectionCondition condition;
-
-    boolean isCheckCrc;
-
-    boolean isRepeat;
-
-    boolean isRaw;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl
deleted file mode 100644
index cc78c9d..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Table info settings of a section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionTableInfo {
-    char tableId;
-
-    char version;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
deleted file mode 100644
index eb7eaa5..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterAvSettings;
-import android.media.tv.tuner.TunerFilterDownloadSettings;
-import android.media.tv.tuner.TunerFilterPesDataSettings;
-import android.media.tv.tuner.TunerFilterRecordSettings;
-import android.media.tv.tuner.TunerFilterSectionSettings;
-
-/**
- * Filter Settings.
- *
- * {@hide}
- */
-union TunerFilterSettings {
-    boolean nothing;
-
-    TunerFilterAvSettings av;
-
-    TunerFilterSectionSettings section;
-
-    TunerFilterPesDataSettings pesData;
-
-    TunerFilterRecordSettings record;
-
-    TunerFilterDownloadSettings download;
-
-    boolean isPassthrough;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl
deleted file mode 100644
index 122dfc3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.hardware.common.NativeHandle;
-
-/**
- * Filter Shared Handle Information.
- *
- * {@hide}
- */
-parcelable TunerFilterSharedHandleInfo {
-    NativeHandle handle;
-    long size;
-}
\ No newline at end of file
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl
deleted file mode 100644
index 4c4e993..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Timed External Media Information (TEMI) data.
- *
- * {@hide}
- */
-parcelable TunerFilterTemiEvent {
-    /**
-     * Presentation Time Stamp for the audio or video frame. It is based on a 90KHz
-     * clock and has the same format as the PTS (Presentation Time Stamp) in ISO/IEC 13818-1.
-     */
-    long pts;
-
-    /**
-     * TEMI Descriptor Tag
-     */
-    byte descrTag;
-
-    /**
-     * TEMI Descriptor
-     */
-    byte[] descrData;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl
deleted file mode 100644
index 0b237b4..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for a tlv filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTlvConfiguration {
-    byte packetType;
-
-    boolean isCompressedIpPacket;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
deleted file mode 100644
index 2e386e6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for a TS filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTsConfiguration {
-    char tpid;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl
deleted file mode 100644
index c52a749..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterScIndexMask;
-
-/**
- * Filter Event for TS Record Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTsRecordEvent {
-    char pid;
-
-    int tsIndexMask;
-
-    /**
-     * Indexes of record output
-     */
-    TunerFilterScIndexMask scIndexMask;
-
-    /**
-     * Byte number from beginning of the filter's output
-     */
-    long byteNumber;
-
-    /**
-     * If the current event contains extended information or not
-     */
-    boolean isExtended;
-
-    /**
-     * The Presentation Time Stamp(PTS) for the audio or video frame. It is based on 90KHz
-     * and has the same format as the PTS in ISO/IEC 13818-1.
-     */
-    long pts;
-
-    /**
-     * Specifies the address of the first macroblock in the slice defined in ITU-T Rec. H.264.
-     */
-    int firstMbInSlice;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl
deleted file mode 100644
index 74bf04e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Analog Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAnalogCapabilities {
-	/**
-     * Signal Type capability
-     */
-    int typeCap;
-
-    /**
-     * Standard Interchange Format (SIF) capability
-     */
-    int sifStandardCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
deleted file mode 100644
index 40cd8c9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Analog Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAnalogSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int signalType;
-
-    /**
-     * Standard Interchange Format (SIF) setting
-     */
-    int sifStandard;
-
-    /**
-     * Fields after isExtended are only valid when isExtended is true
-     */
-    boolean isExtended;
-
-    int aftFlag;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl
deleted file mode 100644
index 6c9be77..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ATSC3 Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3Capabilities {
-    /**
-     * Bandwidth capability
-     */
-    int bandwidthCap;
-
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * TimeInterleaveMode capability
-     */
-    int timeInterleaveModeCap;
-
-    /**
-     * CodeRate capability
-     */
-    int codeRateCap;
-
-    /**
-     * FEC capability
-     */
-    int fecCap;
-
-    /**
-     * Demodulator Output Format capability
-     */
-    int demodOutputFormatCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl
deleted file mode 100644
index b29e1f7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3PlpSettings {
-    int plpId;
-
-    int modulation;
-
-    int interleaveMode;
-
-    int codeRate;
-
-    /**
-     * Forward Error Correction Type.
-     */
-    int fec;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl
deleted file mode 100644
index 32fb8c7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAtsc3PlpSettings;
-
-/**
- * Atsc3 Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3Settings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    /**
-     * Bandwidth of tuning band.
-     */
-    int bandwidth;
-
-    int demodOutputFormat;
-
-    TunerFrontendAtsc3PlpSettings[] plpSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl
deleted file mode 100644
index 2b6c2fc..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ATSC Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtscCapabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-}
\ No newline at end of file
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl
deleted file mode 100644
index c7a8c07..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtscSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int modulation;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl
deleted file mode 100644
index b880c60..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Cable(DVBC) Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendCableCapabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    long codeRateCap; // inner FEC will converge to codeRate
-
-    /**
-     * Annex capability
-     */
-    int annexCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
deleted file mode 100644
index b9bcf29..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Cable Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendCableSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int modulation;
-
-    /**
-     * Inner Forward Error Correction type as specified in ETSI EN 300 468 V1.15.1
-     * and ETSI EN 302 307-2 V1.1.1.
-     */
-    long innerFec;
-
-    /**
-     * Symbols per second
-     */
-    int symbolRate;
-
-    /**
-     * Outer Forward Error Correction (FEC) Type.
-     */
-    int outerFec;
-
-    int annex;
-
-    /**
-     * Spectral Inversion Type.
-     */
-    int spectralInversion;
-
-    /**
-     * Fields after isExtended are only valid when isExtended is true
-     */
-    boolean isExtended;
-
-    int interleaveMode;
-
-    int bandwidth;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl
deleted file mode 100644
index 19f31f1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAnalogCapabilities;
-import android.media.tv.tuner.TunerFrontendAtscCapabilities;
-import android.media.tv.tuner.TunerFrontendAtsc3Capabilities;
-import android.media.tv.tuner.TunerFrontendCableCapabilities;
-import android.media.tv.tuner.TunerFrontendDvbsCapabilities;
-import android.media.tv.tuner.TunerFrontendDvbtCapabilities;
-import android.media.tv.tuner.TunerFrontendIsdbsCapabilities;
-import android.media.tv.tuner.TunerFrontendIsdbs3Capabilities;
-import android.media.tv.tuner.TunerFrontendIsdbtCapabilities;
-
-/**
- * Frontend Capabilities interface.
- *
- * Use a group of vectors as the workaround for Union structure that is not fully supported
- * in AIDL currently.
- *
- * Client may use FrontendInfo.type as the discriminator to check the corresponding vector. If
- * the vector is not null, it contains a valid value.
- *
- * {@hide}
- */
-union TunerFrontendCapabilities {
-    /**
-     * Analog Frontend Capabilities
-     */
-    TunerFrontendAnalogCapabilities analogCaps;
-
-    /**
-     * ATSC Frontend Capabilities
-     */
-    TunerFrontendAtscCapabilities atscCaps;
-
-    /**
-     * ATSC3 Frontend Capabilities
-     */
-    TunerFrontendAtsc3Capabilities atsc3Caps;
-
-    /**
-     * Cable Frontend Capabilities
-     */
-    TunerFrontendCableCapabilities cableCaps;
-
-    /**
-     * DVBS Frontend Capabilities
-     */
-    TunerFrontendDvbsCapabilities dvbsCaps;
-
-    /**
-     * DVBT Frontend Capabilities
-     */
-    TunerFrontendDvbtCapabilities dvbtCaps;
-
-    /**
-     * ISDB-S Frontend Capabilities
-     */
-    TunerFrontendIsdbsCapabilities isdbsCaps;
-
-    /**
-     * ISDB-S3 Frontend Capabilities
-     */
-    TunerFrontendIsdbs3Capabilities isdbs3Caps;
-
-    /**
-     * ISDB-T Frontend Capabilities
-     */
-    TunerFrontendIsdbtCapabilities isdbtCaps;
-}
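
The doc comment in the deleted TunerFrontendCapabilities.aidl above still describes the earlier vector-based workaround, while the declaration itself is an AIDL union keyed by the frontend type. As a minimal sketch of how a client could have read it — assuming the NDK backend (the backend used elsewhere in the tuner service) and the getTag()/get<Tag>() accessors AIDL generates for unions; the helper name is illustrative, not part of this change:

    // Sketch only: pick the modulation capability out of the deleted union type,
    // using the generated tag as the discriminator instead of null checks.
    #include <aidl/android/media/tv/tuner/TunerFrontendCapabilities.h>

    using ::aidl::android::media::tv::tuner::TunerFrontendCapabilities;

    int32_t modulationCapOf(const TunerFrontendCapabilities& caps) {
        switch (caps.getTag()) {
        case TunerFrontendCapabilities::Tag::cableCaps:
            return caps.get<TunerFrontendCapabilities::Tag::cableCaps>().modulationCap;
        case TunerFrontendCapabilities::Tag::dvbsCaps:
            return caps.get<TunerFrontendCapabilities::Tag::dvbsCaps>().modulationCap;
        default:
            return 0;  // remaining frontend types omitted from this sketch
        }
    }
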
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl
deleted file mode 100644
index e8e4933..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DTMB Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDtmbCapabilities {
-    int transmissionModeCap;
-
-    int bandwidthCap;
-
-    int modulationCap;
-
-    int codeRateCap;
-
-    int guardIntervalCap;
-
-    int interleaveModeCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl
deleted file mode 100644
index 45e7ff9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DTMB Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDtmbSettings {
-    int frequency;
-
-    int transmissionMode;
-
-    int bandwidth;
-
-    int modulation;
-
-    int codeRate;
-
-    int guardInterval;
-
-    int interleaveMode;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl
deleted file mode 100644
index 5e4322c..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DVBS Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsCapabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    long codeRateCap;  // inner FEC will converge to codeRate
-
-    /**
-     * Sub standards capability
-     */
-    int standard;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl
deleted file mode 100644
index 59b7de3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvbs Frontend CodeRate interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsCodeRate {
-    /**
-     * Inner Forward Error Correction type as specified in ETSI EN 300 468 V1.15.1
-     * and ETSI EN 302 307-2 V1.1.1.
-     */
-    long fec;
-
-    boolean isLinear;
-
-    /**
-     * true if short frames are enabled
-     */
-    boolean isShortFrames;
-
-    /**
-     * number of bits per 1000 symbols; 0 to use the default.
-     */
-    int bitsPer1000Symbol;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
deleted file mode 100644
index ec3e4b9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendDvbsCodeRate;
-
-/**
- * Dvbs Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int modulation;
-
-    TunerFrontendDvbsCodeRate codeRate;
-
-    int symbolRate;
-
-    /**
-     * Roll off type.
-     */
-    int rolloff;
-
-    /**
-     * Pilot mode.
-     */
-    int pilot;
-
-    int inputStreamId;
-
-    int standard;
-
-    /**
-     * Vcm mode.
-     */
-    int vcm;
-
-    /**
-     * Fields after isExtended are only valid when isExtended is true
-     */
-    boolean isExtended;
-
-    int scanType;
-
-    boolean isDiseqcRxMessage;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl
deleted file mode 100644
index 73f16dd..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DVBT Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbtCapabilities {
-    /**
-     * Transmission Mode capability
-     */
-    int transmissionModeCap;
-
-    /**
-     * Bandwidth capability
-     */
-    int bandwidthCap;
-
-    /**
-     * Constellation capability
-     */
-    int constellationCap;
-
-    /**
-     * Code Rate capability
-     */
-    int codeRateCap;
-
-    /**
-     * Hierarchy Type capability
-     */
-    int hierarchyCap;
-
-    /**
-     * Guard Interval capability
-     */
-    int guardIntervalCap;
-
-    /**
-     * T2 Support capability
-     */
-    boolean isT2Supported;
-
-    /**
-     * Miso Support capability
-     */
-    boolean isMisoSupported;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
deleted file mode 100644
index 14c942a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvbt Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbtSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int transmissionMode;
-
-    int bandwidth;
-
-    int constellation;
-
-    int hierarchy;
-
-    /**
-     * Code Rate for High Priority level
-     */
-    int hpCodeRate;
-
-    /**
-     * Code Rate for Low Priority level
-     */
-    int lpCodeRate;
-
-    int guardInterval;
-
-    boolean isHighPriority;
-
-    int standard;
-
-    boolean isMiso;
-
-    /**
-     * Physical Layer Pipe (PLP) mode
-     */
-    int plpMode;
-
-    /**
-     * Physical Layer Pipe (PLP) Id
-     */
-    int plpId;
-
-    /**
-     * Physical Layer Pipe (PLP) Group Id
-     */
-    int plpGroupId;
-
-    /**
-     * Fields after isExtended are only valid when isExtended is true
-     */
-    boolean isExtended;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl
deleted file mode 100644
index 4bccd56..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendCapabilities;
-
-/**
- * FrontendInfo interface that carries tuner frontend information.
- *
- * <p>This is used to update the TunerResourceManager and pass Frontend
- * information from HAL to the client side.
- *
- * {@hide}
- */
-parcelable TunerFrontendInfo {
-    /**
-     * Frontend Handle
-     */
-    int handle;
-
-    /**
-     * Frontend Type
-     */
-    int type;
-
-    /**
-     * Minimum Frequency in Hertz
-     */
-    int minFrequency;
-
-    /**
-     * Maximum Frequency in Hertz
-     */
-    int maxFrequency;
-
-    /**
-     * Minimum symbols per second
-     */
-    int minSymbolRate;
-
-    /**
-     * Maximum symbols per second
-     */
-    int maxSymbolRate;
-
-    /**
-     * Range in Hertz
-     */
-    int acquireRange;
-
-    /**
-     * Frontends are assigned the same exclusiveGroupId if they can't
-     * function at the same time, for instance because they share the same hardware module.
-     */
-    int exclusiveGroupId;
-
-    /**
-     * A list of supported status types which the client can query
-     */
-    int[] statusCaps;
-
-    /**
-     * Frontend Capabilities
-     */
-    TunerFrontendCapabilities caps;
-}
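
The exclusiveGroupId note above is the one scheduling rule carried by the deleted TunerFrontendInfo parcelable: frontends that share a hardware module report the same group id and cannot be tuned at the same time. A minimal sketch of that check, assuming the NDK-backend struct generated from this parcelable; the helper is illustrative, not part of this change:

    // Sketch only: two frontends may be used concurrently only if they are not in the
    // same exclusive group, i.e. they do not share a hardware module.
    #include <aidl/android/media/tv/tuner/TunerFrontendInfo.h>

    using ::aidl::android::media::tv::tuner::TunerFrontendInfo;

    bool canTuneConcurrently(const TunerFrontendInfo& a, const TunerFrontendInfo& b) {
        return a.exclusiveGroupId != b.exclusiveGroupId;
    }
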
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl
deleted file mode 100644
index 84dd67a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-S3 Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbs3Capabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    int codeRateCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
deleted file mode 100644
index 9a11fd5..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbs3 Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbs3Settings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    char streamId;
-
-    int streamIdType;
-
-    int modulation;
-
-    int codeRate;
-
-    /**
-     * Symbols per second
-     */
-    int symbolRate;
-
-    int rolloff;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl
deleted file mode 100644
index 15dfdf7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-S Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbsCapabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    int codeRateCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
deleted file mode 100644
index dff9f4a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbs Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbsSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    char streamId;
-
-    int streamIdType;
-
-    int modulation;
-
-    int codeRate;
-
-    /**
-     * Symbols per second
-     */
-    int symbolRate;
-
-    int rolloff;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl
deleted file mode 100644
index c9295d8..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-T Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbtCapabilities {
-    /**
-     * ISDB-T Mode capability
-     */
-    int modeCap;
-
-    /**
-     * Bandwidth capability
-     */
-    int bandwidthCap;
-
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    int codeRateCap;
-
-    /**
-     * Guard Interval capability
-     */
-    int guardIntervalCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl
deleted file mode 100644
index 191f3a6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbt Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbtSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int modulation;
-
-    int bandwidth;
-
-    int mode;
-
-    int codeRate;
-
-    int guardInterval;
-
-    int serviceAreaId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
deleted file mode 100644
index 1b8fcbb..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Info.
- *
- * {@hide}
- */
-parcelable TunerFrontendScanAtsc3PlpInfo {
-    byte plpId;
-
-    boolean llsFlag;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl
deleted file mode 100644
index 9921ca1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendScanAtsc3PlpInfo;
-
-/**
- * Tuner Frontend Scan Message interface.
- *
- * {@hide}
- */
-union TunerFrontendScanMessage {
-    boolean isLocked;
-
-    boolean isEnd;
-
-    byte progressPercent;
-
-    int[] frequencies;
-
-    int[] symbolRates;
-
-    int hierarchy;
-
-    int analogType;
-
-    byte[] plpIds;
-
-    byte[] groupIds;
-
-    char[] inputStreamIds;
-
-    int std;
-
-    TunerFrontendScanAtsc3PlpInfo[] atsc3PlpInfos;
-
-    int modulation;
-
-    int annex;
-
-    boolean isHighPriority;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
deleted file mode 100644
index 70a5f3e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendUnionSettings;
-
-/**
- * Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendSettings {
-    TunerFrontendUnionSettings settings;
-
-    boolean isExtended;
-
-    int endFrequency;
-
-    int inversion;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
deleted file mode 100644
index 2b3c01b..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendStatusAtsc3PlpInfo;
-
-/**
- * Tuner Frontend Status interface.
- *
- * {@hide}
- */
-union TunerFrontendStatus {
-    /**
-     * Lock status for Demod in True/False.
-     */
-    boolean isDemodLocked;
-
-    /**
-     * SNR value measured by 0.001 dB.
-     */
-    int snr;
-
-    /**
-     * The number of error bits per 1 billion bits.
-     */
-    int ber;
-
-    /**
-     * The number of error packets per 1 billion packets.
-     */
-    int per;
-
-    /**
-     * The number of error bits per 1 billion bits before FEC.
-     */
-    int preBer;
-
-    /**
-     * Signal Quality in percent.
-     */
-    int signalQuality;
-
-    /**
-     * Signal Strength measured by 0.001 dBm.
-     */
-    int signalStrength;
-
-    /**
-     * Symbols per second
-     */
-    int symbolRate;
-
-    long innerFec;
-
-    /**
-     * Check frontend type to decide the hidl type value
-     */
-    int modulation;
-
-    int inversion;
-
-    int lnbVoltage;
-
-    byte plpId;
-
-    boolean isEWBS;
-
-    /**
-     * AGC value is normalized from 0 to 255.
-     */
-    byte agc;
-
-    boolean isLnaOn;
-
-    boolean[] isLayerError;
-
-    /**
-     * MER value measured by 0.001 dB
-     */
-    int mer;
-
-    /**
-     * Frequency difference in Hertz.
-     */
-    int freqOffset;
-
-    int hierarchy;
-
-    boolean isRfLocked;
-
-    /**
-     * A list of PLP status for tuned PLPs for ATSC3 frontend.
-     */
-    TunerFrontendStatusAtsc3PlpInfo[] plpInfo;
-
-    // 1.1 Extension Starting
-
-    /**
-     * Extended modulation status. Check frontend type to decide the hidl type value.
-     */
-    int[] modulations;
-
-    /**
-     * Extended bit error ratio status.
-     */
-    int[] bers;
-
-    /**
-     * Extended code rate status.
-     */
-    long[] codeRates;
-
-    /**
-     * Extended bandwidth status. Check frontend type to decide the hidl type value.
-     */
-    int bandwidth;
-
-    /**
-     * Extended guard interval status. Check frontend type to decide the hidl type value.
-     */
-    int interval;
-
-    /**
-     * Extended transmission mode status. Check frontend type to decide the hidl type value.
-     */
-    int transmissionMode;
-
-    /**
-     * Uncorrectable Error Counts of the frontend's Physical Layer Pipe (PLP)
-     * since the last tune operation.
-     */
-    int uec;
-
-    /**
-     * The current DVB-T2 system id status.
-     */
-    char systemId;
-
-    /**
-     * Frontend Interleaving Modes. Check frontend type to decide the hidl type value.
-     */
-    int[] interleaving;
-
-    /**
-     * Segments in ISDB-T Specification of all the channels.
-     */
-    byte[] isdbtSegment;
-
-    /**
-     * Transport Stream Data Rate in BPS of the current channel.
-     */
-    int[] tsDataRate;
-
-    /**
-     * Roll Off Type status of the frontend. Check frontend type to decide the hidl type value.
-     */
-    int rollOff;
-
-    /**
-     * If the frontend currently supports MISO or not.
-     */
-    boolean isMiso;
-
-    /**
-     * If the frontend code rate is linear or not.
-     */
-    boolean isLinear;
-
-    /**
-     * If short frames are enabled or not.
-     */
-    boolean isShortFrames;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl
deleted file mode 100644
index 4116c34..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Info in Frontend status.
- *
- * {@hide}
- */
-parcelable TunerFrontendStatusAtsc3PlpInfo {
-    /**
-     * PLP Id value.
-     */
-    byte plpId;
-
-    /**
-     * Demod Lock/Unlock status of this particular PLP.
-     */
-    boolean isLocked;
-
-    /**
-     * Uncorrectable Error Counts (UEC) of this particular PLP since last tune operation.
-     */
-    int uec;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl
deleted file mode 100644
index c362c2a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAnalogSettings;
-import android.media.tv.tuner.TunerFrontendAtscSettings;
-import android.media.tv.tuner.TunerFrontendAtsc3Settings;
-import android.media.tv.tuner.TunerFrontendCableSettings;
-import android.media.tv.tuner.TunerFrontendDtmbSettings;
-import android.media.tv.tuner.TunerFrontendDvbsSettings;
-import android.media.tv.tuner.TunerFrontendDvbtSettings;
-import android.media.tv.tuner.TunerFrontendIsdbsSettings;
-import android.media.tv.tuner.TunerFrontendIsdbs3Settings;
-import android.media.tv.tuner.TunerFrontendIsdbtSettings;
-
-/**
- * Frontend Settings Union interface.
- *
- * {@hide}
- */
-union TunerFrontendUnionSettings {
-    TunerFrontendAnalogSettings analog;
-
-    TunerFrontendAtscSettings atsc;
-
-    TunerFrontendAtsc3Settings atsc3;
-
-    TunerFrontendCableSettings cable;
-
-    TunerFrontendDvbsSettings dvbs;
-
-    TunerFrontendDvbtSettings dvbt;
-
-    TunerFrontendIsdbsSettings isdbs;
-
-    TunerFrontendIsdbs3Settings isdbs3;
-
-    TunerFrontendIsdbtSettings isdbt;
-
-    TunerFrontendDtmbSettings dtmb;
-}
diff --git a/services/tuner/hidl/TunerHidlDemux.cpp b/services/tuner/hidl/TunerHidlDemux.cpp
new file mode 100644
index 0000000..a8151d2
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDemux.cpp
@@ -0,0 +1,278 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDemux"
+
+#include "TunerHidlDemux.h"
+
+#include "TunerHidlDvr.h"
+#include "TunerHidlFilter.h"
+#include "TunerHidlTimeFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSubType;
+
+using HidlDemuxAlpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterType;
+using HidlDemuxFilterMainType = ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
+using HidlDemuxFilterType = ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
+using HidlDemuxIpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxIpFilterType;
+using HidlDemuxMmtpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
+using HidlDemuxTlvFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterType;
+using HidlDemuxTsFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
+using HidlDvrType = ::android::hardware::tv::tuner::V1_0::DvrType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDemux::TunerHidlDemux(sp<IDemux> demux, int id) {
+    mDemux = demux;
+    mDemuxId = id;
+}
+
+TunerHidlDemux::~TunerHidlDemux() {
+    mDemux = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::setFrontendDataSource(
+        const shared_ptr<ITunerFrontend>& in_frontend) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    int frontendId;
+    in_frontend->getFrontendId(&frontendId);
+    HidlResult res = mDemux->setFrontendDataSource(frontendId);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::setFrontendDataSourceById(int frontendId) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res = mDemux->setFrontendDataSource(frontendId);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openFilter(const DemuxFilterType& in_type,
+                                                int32_t in_bufferSize,
+                                                const shared_ptr<ITunerFilterCallback>& in_cb,
+                                                shared_ptr<ITunerFilter>* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlDemuxFilterMainType mainType = static_cast<HidlDemuxFilterMainType>(in_type.mainType);
+    HidlDemuxFilterType filterType{
+            .mainType = mainType,
+    };
+
+    switch (mainType) {
+    case HidlDemuxFilterMainType::TS:
+        filterType.subType.tsFilterType(static_cast<HidlDemuxTsFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::tsFilterType>()));
+        break;
+    case HidlDemuxFilterMainType::MMTP:
+        filterType.subType.mmtpFilterType(static_cast<HidlDemuxMmtpFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::mmtpFilterType>()));
+        break;
+    case HidlDemuxFilterMainType::IP:
+        filterType.subType.ipFilterType(static_cast<HidlDemuxIpFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::ipFilterType>()));
+        break;
+    case HidlDemuxFilterMainType::TLV:
+        filterType.subType.tlvFilterType(static_cast<HidlDemuxTlvFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::tlvFilterType>()));
+        break;
+    case HidlDemuxFilterMainType::ALP:
+        filterType.subType.alpFilterType(static_cast<HidlDemuxAlpFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::alpFilterType>()));
+        break;
+    }
+    HidlResult status;
+    sp<HidlIFilter> filterSp;
+    sp<TunerHidlFilter::FilterCallback> filterCb = new TunerHidlFilter::FilterCallback(in_cb);
+    sp<::android::hardware::tv::tuner::V1_0::IFilterCallback> cbSp = filterCb;
+    mDemux->openFilter(filterType, static_cast<uint32_t>(in_bufferSize), cbSp,
+                       [&](HidlResult r, const sp<HidlIFilter>& filter) {
+                           filterSp = filter;
+                           status = r;
+                       });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlFilter>(filterSp, filterCb, in_type);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    sp<HidlITimeFilter> filterSp;
+    mDemux->openTimeFilter([&](HidlResult r, const sp<HidlITimeFilter>& filter) {
+        filterSp = filter;
+        status = r;
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlTimeFilter>(filterSp);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter,
+                                                   int32_t* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    uint32_t avSyncHwId;
+    HidlResult res;
+    sp<HidlIFilter> halFilter = static_cast<TunerHidlFilter*>(tunerFilter.get())->getHalFilter();
+    mDemux->getAvSyncHwId(halFilter, [&](HidlResult r, uint32_t id) {
+        res = r;
+        avSyncHwId = id;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    *_aidl_return = (int)avSyncHwId;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::getAvSyncTime(int32_t avSyncHwId, int64_t* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    uint64_t time;
+    HidlResult res;
+    mDemux->getAvSyncTime(static_cast<uint32_t>(avSyncHwId), [&](HidlResult r, uint64_t ts) {
+        res = r;
+        time = ts;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    *_aidl_return = (int64_t)time;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+                                             const shared_ptr<ITunerDvrCallback>& in_cb,
+                                             shared_ptr<ITunerDvr>* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res;
+    sp<HidlIDvrCallback> callback = new TunerHidlDvr::DvrCallback(in_cb);
+    sp<HidlIDvr> hidlDvr;
+    mDemux->openDvr(static_cast<HidlDvrType>(in_dvbType), in_bufferSize, callback,
+                    [&](HidlResult r, const sp<HidlIDvr>& dvr) {
+                        hidlDvr = dvr;
+                        res = r;
+                    });
+    if (res != HidlResult::SUCCESS) {
+        *_aidl_return = nullptr;
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDvr>(hidlDvr, in_dvbType);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::connectCiCam(int32_t ciCamId) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res = mDemux->connectCiCam(static_cast<uint32_t>(ciCamId));
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::disconnectCiCam() {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res = mDemux->disconnectCiCam();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::close() {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res = mDemux->close();
+    mDemux = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
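
Every method in the new TunerHidlDemux.cpp above follows the same shape: call the V1_0 HIDL IDemux (through a capturing lambda when the HIDL method returns values via callback) and translate any non-SUCCESS HidlResult into a service-specific binder status. A minimal sketch of that translation as a standalone helper — the helper name is hypothetical, not part of this change, and it assumes the same HidlResult alias used in the file:

    // Sketch only: the HidlResult -> ndk::ScopedAStatus mapping that each method of
    // TunerHidlDemux repeats inline.
    static ::ndk::ScopedAStatus toAidlStatus(HidlResult res) {
        if (res == HidlResult::SUCCESS) {
            return ::ndk::ScopedAStatus::ok();
        }
        // Non-SUCCESS results are surfaced as service-specific errors, matching the
        // convention used throughout the new TunerHidl* classes.
        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
    }

    // e.g. connectCiCam() could then end with:
    //     return toAidlStatus(mDemux->connectCiCam(static_cast<uint32_t>(ciCamId)));
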
diff --git a/services/tuner/hidl/TunerHidlDemux.h b/services/tuner/hidl/TunerHidlDemux.h
new file mode 100644
index 0000000..d535da6
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDemux.h
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDEMUX_H
+#define ANDROID_MEDIA_TUNERHIDLDEMUX_H
+
+#include <aidl/android/media/tv/tuner/BnTunerDemux.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::tv::tuner::V1_0::IDemux;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlIDemux = ::android::hardware::tv::tuner::V1_0::IDemux;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlDemux : public BnTunerDemux {
+public:
+    TunerHidlDemux(sp<HidlIDemux> demux, int demuxId);
+    virtual ~TunerHidlDemux();
+
+    ::ndk::ScopedAStatus setFrontendDataSource(
+            const shared_ptr<ITunerFrontend>& in_frontend) override;
+    ::ndk::ScopedAStatus setFrontendDataSourceById(int frontendId) override;
+    ::ndk::ScopedAStatus openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+                                    const shared_ptr<ITunerFilterCallback>& in_cb,
+                                    shared_ptr<ITunerFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus getAvSyncHwId(const shared_ptr<ITunerFilter>& in_tunerFilter,
+                                       int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getAvSyncTime(int32_t in_avSyncHwId, int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+                                 const shared_ptr<ITunerDvrCallback>& in_cb,
+                                 shared_ptr<ITunerDvr>* _aidl_return) override;
+    ::ndk::ScopedAStatus connectCiCam(int32_t in_ciCamId) override;
+    ::ndk::ScopedAStatus disconnectCiCam() override;
+    ::ndk::ScopedAStatus close() override;
+
+    int getId() { return mDemuxId; }
+
+private:
+    sp<HidlIDemux> mDemux;
+    int mDemuxId;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLDEMUX_H
diff --git a/services/tuner/hidl/TunerHidlDescrambler.cpp b/services/tuner/hidl/TunerHidlDescrambler.cpp
new file mode 100644
index 0000000..dd8cd9c
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDescrambler.cpp
@@ -0,0 +1,149 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDescrambler"
+
+#include "TunerHidlDescrambler.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerHidlDemux.h"
+#include "TunerHidlFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDescrambler::TunerHidlDescrambler(sp<HidlIDescrambler> descrambler) {
+    mDescrambler = descrambler;
+}
+
+TunerHidlDescrambler::~TunerHidlDescrambler() {
+    mDescrambler = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::setDemuxSource(
+        const shared_ptr<ITunerDemux>& in_tunerDemux) {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDescrambler->setDemuxSource(
+            static_cast<TunerHidlDemux*>(in_tunerDemux.get())->getId());
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::setKeyToken(const vector<uint8_t>& in_keyToken) {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDescrambler->setKeyToken(in_keyToken);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::addPid(
+        const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    sp<HidlIFilter> halFilter =
+            (in_optionalSourceFilter == nullptr)
+                    ? nullptr
+                    : static_cast<TunerHidlFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+    HidlResult res = mDescrambler->addPid(getHidlDemuxPid(in_pid), halFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::removePid(
+        const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    sp<HidlIFilter> halFilter =
+            (in_optionalSourceFilter == nullptr)
+                    ? nullptr
+                    : static_cast<TunerHidlFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+    HidlResult res = mDescrambler->removePid(getHidlDemuxPid(in_pid), halFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::close() {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDescrambler->close();
+    mDescrambler = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+HidlDemuxPid TunerHidlDescrambler::getHidlDemuxPid(const DemuxPid& pid) {
+    HidlDemuxPid hidlPid;
+    switch (pid.getTag()) {
+    case DemuxPid::tPid: {
+        hidlPid.tPid((uint16_t)pid.get<DemuxPid::Tag::tPid>());
+        break;
+    }
+    case DemuxPid::mmtpPid: {
+        hidlPid.mmtpPid((uint16_t)pid.get<DemuxPid::Tag::mmtpPid>());
+        break;
+    }
+    }
+    return hidlPid;
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
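
getHidlDemuxPid() above is the one spot in the new descrambler wrapper where an AIDL union (DemuxPid) is translated by hand into its HIDL safe_union counterpart. A minimal caller-side sketch, assuming the standard set<Tag>() accessor AIDL generates for unions and the aliases already declared in TunerHidlDescrambler.h; the helper and the PID value are illustrative only:

    // Sketch only: build an AIDL DemuxPid for a TS PID and hand it to addPid(), which
    // converts it with getHidlDemuxPid() before calling into the HIDL HAL.
    static DemuxPid makeTsPid(int32_t tsPid) {
        DemuxPid pid;
        pid.set<DemuxPid::Tag::tPid>(tsPid);
        return pid;
    }

    // e.g.:
    //     descrambler->addPid(makeTsPid(0x100), nullptr /* no source filter */);
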
diff --git a/services/tuner/hidl/TunerHidlDescrambler.h b/services/tuner/hidl/TunerHidlDescrambler.h
new file mode 100644
index 0000000..9494968
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDescrambler.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
+#define ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
+
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
+#include <aidl/android/media/tv/tuner/BnTunerDescrambler.h>
+#include <android/hardware/tv/tuner/1.0/IDescrambler.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+using HidlDemuxPid = ::android::hardware::tv::tuner::V1_0::DemuxPid;
+using HidlIDescrambler = ::android::hardware::tv::tuner::V1_0::IDescrambler;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlDescrambler : public BnTunerDescrambler {
+public:
+    TunerHidlDescrambler(sp<HidlIDescrambler> descrambler);
+    virtual ~TunerHidlDescrambler();
+
+    ::ndk::ScopedAStatus setDemuxSource(const std::shared_ptr<ITunerDemux>& in_tunerDemux) override;
+    ::ndk::ScopedAStatus setKeyToken(const std::vector<uint8_t>& in_keyToken) override;
+    ::ndk::ScopedAStatus addPid(
+            const DemuxPid& in_pid,
+            const std::shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+    ::ndk::ScopedAStatus removePid(
+            const DemuxPid& in_pid,
+            const std::shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+    ::ndk::ScopedAStatus close() override;
+
+private:
+    HidlDemuxPid getHidlDemuxPid(const DemuxPid& pid);
+
+    sp<HidlIDescrambler> mDescrambler;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
diff --git a/services/tuner/hidl/TunerHidlDvr.cpp b/services/tuner/hidl/TunerHidlDvr.cpp
new file mode 100644
index 0000000..1a619d5
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDvr.cpp
@@ -0,0 +1,257 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDvr"
+
+#include "TunerHidlDvr.h"
+
+#include <aidl/android/hardware/tv/tuner/DataFormat.h>
+#include <aidl/android/hardware/tv/tuner/PlaybackStatus.h>
+#include <aidl/android/hardware/tv/tuner/RecordStatus.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <fmq/ConvertMQDescriptors.h>
+
+using ::aidl::android::hardware::tv::tuner::DataFormat;
+using ::aidl::android::hardware::tv::tuner::PlaybackStatus;
+using ::aidl::android::hardware::tv::tuner::RecordStatus;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::android::unsafeHidlToAidlMQDescriptor;
+using ::android::hardware::MessageQueue;
+using ::android::hardware::MQDescriptorSync;
+
+using HidlDataFormat = ::android::hardware::tv::tuner::V1_0::DataFormat;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using MQDesc = MQDescriptorSync<uint8_t>;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDvr::TunerHidlDvr(sp<HidlIDvr> dvr, DvrType type) {
+    mDvr = dvr;
+    mType = type;
+}
+
+TunerHidlDvr::~TunerHidlDvr() {
+    mDvr = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    MQDesc dvrMQDesc;
+    HidlResult res;
+    mDvr->getQueueDesc([&](HidlResult r, const MQDesc& desc) {
+        dvrMQDesc = desc;
+        res = r;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    AidlMQDesc aidlMQDesc;
+    unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(dvrMQDesc, &aidlMQDesc);
+    *_aidl_return = move(aidlMQDesc);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::configure(const DvrSettings& in_settings) {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->configure(getHidlDvrSettings(in_settings));
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::attachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (in_filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    sp<HidlIFilter> hidlFilter = static_cast<TunerHidlFilter*>(in_filter.get())->getHalFilter();
+    if (hidlFilter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    HidlResult res = mDvr->attachFilter(hidlFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::detachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (in_filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    sp<HidlIFilter> halFilter = (static_cast<TunerHidlFilter*>(in_filter.get()))->getHalFilter();
+    if (halFilter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    HidlResult res = mDvr->detachFilter(halFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::start() {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->start();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::stop() {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->stop();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::flush() {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->flush();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::close() {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->close();
+    mDvr = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+HidlDvrSettings TunerHidlDvr::getHidlDvrSettings(const DvrSettings& settings) {
+    HidlDvrSettings s;
+    switch (mType) {
+    case DvrType::PLAYBACK: {
+        s.playback({
+                .statusMask =
+                        static_cast<uint8_t>(settings.get<DvrSettings::playback>().statusMask),
+                .lowThreshold =
+                        static_cast<uint32_t>(settings.get<DvrSettings::playback>().lowThreshold),
+                .highThreshold =
+                        static_cast<uint32_t>(settings.get<DvrSettings::playback>().highThreshold),
+                .dataFormat = static_cast<HidlDataFormat>(
+                        settings.get<DvrSettings::playback>().dataFormat),
+                .packetSize =
+                        static_cast<uint8_t>(settings.get<DvrSettings::playback>().packetSize),
+        });
+        return s;
+    }
+    case DvrType::RECORD: {
+        s.record({
+                .statusMask = static_cast<uint8_t>(settings.get<DvrSettings::record>().statusMask),
+                .lowThreshold =
+                        static_cast<uint32_t>(settings.get<DvrSettings::record>().lowThreshold),
+                .highThreshold =
+                        static_cast<uint32_t>(settings.get<DvrSettings::record>().highThreshold),
+                .dataFormat =
+                        static_cast<HidlDataFormat>(settings.get<DvrSettings::record>().dataFormat),
+                .packetSize = static_cast<uint8_t>(settings.get<DvrSettings::record>().packetSize),
+        });
+        return s;
+    }
+    default:
+        break;
+    }
+    return s;
+}
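
As a usage illustration for the conversion above: an AIDL client fills in the DvrSettings union before calling configure(), and getHidlDvrSettings() flattens it into the HIDL struct. A minimal client-side sketch with arbitrary values; the PlaybackSettings and PlaybackStatus names are assumed from the AIDL tuner types rather than taken from this change.

    // Illustrative only, not part of this change.
    static void configurePlaybackDvr(const std::shared_ptr<ITunerDvr>& tunerDvr) {
        PlaybackSettings playback;
        playback.statusMask = static_cast<int32_t>(PlaybackStatus::SPACE_ALMOST_EMPTY) |
                              static_cast<int32_t>(PlaybackStatus::SPACE_ALMOST_FULL);
        playback.lowThreshold = 4096;       // example thresholds, in bytes
        playback.highThreshold = 1 << 20;
        playback.dataFormat = DataFormat::TS;
        playback.packetSize = 188;          // standard TS packet size
        DvrSettings settings;
        settings.set<DvrSettings::playback>(playback);
        tunerDvr->configure(settings);      // ends up in getHidlDvrSettings() above
    }
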
+
+/////////////// IDvrCallback ///////////////////////
+Return<void> TunerHidlDvr::DvrCallback::onRecordStatus(const HidlRecordStatus status) {
+    if (mTunerDvrCallback != nullptr) {
+        mTunerDvrCallback->onRecordStatus(static_cast<RecordStatus>(status));
+    }
+    return Void();
+}
+
+Return<void> TunerHidlDvr::DvrCallback::onPlaybackStatus(const HidlPlaybackStatus status) {
+    if (mTunerDvrCallback != nullptr) {
+        mTunerDvrCallback->onPlaybackStatus(static_cast<PlaybackStatus>(status));
+    }
+    return Void();
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
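
Each method above repeats the same translation from HidlResult to a binder status: non-SUCCESS codes are forwarded numerically as a service-specific error, and SUCCESS becomes ok(). The snippet below is only a sketch of that recurring pattern as a hypothetical shared helper; the change itself inlines the logic in every method rather than factoring it out.

    // Hypothetical helper, not part of this change.
    static ::ndk::ScopedAStatus hidlResultToStatus(HidlResult res) {
        if (res != HidlResult::SUCCESS) {
            // The raw HIDL error code is forwarded for the AIDL client to interpret.
            return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
        }
        return ::ndk::ScopedAStatus::ok();
    }
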
diff --git a/services/tuner/hidl/TunerHidlDvr.h b/services/tuner/hidl/TunerHidlDvr.h
new file mode 100644
index 0000000..a280ff7
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDvr.h
@@ -0,0 +1,91 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDVR_H
+#define ANDROID_MEDIA_TUNERHIDLDVR_H
+
+#include <aidl/android/hardware/tv/tuner/DvrSettings.h>
+#include <aidl/android/hardware/tv/tuner/DvrType.h>
+#include <aidl/android/media/tv/tuner/BnTunerDvr.h>
+#include <aidl/android/media/tv/tuner/ITunerDvrCallback.h>
+#include <android/hardware/tv/tuner/1.0/IDvr.h>
+#include <android/hardware/tv/tuner/1.0/IDvrCallback.h>
+
+#include "TunerHidlFilter.h"
+
+using ::aidl::android::hardware::common::fmq::MQDescriptor;
+using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::DvrSettings;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlDvrSettings = ::android::hardware::tv::tuner::V1_0::DvrSettings;
+using HidlIDvr = ::android::hardware::tv::tuner::V1_0::IDvr;
+using HidlIDvrCallback = ::android::hardware::tv::tuner::V1_0::IDvrCallback;
+using HidlPlaybackStatus = ::android::hardware::tv::tuner::V1_0::PlaybackStatus;
+using HidlRecordStatus = ::android::hardware::tv::tuner::V1_0::RecordStatus;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
+
+class TunerHidlDvr : public BnTunerDvr {
+public:
+    TunerHidlDvr(sp<HidlIDvr> dvr, DvrType type);
+    ~TunerHidlDvr();
+
+    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus configure(const DvrSettings& in_settings) override;
+    ::ndk::ScopedAStatus attachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus detachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus start() override;
+    ::ndk::ScopedAStatus stop() override;
+    ::ndk::ScopedAStatus flush() override;
+    ::ndk::ScopedAStatus close() override;
+
+    struct DvrCallback : public HidlIDvrCallback {
+        DvrCallback(const shared_ptr<ITunerDvrCallback> tunerDvrCallback)
+              : mTunerDvrCallback(tunerDvrCallback) {}
+
+        virtual Return<void> onRecordStatus(const HidlRecordStatus status);
+        virtual Return<void> onPlaybackStatus(const HidlPlaybackStatus status);
+
+    private:
+        shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
+    };
+
+private:
+    HidlDvrSettings getHidlDvrSettings(const DvrSettings& settings);
+
+    sp<HidlIDvr> mDvr;
+    DvrType mType;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLDVR_H
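
For orientation, DvrCallback adapts HAL-side HIDL callbacks back onto the AIDL ITunerDvrCallback, while TunerHidlDvr itself wraps the HAL IDvr for AIDL clients. A rough wiring sketch follows; the hidlDvr and tunerCb inputs come from the demux layer and the AIDL client, neither of which is shown in this header, and the helper name is hypothetical.

    // Sketch only, not part of this change.
    static std::shared_ptr<TunerHidlDvr> wrapHalDvr(
            const sp<HidlIDvr>& hidlDvr, const std::shared_ptr<ITunerDvrCallback>& tunerCb) {
        // Adapter that forwards onRecordStatus/onPlaybackStatus from the HAL to the AIDL
        // callback; in the real flow it is handed to the HAL when the DVR is opened.
        sp<TunerHidlDvr::DvrCallback> halCallback = new TunerHidlDvr::DvrCallback(tunerCb);
        (void)halCallback;
        // Wrap the HAL handle so AIDL clients talk to TunerHidlDvr instead of the HAL directly.
        return ::ndk::SharedRefBase::make<TunerHidlDvr>(hidlDvr, DvrType::RECORD);
    }
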
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
new file mode 100644
index 0000000..a5bbf39
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -0,0 +1,1275 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlFilter"
+
+#include "TunerHidlFilter.h"
+
+#include <aidl/android/hardware/tv/tuner/Constant.h>
+#include <aidl/android/hardware/tv/tuner/DemuxScIndex.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <aidlcommonsupport/NativeHandle.h>
+#include <binder/IPCThreadState.h>
+#include <fmq/ConvertMQDescriptors.h>
+
+#include "TunerHelper.h"
+#include "TunerHidlService.h"
+
+using ::aidl::android::hardware::tv::tuner::AudioExtraMetaData;
+using ::aidl::android::hardware::tv::tuner::AudioStreamType;
+using ::aidl::android::hardware::tv::tuner::Constant;
+using ::aidl::android::hardware::tv::tuner::DemuxAlpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxAlpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterDownloadEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterIpPayloadEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMainType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMediaEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMediaEventExtraMetaData;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMmtpRecordEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMonitorEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterPesEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterScIndexMask;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionBits;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettingsCondition;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettingsConditionTableInfo;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSubType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterTemiEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterTsRecordEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddress;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddressIpAddress;
+using ::aidl::android::hardware::tv::tuner::DemuxIpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxIpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterType;
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::aidl::android::hardware::tv::tuner::DemuxScIndex;
+using ::aidl::android::hardware::tv::tuner::DemuxTlvFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTlvFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterType;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::aidl::android::hardware::tv::tuner::ScramblingStatus;
+using ::android::dupToAidl;
+using ::android::IPCThreadState;
+using ::android::makeFromAidl;
+using ::android::unsafeHidlToAidlMQDescriptor;
+using ::android::hardware::hidl_handle;
+
+using HidlDemuxAlpLengthType = ::android::hardware::tv::tuner::V1_0::DemuxAlpLengthType;
+using HidlDemuxFilterMainType = ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
+using HidlDemuxIpAddress = ::android::hardware::tv::tuner::V1_0::DemuxIpAddress;
+using HidlDemuxMmtpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
+using HidlDemuxMmtpPid = ::android::hardware::tv::tuner::V1_0::DemuxMmtpPid;
+using HidlDemuxRecordScIndexType = ::android::hardware::tv::tuner::V1_0::DemuxRecordScIndexType;
+using HidlDemuxStreamId = ::android::hardware::tv::tuner::V1_0::DemuxStreamId;
+using HidlDemuxTsFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlAudioStreamType = ::android::hardware::tv::tuner::V1_1::AudioStreamType;
+using HidlConstant = ::android::hardware::tv::tuner::V1_1::Constant;
+using HidlVideoStreamType = ::android::hardware::tv::tuner::V1_1::VideoStreamType;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlFilter::TunerHidlFilter(sp<HidlIFilter> filter, sp<FilterCallback> cb,
+                                 DemuxFilterType type)
+      : mFilter(filter),
+        mType(type),
+        mStarted(false),
+        mShared(false),
+        mClientPid(-1),
+        mFilterCallback(cb) {
+    mFilter_1_1 = ::android::hardware::tv::tuner::V1_1::IFilter::castFrom(filter);
+}
+
+TunerHidlFilter::~TunerHidlFilter() {
+    Mutex::Autolock _l(mLock);
+    mFilter = nullptr;
+    mFilter_1_1 = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    MQDesc filterMQDesc;
+    HidlResult res;
+    mFilter->getQueueDesc([&](HidlResult r, const MQDesc& desc) {
+        filterMQDesc = desc;
+        res = r;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    AidlMQDesc aidlMQDesc;
+    unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(filterMQDesc, &aidlMQDesc);
+    *_aidl_return = move(aidlMQDesc);
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getId(int32_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res;
+    mFilter->getId([&](HidlResult r, uint32_t filterId) {
+        res = r;
+        mId = filterId;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    *_aidl_return = mId;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getId64Bit(int64_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res;
+    mFilter_1_1->getId64Bit([&](HidlResult r, uint64_t filterId) {
+        res = r;
+        mId64Bit = filterId;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    *_aidl_return = mId64Bit;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configure(const DemuxFilterSettings& in_settings) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlDemuxFilterSettings settings;
+    switch (in_settings.getTag()) {
+    case DemuxFilterSettings::ts: {
+        getHidlTsSettings(in_settings, settings);
+        break;
+    }
+    case DemuxFilterSettings::mmtp: {
+        getHidlMmtpSettings(in_settings, settings);
+        break;
+    }
+    case DemuxFilterSettings::ip: {
+        getHidlIpSettings(in_settings, settings);
+        break;
+    }
+    case DemuxFilterSettings::tlv: {
+        getHidlTlvSettings(in_settings, settings);
+        break;
+    }
+    case DemuxFilterSettings::alp: {
+        getHidlAlpSettings(in_settings, settings);
+        break;
+    }
+    }
+
+    HidlResult res = mFilter->configure(settings);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureMonitorEvent(int32_t monitorEventType) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res = mFilter_1_1->configureMonitorEvent(monitorEventType);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureIpFilterContextId(int32_t cid) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res = mFilter_1_1->configureIpCid(cid);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureAvStreamType(const AvStreamType& in_avStreamType) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlAvStreamType type;
+    if (!getHidlAvStreamType(in_avStreamType, type)) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res = mFilter_1_1->configureAvStreamType(type);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    sp<HidlIFilter> hidlFilter = static_cast<TunerHidlFilter*>(filter.get())->getHalFilter();
+    HidlResult res = mFilter->setDataSource(hidlFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getAvSharedHandle(NativeHandle* out_avMemory,
+                                                        int64_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res;
+    mFilter_1_1->getAvSharedHandle([&](HidlResult r, hidl_handle avMemory, uint64_t avMemSize) {
+        res = r;
+        if (res == HidlResult::SUCCESS) {
+            *out_avMemory = dupToAidl(avMemory);
+            *_aidl_return = static_cast<int64_t>(avMemSize);
+        }
+    });
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::releaseAvHandle(const NativeHandle& in_handle,
+                                                      int64_t in_avDataId) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res = mFilter->releaseAvHandle(hidl_handle(makeFromAidl(in_handle)), in_avDataId);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    // Make one more call into the HAL so that binder gets a chance to close the transport FD.
+    // This is a tricky workaround for a problem in Binder.
+    // TODO: [b/192048842] When that problem is fixed we may be able to remove or change this code.
+    mFilter->getId([&](HidlResult /* r */, uint32_t /* filterId*/){});
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::start() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    HidlResult res = mFilter->start();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    mStarted = true;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::stop() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    HidlResult res = mFilter->stop();
+    mStarted = false;
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::flush() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    HidlResult res = mFilter->flush();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::close() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            TunerHidlService::getTunerService()->removeSharedFilter(this->ref<TunerHidlFilter>());
+        } else {
+            // Called from the process the filter was shared with; do not actually close
+            // the underlying HAL filter.
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            mStarted = false;
+            return ::ndk::ScopedAStatus::ok();
+        }
+    }
+
+    HidlResult res = mFilter->close();
+    mFilter = nullptr;
+    mFilter_1_1 = nullptr;
+    mStarted = false;
+    mShared = false;
+    mClientPid = -1;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::acquireSharedFilterToken(string* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared || mStarted) {
+        ALOGD("create SharedFilter in wrong state");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    IPCThreadState* ipc = IPCThreadState::self();
+    mClientPid = ipc->getCallingPid();
+    string token =
+            TunerHidlService::getTunerService()->addFilterToShared(this->ref<TunerHidlFilter>());
+    _aidl_return->assign(token);
+    mShared = true;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::freeSharedFilterToken(const string& /* in_filterToken */) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (!mShared) {
+        // The filter is not shared or the shared filter has been closed.
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+        mFilterCallback->detachSharedFilterCallback();
+    }
+
+    TunerHidlService::getTunerService()->removeSharedFilter(this->ref<TunerHidlFilter>());
+    mShared = false;
+
+    return ::ndk::ScopedAStatus::ok();
+}
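
Taken together, acquireSharedFilterToken(), freeSharedFilterToken(), and the shared branch of close() implement the filter handoff: the owner marks the filter as shared and has its pid recorded, another process resolves the token through TunerHidlService and drives the same HAL filter, and start(), stop(), flush(), and getQueueDesc() are refused for the owner's pid until the token is freed. A brief owner-side sketch; the ownerFilter proxy and the surrounding client code are assumed, not part of this change.

    // Hypothetical owner-side flow.
    static void shareFilterTemporarily(const std::shared_ptr<ITunerFilter>& ownerFilter) {
        std::string token;
        ownerFilter->acquireSharedFilterToken(&token);  // filter becomes shared; owner pid recorded
        // The token is handed to another process, which resolves it via the Tuner service and
        // operates on the same underlying HAL filter while the owner holds back.
        ownerFilter->freeSharedFilterToken(token);      // detach the shared callback; mShared = false
    }
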
+
+::ndk::ScopedAStatus TunerHidlFilter::getFilterType(DemuxFilterType* _aidl_return) {
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    *_aidl_return = mType;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::setDelayHint(const FilterDelayHint&) {
+    // setDelayHint is not supported in HIDL HAL
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+bool TunerHidlFilter::isSharedFilterAllowed(int callingPid) {
+    return mShared && mClientPid != callingPid;
+}
+
+void TunerHidlFilter::attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb) {
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->attachSharedFilterCallback(in_cb);
+    }
+}
+
+sp<HidlIFilter> TunerHidlFilter::getHalFilter() {
+    return mFilter;
+}
+
+bool TunerHidlFilter::getHidlAvStreamType(const AvStreamType avStreamType, HidlAvStreamType& type) {
+    if (isAudioFilter()) {
+        AudioStreamType audio = avStreamType.get<AvStreamType::audio>();
+        if (static_cast<int32_t>(audio) > static_cast<int32_t>(HidlAudioStreamType::DRA)) {
+            return false;
+        }
+        type.audio(static_cast<HidlAudioStreamType>(audio));
+        return true;
+    }
+
+    if (isVideoFilter()) {
+        type.video(static_cast<HidlVideoStreamType>(avStreamType.get<AvStreamType::video>()));
+        return true;
+    }
+
+    return false;
+}
+
+bool TunerHidlFilter::isAudioFilter() {
+    return (mType.mainType == DemuxFilterMainType::TS &&
+            mType.subType.get<DemuxFilterSubType::tsFilterType>() == DemuxTsFilterType::AUDIO) ||
+           (mType.mainType == DemuxFilterMainType::MMTP &&
+            mType.subType.get<DemuxFilterSubType::mmtpFilterType>() == DemuxMmtpFilterType::AUDIO);
+}
+
+bool TunerHidlFilter::isVideoFilter() {
+    return (mType.mainType == DemuxFilterMainType::TS &&
+            mType.subType.get<DemuxFilterSubType::tsFilterType>() == DemuxTsFilterType::VIDEO) ||
+           (mType.mainType == DemuxFilterMainType::MMTP &&
+            mType.subType.get<DemuxFilterSubType::mmtpFilterType>() == DemuxMmtpFilterType::VIDEO);
+}
+
+void TunerHidlFilter::getHidlTsSettings(const DemuxFilterSettings& settings,
+                                        HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxTsFilterSettings& tsConf = settings.get<DemuxFilterSettings::ts>();
+    HidlDemuxTsFilterSettings ts{
+            .tpid = static_cast<uint16_t>(tsConf.tpid),
+    };
+
+    switch (tsConf.filterSettings.getTag()) {
+    case DemuxTsFilterSettingsFilterSettings::av: {
+        ts.filterSettings.av(getHidlAvSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::av>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::section: {
+        ts.filterSettings.section(getHidlSectionSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::pesData: {
+        ts.filterSettings.pesData(getHidlPesDataSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::pesData>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::record: {
+        ts.filterSettings.record(getHidlRecordSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::record>()));
+        break;
+    }
+    default: {
+        ts.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.ts(ts);
+}
+
+void TunerHidlFilter::getHidlMmtpSettings(const DemuxFilterSettings& settings,
+                                          HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxMmtpFilterSettings& mmtpConf = settings.get<DemuxFilterSettings::mmtp>();
+    HidlDemuxMmtpFilterSettings mmtp{
+            .mmtpPid = static_cast<HidlDemuxMmtpPid>(mmtpConf.mmtpPid),
+    };
+
+    switch (mmtpConf.filterSettings.getTag()) {
+    case DemuxMmtpFilterSettingsFilterSettings::av: {
+        mmtp.filterSettings.av(getHidlAvSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::av>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::section: {
+        mmtp.filterSettings.section(getHidlSectionSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::pesData: {
+        mmtp.filterSettings.pesData(getHidlPesDataSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::pesData>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::record: {
+        mmtp.filterSettings.record(getHidlRecordSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::record>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::download: {
+        mmtp.filterSettings.download(getHidlDownloadSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::download>()));
+        break;
+    }
+    default: {
+        mmtp.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.mmtp(mmtp);
+}
+
+void TunerHidlFilter::getHidlIpSettings(const DemuxFilterSettings& settings,
+                                        HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxIpFilterSettings& ipConf = settings.get<DemuxFilterSettings::ip>();
+    HidlDemuxIpAddress ipAddr{
+            .srcPort = static_cast<uint16_t>(ipConf.ipAddr.srcPort),
+            .dstPort = static_cast<uint16_t>(ipConf.ipAddr.dstPort),
+    };
+
+    ipConf.ipAddr.srcIpAddress.getTag() == DemuxIpAddressIpAddress::v6
+            ? ipAddr.srcIpAddress.v6(getIpV6Address(ipConf.ipAddr.srcIpAddress))
+            : ipAddr.srcIpAddress.v4(getIpV4Address(ipConf.ipAddr.srcIpAddress));
+    ipConf.ipAddr.dstIpAddress.getTag() == DemuxIpAddressIpAddress::v6
+            ? ipAddr.dstIpAddress.v6(getIpV6Address(ipConf.ipAddr.dstIpAddress))
+            : ipAddr.dstIpAddress.v4(getIpV4Address(ipConf.ipAddr.dstIpAddress));
+
+    HidlDemuxIpFilterSettings ip;
+    ip.ipAddr = ipAddr;
+
+    switch (ipConf.filterSettings.getTag()) {
+    case DemuxIpFilterSettingsFilterSettings::section: {
+        ip.filterSettings.section(getHidlSectionSettings(
+                ipConf.filterSettings.get<DemuxIpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxIpFilterSettingsFilterSettings::bPassthrough: {
+        ip.filterSettings.bPassthrough(
+                ipConf.filterSettings.get<DemuxIpFilterSettingsFilterSettings::bPassthrough>());
+        break;
+    }
+    default: {
+        ip.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.ip(ip);
+}
+
+hidl_array<uint8_t, IP_V6_LENGTH> TunerHidlFilter::getIpV6Address(
+        const DemuxIpAddressIpAddress& addr) {
+    hidl_array<uint8_t, IP_V6_LENGTH> ip;
+    if (addr.get<DemuxIpAddressIpAddress::v6>().size() != IP_V6_LENGTH) {
+        return ip;
+    }
+    copy(addr.get<DemuxIpAddressIpAddress::v6>().begin(),
+         addr.get<DemuxIpAddressIpAddress::v6>().end(), ip.data());
+    return ip;
+}
+
+hidl_array<uint8_t, IP_V4_LENGTH> TunerHidlFilter::getIpV4Address(
+        const DemuxIpAddressIpAddress& addr) {
+    hidl_array<uint8_t, IP_V4_LENGTH> ip;
+    if (addr.get<DemuxIpAddressIpAddress::v4>().size() != IP_V4_LENGTH) {
+        return ip;
+    }
+    copy(addr.get<DemuxIpAddressIpAddress::v4>().begin(),
+         addr.get<DemuxIpAddressIpAddress::v4>().end(), ip.data());
+    return ip;
+}
+
+void TunerHidlFilter::getHidlTlvSettings(const DemuxFilterSettings& settings,
+                                         HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxTlvFilterSettings& tlvConf = settings.get<DemuxFilterSettings::tlv>();
+    HidlDemuxTlvFilterSettings tlv{
+            .packetType = static_cast<uint8_t>(tlvConf.packetType),
+            .isCompressedIpPacket = tlvConf.isCompressedIpPacket,
+    };
+
+    switch (tlvConf.filterSettings.getTag()) {
+    case DemuxTlvFilterSettingsFilterSettings::section: {
+        tlv.filterSettings.section(getHidlSectionSettings(
+                tlvConf.filterSettings.get<DemuxTlvFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxTlvFilterSettingsFilterSettings::bPassthrough: {
+        tlv.filterSettings.bPassthrough(
+                tlvConf.filterSettings.get<DemuxTlvFilterSettingsFilterSettings::bPassthrough>());
+        break;
+    }
+    default: {
+        tlv.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.tlv(tlv);
+}
+
+void TunerHidlFilter::getHidlAlpSettings(const DemuxFilterSettings& settings,
+                                         HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxAlpFilterSettings& alpConf = settings.get<DemuxFilterSettings::alp>();
+    HidlDemuxAlpFilterSettings alp{
+            .packetType = static_cast<uint8_t>(alpConf.packetType),
+            .lengthType = static_cast<HidlDemuxAlpLengthType>(alpConf.lengthType),
+    };
+
+    switch (alpConf.filterSettings.getTag()) {
+    case DemuxAlpFilterSettingsFilterSettings::section: {
+        alp.filterSettings.section(getHidlSectionSettings(
+                alpConf.filterSettings.get<DemuxAlpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    default: {
+        alp.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.alp(alp);
+}
+
+HidlDemuxFilterAvSettings TunerHidlFilter::getHidlAvSettings(
+        const DemuxFilterAvSettings& settings) {
+    HidlDemuxFilterAvSettings av{
+            .isPassthrough = settings.isPassthrough,
+    };
+    return av;
+}
+
+HidlDemuxFilterSectionSettings TunerHidlFilter::getHidlSectionSettings(
+        const DemuxFilterSectionSettings& settings) {
+    HidlDemuxFilterSectionSettings section{
+            .isCheckCrc = settings.isCheckCrc,
+            .isRepeat = settings.isRepeat,
+            .isRaw = settings.isRaw,
+    };
+
+    switch (settings.condition.getTag()) {
+    case DemuxFilterSectionSettingsCondition::sectionBits: {
+        const DemuxFilterSectionBits& sectionBits =
+                settings.condition.get<DemuxFilterSectionSettingsCondition::sectionBits>();
+        vector<uint8_t> filter(sectionBits.filter.begin(), sectionBits.filter.end());
+        vector<uint8_t> mask(sectionBits.mask.begin(), sectionBits.mask.end());
+        vector<uint8_t> mode(sectionBits.mode.begin(), sectionBits.mode.end());
+        section.condition.sectionBits({
+                .filter = filter,
+                .mask = mask,
+                .mode = mode,
+        });
+        break;
+    }
+    case DemuxFilterSectionSettingsCondition::tableInfo: {
+        const DemuxFilterSectionSettingsConditionTableInfo& tableInfo =
+                settings.condition.get<DemuxFilterSectionSettingsCondition::tableInfo>();
+        section.condition.tableInfo({
+                .tableId = static_cast<uint16_t>(tableInfo.tableId),
+                .version = static_cast<uint16_t>(tableInfo.version),
+        });
+        break;
+    }
+    default: {
+        break;
+    }
+    }
+    return section;
+}
+
+HidlDemuxFilterPesDataSettings TunerHidlFilter::getHidlPesDataSettings(
+        const DemuxFilterPesDataSettings& settings) {
+    HidlDemuxFilterPesDataSettings pes{
+            .streamId = static_cast<HidlDemuxStreamId>(settings.streamId),
+            .isRaw = settings.isRaw,
+    };
+    return pes;
+}
+
+HidlDemuxFilterRecordSettings TunerHidlFilter::getHidlRecordSettings(
+        const DemuxFilterRecordSettings& settings) {
+    HidlDemuxFilterRecordSettings record{
+            .tsIndexMask = static_cast<uint32_t>(settings.tsIndexMask),
+    };
+
+    switch (settings.scIndexMask.getTag()) {
+    case DemuxFilterScIndexMask::scIndex: {
+        record.scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType);
+        record.scIndexMask.sc(
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scIndex>()));
+        break;
+    }
+    case DemuxFilterScIndexMask::scAvc: {
+        record.scIndexType = HidlDemuxRecordScIndexType::SC;
+        uint32_t index =
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scAvc>());
+        // HIDL HAL starting from 1 << 4; AIDL starting from 1 << 0.
+        index = index << 4;
+        record.scIndexMask.sc(index);
+        break;
+    }
+    case DemuxFilterScIndexMask::scHevc: {
+        record.scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType);
+        record.scIndexMask.scHevc(
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scHevc>()));
+        break;
+    }
+    }
+    return record;
+}
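
The scAvc branch above encodes the offset called out in its comment: AIDL AVC start-code index bits begin at 1 << 0, while the HIDL mask keeps the generic DemuxScIndex bits (up through SEQUENCE) in the low positions and expects AVC bits from 1 << 4 upward, so the mask is shifted up by four on the way to the HAL. getTsRecordEvent() further down applies the inverse shift on events coming back. A short worked example with an illustrative value:

    // Worked example, not part of this change.
    static void scAvcMaskExample() {
        uint32_t aidlAvcMask = 0b0010;           // an AVC index bit on the AIDL side
        uint32_t hidlScMask = aidlAvcMask << 4;  // 0b100000, what record.scIndexMask.sc() receives
        (void)hidlScMask;  // the HAL-to-AIDL path uses the matching right shift (>> 4)
    }
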
+
+HidlDemuxFilterDownloadSettings TunerHidlFilter::getHidlDownloadSettings(
+        const DemuxFilterDownloadSettings& settings) {
+    HidlDemuxFilterDownloadSettings download{
+            .downloadId = static_cast<uint32_t>(settings.downloadId),
+    };
+    return download;
+}
+
+/////////////// FilterCallback ///////////////////////
+Return<void> TunerHidlFilter::FilterCallback::onFilterStatus(HidlDemuxFilterStatus status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
+    }
+    return Void();
+}
+
+Return<void> TunerHidlFilter::FilterCallback::onFilterEvent(
+        const HidlDemuxFilterEvent& filterEvent) {
+    vector<HidlDemuxFilterEventExt::Event> emptyEventsExt;
+    HidlDemuxFilterEventExt emptyFilterEventExt{
+            .events = emptyEventsExt,
+    };
+    onFilterEvent_1_1(filterEvent, emptyFilterEventExt);
+    return Void();
+}
+
+Return<void> TunerHidlFilter::FilterCallback::onFilterEvent_1_1(
+        const HidlDemuxFilterEvent& filterEvent, const HidlDemuxFilterEventExt& filterEventExt) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        vector<HidlDemuxFilterEvent::Event> events = filterEvent.events;
+        vector<HidlDemuxFilterEventExt::Event> eventsExt = filterEventExt.events;
+        vector<DemuxFilterEvent> tunerEvents;
+
+        getAidlFilterEvent(events, eventsExt, tunerEvents);
+        mTunerFilterCallback->onFilterEvent(tunerEvents);
+    }
+    return Void();
+}
+
+void TunerHidlFilter::FilterCallback::sendSharedFilterStatus(int32_t status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::attachSharedFilterCallback(
+        const shared_ptr<ITunerFilterCallback>& in_cb) {
+    Mutex::Autolock _l(mCallbackLock);
+    mOriginalCallback = mTunerFilterCallback;
+    mTunerFilterCallback = in_cb;
+}
+
+void TunerHidlFilter::FilterCallback::detachSharedFilterCallback() {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback = mOriginalCallback;
+        mOriginalCallback = nullptr;
+    }
+}
+
+/////////////// FilterCallback Helper Methods ///////////////////////
+void TunerHidlFilter::FilterCallback::getAidlFilterEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events,
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+        vector<DemuxFilterEvent>& aidlEvents) {
+    if (events.empty() && !eventsExt.empty()) {
+        switch (eventsExt[0].getDiscriminator()) {
+        case HidlDemuxFilterEventExt::Event::hidl_discriminator::monitorEvent: {
+            getMonitorEvent(eventsExt, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEventExt::Event::hidl_discriminator::startId: {
+            getRestartEvent(eventsExt, aidlEvents);
+            break;
+        }
+        default: {
+            break;
+        }
+        }
+    }
+
+    if (!events.empty()) {
+        switch (events[0].getDiscriminator()) {
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::media: {
+            getMediaEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::section: {
+            getSectionEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::pes: {
+            getPesEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::tsRecord: {
+            getTsRecordEvent(events, eventsExt, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::mmtpRecord: {
+            getMmtpRecordEvent(events, eventsExt, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::download: {
+            getDownloadEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::ipPayload: {
+            getIpPayloadEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::temi: {
+            getTemiEvent(events, aidlEvents);
+            break;
+        }
+        default: {
+            break;
+        }
+        }
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getMediaEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterMediaEvent& mediaEvent = events[i].media();
+        DemuxFilterMediaEvent media;
+
+        media.streamId = static_cast<int32_t>(mediaEvent.streamId);
+        media.isPtsPresent = mediaEvent.isPtsPresent;
+        media.pts = static_cast<int64_t>(mediaEvent.pts);
+        media.isDtsPresent = false;
+        media.dts = static_cast<int64_t>(-1);
+        media.dataLength = static_cast<int64_t>(mediaEvent.dataLength);
+        media.offset = static_cast<int64_t>(mediaEvent.offset);
+        media.isSecureMemory = mediaEvent.isSecureMemory;
+        media.avDataId = static_cast<int64_t>(mediaEvent.avDataId);
+        media.mpuSequenceNumber = static_cast<int32_t>(mediaEvent.mpuSequenceNumber);
+        media.isPesPrivateData = mediaEvent.isPesPrivateData;
+        media.scIndexMask.set<DemuxFilterScIndexMask::scIndex>(
+                static_cast<int32_t>(DemuxScIndex::UNDEFINED));
+
+        if (mediaEvent.extraMetaData.getDiscriminator() ==
+            HidlDemuxFilterMediaEvent::ExtraMetaData::hidl_discriminator::audio) {
+            AudioExtraMetaData audio;
+            audio.adFade = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adFade);
+            audio.adPan = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adPan);
+            audio.versionTextTag =
+                    static_cast<int16_t>(mediaEvent.extraMetaData.audio().versionTextTag);
+            audio.adGainCenter = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainCenter);
+            audio.adGainFront = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainFront);
+            audio.adGainSurround =
+                    static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainSurround);
+            media.extraMetaData.set<DemuxFilterMediaEventExtraMetaData::audio>(audio);
+        } else {
+            media.extraMetaData.set<DemuxFilterMediaEventExtraMetaData::noinit>(true);
+        }
+
+        if (mediaEvent.avMemory.getNativeHandle() != nullptr) {
+            media.avMemory = dupToAidl(mediaEvent.avMemory.getNativeHandle());
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::media>(move(media));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getSectionEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterSectionEvent& sectionEvent = events[i].section();
+        DemuxFilterSectionEvent section;
+
+        section.tableId = static_cast<int32_t>(sectionEvent.tableId);
+        section.version = static_cast<int32_t>(sectionEvent.version);
+        section.sectionNum = static_cast<int32_t>(sectionEvent.sectionNum);
+        section.dataLength = static_cast<int64_t>(sectionEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::section>(move(section));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getPesEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                                                  vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterPesEvent& pesEvent = events[i].pes();
+        DemuxFilterPesEvent pes;
+
+        pes.streamId = static_cast<int32_t>(pesEvent.streamId);
+        pes.dataLength = static_cast<int32_t>(pesEvent.dataLength);
+        pes.mpuSequenceNumber = static_cast<int32_t>(pesEvent.mpuSequenceNumber);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::pes>(move(pes));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getTsRecordEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events,
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        DemuxFilterTsRecordEvent tsRecord;
+        const HidlDemuxFilterTsRecordEvent& tsRecordEvent = events[i].tsRecord();
+
+        DemuxFilterScIndexMask scIndexMask;
+        if (tsRecordEvent.scIndexMask.getDiscriminator() ==
+            HidlDemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::sc) {
+            int32_t hidlScIndex = static_cast<int32_t>(tsRecordEvent.scIndexMask.sc());
+            if (hidlScIndex <= static_cast<int32_t>(DemuxScIndex::SEQUENCE)) {
+                scIndexMask.set<DemuxFilterScIndexMask::scIndex>(hidlScIndex);
+            } else {
+                // AVC start-code indices begin at 1 << 4 in the HIDL HAL but at
+                // 1 << 0 in AIDL, so shift the mask down by four bits.
+                scIndexMask.set<DemuxFilterScIndexMask::scAvc>(hidlScIndex >> 4);
+            }
+        } else if (tsRecordEvent.scIndexMask.getDiscriminator() ==
+                   HidlDemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::scHevc) {
+            scIndexMask.set<DemuxFilterScIndexMask::scHevc>(
+                    static_cast<int32_t>(tsRecordEvent.scIndexMask.scHevc()));
+        }
+
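+        // Only TS PIDs can be represented here; any other HIDL pid variant is reported as
+        // INVALID_TS_PID.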
+        if (tsRecordEvent.pid.getDiscriminator() == HidlDemuxPid::hidl_discriminator::tPid) {
+            DemuxPid pid;
+            pid.set<DemuxPid::tPid>(static_cast<int32_t>(tsRecordEvent.pid.tPid()));
+            tsRecord.pid = pid;
+        } else {
+            DemuxPid pid;
+            pid.set<DemuxPid::tPid>(static_cast<int32_t>(Constant::INVALID_TS_PID));
+            tsRecord.pid = pid;
+        }
+
+        tsRecord.scIndexMask = scIndexMask;
+        tsRecord.tsIndexMask = static_cast<int32_t>(tsRecordEvent.tsIndexMask);
+        tsRecord.byteNumber = static_cast<int64_t>(tsRecordEvent.byteNumber);
+
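+        // When a matching 1.1 tsRecord extension exists at the same index, pick up its
+        // pts and firstMbInSlice fields.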
+        if (eventsExt.size() > i &&
+            eventsExt[i].getDiscriminator() ==
+                    HidlDemuxFilterEventExt::Event::hidl_discriminator::tsRecord) {
+            tsRecord.pts = static_cast<int64_t>(eventsExt[i].tsRecord().pts);
+            tsRecord.firstMbInSlice = static_cast<int32_t>(eventsExt[i].tsRecord().firstMbInSlice);
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::tsRecord>(move(tsRecord));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getMmtpRecordEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events,
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        DemuxFilterMmtpRecordEvent mmtpRecord;
+        const HidlDemuxFilterMmtpRecordEvent& mmtpRecordEvent = events[i].mmtpRecord();
+
+        mmtpRecord.scHevcIndexMask = static_cast<int32_t>(mmtpRecordEvent.scHevcIndexMask);
+        mmtpRecord.byteNumber = static_cast<int64_t>(mmtpRecordEvent.byteNumber);
+
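+        // Pick up the 1.1 mmtpRecord extension fields for this event when available.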
+        if (eventsExt.size() > i &&
+            eventsExt[i].getDiscriminator() ==
+                    HidlDemuxFilterEventExt::Event::hidl_discriminator::mmtpRecord) {
+            mmtpRecord.pts = static_cast<int64_t>(eventsExt[i].mmtpRecord().pts);
+            mmtpRecord.mpuSequenceNumber =
+                    static_cast<int32_t>(eventsExt[i].mmtpRecord().mpuSequenceNumber);
+            mmtpRecord.firstMbInSlice =
+                    static_cast<int32_t>(eventsExt[i].mmtpRecord().firstMbInSlice);
+            mmtpRecord.tsIndexMask = static_cast<int32_t>(eventsExt[i].mmtpRecord().tsIndexMask);
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::mmtpRecord>(move(mmtpRecord));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getDownloadEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterDownloadEvent& downloadEvent = events[i].download();
+        DemuxFilterDownloadEvent download;
+
+        download.itemId = static_cast<int32_t>(downloadEvent.itemId);
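+        // downloadId is not carried by the HIDL download event, so report -1.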
+        download.downloadId = -1;
+        download.itemFragmentIndex = static_cast<int32_t>(downloadEvent.itemFragmentIndex);
+        download.mpuSequenceNumber = static_cast<int32_t>(downloadEvent.mpuSequenceNumber);
+        download.lastItemFragmentIndex = static_cast<int32_t>(downloadEvent.lastItemFragmentIndex);
+        download.dataLength = static_cast<int32_t>(downloadEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::download>(move(download));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getIpPayloadEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterIpPayloadEvent& ipPayloadEvent = events[i].ipPayload();
+        DemuxFilterIpPayloadEvent ipPayload;
+
+        ipPayload.dataLength = static_cast<int32_t>(ipPayloadEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::ipPayload>(move(ipPayload));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getTemiEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterTemiEvent& temiEvent = events[i].temi();
+        DemuxFilterTemiEvent temi;
+
+        temi.pts = static_cast<int64_t>(temiEvent.pts);
+        temi.descrTag = static_cast<int8_t>(temiEvent.descrTag);
+        vector<uint8_t> descrData = temiEvent.descrData;
+        temi.descrData.resize(descrData.size());
+        copy(descrData.begin(), descrData.end(), temi.descrData.begin());
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::temi>(move(temi));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getMonitorEvent(
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
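+    // Monitor events arrive through the 1.1 extension; only the first extension event is
+    // consumed here.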
+    HidlDemuxFilterMonitorEvent monitorEvent = eventsExt[0].monitorEvent();
+    DemuxFilterMonitorEvent monitor;
+
+    switch (monitorEvent.getDiscriminator()) {
+    case HidlDemuxFilterMonitorEvent::hidl_discriminator::scramblingStatus: {
+        monitor.set<DemuxFilterMonitorEvent::scramblingStatus>(
+                static_cast<ScramblingStatus>(monitorEvent.scramblingStatus()));
+        break;
+    }
+    case HidlDemuxFilterMonitorEvent::hidl_discriminator::cid: {
+        monitor.set<DemuxFilterMonitorEvent::cid>(static_cast<int32_t>(monitorEvent.cid()));
+        break;
+    }
+    }
+
+    DemuxFilterEvent filterEvent;
+    filterEvent.set<DemuxFilterEvent::monitorEvent>(move(monitor));
+    res.push_back(move(filterEvent));
+}
+
+void TunerHidlFilter::FilterCallback::getRestartEvent(
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    DemuxFilterEvent filterEvent;
+    filterEvent.set<DemuxFilterEvent::startId>(static_cast<int32_t>(eventsExt[0].startId()));
+    res.push_back(move(filterEvent));
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlFilter.h b/services/tuner/hidl/TunerHidlFilter.h
new file mode 100644
index 0000000..b8fad22
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFilter.h
@@ -0,0 +1,240 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLFILTER_H
+#define ANDROID_MEDIA_TUNERHIDLFILTER_H
+
+#include <aidl/android/hardware/tv/tuner/AvStreamType.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterAvSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterDownloadSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterPesDataSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterRecordSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSectionSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterType.h>
+#include <aidl/android/media/tv/tuner/BnTunerFilter.h>
+#include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <android/hardware/tv/tuner/1.1/IFilter.h>
+#include <android/hardware/tv/tuner/1.1/IFilterCallback.h>
+#include <android/hardware/tv/tuner/1.1/types.h>
+#include <fmq/MessageQueue.h>
+#include <utils/Mutex.h>
+
+#include <map>
+
+using ::aidl::android::hardware::common::NativeHandle;
+using ::aidl::android::hardware::common::fmq::MQDescriptor;
+using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::AvStreamType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterAvSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterDownloadSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterPesDataSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterRecordSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddressIpAddress;
+using ::aidl::android::hardware::tv::tuner::FilterDelayHint;
+using ::aidl::android::media::tv::tuner::BnTunerFilter;
+using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
+using ::android::Mutex;
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::string;
+using ::std::vector;
+
+using HidlAvStreamType = ::android::hardware::tv::tuner::V1_1::AvStreamType;
+using HidlDemuxAlpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterSettings;
+using HidlDemuxFilterAvSettings = ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
+using HidlDemuxFilterDownloadEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadEvent;
+using HidlDemuxFilterDownloadSettings =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadSettings;
+using HidlDemuxFilterIpPayloadEvent =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterIpPayloadEvent;
+using HidlDemuxFilterEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
+using HidlDemuxFilterMediaEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterMediaEvent;
+using HidlDemuxFilterMmtpRecordEvent =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterMmtpRecordEvent;
+using HidlDemuxFilterPesDataSettings =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterPesDataSettings;
+using HidlDemuxFilterPesEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterPesEvent;
+using HidlDemuxFilterRecordSettings =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterRecordSettings;
+using HidlDemuxFilterSectionEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionEvent;
+using HidlDemuxFilterSectionSettings =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionSettings;
+using HidlDemuxFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
+using HidlDemuxFilterStatus = ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
+using HidlDemuxFilterTemiEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterTemiEvent;
+using HidlDemuxFilterTsRecordEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterTsRecordEvent;
+using HidlDemuxIpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxIpFilterSettings;
+using HidlDemuxMmtpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterSettings;
+using HidlDemuxTlvFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterSettings;
+using HidlDemuxTsFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
+using HidlDemuxPid = ::android::hardware::tv::tuner::V1_0::DemuxPid;
+using HidlIFilter = ::android::hardware::tv::tuner::V1_0::IFilter;
+using HidlDemuxFilterEventExt = ::android::hardware::tv::tuner::V1_1::DemuxFilterEventExt;
+using HidlDemuxFilterMonitorEvent = ::android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEvent;
+using HidlDemuxFilterTsRecordEventExt =
+        ::android::hardware::tv::tuner::V1_1::DemuxFilterTsRecordEventExt;
+using HidlIFilterCallback = ::android::hardware::tv::tuner::V1_1::IFilterCallback;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using MQDesc = MQDescriptorSync<uint8_t>;
+using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
+
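+// Byte lengths of the IP addresses converted by getIpV4Address()/getIpV6Address().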
+const static int IP_V4_LENGTH = 4;
+const static int IP_V6_LENGTH = 16;
+
+class TunerHidlFilter : public BnTunerFilter {
+public:
+    class FilterCallback : public HidlIFilterCallback {
+    public:
+        FilterCallback(const shared_ptr<ITunerFilterCallback> tunerFilterCallback)
+              : mTunerFilterCallback(tunerFilterCallback){};
+
+        virtual Return<void> onFilterEvent(const HidlDemuxFilterEvent& filterEvent);
+        virtual Return<void> onFilterEvent_1_1(const HidlDemuxFilterEvent& filterEvent,
+                                               const HidlDemuxFilterEventExt& filterEventExt);
+        virtual Return<void> onFilterStatus(HidlDemuxFilterStatus status);
+
+        void sendSharedFilterStatus(int32_t status);
+        void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+        void detachSharedFilterCallback();
+
+    private:
+        void getAidlFilterEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                                const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                                vector<DemuxFilterEvent>& aidlEvents);
+
+        void getMediaEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                           vector<DemuxFilterEvent>& res);
+        void getSectionEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                             vector<DemuxFilterEvent>& res);
+        void getPesEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                         vector<DemuxFilterEvent>& res);
+        void getTsRecordEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                              const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                              vector<DemuxFilterEvent>& res);
+        void getMmtpRecordEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                                const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                                vector<DemuxFilterEvent>& res);
+        void getDownloadEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                              vector<DemuxFilterEvent>& res);
+        void getIpPayloadEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                               vector<DemuxFilterEvent>& res);
+        void getTemiEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                          vector<DemuxFilterEvent>& res);
+        void getMonitorEvent(const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                             vector<DemuxFilterEvent>& res);
+        void getRestartEvent(const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                             vector<DemuxFilterEvent>& res);
+
+    private:
+        shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
+        shared_ptr<ITunerFilterCallback> mOriginalCallback;
+        Mutex mCallbackLock;
+    };
+
+    TunerHidlFilter(sp<HidlIFilter> filter, sp<FilterCallback> cb, DemuxFilterType type);
+    virtual ~TunerHidlFilter();
+
+    ::ndk::ScopedAStatus getId(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getId64Bit(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus configure(const DemuxFilterSettings& in_settings) override;
+    ::ndk::ScopedAStatus configureMonitorEvent(int32_t in_monitorEventTypes) override;
+    ::ndk::ScopedAStatus configureIpFilterContextId(int32_t in_cid) override;
+    ::ndk::ScopedAStatus configureAvStreamType(const AvStreamType& in_avStreamType) override;
+    ::ndk::ScopedAStatus getAvSharedHandle(NativeHandle* out_avMemory,
+                                           int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus releaseAvHandle(const NativeHandle& in_handle,
+                                         int64_t in_avDataId) override;
+    ::ndk::ScopedAStatus setDataSource(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus start() override;
+    ::ndk::ScopedAStatus stop() override;
+    ::ndk::ScopedAStatus flush() override;
+    ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus acquireSharedFilterToken(string* _aidl_return) override;
+    ::ndk::ScopedAStatus freeSharedFilterToken(const string& in_filterToken) override;
+    ::ndk::ScopedAStatus getFilterType(DemuxFilterType* _aidl_return) override;
+    ::ndk::ScopedAStatus setDelayHint(const FilterDelayHint& in_hint) override;
+
+    bool isSharedFilterAllowed(int32_t pid);
+    void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+    sp<HidlIFilter> getHalFilter();
+
+private:
+    bool isAudioFilter();
+    bool isVideoFilter();
+
+    HidlDemuxFilterAvSettings getHidlAvSettings(const DemuxFilterAvSettings& settings);
+    HidlDemuxFilterSectionSettings getHidlSectionSettings(
+            const DemuxFilterSectionSettings& settings);
+    HidlDemuxFilterPesDataSettings getHidlPesDataSettings(
+            const DemuxFilterPesDataSettings& settings);
+    HidlDemuxFilterRecordSettings getHidlRecordSettings(const DemuxFilterRecordSettings& settings);
+    HidlDemuxFilterDownloadSettings getHidlDownloadSettings(
+            const DemuxFilterDownloadSettings& settings);
+    bool getHidlAvStreamType(const AvStreamType avStreamType, HidlAvStreamType& type);
+    void getHidlTsSettings(const DemuxFilterSettings& settings,
+                           HidlDemuxFilterSettings& hidlSettings);
+    void getHidlMmtpSettings(const DemuxFilterSettings& settings,
+                             HidlDemuxFilterSettings& hidlSettings);
+    void getHidlIpSettings(const DemuxFilterSettings& settings,
+                           HidlDemuxFilterSettings& hidlSettings);
+    void getHidlTlvSettings(const DemuxFilterSettings& settings,
+                            HidlDemuxFilterSettings& hidlSettings);
+    void getHidlAlpSettings(const DemuxFilterSettings& settings,
+                            HidlDemuxFilterSettings& hidlSettings);
+
+    hidl_array<uint8_t, IP_V4_LENGTH> getIpV4Address(const DemuxIpAddressIpAddress& addr);
+    hidl_array<uint8_t, IP_V6_LENGTH> getIpV6Address(const DemuxIpAddressIpAddress& addr);
+
+    sp<HidlIFilter> mFilter;
+    sp<::android::hardware::tv::tuner::V1_1::IFilter> mFilter_1_1;
+    int32_t mId;
+    int64_t mId64Bit;
+    DemuxFilterType mType;
+    bool mStarted;
+    bool mShared;
+    int32_t mClientPid;
+    sp<FilterCallback> mFilterCallback;
+    Mutex mLock;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLFILTER_H
diff --git a/services/tuner/hidl/TunerHidlFrontend.cpp b/services/tuner/hidl/TunerHidlFrontend.cpp
new file mode 100644
index 0000000..03957f3
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFrontend.cpp
@@ -0,0 +1,1224 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TunerHidlFrontend"
+
+#include "TunerHidlFrontend.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerHidlLnb.h"
+#include "TunerHidlService.h"
+
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogSifStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogType;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Bandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3CodeRate;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Fec;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Modulation;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3PlpSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3TimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendCableTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcAnnex;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsRolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtConstellation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtHierarchy;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::FrontendGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendInnerFec;
+using ::aidl::android::hardware::tv::tuner::FrontendInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Modulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Rolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsRolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtCoderate;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtMode;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendModulationStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendRollOff;
+using ::aidl::android::hardware::tv::tuner::FrontendScanAtsc3PlpInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendSpectralInversion;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusAtsc3PlpInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlFrontendStatusAtsc3PlpInfo =
+        ::android::hardware::tv::tuner::V1_0::FrontendStatusAtsc3PlpInfo;
+using HidlFrontendAnalogSifStandard =
+        ::android::hardware::tv::tuner::V1_0::FrontendAnalogSifStandard;
+using HidlFrontendAnalogType = ::android::hardware::tv::tuner::V1_0::FrontendAnalogType;
+using HidlFrontendAtscModulation = ::android::hardware::tv::tuner::V1_0::FrontendAtscModulation;
+using HidlFrontendAtsc3Bandwidth = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Bandwidth;
+using HidlFrontendAtsc3CodeRate = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3CodeRate;
+using HidlFrontendAtsc3DemodOutputFormat =
+        ::android::hardware::tv::tuner::V1_0::FrontendAtsc3DemodOutputFormat;
+using HidlFrontendAtsc3Fec = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Fec;
+using HidlFrontendAtsc3Modulation = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Modulation;
+using HidlFrontendAtsc3TimeInterleaveMode =
+        ::android::hardware::tv::tuner::V1_0::FrontendAtsc3TimeInterleaveMode;
+using HidlFrontendDvbcAnnex = ::android::hardware::tv::tuner::V1_0::FrontendDvbcAnnex;
+using HidlFrontendDvbcModulation = ::android::hardware::tv::tuner::V1_0::FrontendDvbcModulation;
+using HidlFrontendDvbcOuterFec = ::android::hardware::tv::tuner::V1_0::FrontendDvbcOuterFec;
+using HidlFrontendDvbcSpectralInversion =
+        ::android::hardware::tv::tuner::V1_0::FrontendDvbcSpectralInversion;
+using HidlFrontendDvbsModulation = ::android::hardware::tv::tuner::V1_0::FrontendDvbsModulation;
+using HidlFrontendDvbsPilot = ::android::hardware::tv::tuner::V1_0::FrontendDvbsPilot;
+using HidlFrontendDvbsRolloff = ::android::hardware::tv::tuner::V1_0::FrontendDvbsRolloff;
+using HidlFrontendDvbsSettings = ::android::hardware::tv::tuner::V1_0::FrontendDvbsSettings;
+using HidlFrontendDvbsStandard = ::android::hardware::tv::tuner::V1_0::FrontendDvbsStandard;
+using HidlFrontendDvbsVcmMode = ::android::hardware::tv::tuner::V1_0::FrontendDvbsVcmMode;
+using HidlFrontendDvbtBandwidth = ::android::hardware::tv::tuner::V1_0::FrontendDvbtBandwidth;
+using HidlFrontendDvbtCoderate = ::android::hardware::tv::tuner::V1_0::FrontendDvbtCoderate;
+using HidlFrontendDvbtConstellation =
+        ::android::hardware::tv::tuner::V1_0::FrontendDvbtConstellation;
+using HidlFrontendDvbtGuardInterval =
+        ::android::hardware::tv::tuner::V1_0::FrontendDvbtGuardInterval;
+using HidlFrontendDvbtHierarchy = ::android::hardware::tv::tuner::V1_0::FrontendDvbtHierarchy;
+using HidlFrontendDvbtPlpMode = ::android::hardware::tv::tuner::V1_0::FrontendDvbtPlpMode;
+using HidlFrontendDvbtSettings = ::android::hardware::tv::tuner::V1_0::FrontendDvbtSettings;
+using HidlFrontendDvbtStandard = ::android::hardware::tv::tuner::V1_0::FrontendDvbtStandard;
+using HidlFrontendDvbtTransmissionMode =
+        ::android::hardware::tv::tuner::V1_0::FrontendDvbtTransmissionMode;
+using HidlFrontendInnerFec = ::android::hardware::tv::tuner::V1_0::FrontendInnerFec;
+using HidlFrontendIsdbs3Coderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Coderate;
+using HidlFrontendIsdbs3Modulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Modulation;
+using HidlFrontendIsdbs3Rolloff = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Rolloff;
+using HidlFrontendIsdbs3Settings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Settings;
+using HidlFrontendIsdbsCoderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsCoderate;
+using HidlFrontendIsdbsModulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsModulation;
+using HidlFrontendIsdbsRolloff = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsRolloff;
+using HidlFrontendIsdbsSettings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsSettings;
+using HidlFrontendIsdbsStreamIdType =
+        ::android::hardware::tv::tuner::V1_0::FrontendIsdbsStreamIdType;
+using HidlFrontendIsdbtBandwidth = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtBandwidth;
+using HidlFrontendIsdbtCoderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtCoderate;
+using HidlFrontendIsdbtGuardInterval =
+        ::android::hardware::tv::tuner::V1_0::FrontendIsdbtGuardInterval;
+using HidlFrontendIsdbtMode = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtMode;
+using HidlFrontendIsdbtModulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtModulation;
+using HidlFrontendIsdbtSettings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtSettings;
+using HidlFrontendModulationStatus = ::android::hardware::tv::tuner::V1_0::FrontendModulationStatus;
+using HidlFrontendScanAtsc3PlpInfo = ::android::hardware::tv::tuner::V1_0::FrontendScanAtsc3PlpInfo;
+using HidlFrontendScanType = ::android::hardware::tv::tuner::V1_0::FrontendScanType;
+using HidlFrontendStatusType = ::android::hardware::tv::tuner::V1_0::FrontendStatusType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlFrontendAnalogAftFlag = ::android::hardware::tv::tuner::V1_1::FrontendAnalogAftFlag;
+using HidlFrontendBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendBandwidth;
+using HidlFrontendCableTimeInterleaveMode =
+        ::android::hardware::tv::tuner::V1_1::FrontendCableTimeInterleaveMode;
+using HidlFrontendDvbcBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendDvbcBandwidth;
+using HidlFrontendDtmbBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendDtmbBandwidth;
+using HidlFrontendDtmbCodeRate = ::android::hardware::tv::tuner::V1_1::FrontendDtmbCodeRate;
+using HidlFrontendDtmbGuardInterval =
+        ::android::hardware::tv::tuner::V1_1::FrontendDtmbGuardInterval;
+using HidlFrontendDtmbModulation = ::android::hardware::tv::tuner::V1_1::FrontendDtmbModulation;
+using HidlFrontendDtmbTimeInterleaveMode =
+        ::android::hardware::tv::tuner::V1_1::FrontendDtmbTimeInterleaveMode;
+using HidlFrontendDtmbTransmissionMode =
+        ::android::hardware::tv::tuner::V1_1::FrontendDtmbTransmissionMode;
+using HidlFrontendDvbsScanType = ::android::hardware::tv::tuner::V1_1::FrontendDvbsScanType;
+using HidlFrontendGuardInterval = ::android::hardware::tv::tuner::V1_1::FrontendGuardInterval;
+using HidlFrontendInterleaveMode = ::android::hardware::tv::tuner::V1_1::FrontendInterleaveMode;
+using HidlFrontendModulation = ::android::hardware::tv::tuner::V1_1::FrontendModulation;
+using HidlFrontendRollOff = ::android::hardware::tv::tuner::V1_1::FrontendRollOff;
+using HidlFrontendTransmissionMode = ::android::hardware::tv::tuner::V1_1::FrontendTransmissionMode;
+using HidlFrontendSpectralInversion =
+        ::android::hardware::tv::tuner::V1_1::FrontendSpectralInversion;
+using HidlFrontendStatusTypeExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendStatusTypeExt1_1;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlFrontend::TunerHidlFrontend(sp<HidlIFrontend> frontend, int id) {
+    mFrontend = frontend;
+    mFrontend_1_1 = ::android::hardware::tv::tuner::V1_1::IFrontend::castFrom(mFrontend);
+    mId = id;
+}
+
+TunerHidlFrontend::~TunerHidlFrontend() {
+    mFrontend = nullptr;
+    mFrontend_1_1 = nullptr;
+    mId = -1;
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::setCallback(
+        const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) {
+    if (mFrontend == nullptr) {
+        ALOGE("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (tunerFrontendCallback == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    sp<HidlIFrontendCallback> frontendCallback = new FrontendCallback(tunerFrontendCallback);
+    HidlResult status = mFrontend->setCallback(frontendCallback);
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::tune(const FrontendSettings& settings) {
+    if (mFrontend == nullptr) {
+        ALOGE("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    HidlFrontendSettings frontendSettings;
+    HidlFrontendSettingsExt1_1 frontendSettingsExt;
+    getHidlFrontendSettings(settings, frontendSettings, frontendSettingsExt);
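+    // Prefer the 1.1 tune so the extension settings are applied; otherwise fall back to 1.0.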
+    if (mFrontend_1_1 != nullptr) {
+        status = mFrontend_1_1->tune_1_1(frontendSettings, frontendSettingsExt);
+    } else {
+        status = mFrontend->tune(frontendSettings);
+    }
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::stopTune() {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mFrontend->stopTune();
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::scan(const FrontendSettings& settings,
+                                             FrontendScanType frontendScanType) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    HidlFrontendSettings frontendSettings;
+    HidlFrontendSettingsExt1_1 frontendSettingsExt;
+    getHidlFrontendSettings(settings, frontendSettings, frontendSettingsExt);
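+    // As with tune(), use the 1.1 scan when available so the extension settings apply.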
+    if (mFrontend_1_1 != nullptr) {
+        status = mFrontend_1_1->scan_1_1(frontendSettings,
+                                         static_cast<HidlFrontendScanType>(frontendScanType),
+                                         frontendSettingsExt);
+    } else {
+        status = mFrontend->scan(frontendSettings,
+                                 static_cast<HidlFrontendScanType>(frontendScanType));
+    }
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::stopScan() {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mFrontend->stopScan();
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (lnb == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    HidlResult status = mFrontend->setLnb(static_cast<TunerHidlLnb*>(lnb.get())->getId());
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::linkCiCamToFrontend(int32_t ciCamId,
+                                                            int32_t* _aidl_return) {
+    if (mFrontend_1_1 == nullptr) {
+        ALOGD("IFrontend_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    int ltsId;
+    HidlResult status;
+    mFrontend_1_1->linkCiCam(static_cast<uint32_t>(ciCamId), [&](HidlResult r, uint32_t id) {
+        status = r;
+        ltsId = id;
+    });
+
+    if (status == HidlResult::SUCCESS) {
+        *_aidl_return = ltsId;
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::unlinkCiCamToFrontend(int32_t ciCamId) {
+    if (mFrontend_1_1 == nullptr) {
+        ALOGD("IFrontend_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mFrontend_1_1->unlinkCiCam(ciCamId);
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::close() {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    TunerHidlService::getTunerService()->removeFrontend(this->ref<TunerHidlFrontend>());
+
+    HidlResult status = mFrontend->close();
+    mFrontend = nullptr;
+    mFrontend_1_1 = nullptr;
+
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::getStatus(const vector<FrontendStatusType>& in_statusTypes,
+                                                  vector<FrontendStatus>* _aidl_return) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res;
+    vector<HidlFrontendStatus> status;
+    vector<HidlFrontendStatusExt1_1> statusExt;
+    vector<HidlFrontendStatusType> types;
+    vector<HidlFrontendStatusTypeExt1_1> typesExt;
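+    // Split the requested types between the 1.0 interface and the 1.1 extension: anything
+    // beyond ATSC3_PLP_INFO is only reported by getStatusExt1_1().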
+    for (auto s : in_statusTypes) {
+        if (static_cast<int32_t>(s) <=
+            static_cast<int32_t>(HidlFrontendStatusType::ATSC3_PLP_INFO)) {
+            types.push_back(static_cast<HidlFrontendStatusType>(s));
+        } else {
+            typesExt.push_back(static_cast<HidlFrontendStatusTypeExt1_1>(s));
+        }
+    }
+
+    mFrontend->getStatus(types, [&](HidlResult r, const hidl_vec<HidlFrontendStatus>& ss) {
+        res = r;
+        for (auto s : ss) {
+            status.push_back(s);
+        }
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    if (mFrontend_1_1 != nullptr) {
+        mFrontend_1_1->getStatusExt1_1(
+                typesExt, [&](HidlResult r, const hidl_vec<HidlFrontendStatusExt1_1>& ss) {
+                    res = r;
+                    for (auto s : ss) {
+                        statusExt.push_back(s);
+                    }
+                });
+        if (res != HidlResult::SUCCESS) {
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+        }
+    }
+
+    getAidlFrontendStatus(status, statusExt, *_aidl_return);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::getFrontendId(int32_t* _aidl_return) {
+    *_aidl_return = mId;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::getHardwareInfo(std::string* _aidl_return) {
+    _aidl_return->clear();
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::removeOutputPid(int32_t /* in_pid */) {
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::getFrontendStatusReadiness(
+        const std::vector<FrontendStatusType>& /* in_statusTypes */,
+        std::vector<FrontendStatusReadiness>* _aidl_return) {
+    _aidl_return->clear();
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+void TunerHidlFrontend::setLna(bool bEnable) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return;
+    }
+
+    mFrontend->setLna(bEnable);
+}
+
+/////////////// FrontendCallback ///////////////////////
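+// Forwards HIDL frontend callbacks to the AIDL ITunerFrontendCallback, translating the
+// event and scan-message types along the way.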
+Return<void> TunerHidlFrontend::FrontendCallback::onEvent(HidlFrontendEventType frontendEventType) {
+    ALOGV("FrontendCallback::onEvent, type=%d", frontendEventType);
+    mTunerFrontendCallback->onEvent(static_cast<FrontendEventType>(frontendEventType));
+    return Void();
+}
+
+Return<void> TunerHidlFrontend::FrontendCallback::onScanMessage(
+        HidlFrontendScanMessageType type, const HidlFrontendScanMessage& message) {
+    ALOGV("FrontendCallback::onScanMessage, type=%d", type);
+    FrontendScanMessage scanMessage;
+    switch (type) {
+    case HidlFrontendScanMessageType::LOCKED: {
+        scanMessage.set<FrontendScanMessage::isLocked>(message.isLocked());
+        break;
+    }
+    case HidlFrontendScanMessageType::END: {
+        scanMessage.set<FrontendScanMessage::isEnd>(message.isEnd());
+        break;
+    }
+    case HidlFrontendScanMessageType::PROGRESS_PERCENT: {
+        scanMessage.set<FrontendScanMessage::progressPercent>(message.progressPercent());
+        break;
+    }
+    case HidlFrontendScanMessageType::FREQUENCY: {
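+        // HIDL reports frequencies as uint32_t; AIDL widens them to int64_t.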
+        const vector<uint32_t>& f = message.frequencies();
+        vector<int64_t> lf(begin(f), end(f));
+        scanMessage.set<FrontendScanMessage::frequencies>(lf);
+        break;
+    }
+    case HidlFrontendScanMessageType::SYMBOL_RATE: {
+        const vector<uint32_t>& s = message.symbolRates();
+        vector<int32_t> symbolRates(begin(s), end(s));
+        scanMessage.set<FrontendScanMessage::symbolRates>(symbolRates);
+        break;
+    }
+    case HidlFrontendScanMessageType::HIERARCHY: {
+        scanMessage.set<FrontendScanMessage::hierarchy>(
+                static_cast<FrontendDvbtHierarchy>(message.hierarchy()));
+        break;
+    }
+    case HidlFrontendScanMessageType::ANALOG_TYPE: {
+        scanMessage.set<FrontendScanMessage::analogType>(
+                static_cast<FrontendAnalogType>(message.analogType()));
+        break;
+    }
+    case HidlFrontendScanMessageType::PLP_IDS: {
+        const vector<uint8_t>& p = message.plpIds();
+        vector<int32_t> plpIds(begin(p), end(p));
+        scanMessage.set<FrontendScanMessage::plpIds>(plpIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::GROUP_IDS: {
+        const vector<uint8_t>& g = message.groupIds();
+        vector<int32_t> groupIds(begin(g), end(g));
+        scanMessage.set<FrontendScanMessage::groupIds>(groupIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::INPUT_STREAM_IDS: {
+        const vector<uint16_t>& i = message.inputStreamIds();
+        vector<int32_t> streamIds(begin(i), end(i));
+        scanMessage.set<FrontendScanMessage::inputStreamIds>(streamIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::STANDARD: {
+        const HidlFrontendScanMessage::Standard& std = message.std();
+        FrontendScanMessageStandard standard;
+        if (std.getDiscriminator() == HidlFrontendScanMessage::Standard::hidl_discriminator::sStd) {
+            standard.set<FrontendScanMessageStandard::sStd>(
+                    static_cast<FrontendDvbsStandard>(std.sStd()));
+        } else if (std.getDiscriminator() ==
+                   HidlFrontendScanMessage::Standard::hidl_discriminator::tStd) {
+            standard.set<FrontendScanMessageStandard::tStd>(
+                    static_cast<FrontendDvbtStandard>(std.tStd()));
+        } else if (std.getDiscriminator() ==
+                   HidlFrontendScanMessage::Standard::hidl_discriminator::sifStd) {
+            standard.set<FrontendScanMessageStandard::sifStd>(
+                    static_cast<FrontendAnalogSifStandard>(std.sifStd()));
+        }
+        scanMessage.set<FrontendScanMessage::std>(standard);
+        break;
+    }
+    case HidlFrontendScanMessageType::ATSC3_PLP_INFO: {
+        const vector<HidlFrontendScanAtsc3PlpInfo>& plpInfos = message.atsc3PlpInfos();
+        vector<FrontendScanAtsc3PlpInfo> tunerPlpInfos;
+        for (int i = 0; i < plpInfos.size(); i++) {
+            FrontendScanAtsc3PlpInfo plpInfo{
+                    .plpId = static_cast<int32_t>(plpInfos[i].plpId),
+                    .bLlsFlag = plpInfos[i].bLlsFlag,
+            };
+            tunerPlpInfos.push_back(plpInfo);
+        }
+        scanMessage.set<FrontendScanMessage::atsc3PlpInfos>(tunerPlpInfos);
+        break;
+    }
+    default:
+        break;
+    }
+    mTunerFrontendCallback->onScanMessage(static_cast<FrontendScanMessageType>(type), scanMessage);
+    return Void();
+}
+
+Return<void> TunerHidlFrontend::FrontendCallback::onScanMessageExt1_1(
+        HidlFrontendScanMessageTypeExt1_1 type, const HidlFrontendScanMessageExt1_1& message) {
+    ALOGV("onScanMessageExt1_1::onScanMessage, type=%d", type);
+    FrontendScanMessage scanMessage;
+    switch (type) {
+    case HidlFrontendScanMessageTypeExt1_1::MODULATION: {
+        HidlFrontendModulation m = message.modulation();
+        FrontendModulation modulation;
+        switch (m.getDiscriminator()) {
+        case HidlFrontendModulation::hidl_discriminator::dvbc: {
+            modulation.set<FrontendModulation::dvbc>(static_cast<FrontendDvbcModulation>(m.dvbc()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dvbt: {
+            modulation.set<FrontendModulation::dvbt>(
+                    static_cast<FrontendDvbtConstellation>(m.dvbt()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dvbs: {
+            modulation.set<FrontendModulation::dvbs>(static_cast<FrontendDvbsModulation>(m.dvbs()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbs: {
+            modulation.set<FrontendModulation::isdbs>(
+                    static_cast<FrontendIsdbsModulation>(m.isdbs()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbs3: {
+            modulation.set<FrontendModulation::isdbs3>(
+                    static_cast<FrontendIsdbs3Modulation>(m.isdbs3()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbt: {
+            modulation.set<FrontendModulation::isdbt>(
+                    static_cast<FrontendIsdbtModulation>(m.isdbt()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::atsc: {
+            modulation.set<FrontendModulation::atsc>(static_cast<FrontendAtscModulation>(m.atsc()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::atsc3: {
+            modulation.set<FrontendModulation::atsc3>(
+                    static_cast<FrontendAtsc3Modulation>(m.atsc3()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dtmb: {
+            modulation.set<FrontendModulation::dtmb>(static_cast<FrontendDtmbModulation>(m.dtmb()));
+            break;
+        }
+        }
+        scanMessage.set<FrontendScanMessage::modulation>(modulation);
+        break;
+    }
+    case HidlFrontendScanMessageTypeExt1_1::DVBC_ANNEX: {
+        scanMessage.set<FrontendScanMessage::annex>(
+                static_cast<FrontendDvbcAnnex>(message.annex()));
+        break;
+    }
+    case HidlFrontendScanMessageTypeExt1_1::HIGH_PRIORITY: {
+        scanMessage.set<FrontendScanMessage::isHighPriority>(message.isHighPriority());
+        break;
+    }
+    default: {
+        break;
+    }
+    }
+    mTunerFrontendCallback->onScanMessage(static_cast<FrontendScanMessageType>(type), scanMessage);
+    return Void();
+}
+
+/////////////// TunerHidlFrontend Helper Methods ///////////////////////
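+// Flattens the HIDL status unions (1.0 values plus 1.1 extensions) into the single AIDL
+// FrontendStatus list returned to the client.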
+void TunerHidlFrontend::getAidlFrontendStatus(const vector<HidlFrontendStatus>& hidlStatus,
+                                              const vector<HidlFrontendStatusExt1_1>& hidlStatusExt,
+                                              vector<FrontendStatus>& aidlStatus) {
+    for (HidlFrontendStatus s : hidlStatus) {
+        FrontendStatus status;
+        switch (s.getDiscriminator()) {
+        case HidlFrontendStatus::hidl_discriminator::isDemodLocked: {
+            status.set<FrontendStatus::isDemodLocked>(s.isDemodLocked());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::snr: {
+            status.set<FrontendStatus::snr>((int)s.snr());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::ber: {
+            status.set<FrontendStatus::ber>((int)s.ber());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::per: {
+            status.set<FrontendStatus::per>((int)s.per());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::preBer: {
+            status.set<FrontendStatus::preBer>((int)s.preBer());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::signalQuality: {
+            status.set<FrontendStatus::signalQuality>((int)s.signalQuality());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::signalStrength: {
+            status.set<FrontendStatus::signalStrength>((int)s.signalStrength());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::symbolRate: {
+            status.set<FrontendStatus::symbolRate>((int)s.symbolRate());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::innerFec: {
+            status.set<FrontendStatus::innerFec>(static_cast<FrontendInnerFec>(s.innerFec()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::modulation: {
+            FrontendModulationStatus modulationStatus;
+            switch (s.modulation().getDiscriminator()) {
+            case HidlFrontendModulationStatus::hidl_discriminator::dvbc:
+                modulationStatus.set<FrontendModulationStatus::dvbc>(
+                        static_cast<FrontendDvbcModulation>(s.modulation().dvbc()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::dvbs:
+                modulationStatus.set<FrontendModulationStatus::dvbs>(
+                        static_cast<FrontendDvbsModulation>(s.modulation().dvbs()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbs:
+                modulationStatus.set<FrontendModulationStatus::isdbs>(
+                        static_cast<FrontendIsdbsModulation>(s.modulation().isdbs()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbs3:
+                modulationStatus.set<FrontendModulationStatus::isdbs3>(
+                        static_cast<FrontendIsdbs3Modulation>(s.modulation().isdbs3()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbt:
+                modulationStatus.set<FrontendModulationStatus::isdbt>(
+                        static_cast<FrontendIsdbtModulation>(s.modulation().isdbt()));
+                break;
+            }
+            status.set<FrontendStatus::modulationStatus>(modulationStatus);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::inversion: {
+            status.set<FrontendStatus::inversion>(
+                    static_cast<FrontendSpectralInversion>(s.inversion()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::lnbVoltage: {
+            status.set<FrontendStatus::lnbVoltage>(static_cast<LnbVoltage>(s.lnbVoltage()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::plpId: {
+            status.set<FrontendStatus::plpId>((int32_t)s.plpId());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isEWBS: {
+            status.set<FrontendStatus::isEWBS>(s.isEWBS());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::agc: {
+            status.set<FrontendStatus::agc>((int32_t)s.agc());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isLnaOn: {
+            status.set<FrontendStatus::isLnaOn>(s.isLnaOn());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isLayerError: {
+            vector<bool> e(s.isLayerError().begin(), s.isLayerError().end());
+            status.set<FrontendStatus::isLayerError>(e);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::mer: {
+            status.set<FrontendStatus::mer>(static_cast<int32_t>(s.mer()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::freqOffset: {
+            status.set<FrontendStatus::freqOffset>(static_cast<int64_t>(s.freqOffset()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::hierarchy: {
+            status.set<FrontendStatus::hierarchy>(
+                    static_cast<FrontendDvbtHierarchy>(s.hierarchy()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isRfLocked: {
+            status.set<FrontendStatus::isRfLocked>(s.isRfLocked());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::plpInfo: {
+            vector<FrontendStatusAtsc3PlpInfo> info;
+            for (auto i : s.plpInfo()) {
+                info.push_back({
+                        .plpId = static_cast<int32_t>(i.plpId),
+                        .isLocked = i.isLocked,
+                        .uec = static_cast<int32_t>(i.uec),
+                });
+            }
+            status.set<FrontendStatus::plpInfo>(info);
+            aidlStatus.push_back(status);
+            break;
+        }
+        }
+    }
+
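+    // Convert the 1.1 extension statuses into the same AIDL FrontendStatus list.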
+    for (HidlFrontendStatusExt1_1 s : hidlStatusExt) {
+        FrontendStatus status;
+        switch (s.getDiscriminator()) {
+        case HidlFrontendStatusExt1_1::hidl_discriminator::modulations: {
+            vector<FrontendModulation> aidlMod;
+            for (auto m : s.modulations()) {
+                switch (m.getDiscriminator()) {
+                case HidlFrontendModulation::hidl_discriminator::dvbc:
+                    aidlMod.push_back(static_cast<FrontendDvbcModulation>(m.dvbc()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dvbs:
+                    aidlMod.push_back(static_cast<FrontendDvbsModulation>(m.dvbs()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dvbt:
+                    aidlMod.push_back(static_cast<FrontendDvbtConstellation>(m.dvbt()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbs:
+                    aidlMod.push_back(static_cast<FrontendIsdbsModulation>(m.isdbs()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbs3:
+                    aidlMod.push_back(static_cast<FrontendIsdbs3Modulation>(m.isdbs3()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbt:
+                    aidlMod.push_back(static_cast<FrontendIsdbtModulation>(m.isdbt()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::atsc:
+                    aidlMod.push_back(static_cast<FrontendAtscModulation>(m.atsc()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::atsc3:
+                    aidlMod.push_back(static_cast<FrontendAtsc3Modulation>(m.atsc3()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dtmb:
+                    aidlMod.push_back(static_cast<FrontendDtmbModulation>(m.dtmb()));
+                    break;
+                }
+            }
+            status.set<FrontendStatus::modulations>(aidlMod);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::bers: {
+            vector<int> b(s.bers().begin(), s.bers().end());
+            status.set<FrontendStatus::bers>(b);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::codeRates: {
+            vector<FrontendInnerFec> codeRates;
+            for (auto c : s.codeRates()) {
+                codeRates.push_back(static_cast<FrontendInnerFec>(c));
+            }
+            status.set<FrontendStatus::codeRates>(codeRates);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::bandwidth: {
+            FrontendBandwidth bandwidth;
+            switch (s.bandwidth().getDiscriminator()) {
+            case HidlFrontendBandwidth::hidl_discriminator::atsc3:
+                bandwidth.set<FrontendBandwidth::atsc3>(
+                        static_cast<FrontendAtsc3Bandwidth>(s.bandwidth().atsc3()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dvbc:
+                bandwidth.set<FrontendBandwidth::dvbc>(
+                        static_cast<FrontendDvbcBandwidth>(s.bandwidth().dvbc()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dvbt:
+                bandwidth.set<FrontendBandwidth::dvbt>(
+                        static_cast<FrontendDvbtBandwidth>(s.bandwidth().dvbt()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::isdbt:
+                bandwidth.set<FrontendBandwidth::isdbt>(
+                        static_cast<FrontendIsdbtBandwidth>(s.bandwidth().isdbt()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dtmb:
+                bandwidth.set<FrontendBandwidth::dtmb>(
+                        static_cast<FrontendDtmbBandwidth>(s.bandwidth().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::bandwidth>(bandwidth);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::interval: {
+            FrontendGuardInterval interval;
+            switch (s.interval().getDiscriminator()) {
+            case HidlFrontendGuardInterval::hidl_discriminator::dvbt:
+                interval.set<FrontendGuardInterval::dvbt>(
+                        static_cast<FrontendDvbtGuardInterval>(s.interval().dvbt()));
+                break;
+            case HidlFrontendGuardInterval::hidl_discriminator::isdbt:
+                interval.set<FrontendGuardInterval::isdbt>(
+                        static_cast<FrontendIsdbtGuardInterval>(s.interval().isdbt()));
+                break;
+            case HidlFrontendGuardInterval::hidl_discriminator::dtmb:
+                interval.set<FrontendGuardInterval::dtmb>(
+                        static_cast<FrontendDtmbGuardInterval>(s.interval().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::interval>(interval);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::transmissionMode: {
+            FrontendTransmissionMode transmissionMode;
+            switch (s.transmissionMode().getDiscriminator()) {
+            case HidlFrontendTransmissionMode::hidl_discriminator::dvbt:
+                transmissionMode.set<FrontendTransmissionMode::dvbt>(
+                        static_cast<FrontendDvbtTransmissionMode>(s.transmissionMode().dvbt()));
+                break;
+            case HidlFrontendTransmissionMode::hidl_discriminator::isdbt:
+                transmissionMode.set<FrontendTransmissionMode::isdbt>(
+                        static_cast<FrontendIsdbtMode>(s.transmissionMode().isdbt()));
+                break;
+            case HidlFrontendTransmissionMode::hidl_discriminator::dtmb:
+                transmissionMode.set<FrontendTransmissionMode::dtmb>(
+                        static_cast<FrontendDtmbTransmissionMode>(s.transmissionMode().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::transmissionMode>(transmissionMode);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::uec: {
+            status.set<FrontendStatus::uec>(static_cast<int32_t>(s.uec()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::systemId: {
+            status.set<FrontendStatus::systemId>(static_cast<int32_t>(s.systemId()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::interleaving: {
+            vector<FrontendInterleaveMode> aidlInter;
+            for (auto i : s.interleaving()) {
+                FrontendInterleaveMode leaveMode;
+                switch (i.getDiscriminator()) {
+                case HidlFrontendInterleaveMode::hidl_discriminator::atsc3:
+                    leaveMode.set<FrontendInterleaveMode::atsc3>(
+                            static_cast<FrontendAtsc3TimeInterleaveMode>(i.atsc3()));
+                    break;
+                case HidlFrontendInterleaveMode::hidl_discriminator::dvbc:
+                    leaveMode.set<FrontendInterleaveMode::dvbc>(
+                            static_cast<FrontendCableTimeInterleaveMode>(i.dvbc()));
+                    break;
+                case HidlFrontendInterleaveMode::hidl_discriminator::dtmb:
+                    leaveMode.set<FrontendInterleaveMode::dtmb>(
+                            static_cast<FrontendDtmbTimeInterleaveMode>(i.dtmb()));
+                    break;
+                }
+                aidlInter.push_back(leaveMode);
+            }
+            status.set<FrontendStatus::interleaving>(aidlInter);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isdbtSegment: {
+            const vector<uint8_t>& seg = s.isdbtSegment();
+            vector<int32_t> i(seg.begin(), seg.end());
+            status.set<FrontendStatus::isdbtSegment>(i);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::tsDataRate: {
+            vector<int32_t> ts(s.tsDataRate().begin(), s.tsDataRate().end());
+            status.set<FrontendStatus::tsDataRate>(ts);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::rollOff: {
+            FrontendRollOff rollOff;
+            switch (s.rollOff().getDiscriminator()) {
+            case HidlFrontendRollOff::hidl_discriminator::dvbs:
+                rollOff.set<FrontendRollOff::dvbs>(
+                        static_cast<FrontendDvbsRolloff>(s.rollOff().dvbs()));
+                break;
+            case HidlFrontendRollOff::hidl_discriminator::isdbs:
+                rollOff.set<FrontendRollOff::isdbs>(
+                        static_cast<FrontendIsdbsRolloff>(s.rollOff().isdbs()));
+                break;
+            case HidlFrontendRollOff::hidl_discriminator::isdbs3:
+                rollOff.set<FrontendRollOff::isdbs3>(
+                        static_cast<FrontendIsdbs3Rolloff>(s.rollOff().isdbs3()));
+                break;
+            }
+            status.set<FrontendStatus::rollOff>(rollOff);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isMiso: {
+            status.set<FrontendStatus::isMiso>(s.isMiso());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isLinear: {
+            status.set<FrontendStatus::isLinear>(s.isLinear());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isShortFrames: {
+            status.set<FrontendStatus::isShortFrames>(s.isShortFrames());
+            aidlStatus.push_back(status);
+            break;
+        }
+        }
+    }
+}
+
+hidl_vec<HidlFrontendAtsc3PlpSettings> TunerHidlFrontend::getAtsc3PlpSettings(
+        const FrontendAtsc3Settings& settings) {
+    int len = settings.plpSettings.size();
+    hidl_vec<HidlFrontendAtsc3PlpSettings> plps = hidl_vec<HidlFrontendAtsc3PlpSettings>(len);
+    // parse PLP settings
+    for (int i = 0; i < len; i++) {
+        uint8_t plpId = static_cast<uint8_t>(settings.plpSettings[i].plpId);
+        HidlFrontendAtsc3Modulation modulation =
+                static_cast<HidlFrontendAtsc3Modulation>(settings.plpSettings[i].modulation);
+        HidlFrontendAtsc3TimeInterleaveMode interleaveMode =
+                static_cast<HidlFrontendAtsc3TimeInterleaveMode>(
+                        settings.plpSettings[i].interleaveMode);
+        HidlFrontendAtsc3CodeRate codeRate =
+                static_cast<HidlFrontendAtsc3CodeRate>(settings.plpSettings[i].codeRate);
+        HidlFrontendAtsc3Fec fec = static_cast<HidlFrontendAtsc3Fec>(settings.plpSettings[i].fec);
+        HidlFrontendAtsc3PlpSettings frontendAtsc3PlpSettings{
+                .plpId = plpId,
+                .modulation = modulation,
+                .interleaveMode = interleaveMode,
+                .codeRate = codeRate,
+                .fec = fec,
+        };
+        plps[i] = frontendAtsc3PlpSettings;
+    }
+    return plps;
+}
+
+HidlFrontendDvbsCodeRate TunerHidlFrontend::getDvbsCodeRate(const FrontendDvbsCodeRate& codeRate) {
+    HidlFrontendInnerFec innerFec = static_cast<HidlFrontendInnerFec>(codeRate.fec);
+    bool isLinear = codeRate.isLinear;
+    bool isShortFrames = codeRate.isShortFrames;
+    uint32_t bitsPer1000Symbol = static_cast<uint32_t>(codeRate.bitsPer1000Symbol);
+    HidlFrontendDvbsCodeRate coderate{
+            .fec = innerFec,
+            .isLinear = isLinear,
+            .isShortFrames = isShortFrames,
+            .bitsPer1000Symbol = bitsPer1000Symbol,
+    };
+    return coderate;
+}
+
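+// Splits an AIDL FrontendSettings union into HIDL 1.0 settings plus the 1.1 extension
+// (endFrequency, inversion and the per-standard settingExt). DTMB is 1.1-only, so its
+// settings go entirely into the extension.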
+void TunerHidlFrontend::getHidlFrontendSettings(const FrontendSettings& aidlSettings,
+                                                HidlFrontendSettings& settings,
+                                                HidlFrontendSettingsExt1_1& settingsExt) {
+    switch (aidlSettings.getTag()) {
+    case FrontendSettings::analog: {
+        const FrontendAnalogSettings& analog = aidlSettings.get<FrontendSettings::analog>();
+        settings.analog({
+                .frequency = static_cast<uint32_t>(analog.frequency),
+                .type = static_cast<HidlFrontendAnalogType>(analog.type),
+                .sifStandard = static_cast<HidlFrontendAnalogSifStandard>(analog.sifStandard),
+        });
+        settingsExt.settingExt.analog({
+                .aftFlag = static_cast<HidlFrontendAnalogAftFlag>(analog.aftFlag),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(analog.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(analog.inversion);
+        break;
+    }
+    case FrontendSettings::atsc: {
+        const FrontendAtscSettings& atsc = aidlSettings.get<FrontendSettings::atsc>();
+        settings.atsc({
+                .frequency = static_cast<uint32_t>(atsc.frequency),
+                .modulation = static_cast<HidlFrontendAtscModulation>(atsc.modulation),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(atsc.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(atsc.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::atsc3: {
+        const FrontendAtsc3Settings& atsc3 = aidlSettings.get<FrontendSettings::atsc3>();
+        settings.atsc3({
+                .frequency = static_cast<uint32_t>(atsc3.frequency),
+                .bandwidth = static_cast<HidlFrontendAtsc3Bandwidth>(atsc3.bandwidth),
+                .demodOutputFormat =
+                        static_cast<HidlFrontendAtsc3DemodOutputFormat>(atsc3.demodOutputFormat),
+                .plpSettings = getAtsc3PlpSettings(atsc3),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(atsc3.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(atsc3.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::dvbc: {
+        const FrontendDvbcSettings& dvbc = aidlSettings.get<FrontendSettings::dvbc>();
+        settings.dvbc({
+                .frequency = static_cast<uint32_t>(dvbc.frequency),
+                .modulation = static_cast<HidlFrontendDvbcModulation>(dvbc.modulation),
+                .fec = static_cast<HidlFrontendInnerFec>(dvbc.fec),
+                .symbolRate = static_cast<uint32_t>(dvbc.symbolRate),
+                .outerFec = static_cast<HidlFrontendDvbcOuterFec>(dvbc.outerFec),
+                .annex = static_cast<HidlFrontendDvbcAnnex>(dvbc.annex),
+                .spectralInversion = static_cast<HidlFrontendDvbcSpectralInversion>(dvbc.inversion),
+        });
+        settingsExt.settingExt.dvbc({
+                .interleaveMode =
+                        static_cast<HidlFrontendCableTimeInterleaveMode>(dvbc.interleaveMode),
+                .bandwidth = static_cast<HidlFrontendDvbcBandwidth>(dvbc.bandwidth),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbc.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbc.inversion);
+        break;
+    }
+    case FrontendSettings::dvbs: {
+        const FrontendDvbsSettings& dvbs = aidlSettings.get<FrontendSettings::dvbs>();
+        settings.dvbs({
+                .frequency = static_cast<uint32_t>(dvbs.frequency),
+                .modulation = static_cast<HidlFrontendDvbsModulation>(dvbs.modulation),
+                .coderate = getDvbsCodeRate(dvbs.coderate),
+                .symbolRate = static_cast<uint32_t>(dvbs.symbolRate),
+                .rolloff = static_cast<HidlFrontendDvbsRolloff>(dvbs.rolloff),
+                .pilot = static_cast<HidlFrontendDvbsPilot>(dvbs.pilot),
+                .inputStreamId = static_cast<uint32_t>(dvbs.inputStreamId),
+                .standard = static_cast<HidlFrontendDvbsStandard>(dvbs.standard),
+                .vcmMode = static_cast<HidlFrontendDvbsVcmMode>(dvbs.vcmMode),
+        });
+        settingsExt.settingExt.dvbs({
+                .scanType = static_cast<HidlFrontendDvbsScanType>(dvbs.scanType),
+                .isDiseqcRxMessage = dvbs.isDiseqcRxMessage,
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbs.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbs.inversion);
+        break;
+    }
+    case FrontendSettings::dvbt: {
+        const FrontendDvbtSettings& dvbt = aidlSettings.get<FrontendSettings::dvbt>();
+        settings.dvbt({
+                .frequency = static_cast<uint32_t>(dvbt.frequency),
+                .transmissionMode =
+                        static_cast<HidlFrontendDvbtTransmissionMode>(dvbt.transmissionMode),
+                .bandwidth = static_cast<HidlFrontendDvbtBandwidth>(dvbt.bandwidth),
+                .constellation = static_cast<HidlFrontendDvbtConstellation>(dvbt.constellation),
+                .hierarchy = static_cast<HidlFrontendDvbtHierarchy>(dvbt.hierarchy),
+                .hpCoderate = static_cast<HidlFrontendDvbtCoderate>(dvbt.hpCoderate),
+                .lpCoderate = static_cast<HidlFrontendDvbtCoderate>(dvbt.lpCoderate),
+                .guardInterval = static_cast<HidlFrontendDvbtGuardInterval>(dvbt.guardInterval),
+                .isHighPriority = dvbt.isHighPriority,
+                .standard = static_cast<HidlFrontendDvbtStandard>(dvbt.standard),
+                .isMiso = dvbt.isMiso,
+                .plpMode = static_cast<HidlFrontendDvbtPlpMode>(dvbt.plpMode),
+                .plpId = static_cast<uint8_t>(dvbt.plpId),
+                .plpGroupId = static_cast<uint8_t>(dvbt.plpGroupId),
+        });
+        settingsExt.settingExt.dvbt({
+                .constellation = static_cast<
+                        ::android::hardware::tv::tuner::V1_1::FrontendDvbtConstellation>(
+                        dvbt.constellation),
+                .transmissionMode = static_cast<
+                        ::android::hardware::tv::tuner::V1_1::FrontendDvbtTransmissionMode>(
+                        dvbt.transmissionMode),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbt.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbt.inversion);
+        break;
+    }
+    case FrontendSettings::isdbs: {
+        const FrontendIsdbsSettings& isdbs = aidlSettings.get<FrontendSettings::isdbs>();
+        settings.isdbs({
+                .frequency = static_cast<uint32_t>(isdbs.frequency),
+                .streamId = static_cast<uint16_t>(isdbs.streamId),
+                .streamIdType = static_cast<HidlFrontendIsdbsStreamIdType>(isdbs.streamIdType),
+                .modulation = static_cast<HidlFrontendIsdbsModulation>(isdbs.modulation),
+                .coderate = static_cast<HidlFrontendIsdbsCoderate>(isdbs.coderate),
+                .symbolRate = static_cast<uint32_t>(isdbs.symbolRate),
+                .rolloff = static_cast<HidlFrontendIsdbsRolloff>(isdbs.rolloff),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbs.endFrequency);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::isdbs3: {
+        const FrontendIsdbs3Settings& isdbs3 = aidlSettings.get<FrontendSettings::isdbs3>();
+        settings.isdbs3({
+                .frequency = static_cast<uint32_t>(isdbs3.frequency),
+                .streamId = static_cast<uint16_t>(isdbs3.streamId),
+                .streamIdType = static_cast<HidlFrontendIsdbsStreamIdType>(isdbs3.streamIdType),
+                .modulation = static_cast<HidlFrontendIsdbs3Modulation>(isdbs3.modulation),
+                .coderate = static_cast<HidlFrontendIsdbs3Coderate>(isdbs3.coderate),
+                .symbolRate = static_cast<uint32_t>(isdbs3.symbolRate),
+                .rolloff = static_cast<HidlFrontendIsdbs3Rolloff>(isdbs3.rolloff),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbs3.endFrequency);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::isdbt: {
+        const FrontendIsdbtSettings& isdbt = aidlSettings.get<FrontendSettings::isdbt>();
+        HidlFrontendIsdbtModulation modulation = HidlFrontendIsdbtModulation::UNDEFINED;
+        HidlFrontendIsdbtCoderate coderate = HidlFrontendIsdbtCoderate::UNDEFINED;
+        if (isdbt.layerSettings.size() > 0) {
+            modulation =
+                    static_cast<HidlFrontendIsdbtModulation>(isdbt.layerSettings[0].modulation);
+            coderate = static_cast<HidlFrontendIsdbtCoderate>(isdbt.layerSettings[0].coderate);
+        }
+        settings.isdbt({
+                .frequency = static_cast<uint32_t>(isdbt.frequency),
+                .modulation = modulation,
+                .bandwidth = static_cast<HidlFrontendIsdbtBandwidth>(isdbt.bandwidth),
+                .mode = static_cast<HidlFrontendIsdbtMode>(isdbt.mode),
+                .coderate = coderate,
+                .guardInterval = static_cast<HidlFrontendIsdbtGuardInterval>(isdbt.guardInterval),
+                .serviceAreaId = static_cast<uint32_t>(isdbt.serviceAreaId),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbt.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(isdbt.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::dtmb: {
+        const FrontendDtmbSettings& dtmb = aidlSettings.get<FrontendSettings::dtmb>();
+        settingsExt.settingExt.dtmb({
+                .frequency = static_cast<uint32_t>(dtmb.frequency),
+                .transmissionMode =
+                        static_cast<HidlFrontendDtmbTransmissionMode>(dtmb.transmissionMode),
+                .bandwidth = static_cast<HidlFrontendDtmbBandwidth>(dtmb.bandwidth),
+                .modulation = static_cast<HidlFrontendDtmbModulation>(dtmb.modulation),
+                .codeRate = static_cast<HidlFrontendDtmbCodeRate>(dtmb.codeRate),
+                .guardInterval = static_cast<HidlFrontendDtmbGuardInterval>(dtmb.guardInterval),
+                .interleaveMode =
+                        static_cast<HidlFrontendDtmbTimeInterleaveMode>(dtmb.interleaveMode),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dtmb.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dtmb.inversion);
+        break;
+    }
+    default:
+        break;
+    }
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlFrontend.h b/services/tuner/hidl/TunerHidlFrontend.h
new file mode 100644
index 0000000..f698655
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFrontend.h
@@ -0,0 +1,129 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLFRONTEND_H
+#define ANDROID_MEDIA_TUNERHIDLFRONTEND_H
+
+#include <aidl/android/hardware/tv/tuner/IFrontendCallback.h>
+#include <aidl/android/media/tv/tuner/BnTunerFrontend.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <android/hardware/tv/tuner/1.1/IFrontend.h>
+#include <android/hardware/tv/tuner/1.1/IFrontendCallback.h>
+#include <utils/Log.h>
+
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsCodeRate;
+using ::aidl::android::hardware::tv::tuner::FrontendEventType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessage;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanType;
+using ::aidl::android::hardware::tv::tuner::FrontendSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusReadiness;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusType;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlFrontendAtsc3PlpSettings = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3PlpSettings;
+using HidlFrontendDvbsCodeRate = ::android::hardware::tv::tuner::V1_0::FrontendDvbsCodeRate;
+using HidlFrontendEventType = ::android::hardware::tv::tuner::V1_0::FrontendEventType;
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+using HidlFrontendScanMessage = ::android::hardware::tv::tuner::V1_0::FrontendScanMessage;
+using HidlFrontendScanMessageType = ::android::hardware::tv::tuner::V1_0::FrontendScanMessageType;
+using HidlFrontendSettings = ::android::hardware::tv::tuner::V1_0::FrontendSettings;
+using HidlFrontendStatus = ::android::hardware::tv::tuner::V1_0::FrontendStatus;
+using HidlIFrontend = ::android::hardware::tv::tuner::V1_0::IFrontend;
+using HidlIFrontendCallback = ::android::hardware::tv::tuner::V1_1::IFrontendCallback;
+using HidlFrontendScanMessageExt1_1 =
+        ::android::hardware::tv::tuner::V1_1::FrontendScanMessageExt1_1;
+using HidlFrontendScanMessageTypeExt1_1 =
+        ::android::hardware::tv::tuner::V1_1::FrontendScanMessageTypeExt1_1;
+using HidlFrontendSettingsExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendSettingsExt1_1;
+using HidlFrontendStatusExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendStatusExt1_1;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlFrontend : public BnTunerFrontend {
+public:
+    TunerHidlFrontend(sp<HidlIFrontend> frontend, int id);
+    virtual ~TunerHidlFrontend();
+
+    ::ndk::ScopedAStatus setCallback(
+            const shared_ptr<ITunerFrontendCallback>& in_tunerFrontendCallback) override;
+    ::ndk::ScopedAStatus tune(const FrontendSettings& in_settings) override;
+    ::ndk::ScopedAStatus stopTune() override;
+    ::ndk::ScopedAStatus scan(const FrontendSettings& in_settings,
+                              FrontendScanType in_frontendScanType) override;
+    ::ndk::ScopedAStatus stopScan() override;
+    ::ndk::ScopedAStatus setLnb(const shared_ptr<ITunerLnb>& in_lnb) override;
+    ::ndk::ScopedAStatus linkCiCamToFrontend(int32_t in_ciCamId, int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus unlinkCiCamToFrontend(int32_t in_ciCamId) override;
+    ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus getStatus(const vector<FrontendStatusType>& in_statusTypes,
+                                   vector<FrontendStatus>* _aidl_return) override;
+    ::ndk::ScopedAStatus getFrontendId(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getHardwareInfo(std::string* _aidl_return) override;
+    ::ndk::ScopedAStatus removeOutputPid(int32_t in_pid) override;
+    ::ndk::ScopedAStatus getFrontendStatusReadiness(
+            const std::vector<FrontendStatusType>& in_statusTypes,
+            std::vector<FrontendStatusReadiness>* _aidl_return) override;
+
+    void setLna(bool in_bEnable);
+
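+    // Receives HIDL frontend events and scan messages on behalf of the wrapped AIDL
+    // ITunerFrontendCallback.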
+    struct FrontendCallback : public HidlIFrontendCallback {
+        FrontendCallback(const shared_ptr<ITunerFrontendCallback> tunerFrontendCallback)
+              : mTunerFrontendCallback(tunerFrontendCallback){};
+
+        virtual Return<void> onEvent(HidlFrontendEventType frontendEventType);
+        virtual Return<void> onScanMessage(HidlFrontendScanMessageType type,
+                                           const HidlFrontendScanMessage& message);
+        virtual Return<void> onScanMessageExt1_1(HidlFrontendScanMessageTypeExt1_1 type,
+                                                 const HidlFrontendScanMessageExt1_1& message);
+
+        shared_ptr<ITunerFrontendCallback> mTunerFrontendCallback;
+    };
+
+private:
+    hidl_vec<HidlFrontendAtsc3PlpSettings> getAtsc3PlpSettings(
+            const FrontendAtsc3Settings& settings);
+    HidlFrontendDvbsCodeRate getDvbsCodeRate(const FrontendDvbsCodeRate& codeRate);
+    void getHidlFrontendSettings(const FrontendSettings& aidlSettings,
+                                 HidlFrontendSettings& settings,
+                                 HidlFrontendSettingsExt1_1& settingsExt);
+    void getAidlFrontendStatus(const vector<HidlFrontendStatus>& hidlStatus,
+                               const vector<HidlFrontendStatusExt1_1>& hidlStatusExt,
+                               vector<FrontendStatus>& aidlStatus);
+
+    int mId;
+    sp<HidlIFrontend> mFrontend;
+    sp<::android::hardware::tv::tuner::V1_1::IFrontend> mFrontend_1_1;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLFRONTEND_H
diff --git a/services/tuner/hidl/TunerHidlLnb.cpp b/services/tuner/hidl/TunerHidlLnb.cpp
new file mode 100644
index 0000000..a7e20bb
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlLnb.cpp
@@ -0,0 +1,160 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlLnb"
+
+#include "TunerHidlLnb.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+using ::aidl::android::hardware::tv::tuner::Result;
+using HidlLnbPosition = ::android::hardware::tv::tuner::V1_0::LnbPosition;
+using HidlLnbTone = ::android::hardware::tv::tuner::V1_0::LnbTone;
+using HidlLnbVoltage = ::android::hardware::tv::tuner::V1_0::LnbVoltage;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlLnb::TunerHidlLnb(sp<HidlILnb> lnb, int id) {
+    mLnb = lnb;
+    mId = id;
+}
+
+TunerHidlLnb::~TunerHidlLnb() {
+    mLnb = nullptr;
+    mId = -1;
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setCallback(
+        const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (in_tunerLnbCallback == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    sp<HidlILnbCallback> lnbCallback = new LnbCallback(in_tunerLnbCallback);
+    HidlResult status = mLnb->setCallback(lnbCallback);
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setVoltage(LnbVoltage in_voltage) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mLnb->setVoltage(static_cast<HidlLnbVoltage>(in_voltage));
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setTone(LnbTone in_tone) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mLnb->setTone(static_cast<HidlLnbTone>(in_tone));
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setSatellitePosition(LnbPosition in_position) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mLnb->setSatellitePosition(static_cast<HidlLnbPosition>(in_position));
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mLnb->sendDiseqcMessage(in_diseqcMessage);
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::close() {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mLnb->close();
+    mLnb = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+/////////////// ILnbCallback ///////////////////////
+Return<void> TunerHidlLnb::LnbCallback::onEvent(const HidlLnbEventType lnbEventType) {
+    if (mTunerLnbCallback != nullptr) {
+        mTunerLnbCallback->onEvent(static_cast<LnbEventType>(lnbEventType));
+    }
+    return Void();
+}
+
+Return<void> TunerHidlLnb::LnbCallback::onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) {
+    if (mTunerLnbCallback != nullptr) {
+        vector<uint8_t> msg(begin(diseqcMessage), end(diseqcMessage));
+        mTunerLnbCallback->onDiseqcMessage(msg);
+    }
+    return Void();
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlLnb.h b/services/tuner/hidl/TunerHidlLnb.h
new file mode 100644
index 0000000..becf848
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlLnb.h
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLLNB_H
+#define ANDROID_MEDIA_TUNERHIDLLNB_H
+
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
+#include <aidl/android/media/tv/tuner/BnTunerLnb.h>
+#include <android/hardware/tv/tuner/1.0/ILnb.h>
+#include <android/hardware/tv/tuner/1.0/ILnbCallback.h>
+#include <utils/Log.h>
+
+using ::aidl::android::hardware::tv::tuner::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::LnbPosition;
+using ::aidl::android::hardware::tv::tuner::LnbTone;
+using ::aidl::android::hardware::tv::tuner::LnbVoltage;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlILnb = ::android::hardware::tv::tuner::V1_0::ILnb;
+using HidlILnbCallback = ::android::hardware::tv::tuner::V1_0::ILnbCallback;
+using HidlLnbEventType = ::android::hardware::tv::tuner::V1_0::LnbEventType;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlLnb : public BnTunerLnb {
+public:
+    TunerHidlLnb(sp<HidlILnb> lnb, int id);
+    virtual ~TunerHidlLnb();
+
+    ::ndk::ScopedAStatus setCallback(
+            const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) override;
+    ::ndk::ScopedAStatus setVoltage(LnbVoltage in_voltage) override;
+    ::ndk::ScopedAStatus setTone(LnbTone in_tone) override;
+    ::ndk::ScopedAStatus setSatellitePosition(LnbPosition in_position) override;
+    ::ndk::ScopedAStatus sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) override;
+    ::ndk::ScopedAStatus close() override;
+
+    int getId() { return mId; }
+
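+    // Forwards HIDL LNB events and DiSEqC messages to the wrapped AIDL ITunerLnbCallback.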
+    struct LnbCallback : public HidlILnbCallback {
+        LnbCallback(const shared_ptr<ITunerLnbCallback> tunerLnbCallback)
+              : mTunerLnbCallback(tunerLnbCallback){};
+
+        virtual Return<void> onEvent(const HidlLnbEventType lnbEventType);
+        virtual Return<void> onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage);
+
+        shared_ptr<ITunerLnbCallback> mTunerLnbCallback;
+    };
+
+private:
+    int mId;
+    sp<HidlILnb> mLnb;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLLNB_H
diff --git a/services/tuner/hidl/TunerHidlService.cpp b/services/tuner/hidl/TunerHidlService.cpp
new file mode 100644
index 0000000..6f55f1e
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlService.cpp
@@ -0,0 +1,711 @@
+/**
+ * Copyright (c) 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TunerHidlService"
+
+#include "TunerHidlService.h"
+
+#include <aidl/android/hardware/tv/tuner/FrontendIsdbtTimeInterleaveMode.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <android/binder_manager.h>
+#include <binder/IPCThreadState.h>
+#include <binder/PermissionCache.h>
+#include <utils/Log.h>
+
+#include "TunerHelper.h"
+#include "TunerHidlDemux.h"
+#include "TunerHidlDescrambler.h"
+#include "TunerHidlFrontend.h"
+#include "TunerHidlLnb.h"
+
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Capabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Capabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendType;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::IPCThreadState;
+using ::android::PermissionCache;
+using ::android::hardware::hidl_vec;
+
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+using HidlLnbId = ::android::hardware::tv::tuner::V1_0::LnbId;
+using HidlFrontendType = ::android::hardware::tv::tuner::V1_1::FrontendType;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
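+// Process-wide instance created by instantiate() and returned by getTunerService().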
+shared_ptr<TunerHidlService> TunerHidlService::sTunerService = nullptr;
+
+TunerHidlService::TunerHidlService() {
+    if (!TunerHelper::checkTunerFeature()) {
+        ALOGD("Device doesn't have tuner hardware.");
+        return;
+    }
+
+    updateTunerResources();
+}
+
+TunerHidlService::~TunerHidlService() {
+    mOpenedFrontends.clear();
+    mLnaStatus = -1;
+}
+
+binder_status_t TunerHidlService::instantiate() {
+    if (HidlITuner::getService() == nullptr) {
+        ALOGD("Failed to get ITuner HIDL HAL");
+        return STATUS_NAME_NOT_FOUND;
+    }
+
+    sTunerService = ::ndk::SharedRefBase::make<TunerHidlService>();
+    return AServiceManager_addService(sTunerService->asBinder().get(), getServiceName());
+}
+
+shared_ptr<TunerHidlService> TunerHidlService::getTunerService() {
+    return sTunerService;
+}
+
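+// Lazily connects to the ITuner HIDL service and records whether the optional 1.1
+// interface is available.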
+bool TunerHidlService::hasITuner() {
+    ALOGV("hasITuner");
+    if (mTuner != nullptr) {
+        return true;
+    }
+
+    mTuner = HidlITuner::getService();
+    if (mTuner == nullptr) {
+        ALOGE("Failed to get ITuner service");
+        return false;
+    }
+    mTunerVersion = TUNER_HAL_VERSION_1_0;
+
+    mTuner_1_1 = ::android::hardware::tv::tuner::V1_1::ITuner::castFrom(mTuner);
+    if (mTuner_1_1 != nullptr) {
+        mTunerVersion = TUNER_HAL_VERSION_1_1;
+    } else {
+        ALOGD("Failed to get ITuner_1_1 service");
+    }
+
+    return true;
+}
+
+bool TunerHidlService::hasITuner_1_1() {
+    ALOGV("hasITuner_1_1");
+    hasITuner();
+    return (mTunerVersion == TUNER_HAL_VERSION_1_1);
+}
+
+::ndk::ScopedAStatus TunerHidlService::openDemux(int32_t /* in_demuxHandle */,
+                                                 shared_ptr<ITunerDemux>* _aidl_return) {
+    ALOGV("openDemux");
+    if (!hasITuner()) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res;
+    uint32_t id;
+    sp<IDemux> demuxSp = nullptr;
+    mTuner->openDemux([&](HidlResult r, uint32_t demuxId, const sp<IDemux>& demux) {
+        demuxSp = demux;
+        id = demuxId;
+        res = r;
+        ALOGD("open demux, id = %d", demuxId);
+    });
+    if (res == HidlResult::SUCCESS) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDemux>(demuxSp, id);
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    ALOGW("open demux failed, res = %d", res);
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getDemuxCaps(DemuxCapabilities* _aidl_return) {
+    ALOGV("getDemuxCaps");
+    if (!hasITuner()) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res;
+    HidlDemuxCapabilities caps;
+    mTuner->getDemuxCaps([&](HidlResult r, const HidlDemuxCapabilities& demuxCaps) {
+        caps = demuxCaps;
+        res = r;
+    });
+    if (res == HidlResult::SUCCESS) {
+        *_aidl_return = getAidlDemuxCaps(caps);
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    ALOGW("Get demux caps failed, res = %d", res);
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getFrontendIds(vector<int32_t>* ids) {
+    if (!hasITuner()) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    hidl_vec<HidlFrontendId> feIds;
+    HidlResult res = getHidlFrontendIds(feIds);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    ids->resize(feIds.size());
+    copy(feIds.begin(), feIds.end(), ids->begin());
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::getFrontendInfo(int32_t id, FrontendInfo* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("ITuner service is not init.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlFrontendInfo info;
+    HidlResult res = getHidlFrontendInfo(id, info);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    HidlFrontendDtmbCapabilities dtmbCaps;
+    if (static_cast<HidlFrontendType>(info.type) == HidlFrontendType::DTMB) {
+        if (!hasITuner_1_1()) {
+            ALOGE("ITuner_1_1 service is not init.");
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::UNAVAILABLE));
+        }
+
+        mTuner_1_1->getFrontendDtmbCapabilities(
+                id, [&](HidlResult r, const HidlFrontendDtmbCapabilities& caps) {
+                    dtmbCaps = caps;
+                    res = r;
+                });
+        if (res != HidlResult::SUCCESS) {
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+        }
+    }
+
+    *_aidl_return = getAidlFrontendInfo(info, dtmbCaps);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openFrontend(int32_t frontendHandle,
+                                                    shared_ptr<ITunerFrontend>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("ITuner service is not init.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    sp<HidlIFrontend> frontend;
+    int id = TunerHelper::getResourceIdFromHandle(frontendHandle, FRONTEND);
+    mTuner->openFrontendById(id, [&](HidlResult result, const sp<HidlIFrontend>& fe) {
+        frontend = fe;
+        status = result;
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    shared_ptr<TunerHidlFrontend> tunerFrontend =
+            ::ndk::SharedRefBase::make<TunerHidlFrontend>(frontend, id);
+    if (mLnaStatus != -1) {
+        tunerFrontend->setLna(mLnaStatus == 1);
+    }
+    {
+        Mutex::Autolock _l(mOpenedFrontendsLock);
+        mOpenedFrontends.insert(tunerFrontend);
+    }
+    *_aidl_return = tunerFrontend;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    sp<HidlILnb> lnb;
+    int id = TunerHelper::getResourceIdFromHandle(lnbHandle, LNB);
+    mTuner->openLnbById(id, [&](HidlResult result, const sp<HidlILnb>& lnbSp) {
+        lnb = lnbSp;
+        status = result;
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlLnb>(lnb, id);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openLnbByName(const string& lnbName,
+                                                     shared_ptr<ITunerLnb>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    int lnbId;
+    HidlResult status;
+    sp<HidlILnb> lnb;
+    mTuner->openLnbByName(lnbName, [&](HidlResult r, HidlLnbId id, const sp<HidlILnb>& lnbSp) {
+        status = r;
+        lnb = lnbSp;
+        lnbId = static_cast<int32_t>(id);
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlLnb>(lnb, lnbId);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openDescrambler(
+        int32_t /*descramblerHandle*/, shared_ptr<ITunerDescrambler>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    sp<HidlIDescrambler> descrambler;
+    //int id = TunerHelper::getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
+    mTuner->openDescrambler([&](HidlResult r, const sp<HidlIDescrambler>& descramblerSp) {
+        status = r;
+        descrambler = descramblerSp;
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDescrambler>(descrambler);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::getTunerHalVersion(int* _aidl_return) {
+    hasITuner();
+    *_aidl_return = mTunerVersion;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openSharedFilter(
+        const string& in_filterToken, const shared_ptr<ITunerFilterCallback>& in_cb,
+        shared_ptr<ITunerFilter>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (!PermissionCache::checkCallingPermission(sSharedFilterPermission)) {
+        ALOGE("Request requires android.permission.ACCESS_TV_SHARED_FILTER");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    Mutex::Autolock _l(mSharedFiltersLock);
+    if (mSharedFilters.find(in_filterToken) == mSharedFilters.end()) {
+        *_aidl_return = nullptr;
+        ALOGD("fail to find %s", in_filterToken.c_str());
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    shared_ptr<TunerHidlFilter> filter = mSharedFilters.at(in_filterToken);
+    IPCThreadState* ipc = IPCThreadState::self();
+    const int pid = ipc->getCallingPid();
+    if (!filter->isSharedFilterAllowed(pid)) {
+        *_aidl_return = nullptr;
+        ALOGD("shared filter %s is opened in the same process", in_filterToken.c_str());
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    filter->attachSharedFilterCallback(in_cb);
+
+    *_aidl_return = filter;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::setLna(bool bEnable) {
+    if (!hasITuner()) {
+        ALOGE("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    mLnaStatus = bEnable ? 1 : 0;
+
+    {
+        Mutex::Autolock _l(mOpenedFrontendsLock);
+        for (auto it = mOpenedFrontends.begin(); it != mOpenedFrontends.end(); ++it) {
+            (*it)->setLna(mLnaStatus == 1);
+        }
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::setMaxNumberOfFrontends(FrontendType /* in_frontendType */,
+                                                               int32_t /* in_maxNumber */) {
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getMaxNumberOfFrontends(FrontendType /* in_frontendType */,
+                                                               int32_t* _aidl_return) {
+    *_aidl_return = -1;
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+string TunerHidlService::addFilterToShared(const shared_ptr<TunerHidlFilter>& sharedFilter) {
+    Mutex::Autolock _l(mSharedFiltersLock);
+
+    // Use sharedFilter address as token.
+    string token = to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get()));
+    mSharedFilters[token] = sharedFilter;
+
+    return token;
+}
+
+void TunerHidlService::removeSharedFilter(const shared_ptr<TunerHidlFilter>& sharedFilter) {
+    Mutex::Autolock _l(mSharedFiltersLock);
+
+    // Use sharedFilter address as token.
+    mSharedFilters.erase(to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get())));
+}
+
+void TunerHidlService::removeFrontend(const shared_ptr<TunerHidlFrontend>& frontend) {
+    Mutex::Autolock _l(mOpenedFrontendsLock);
+    for (auto it = mOpenedFrontends.begin(); it != mOpenedFrontends.end(); ++it) {
+        if (it->get() == frontend.get()) {
+            mOpenedFrontends.erase(it);
+            break;
+        }
+    }
+}
+
+void TunerHidlService::updateTunerResources() {
+    if (!hasITuner()) {
+        ALOGE("Failed to updateTunerResources");
+        return;
+    }
+
+    TunerHelper::updateTunerResources(getTRMFrontendInfos(), getTRMLnbHandles());
+}
+
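+// Builds the frontend list (handle, type, exclusive group id) reported to the Tuner
+// Resource Manager.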
+vector<TunerFrontendInfo> TunerHidlService::getTRMFrontendInfos() {
+    vector<TunerFrontendInfo> infos;
+    hidl_vec<HidlFrontendId> ids;
+    HidlResult res = getHidlFrontendIds(ids);
+    if (res != HidlResult::SUCCESS) {
+        return infos;
+    }
+
+    for (int i = 0; i < ids.size(); i++) {
+        HidlFrontendInfo frontendInfo;
+        HidlResult res = getHidlFrontendInfo(static_cast<int32_t>(ids[i]), frontendInfo);
+        if (res != HidlResult::SUCCESS) {
+            continue;
+        }
+        TunerFrontendInfo tunerFrontendInfo{
+                .handle = TunerHelper::getResourceHandleFromId(static_cast<int32_t>(ids[i]),
+                                                               FRONTEND),
+                .type = static_cast<int32_t>(frontendInfo.type),
+                .exclusiveGroupId = static_cast<int32_t>(frontendInfo.exclusiveGroupId),
+        };
+        infos.push_back(tunerFrontendInfo);
+    }
+
+    return infos;
+}
+
+vector<int32_t> TunerHidlService::getTRMLnbHandles() {
+    vector<int32_t> lnbHandles;
+    if (mTuner != nullptr) {
+        HidlResult res;
+        vector<HidlLnbId> lnbIds;
+        mTuner->getLnbIds([&](HidlResult r, const hidl_vec<HidlLnbId>& ids) {
+            lnbIds = ids;
+            res = r;
+        });
+        if (res == HidlResult::SUCCESS && lnbIds.size() > 0) {
+            for (int i = 0; i < lnbIds.size(); i++) {
+                lnbHandles.push_back(
+                        TunerHelper::getResourceHandleFromId(static_cast<int32_t>(lnbIds[i]), LNB));
+            }
+        }
+    }
+
+    return lnbHandles;
+}
+
+HidlResult TunerHidlService::getHidlFrontendIds(hidl_vec<HidlFrontendId>& ids) {
+    if (mTuner == nullptr) {
+        return HidlResult::NOT_INITIALIZED;
+    }
+    HidlResult res;
+    mTuner->getFrontendIds([&](HidlResult r, const hidl_vec<HidlFrontendId>& frontendIds) {
+        ids = frontendIds;
+        res = r;
+    });
+    return res;
+}
+
+HidlResult TunerHidlService::getHidlFrontendInfo(const int id, HidlFrontendInfo& info) {
+    if (mTuner == nullptr) {
+        return HidlResult::NOT_INITIALIZED;
+    }
+    HidlResult res;
+    mTuner->getFrontendInfo(id, [&](HidlResult r, const HidlFrontendInfo& feInfo) {
+        info = feInfo;
+        res = r;
+    });
+    return res;
+}
+
+DemuxCapabilities TunerHidlService::getAidlDemuxCaps(const HidlDemuxCapabilities& caps) {
+    DemuxCapabilities aidlCaps{
+            .numDemux = static_cast<int32_t>(caps.numDemux),
+            .numRecord = static_cast<int32_t>(caps.numRecord),
+            .numPlayback = static_cast<int32_t>(caps.numPlayback),
+            .numTsFilter = static_cast<int32_t>(caps.numTsFilter),
+            .numSectionFilter = static_cast<int32_t>(caps.numSectionFilter),
+            .numAudioFilter = static_cast<int32_t>(caps.numAudioFilter),
+            .numVideoFilter = static_cast<int32_t>(caps.numVideoFilter),
+            .numPesFilter = static_cast<int32_t>(caps.numPesFilter),
+            .numPcrFilter = static_cast<int32_t>(caps.numPcrFilter),
+            .numBytesInSectionFilter = static_cast<int64_t>(caps.numBytesInSectionFilter),
+            .filterCaps = static_cast<int32_t>(caps.filterCaps),
+            .bTimeFilter = caps.bTimeFilter,
+    };
+    aidlCaps.linkCaps.resize(caps.linkCaps.size());
+    copy(caps.linkCaps.begin(), caps.linkCaps.end(), aidlCaps.linkCaps.begin());
+    return aidlCaps;
+}
+
+FrontendInfo TunerHidlService::getAidlFrontendInfo(
+        const HidlFrontendInfo& halInfo, const HidlFrontendDtmbCapabilities& halDtmbCaps) {
+    FrontendInfo info{
+            .type = static_cast<FrontendType>(halInfo.type),
+            .minFrequency = static_cast<int64_t>(halInfo.minFrequency),
+            .maxFrequency = static_cast<int64_t>(halInfo.maxFrequency),
+            .minSymbolRate = static_cast<int32_t>(halInfo.minSymbolRate),
+            .maxSymbolRate = static_cast<int32_t>(halInfo.maxSymbolRate),
+            .acquireRange = static_cast<int64_t>(halInfo.acquireRange),
+            .exclusiveGroupId = static_cast<int32_t>(halInfo.exclusiveGroupId),
+    };
+    for (int i = 0; i < halInfo.statusCaps.size(); i++) {
+        info.statusCaps.push_back(static_cast<FrontendStatusType>(halInfo.statusCaps[i]));
+    }
+
+    FrontendCapabilities caps;
+    switch (halInfo.type) {
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ANALOG: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendAnalogCapabilities analogCaps{
+                    .typeCap = static_cast<int32_t>(halInfo.frontendCaps.analogCaps().typeCap),
+                    .sifStandardCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.analogCaps().sifStandardCap),
+            };
+            caps.set<FrontendCapabilities::analogCaps>(analogCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ATSC: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendAtscCapabilities atscCaps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.atscCaps().modulationCap),
+            };
+            caps.set<FrontendCapabilities::atscCaps>(atscCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ATSC3: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendAtsc3Capabilities atsc3Caps{
+                    .bandwidthCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().bandwidthCap),
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().modulationCap),
+                    .timeInterleaveModeCap = static_cast<int32_t>(
+                            halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap),
+                    .codeRateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().codeRateCap),
+                    .demodOutputFormatCap = static_cast<int8_t>(
+                            halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap),
+                    .fecCap = static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().fecCap),
+            };
+            caps.set<FrontendCapabilities::atsc3Caps>(atsc3Caps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBC: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendDvbcCapabilities dvbcCaps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbcCaps().modulationCap),
+                    .fecCap = static_cast<int64_t>(halInfo.frontendCaps.dvbcCaps().fecCap),
+                    .annexCap = static_cast<int8_t>(halInfo.frontendCaps.dvbcCaps().annexCap),
+            };
+            caps.set<FrontendCapabilities::dvbcCaps>(dvbcCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBS: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendDvbsCapabilities dvbsCaps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbsCaps().modulationCap),
+                    .innerfecCap =
+                            static_cast<int64_t>(halInfo.frontendCaps.dvbsCaps().innerfecCap),
+                    .standard = static_cast<int8_t>(halInfo.frontendCaps.dvbsCaps().standard),
+            };
+            caps.set<FrontendCapabilities::dvbsCaps>(dvbsCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBT: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendDvbtCapabilities dvbtCaps{
+                    .transmissionModeCap = static_cast<int32_t>(
+                            halInfo.frontendCaps.dvbtCaps().transmissionModeCap),
+                    .bandwidthCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().bandwidthCap),
+                    .constellationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().constellationCap),
+                    .coderateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().coderateCap),
+                    .hierarchyCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().hierarchyCap),
+                    .guardIntervalCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().guardIntervalCap),
+                    .isT2Supported = halInfo.frontendCaps.dvbtCaps().isT2Supported,
+                    .isMisoSupported = halInfo.frontendCaps.dvbtCaps().isMisoSupported,
+            };
+            caps.set<FrontendCapabilities::dvbtCaps>(dvbtCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBS: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendIsdbsCapabilities isdbsCaps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbsCaps().modulationCap),
+                    .coderateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbsCaps().coderateCap),
+            };
+            caps.set<FrontendCapabilities::isdbsCaps>(isdbsCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBS3: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendIsdbs3Capabilities isdbs3Caps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbs3Caps().modulationCap),
+                    .coderateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbs3Caps().coderateCap),
+            };
+            caps.set<FrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBT: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendIsdbtCapabilities isdbtCaps{
+                    .modeCap = static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().modeCap),
+                    .bandwidthCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().bandwidthCap),
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().modulationCap),
+                    .coderateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().coderateCap),
+                    .guardIntervalCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().guardIntervalCap),
+                    .timeInterleaveCap =
+                            static_cast<int32_t>(FrontendIsdbtTimeInterleaveMode::UNDEFINED),
+                    .isSegmentAuto = false,
+                    .isFullSegment = false,
+            };
+            caps.set<FrontendCapabilities::isdbtCaps>(isdbtCaps);
+        }
+        break;
+    }
+    default: {
+        if (static_cast<HidlFrontendType>(info.type) == HidlFrontendType::DTMB) {
+            FrontendDtmbCapabilities dtmbCaps{
+                    .transmissionModeCap = static_cast<int32_t>(halDtmbCaps.transmissionModeCap),
+                    .bandwidthCap = static_cast<int32_t>(halDtmbCaps.bandwidthCap),
+                    .modulationCap = static_cast<int32_t>(halDtmbCaps.modulationCap),
+                    .codeRateCap = static_cast<int32_t>(halDtmbCaps.codeRateCap),
+                    .guardIntervalCap = static_cast<int32_t>(halDtmbCaps.guardIntervalCap),
+                    .interleaveModeCap = static_cast<int32_t>(halDtmbCaps.interleaveModeCap),
+            };
+            caps.set<FrontendCapabilities::dtmbCaps>(dtmbCaps);
+        }
+        break;
+    }
+    }
+
+    info.frontendCaps = caps;
+    return info;
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
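
The private helpers above (getHidlFrontendIds, getHidlFrontendInfo, and the getLnbIds call inside getTRMLnbHandles) all flatten HIDL's callback-style getters into ordinary synchronous calls by capturing the callback arguments by reference. The standalone sketch below shows the same shape; FakeTuner and its getFrontendIds signature are invented stand-ins for the generated ::android::hardware::tv::tuner bindings, so this illustrates the pattern rather than reproducing code from this change.

    // Toy illustration of wrapping a HIDL callback-style getter into a
    // synchronous helper, mirroring TunerHidlService::getHidlFrontendIds.
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <vector>

    enum class FakeResult { SUCCESS, NOT_INITIALIZED };

    struct FakeTuner {
        // HIDL-style getter: results are delivered through a callback.
        void getFrontendIds(
                const std::function<void(FakeResult, const std::vector<uint32_t>&)>& cb) {
            cb(FakeResult::SUCCESS, {0, 1, 2});
        }
    };

    // Synchronous wrapper: capture the callback arguments into out-parameters.
    FakeResult getFrontendIdsSync(FakeTuner* tuner, std::vector<uint32_t>& ids) {
        if (tuner == nullptr) {
            return FakeResult::NOT_INITIALIZED;
        }
        FakeResult res = FakeResult::NOT_INITIALIZED;
        tuner->getFrontendIds([&](FakeResult r, const std::vector<uint32_t>& frontendIds) {
            ids = frontendIds;
            res = r;
        });
        return res;
    }

    int main() {
        FakeTuner tuner;
        std::vector<uint32_t> ids;
        if (getFrontendIdsSync(&tuner, ids) == FakeResult::SUCCESS) {
            std::cout << "found " << ids.size() << " frontends\n";
        }
        return 0;
    }

Capturing locals by reference is safe here because HIDL invokes the callback synchronously, on the caller's thread, before the getter returns; the service helpers above rely on the same property.
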
diff --git a/services/tuner/hidl/TunerHidlService.h b/services/tuner/hidl/TunerHidlService.h
new file mode 100644
index 0000000..2252d35
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlService.h
@@ -0,0 +1,137 @@
+/**
+ * Copyright (c) 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLSERVICE_H
+#define ANDROID_MEDIA_TUNERHIDLSERVICE_H
+
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/media/tv/tuner/BnTunerService.h>
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <android/hardware/tv/tuner/1.1/ITuner.h>
+#include <utils/Mutex.h>
+
+#include <map>
+#include <unordered_set>
+
+#include "TunerHelper.h"
+#include "TunerHidlFilter.h"
+#include "TunerHidlFrontend.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxCapabilities;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendType;
+using ::aidl::android::media::tv::tuner::ITunerDemux;
+using ::aidl::android::media::tv::tuner::ITunerDescrambler;
+using ::aidl::android::media::tv::tuner::ITunerFrontend;
+using ::aidl::android::media::tv::tuner::ITunerLnb;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::Mutex;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::map;
+using ::std::shared_ptr;
+using ::std::string;
+using ::std::unordered_set;
+using ::std::vector;
+
+using HidlFrontendDtmbCapabilities = ::android::hardware::tv::tuner::V1_1::FrontendDtmbCapabilities;
+using HidlDemuxFilterEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
+using HidlDemuxFilterStatus = ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
+using HidlDemuxCapabilities = ::android::hardware::tv::tuner::V1_0::DemuxCapabilities;
+using HidlFrontendInfo = ::android::hardware::tv::tuner::V1_0::FrontendInfo;
+using HidlITuner = ::android::hardware::tv::tuner::V1_0::ITuner;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlService : public BnTunerService {
+public:
+    static char const* getServiceName() { return "media.tuner"; }
+    static binder_status_t instantiate();
+    TunerHidlService();
+    virtual ~TunerHidlService();
+
+    ::ndk::ScopedAStatus getFrontendIds(vector<int32_t>* out_ids) override;
+    ::ndk::ScopedAStatus getFrontendInfo(int32_t in_frontendHandle,
+                                         FrontendInfo* _aidl_return) override;
+    ::ndk::ScopedAStatus openFrontend(int32_t in_frontendHandle,
+                                      shared_ptr<ITunerFrontend>* _aidl_return) override;
+    ::ndk::ScopedAStatus openLnb(int32_t in_lnbHandle,
+                                 shared_ptr<ITunerLnb>* _aidl_return) override;
+    ::ndk::ScopedAStatus openLnbByName(const std::string& in_lnbName,
+                                       shared_ptr<ITunerLnb>* _aidl_return) override;
+    ::ndk::ScopedAStatus openDemux(int32_t in_demuxHandle,
+                                   shared_ptr<ITunerDemux>* _aidl_return) override;
+    ::ndk::ScopedAStatus getDemuxCaps(DemuxCapabilities* _aidl_return) override;
+    ::ndk::ScopedAStatus openDescrambler(int32_t in_descramblerHandle,
+                                         shared_ptr<ITunerDescrambler>* _aidl_return) override;
+    ::ndk::ScopedAStatus getTunerHalVersion(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus openSharedFilter(const string& in_filterToken,
+                                          const shared_ptr<ITunerFilterCallback>& in_cb,
+                                          shared_ptr<ITunerFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus setLna(bool in_bEnable) override;
+    ::ndk::ScopedAStatus setMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                 int32_t in_maxNumber) override;
+    ::ndk::ScopedAStatus getMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                 int32_t* _aidl_return) override;
+
+    string addFilterToShared(const shared_ptr<TunerHidlFilter>& sharedFilter);
+    void removeSharedFilter(const shared_ptr<TunerHidlFilter>& sharedFilter);
+    void removeFrontend(const shared_ptr<TunerHidlFrontend>& frontend);
+
+    static shared_ptr<TunerHidlService> getTunerService();
+
+private:
+    bool hasITuner();
+    bool hasITuner_1_1();
+    void updateTunerResources();
+    vector<TunerFrontendInfo> getTRMFrontendInfos();
+    vector<int32_t> getTRMLnbHandles();
+    HidlResult getHidlFrontendIds(hidl_vec<HidlFrontendId>& ids);
+    HidlResult getHidlFrontendInfo(const int id, HidlFrontendInfo& info);
+    DemuxCapabilities getAidlDemuxCaps(const HidlDemuxCapabilities& caps);
+    FrontendInfo getAidlFrontendInfo(const HidlFrontendInfo& halInfo,
+                                     const HidlFrontendDtmbCapabilities& dtmbCaps);
+
+    sp<HidlITuner> mTuner;
+    sp<::android::hardware::tv::tuner::V1_1::ITuner> mTuner_1_1;
+    int mTunerVersion = TUNER_HAL_VERSION_UNKNOWN;
+    Mutex mSharedFiltersLock;
+    map<string, shared_ptr<TunerHidlFilter>> mSharedFilters;
+    Mutex mOpenedFrontendsLock;
+    unordered_set<shared_ptr<TunerHidlFrontend>> mOpenedFrontends;
+    int mLnaStatus = -1;
+
+    static shared_ptr<TunerHidlService> sTunerService;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLSERVICE_H
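
addFilterToShared() and removeSharedFilter() implement a small token registry around the mSharedFilters / mSharedFiltersLock members declared above: the filter object's address, printed as a decimal string, becomes the opaque token handed back to clients, and the map entry keeps the filter alive while the token is outstanding. The self-contained sketch below mirrors that pattern; Filter is a placeholder type and std::mutex stands in for the android::Mutex used in the service, so it is an illustration, not the service code.

    // Sketch of a pointer-address-as-token registry guarded by a mutex.
    #include <cstdint>
    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    struct Filter {};  // placeholder for TunerHidlFilter

    class SharedFilterRegistry {
    public:
        // The filter object's address doubles as the opaque token handed to clients.
        std::string add(const std::shared_ptr<Filter>& filter) {
            std::lock_guard<std::mutex> lock(mLock);
            std::string token =
                    std::to_string(reinterpret_cast<std::uintptr_t>(filter.get()));
            mFilters[token] = filter;
            return token;
        }

        void remove(const std::shared_ptr<Filter>& filter) {
            std::lock_guard<std::mutex> lock(mLock);
            mFilters.erase(std::to_string(reinterpret_cast<std::uintptr_t>(filter.get())));
        }

        std::shared_ptr<Filter> find(const std::string& token) {
            std::lock_guard<std::mutex> lock(mLock);
            auto it = mFilters.find(token);
            return it == mFilters.end() ? std::shared_ptr<Filter>{} : it->second;
        }

    private:
        std::mutex mLock;
        std::map<std::string, std::shared_ptr<Filter>> mFilters;
    };

Because the map holds a shared_ptr, the address cannot be reused for a different filter until the entry is erased, which is what makes the address a safe token.
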
diff --git a/services/tuner/hidl/TunerHidlTimeFilter.cpp b/services/tuner/hidl/TunerHidlTimeFilter.cpp
new file mode 100644
index 0000000..d0606d6
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlTimeFilter.cpp
@@ -0,0 +1,133 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlTimeFilter"
+
+#include "TunerHidlTimeFilter.h"
+
+#include <aidl/android/hardware/tv/tuner/Constant64Bit.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+using ::aidl::android::hardware::tv::tuner::Constant64Bit;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlTimeFilter::TunerHidlTimeFilter(sp<HidlITimeFilter> timeFilter) {
+    mTimeFilter = timeFilter;
+}
+
+TunerHidlTimeFilter::~TunerHidlTimeFilter() {
+    mTimeFilter = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::setTimeStamp(int64_t timeStamp) {
+    if (mTimeFilter == nullptr) {
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mTimeFilter->setTimeStamp(static_cast<uint64_t>(timeStamp));
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::clearTimeStamp() {
+    if (mTimeFilter == nullptr) {
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mTimeFilter->clearTimeStamp();
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::getSourceTime(int64_t* _aidl_return) {
+    if (mTimeFilter == nullptr) {
+        *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    mTimeFilter->getSourceTime([&](HidlResult r, uint64_t t) {
+        status = r;
+        *_aidl_return = static_cast<int64_t>(t);
+    });
+    if (status != HidlResult::SUCCESS) {
+        *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::getTimeStamp(int64_t* _aidl_return) {
+    if (mTimeFilter == nullptr) {
+        *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    mTimeFilter->getTimeStamp([&](HidlResult r, uint64_t t) {
+        status = r;
+        *_aidl_return = static_cast<int64_t>(t);
+    });
+    if (status != HidlResult::SUCCESS) {
+        *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::close() {
+    if (mTimeFilter == nullptr) {
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mTimeFilter->close();
+    mTimeFilter = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
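
Every method in TunerHidlTimeFilter repeats the same translation: a non-SUCCESS HidlResult from the 1.0 HAL is surfaced as a service-specific AIDL error, and SUCCESS becomes ScopedAStatus::ok(). A helper along the following lines could factor that out; it is not part of this change and only builds inside the Android tree against the same headers and aliases used above.

    // Possible (hypothetical) helper for the HidlResult -> ScopedAStatus mapping.
    #include <android/binder_auto_utils.h>
    #include <android/hardware/tv/tuner/1.0/types.h>

    using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;

    static ::ndk::ScopedAStatus toAidlStatus(HidlResult res) {
        if (res == HidlResult::SUCCESS) {
            return ::ndk::ScopedAStatus::ok();
        }
        // Non-SUCCESS HAL results become service-specific errors, matching the
        // inline handling in setTimeStamp(), clearTimeStamp(), and close().
        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
    }

    // Example use, mirroring clearTimeStamp():
    //     HidlResult status = mTimeFilter->clearTimeStamp();
    //     return toAidlStatus(status);
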
diff --git a/services/tuner/hidl/TunerHidlTimeFilter.h b/services/tuner/hidl/TunerHidlTimeFilter.h
new file mode 100644
index 0000000..78f9c5e
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlTimeFilter.h
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
+#define ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
+
+#include <aidl/android/media/tv/tuner/BnTunerTimeFilter.h>
+#include <android/hardware/tv/tuner/1.0/ITimeFilter.h>
+#include <utils/Log.h>
+
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+using HidlITimeFilter = ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlTimeFilter : public BnTunerTimeFilter {
+public:
+    TunerHidlTimeFilter(sp<HidlITimeFilter> timeFilter);
+    virtual ~TunerHidlTimeFilter();
+
+    ::ndk::ScopedAStatus setTimeStamp(int64_t in_timeStamp) override;
+    ::ndk::ScopedAStatus clearTimeStamp() override;
+    ::ndk::ScopedAStatus getSourceTime(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getTimeStamp(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus close() override;
+
+private:
+    sp<HidlITimeFilter> mTimeFilter;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
diff --git a/services/tuner/main_tunerservice.cpp b/services/tuner/main_tunerservice.cpp
index 586a0e2..a014dea 100644
--- a/services/tuner/main_tunerservice.cpp
+++ b/services/tuner/main_tunerservice.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,30 +14,33 @@
  * limitations under the License.
  */
 
-#include <utils/Log.h>
+#include <android-base/logging.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
-#include <binder/ProcessState.h>
-#include <hidl/HidlTransportSupport.h>
+#include <utils/Log.h>
 
 #include "TunerService.h"
+#include "hidl/TunerHidlService.h"
+
+using ::aidl::android::media::tv::tuner::TunerHidlService;
+using ::aidl::android::media::tv::tuner::TunerService;
 
 using namespace android;
 
-int main(int argc __unused, char** argv) {
+int main() {
     ALOGD("Tuner service starting");
 
-    strcpy(argv[0], "media.tuner");
     sp<ProcessState> proc(ProcessState::self());
     sp<IServiceManager> sm = defaultServiceManager();
-    ALOGD("ServiceManager: %p", sm.get());
 
-    binder_status_t status = TunerService::instantiate();
+    // Check the legacy HIDL HAL first; if it is not present, fall back to the AIDL HAL.
+    binder_status_t status = TunerHidlService::instantiate();
     if (status != STATUS_OK) {
-        ALOGD("Failed to add tuner service as AIDL interface");
-        return -1;
+        status = TunerService::instantiate();
+        CHECK(status == STATUS_OK);
     }
 
     ProcessState::self()->startThreadPool();
     IPCThreadState::self()->joinThreadPool();
+    return EXIT_FAILURE;  // should never be reached
 }
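
main() now prefers the HIDL-backed service and only registers the plain AIDL TunerService when TunerHidlService::instantiate() reports a failure, aborting via CHECK if neither can be registered. The toy program below mirrors just that control flow; registerHidlBacked() and registerAidlBacked() are invented stand-ins for the two instantiate() calls, whose real bodies are not part of this hunk.

    // Toy sketch of the "prefer HIDL, fall back to AIDL, abort otherwise" startup.
    #include <cassert>
    #include <cstdlib>
    #include <iostream>

    enum Status { OK, NOT_FOUND };

    Status registerHidlBacked() { return NOT_FOUND; }  // pretend no HIDL HAL exists
    Status registerAidlBacked() { return OK; }

    int main() {
        Status status = registerHidlBacked();
        if (status != OK) {
            status = registerAidlBacked();
            assert(status == OK && "no tuner HAL available");  // mirrors CHECK()
        }
        std::cout << "tuner service registered\n";
        // A real service would now start and join the binder thread pool and never return,
        // which is why the trailing return in main_tunerservice.cpp is marked unreachable.
        return EXIT_SUCCESS;
    }
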
diff --git a/services/tuner/mediatuner.rc b/services/tuner/mediatuner.rc
index fd30618..6a3e199 100644
--- a/services/tuner/mediatuner.rc
+++ b/services/tuner/mediatuner.rc
@@ -2,4 +2,7 @@
     class main
     group media
     ioprio rt 4
+    onrestart restart vendor.tuner-hal-1-0
+    onrestart restart vendor.tuner-hal-1-1
+    onrestart restart vendor.tuner-default
     task_profiles ProcessCapacityHigh HighPerformance