Merge "libaudiopolicy: Add static variant of library" am: 3c401859b1 am: f26ab14fd6 am: 08f286d569 am: dfbb372d05

Original change: https://android-review.googlesource.com/c/platform/frameworks/av/+/1952204

Change-Id: Ia06659c29f1b9bcb247e5e35f82d93ef9fcfdcbf
diff --git a/camera/Android.bp b/camera/Android.bp
index 4ed3269..e44202b 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -113,6 +113,30 @@
 
 }
 
+cc_library_host_static {
+    name: "libcamera_client_host",
+
+    srcs: [
+        "CameraMetadata.cpp",
+        "VendorTagDescriptor.cpp",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libcamera_metadata",
+    ],
+
+    include_dirs: [
+        "system/media/private/camera/include",
+        "frameworks/native/include/media/openmax",
+    ],
+
+    export_include_dirs: [
+        "include",
+        "include/camera"
+    ],
+}
+
 // AIDL interface between camera clients and the camera service.
 filegroup {
     name: "libcamera_client_aidl",
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 03439fd..24c9108 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -68,6 +68,9 @@
         unavailablePhysicalIds16.push_back(String16(id8));
     }
     res = parcel->writeString16Vector(unavailablePhysicalIds16);
+    if (res != OK) return res;
+
+    res = parcel->writeString16(String16(clientPackage));
     return res;
 }
 
@@ -86,6 +89,12 @@
     for (auto& id16 : unavailablePhysicalIds16) {
         unavailablePhysicalIds.push_back(String8(id16));
     }
+
+    String16 tempClientPackage;
+    res = parcel->readString16(&tempClientPackage);
+    if (res != OK) return res;
+    clientPackage = String8(tempClientPackage);
+
     return res;
 }
 
diff --git a/camera/CameraSessionStats.cpp b/camera/CameraSessionStats.cpp
index 28e037f..bc83ec1 100644
--- a/camera/CameraSessionStats.cpp
+++ b/camera/CameraSessionStats.cpp
@@ -112,6 +112,12 @@
         return err;
     }
 
+    int dynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+    if ((err = parcel->readInt32(&dynamicRangeProfile)) != OK) {
+        ALOGE("%s: Failed to read dynamic range profile type from parcel", __FUNCTION__);
+        return err;
+    }
+
     mWidth = width;
     mHeight = height;
     mFormat = format;
@@ -125,6 +131,7 @@
     mHistogramType = histogramType;
     mHistogramBins = std::move(histogramBins);
     mHistogramCounts = std::move(histogramCounts);
+    mDynamicRangeProfile = dynamicRangeProfile;
 
     return OK;
 }
@@ -202,6 +209,11 @@
         return err;
     }
 
+    if ((err = parcel->writeInt32(mDynamicRangeProfile)) != OK) {
+        ALOGE("%s: Failed to write dynamic range profile type", __FUNCTION__);
+        return err;
+    }
+
     return OK;
 }
 
diff --git a/camera/OWNERS b/camera/OWNERS
index 2a1d523..385c163 100644
--- a/camera/OWNERS
+++ b/camera/OWNERS
@@ -1,4 +1,3 @@
-
 # Bug component: 41727
 etalvala@google.com
 arakesh@google.com
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 78a77d4..1e748c7 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -173,6 +173,13 @@
 
     void setTorchMode(String cameraId, boolean enabled, IBinder clientBinder);
 
+    // Change the brightness level of the flash unit associated with cameraId to strengthLevel.
+    // If the torch is in OFF state and strengthLevel > 0 then the torch will also be turned ON.
+    void turnOnTorchWithStrengthLevel(String cameraId, int strengthLevel, IBinder clientBinder);
+
+    // Get the brightness level of the flash unit associated with cameraId.
+    int getTorchStrengthLevel(String cameraId);
+
     /**
      * Notify the camera service of a system event.  Should only be called from system_server.
      *
@@ -180,6 +187,8 @@
      */
     const int EVENT_NONE = 0;
     const int EVENT_USER_SWITCHED = 1; // The argument is the set of new foreground user IDs.
+    const int EVENT_USB_DEVICE_ATTACHED = 2; // The arguments are the deviceId and vendorId
+    const int EVENT_USB_DEVICE_DETACHED = 3; // The arguments are the deviceId and vendorId
     oneway void notifySystemEvent(int eventId, in int[] args);
 
     /**
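
The torch-strength methods added above surface in native code through the aidl-cpp generated ICameraService proxy. Below is a minimal caller sketch; the generated header path, the exact proxy signatures, and the "media.camera" service name follow the standard AIDL-to-C++ mapping and are assumptions for illustration, not part of this change.

```
// Hypothetical native caller of the new torch-strength binder calls.
// The generated method signatures below are assumed from the AIDL declaration.
#include <android/hardware/ICameraService.h>
#include <binder/Binder.h>
#include <binder/IInterface.h>
#include <binder/IServiceManager.h>
#include <utils/String16.h>

using android::BBinder;
using android::IBinder;
using android::String16;
using android::defaultServiceManager;
using android::interface_cast;
using android::sp;
using android::hardware::ICameraService;

int main() {
    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.camera"));
    sp<ICameraService> service = interface_cast<ICameraService>(binder);
    if (service == nullptr) return 1;

    sp<IBinder> clientToken = new BBinder();  // identifies this torch client
    const String16 cameraId("0");             // assumed camera ID for brevity

    // Ramp the torch to strength level 2; this also turns it on if it was off.
    android::binder::Status status =
            service->turnOnTorchWithStrengthLevel(cameraId, 2, clientToken);

    // Read the level back through the companion query.
    int32_t level = 0;
    if (status.isOk()) {
        status = service->getTorchStrengthLevel(cameraId, &level);
    }
    return status.isOk() ? 0 : 1;
}
```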
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
index c54813c..5f17f5b 100644
--- a/camera/aidl/android/hardware/ICameraServiceListener.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -83,6 +83,8 @@
 
     oneway void onTorchStatusChanged(int status, String cameraId);
 
+    oneway void onTorchStrengthLevelChanged(String cameraId, int newTorchStrength);
+
     /**
      * Notify registered clients about camera access priority changes.
      * Clients which were previously unable to open a certain camera device
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 2bccd87..15c9dc9 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -23,6 +23,7 @@
 #include <camera/camera2/OutputConfiguration.h>
 #include <binder/Parcel.h>
 #include <gui/view/Surface.h>
+#include <system/camera_metadata.h>
 #include <utils/String8.h>
 
 namespace android {
@@ -76,6 +77,10 @@
     return mSensorPixelModesUsed;
 }
 
+int OutputConfiguration::getDynamicRangeProfile() const {
+    return mDynamicRangeProfile;
+}
+
 OutputConfiguration::OutputConfiguration() :
         mRotation(INVALID_ROTATION),
         mSurfaceSetID(INVALID_SET_ID),
@@ -84,7 +89,8 @@
         mHeight(0),
         mIsDeferred(false),
         mIsShared(false),
-        mIsMultiResolution(false) {
+        mIsMultiResolution(false),
+        mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
 }
 
 OutputConfiguration::OutputConfiguration(const android::Parcel& parcel) :
@@ -165,6 +171,12 @@
         ALOGE("%s: Failed to read sensor pixel mode(s) from parcel", __FUNCTION__);
         return err;
     }
+    int dynamicProfile;
+    if ((err = parcel->readInt32(&dynamicProfile)) != OK) {
+        ALOGE("%s: Failed to read surface dynamic range profile flag from parcel", __FUNCTION__);
+        return err;
+    }
+
     mRotation = rotation;
     mSurfaceSetID = setID;
     mSurfaceType = surfaceType;
@@ -181,6 +193,7 @@
     }
 
     mSensorPixelModesUsed = std::move(sensorPixelModesUsed);
+    mDynamicRangeProfile = dynamicProfile;
 
     ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d,"
           " physicalCameraId = %s, isMultiResolution = %d", __FUNCTION__, mRotation,
@@ -199,6 +212,7 @@
     mIsShared = isShared;
     mPhysicalCameraId = physicalId;
     mIsMultiResolution = false;
+    mDynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
 }
 
 OutputConfiguration::OutputConfiguration(
@@ -207,7 +221,8 @@
     int width, int height, bool isShared)
   : mGbps(gbps), mRotation(rotation), mSurfaceSetID(surfaceSetID), mSurfaceType(surfaceType),
     mWidth(width), mHeight(height), mIsDeferred(false), mIsShared(isShared),
-    mPhysicalCameraId(physicalCameraId), mIsMultiResolution(false) { }
+    mPhysicalCameraId(physicalCameraId), mIsMultiResolution(false),
+    mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) { }
 
 status_t OutputConfiguration::writeToParcel(android::Parcel* parcel) const {
 
@@ -254,6 +269,9 @@
     err = parcel->writeParcelableVector(mSensorPixelModesUsed);
     if (err != OK) return err;
 
+    err = parcel->writeInt32(mDynamicRangeProfile);
+    if (err != OK) return err;
+
     return OK;
 }
 
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index 8ca8920..6d884cb 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -46,6 +46,7 @@
         "android.hardware.camera.device@1.0",
         "android.hardware.camera.device@3.2",
         "android.hardware.camera.device@3.4",
+        "android.hardware.camera.device@3.8",
     ],
     compile_multilib: "first",
     cflags: [
diff --git a/camera/include/camera/CameraBase.h b/camera/include/camera/CameraBase.h
index e156994..8e53968 100644
--- a/camera/include/camera/CameraBase.h
+++ b/camera/include/camera/CameraBase.h
@@ -85,11 +85,17 @@
      */
     std::vector<String8> unavailablePhysicalIds;
 
+    /**
+     * Client package name if the camera is open; otherwise not applicable.
+     */
+    String8 clientPackage;
+
     virtual status_t writeToParcel(android::Parcel* parcel) const;
     virtual status_t readFromParcel(const android::Parcel* parcel);
 
-    CameraStatus(String8 id, int32_t s, const std::vector<String8>& unavailSubIds) :
-            cameraId(id), status(s), unavailablePhysicalIds(unavailSubIds) {}
+    CameraStatus(String8 id, int32_t s, const std::vector<String8>& unavailSubIds,
+            const String8& clientPkg) : cameraId(id), status(s),
+            unavailablePhysicalIds(unavailSubIds), clientPackage(clientPkg) {}
     CameraStatus() : status(ICameraServiceListener::STATUS_PRESENT) {}
 };
 
diff --git a/camera/include/camera/CameraSessionStats.h b/camera/include/camera/CameraSessionStats.h
index c398aca..1209a20 100644
--- a/camera/include/camera/CameraSessionStats.h
+++ b/camera/include/camera/CameraSessionStats.h
@@ -19,6 +19,8 @@
 
 #include <binder/Parcelable.h>
 
+#include <camera/CameraMetadata.h>
+
 namespace android {
 namespace hardware {
 
@@ -60,16 +62,21 @@
     // size(mHistogramBins) + 1 = size(mHistogramCounts)
     std::vector<int64_t> mHistogramCounts;
 
+    // Dynamic range profile
+    int mDynamicRangeProfile;
+
     CameraStreamStats() :
             mWidth(0), mHeight(0), mFormat(0), mDataSpace(0), mUsage(0),
             mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
-            mMaxHalBuffers(0), mMaxAppBuffers(0), mHistogramType(HISTOGRAM_TYPE_UNKNOWN) {}
+            mMaxHalBuffers(0), mMaxAppBuffers(0), mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
+            mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {}
     CameraStreamStats(int width, int height, int format, int dataSpace, int64_t usage,
-            int maxHalBuffers, int maxAppBuffers)
+            int maxHalBuffers, int maxAppBuffers, int dynamicRangeProfile)
             : mWidth(width), mHeight(height), mFormat(format), mDataSpace(dataSpace),
               mUsage(usage), mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
               mMaxHalBuffers(maxHalBuffers), mMaxAppBuffers(maxAppBuffers),
-              mHistogramType(HISTOGRAM_TYPE_UNKNOWN) {}
+              mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
+              mDynamicRangeProfile(dynamicRangeProfile) {}
 
     virtual status_t readFromParcel(const android::Parcel* parcel) override;
     virtual status_t writeToParcel(android::Parcel* parcel) const override;
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index f80ed3a..1631903 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -44,6 +44,7 @@
     int                        getSurfaceType() const;
     int                        getWidth() const;
     int                        getHeight() const;
+    int                        getDynamicRangeProfile() const;
     bool                       isDeferred() const;
     bool                       isShared() const;
     String16                   getPhysicalCameraId() const;
@@ -89,7 +90,8 @@
                 gbpsEqual(other) &&
                 mPhysicalCameraId == other.mPhysicalCameraId &&
                 mIsMultiResolution == other.mIsMultiResolution &&
-                sensorPixelModesUsedEqual(other));
+                sensorPixelModesUsedEqual(other) &&
+                mDynamicRangeProfile == other.mDynamicRangeProfile);
     }
     bool operator != (const OutputConfiguration& other) const {
         return !(*this == other);
@@ -126,6 +128,9 @@
         if (!sensorPixelModesUsedEqual(other)) {
             return sensorPixelModesUsedLessThan(other);
         }
+        if (mDynamicRangeProfile != other.mDynamicRangeProfile) {
+            return mDynamicRangeProfile < other.mDynamicRangeProfile;
+        }
         return gbpsLessThan(other);
     }
 
@@ -150,6 +155,7 @@
     String16                   mPhysicalCameraId;
     bool                       mIsMultiResolution;
     std::vector<int32_t>       mSensorPixelModesUsed;
+    int                        mDynamicRangeProfile;
 };
 } // namespace params
 } // namespace camera2
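
Since readFromParcel() now consumes the extra int32 that writeToParcel() emits, and operator== compares mDynamicRangeProfile, a quick parcel round trip (in the spirit of the existing CameraBinderTests) is enough to sanity-check the new field. This is only a sketch, under the assumption that a default-constructed OutputConfiguration serializes cleanly:

```
// Sketch: round-trip the dynamic range profile through a Parcel.
#include <binder/Parcel.h>
#include <camera/camera2/OutputConfiguration.h>

using android::Parcel;
using android::hardware::camera2::params::OutputConfiguration;

bool dynamicRangeProfileRoundTrips() {
    OutputConfiguration original;  // defaults to the STANDARD profile

    Parcel parcel;
    if (original.writeToParcel(&parcel) != android::OK) return false;

    parcel.setDataPosition(0);              // rewind before reading back
    OutputConfiguration restored(parcel);   // parcel-based constructor

    // operator== now includes mDynamicRangeProfile, so equality covers the new field.
    return restored == original &&
           restored.getDynamicRangeProfile() == original.getDynamicRangeProfile();
}
```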
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 95ef2b2..5892f1a 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -189,8 +189,12 @@
     sp<CameraManagerGlobal> cm = mCameraManager.promote();
     if (cm != nullptr) {
         AutoMutex lock(cm->mLock);
+        std::vector<String8> cameraIdList;
         for (auto& pair : cm->mDeviceStatusMap) {
-            const String8 &cameraId = pair.first;
+            cameraIdList.push_back(pair.first);
+        }
+
+        for (String8 cameraId : cameraIdList) {
             cm->onStatusChangedLocked(
                     CameraServiceListener::STATUS_NOT_PRESENT, cameraId);
         }
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index da887a2..d53d809 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -95,6 +95,9 @@
         virtual binder::Status onTorchStatusChanged(int32_t, const String16&) {
             return binder::Status::ok();
         }
+        virtual binder::Status onTorchStrengthLevelChanged(const String16&, int32_t) {
+            return binder::Status::ok();
+        }
 
         virtual binder::Status onCameraAccessPrioritiesChanged();
         virtual binder::Status onCameraOpened(const String16&, const String16&) {
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 816303c..bd281c8 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1090,6 +1090,15 @@
      * (ACAMERA_LENS_OPTICAL_STABILIZATION_MODE), turning both modes on may
      * produce undesirable interaction, so it is recommended not to enable
      * both at the same time.</p>
+     * <p>If video stabilization is set to "PREVIEW_STABILIZATION",
+     * ACAMERA_LENS_OPTICAL_STABILIZATION_MODE is overridden. The camera sub-system may choose
+     * to turn on hardware based image stabilization in addition to software based stabilization
+     * if it deems that appropriate.
+     * This key may be a part of the available session keys, which camera clients may
+     * query via
+     * {@link ACameraManager_getCameraCharacteristics }.
+     * If this is the case, changing this key over the life-time of a capture session may
+     * cause delays / glitches.</p>
      *
      * @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
      * @see ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
@@ -2144,6 +2153,51 @@
      */
     ACAMERA_FLASH_INFO_AVAILABLE =                              // byte (acamera_metadata_enum_android_flash_info_available_t)
             ACAMERA_FLASH_INFO_START,
+    /**
+     * <p>Maximum flashlight brightness level.</p>
+     *
+     * <p>Type: int32</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>If this value is greater than 1, then the device supports controlling the
+     * flashlight brightness level via
+     * {android.hardware.camera2.CameraManager#turnOnTorchWithStrengthLevel}.
+     * If this value is equal to 1, flashlight brightness control is not supported.
+     * The value for this key will be null for devices with no flash unit.</p>
+     */
+    ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL =                 // int32
+            ACAMERA_FLASH_INFO_START + 2,
+    /**
+     * <p>Default flashlight brightness level to be set via
+     * {android.hardware.camera2.CameraManager#turnOnTorchWithStrengthLevel}.</p>
+     *
+     * <p>Type: int32</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>If a flash unit is available, this will be greater than or equal to 1 and less
+     * than or equal to <code>ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL</code>.</p>
+     * <p>Setting flashlight brightness above the default level
+     * (i.e. <code>ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL</code>) may make the device more
+     * likely to reach thermal throttling conditions and slow down, or drain the
+     * battery quicker than normal. To minimize such issues, it is recommended to
+     * start the flashlight at this default brightness until a user explicitly requests
+     * a brighter level.
+     * Note that the value for this key will be null for devices with no flash unit.
+     * The default level should always be &gt; 0.</p>
+     *
+     * @see ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL
+     * @see ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL
+     */
+    ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL =                 // int32
+            ACAMERA_FLASH_INFO_START + 3,
     ACAMERA_FLASH_INFO_END,
 
     /**
@@ -2526,12 +2580,18 @@
      * <p>If a camera device supports both OIS and digital image stabilization
      * (ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE), turning both modes on may produce undesirable
      * interaction, so it is recommended not to enable both at the same time.</p>
+     * <p>If ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE is set to "PREVIEW_STABILIZATION",
+     * ACAMERA_LENS_OPTICAL_STABILIZATION_MODE is overridden. The camera sub-system may choose
+     * to turn on hardware based image stabilization in addition to software based stabilization
+     * if it deems that appropriate. This key's value in the capture result will reflect which
+     * OIS mode was chosen.</p>
      * <p>Not all devices will support OIS; see
      * ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION for
      * available controls.</p>
      *
      * @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
      * @see ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION
+     * @see ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
      */
     ACAMERA_LENS_OPTICAL_STABILIZATION_MODE =                   // byte (acamera_metadata_enum_android_lens_optical_stabilization_mode_t)
             ACAMERA_LENS_START + 4,
@@ -3403,6 +3463,25 @@
      */
     ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS =    // int32[n]
             ACAMERA_REQUEST_START + 17,
+    /**
+     * <p>A map of all available 10-bit dynamic range profiles along with their
+     * capture request constraints.</p>
+     *
+     * <p>Type: int32[n*2] (acamera_metadata_enum_android_request_available_dynamic_range_profiles_map_t)</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>Devices supporting the 10-bit output capability
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT">CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT</a>
+     * must list their supported dynamic range profiles. If the camera is not able to
+     * support every possible profile combination within a single capture request, the
+     * constraints must be listed here as well.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP =      // int32[n*2] (acamera_metadata_enum_android_request_available_dynamic_range_profiles_map_t)
+            ACAMERA_REQUEST_START + 19,
     ACAMERA_REQUEST_END,
 
     /**
@@ -7972,6 +8051,17 @@
      */
     ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_ON                      = 1,
 
+    /**
+     * <p>Preview stabilization, where the preview in addition to all other non-RAW streams are
+     * stabilized with the same quality of stabilization, is enabled. This mode aims to give
+     * clients a 'what you see is what you get' effect. In this mode, the FoV reduction will
+     * be a maximum of 20 % both horizontally and vertically
+     * (10% from left, right, top, bottom) for the given zoom ratio / crop region.
+     * The resultant FoV will also be the same across all processed streams
+     * (that have the same aspect ratio).</p>
+     */
+    ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION   = 2,
+
 } acamera_metadata_enum_android_control_video_stabilization_mode_t;
 
 // ACAMERA_CONTROL_AE_STATE
@@ -9054,6 +9144,97 @@
 
 } acamera_metadata_enum_android_request_available_capabilities_t;
 
+// ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP
+typedef enum acamera_metadata_enum_acamera_request_available_dynamic_range_profiles_map {
+    /**
+     * <p>8-bit SDR profile which is the default for all non 10-bit output capable devices.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD    = 0x1,
+
+    /**
+     * <p>10-bit pixel samples encoded using the Hybrid log-gamma transfer function.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10       = 0x2,
+
+    /**
+     * <p>10-bit pixel samples encoded using the SMPTE ST 2084 transfer function.
+     * This profile utilizes internal static metadata to increase the quality
+     * of the capture.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10       = 0x4,
+
+    /**
+     * <p>10-bit pixel samples encoded using the SMPTE ST 2084 transfer function.
+     * In contrast to HDR10, this profile uses internal per-frame metadata
+     * to further enhance the quality of the capture.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS  = 0x8,
+
+    /**
+     * <p>This is a camera mode for Dolby Vision capture optimized for a more scene
+     * accurate capture. This would typically differ from what a specific device
+     * might want to tune for a consumer optimized Dolby Vision general capture.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF
+                                                                      = 0x10,
+
+    /**
+     * <p>This is the power optimized mode for 10-bit Dolby Vision HDR Reference Mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF_PO
+                                                                      = 0x20,
+
+    /**
+     * <p>This is the camera mode for the default Dolby Vision capture mode for the
+     * specific device. This would be tuned by each specific device for consumer
+     * pleasing results that resonate with their particular audience. We expect
+     * that each specific device would have a different look for their default
+     * Dolby Vision capture.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM
+                                                                      = 0x40,
+
+    /**
+     * <p>This is the power optimized mode for 10-bit Dolby Vision HDR device specific
+     * capture Mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM_PO
+                                                                      = 0x80,
+
+    /**
+     * <p>This is the 8-bit version of the Dolby Vision reference capture mode optimized
+     * for scene accuracy.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_REF
+                                                                      = 0x100,
+
+    /**
+     * <p>This is the power optimized mode for 8-bit Dolby Vision HDR Reference Mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_REF_PO
+                                                                      = 0x200,
+
+    /**
+     * <p>This is the 8-bit version of device specific tuned and optimized Dolby Vision
+     * capture mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_OEM
+                                                                      = 0x400,
+
+    /**
+     * <p>This is the power optimized mode for 8-bit Dolby Vision HDR device specific
+     * capture Mode.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_OEM_PO
+                                                                      = 0x800,
+
+    /**
+     *
+     */
+    ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_MAX         = 0x1000,
+
+} acamera_metadata_enum_android_request_available_dynamic_range_profiles_map_t;
+
 
 // ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
 typedef enum acamera_metadata_enum_acamera_scaler_available_stream_configurations {
@@ -9145,6 +9326,20 @@
                                                                       = 0x7,
 
     /**
+     * <p>If supported, the recommended 10-bit output stream configurations must include
+     * a subset of the advertised <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YCBCR_P010">ImageFormat#YCBCR_P010</a> and
+     * <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#PRIVATE">ImageFormat#PRIVATE</a> outputs that are optimized for power
+     * and performance when registered along with a supported 10-bit dynamic range profile.
+     * See android.hardware.camera2.params.OutputConfiguration#setDynamicRangeProfile for
+     * details.</p>
+     */
+    ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_10BIT_OUTPUT
+                                                                      = 0x8,
+
+    ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PUBLIC_END_3_8
+                                                                      = 0x9,
+
+    /**
      * <p>Vendor defined use cases. These depend on the vendor implementation.</p>
      */
     ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VENDOR_START
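
The new static tags above are readable through the existing NDK metadata entry points. The following sketch assumes camera ID "0" and minimal error handling; it only illustrates how a client could probe the flash strength limit and the dynamic-range-profile pairs.

```
// Illustrative NDK query of the new flash-strength and dynamic-range tags.
#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>
#include <cstdio>

void dumpNewCameraTags() {
    ACameraManager* manager = ACameraManager_create();
    ACameraMetadata* chars = nullptr;
    // Camera ID "0" is assumed; real code should walk ACameraManager_getCameraIdList().
    if (ACameraManager_getCameraCharacteristics(manager, "0", &chars) == ACAMERA_OK) {
        ACameraMetadata_const_entry entry;

        // A maximum level greater than 1 means torch brightness is adjustable.
        if (ACameraMetadata_getConstEntry(chars,
                ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL, &entry) == ACAMERA_OK) {
            printf("max torch strength: %d\n", entry.data.i32[0]);
        }

        // The dynamic range map is documented above as int32[n*2]: (profile, constraints).
        if (ACameraMetadata_getConstEntry(chars,
                ACAMERA_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP, &entry) == ACAMERA_OK) {
            for (uint32_t i = 0; i + 1 < entry.count; i += 2) {
                printf("profile 0x%x constraints 0x%x\n",
                        entry.data.i32[i], entry.data.i32[i + 1]);
            }
        }
        ACameraMetadata_free(chars);
    }
    ACameraManager_delete(manager);
}
```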
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 9f2f430..17ea512 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -96,6 +96,12 @@
         return binder::Status::ok();
     };
 
+    virtual binder::Status onTorchStrengthLevelChanged(const String16& /*cameraId*/,
+            int32_t /*torchStrength*/) {
+        // No op
+        return binder::Status::ok();
+    }
+
     virtual binder::Status onCameraAccessPrioritiesChanged() {
         // No op
         return binder::Status::ok();
diff --git a/cmds/screenrecord/Android.bp b/cmds/screenrecord/Android.bp
index 359a835..d0b3ce0 100644
--- a/cmds/screenrecord/Android.bp
+++ b/cmds/screenrecord/Android.bp
@@ -55,12 +55,6 @@
         "libGLESv2",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/av/media/libstagefright/include",
-        "frameworks/native/include/media/openmax",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index e6e3473..2e0b678 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -701,7 +701,7 @@
         printf("Display is %dx%d @%.2ffps (orientation=%s), layerStack=%u\n",
                 layerStackSpaceRect.getWidth(), layerStackSpaceRect.getHeight(),
                 displayMode.refreshRate, toCString(displayState.orientation),
-                displayState.layerStack);
+                displayState.layerStack.id);
         fflush(stdout);
     }
 
@@ -1067,7 +1067,7 @@
 
     std::optional<PhysicalDisplayId> displayId = SurfaceComposerClient::getInternalDisplayId();
     if (!displayId) {
-        fprintf(stderr, "Failed to get token for internal display\n");
+        fprintf(stderr, "Failed to get ID for internal display\n");
         return 1;
     }
 
@@ -1168,17 +1168,14 @@
             }
             break;
         case 'd':
-            gPhysicalDisplayId = PhysicalDisplayId(atoll(optarg));
-            if (gPhysicalDisplayId.value == 0) {
-                fprintf(stderr, "Please specify a valid physical display id\n");
-                return 2;
-            } else if (SurfaceComposerClient::
-                    getPhysicalDisplayToken(gPhysicalDisplayId) == nullptr) {
-                fprintf(stderr, "Invalid physical display id: %s\n",
-                        to_string(gPhysicalDisplayId).c_str());
-                return 2;
+            if (const auto id = android::DisplayId::fromValue<PhysicalDisplayId>(atoll(optarg));
+                id && SurfaceComposerClient::getPhysicalDisplayToken(*id)) {
+                gPhysicalDisplayId = *id;
+                break;
             }
-            break;
+
+            fprintf(stderr, "Invalid physical display ID\n");
+            return 2;
         default:
             if (ic != '?') {
                 fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/cmds/stagefright/Android.bp b/cmds/stagefright/Android.bp
index c4783d3..e1fe07e 100644
--- a/cmds/stagefright/Android.bp
+++ b/cmds/stagefright/Android.bp
@@ -227,8 +227,6 @@
         "rs-headers",
     ],
 
-    include_dirs: ["frameworks/av/media/libstagefright"],
-
     shared_libs: [
         "libstagefright",
         "liblog",
diff --git a/cmds/stagefright/AudioPlayer.cpp b/cmds/stagefright/AudioPlayer.cpp
index 55427ca..a63bde6 100644
--- a/cmds/stagefright/AudioPlayer.cpp
+++ b/cmds/stagefright/AudioPlayer.cpp
@@ -249,7 +249,8 @@
 
         mAudioTrack = new AudioTrack(
                 AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
-                0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this,
+                0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE,
+                wp<IAudioTrackCallback>::fromExisting(this),
                 0 /*notificationFrames*/);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
@@ -397,10 +398,6 @@
     mStartPosUs = 0;
 }
 
-// static
-void AudioPlayer::AudioCallback(int event, void *user, void *info) {
-    static_cast<AudioPlayer *>(user)->AudioCallback(event, info);
-}
 
 bool AudioPlayer::reachedEOS(status_t *finalStatus) {
     *finalStatus = OK;
@@ -455,20 +452,12 @@
     return 0;
 }
 
-void AudioPlayer::AudioCallback(int event, void *info) {
-    switch (event) {
-    case AudioTrack::EVENT_MORE_DATA:
-        {
-        AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-        size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
-        buffer->size = numBytesWritten;
-        }
-        break;
+size_t AudioPlayer::onMoreData(const AudioTrack::Buffer& buffer) {
+    return fillBuffer(buffer.raw, buffer.size);
+}
 
-    case AudioTrack::EVENT_STREAM_END:
-        mReachedEOS = true;
-        break;
-    }
+void AudioPlayer::onStreamEnd() {
+    mReachedEOS = true;
 }
 
 size_t AudioPlayer::fillBuffer(void *data, size_t size) {
diff --git a/cmds/stagefright/AudioPlayer.h b/cmds/stagefright/AudioPlayer.h
index 43550ea..608f54b 100644
--- a/cmds/stagefright/AudioPlayer.h
+++ b/cmds/stagefright/AudioPlayer.h
@@ -19,6 +19,7 @@
 #define AUDIO_PLAYER_H_
 
 #include <media/AudioResamplerPublic.h>
+#include <media/AudioTrack.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/MediaBuffer.h>
@@ -26,10 +27,9 @@
 
 namespace android {
 
-class AudioTrack;
 struct AwesomePlayer;
 
-class AudioPlayer {
+class AudioPlayer : AudioTrack::IAudioTrackCallback {
 public:
     enum {
         REACHED_EOS,
@@ -66,6 +66,9 @@
     status_t getPlaybackRate(AudioPlaybackRate *rate /* nonnull */);
 
 private:
+    friend sp<AudioPlayer>;
+    size_t onMoreData(const AudioTrack::Buffer& buffer) override;
+    void onStreamEnd() override;
     sp<MediaSource> mSource;
     sp<AudioTrack> mAudioTrack;
 
@@ -99,9 +102,6 @@
     int64_t mStartPosUs;
     const uint32_t mCreateFlags;
 
-    static void AudioCallback(int event, void *user, void *info);
-    void AudioCallback(int event, void *info);
-
     static size_t AudioSinkCallback(
             MediaPlayerBase::AudioSink *audioSink,
             void *data, size_t size, void *me,
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 4b41ff8..83f8fe9 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -166,9 +166,9 @@
         sp<MediaSource> decoder = SimpleDecodingSource::Create(encoder);
 
         if (playToSpeaker) {
-            AudioPlayer player(NULL);
-            player.setSource(decoder);
-            player.start();
+            sp<AudioPlayer> player = sp<AudioPlayer>::make(nullptr);
+            player->setSource(decoder);
+            player->start();
             sleep(duration);
 
 ALOGI("Line: %d", __LINE__);
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 098c278..5743ad6 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -32,7 +32,6 @@
 #include <media/stagefright/SimpleDecodingSource.h>
 #include <media/MediaPlayerInterface.h>
 
-#include "AudioPlayer.h"
 
 using namespace android;
 
@@ -274,17 +273,6 @@
     const int32_t kNumChannels = 2;
     sp<MediaSource> audioSource = new SineSource(kSampleRate, kNumChannels);
 
-#if 0
-    sp<MediaPlayerBase::AudioSink> audioSink;
-    AudioPlayer *player = new AudioPlayer(audioSink);
-    player->setSource(audioSource);
-    player->start();
-
-    sleep(10);
-
-    player->stop();
-#endif
-
     sp<AMessage> encMeta = new AMessage;
     encMeta->setString("mime",
             0 ? MEDIA_MIMETYPE_AUDIO_AMR_WB : MEDIA_MIMETYPE_AUDIO_AAC);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index c430f05..ec16bc2 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -220,7 +220,7 @@
     }
 
     if (gPlaybackAudio) {
-        AudioPlayer *player = new AudioPlayer(NULL);
+        sp<AudioPlayer> player = sp<AudioPlayer>::make(nullptr);
         player->setSource(rawSource);
         rawSource.clear();
 
@@ -235,9 +235,6 @@
             fprintf(stderr, "unable to start playback err=%d (0x%08x)\n", err, err);
         }
 
-        delete player;
-        player = NULL;
-
         return;
     } else if (gReproduceBug >= 3 && gReproduceBug <= 5) {
         int64_t durationUs;
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index 7bd1568..fd4ef95 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -1,16 +1,19 @@
 {
   "presubmit": [
     {
-      "name": "CtsMediaTestCases",
+      "name": "CtsMediaDrmTestCases",
       "options" : [
         {
           "include-annotation": "android.platform.test.annotations.Presubmit"
         },
         {
-          "include-filter": "android.media.cts.MediaDrmClearkeyTest"
+          "include-filter": "android.mediadrm.cts.MediaDrmClearkeyTest"
         },
         {
-          "include-filter": "android.media.cts.MediaDrmMetricsTest"
+          "include-filter": "android.mediadrm.cts.MediaDrmMetricsTest"
+        },
+        {
+          "include-filter": "android.mediadrm.cts.NativeMediaDrmClearkeyTest"
         }
       ]
     }
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index 6c68532..02ac943 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -93,6 +93,11 @@
     srcs: ["protos/DeviceFiles.proto"],
 }
 
+cc_library {
+    name: "libclearkeyhidl",
+    defaults: ["clearkey_service_defaults"],
+}
+
 cc_binary {
     name: "android.hardware.drm@1.2-service.clearkey",
     defaults: ["clearkey_service_defaults"],
@@ -126,3 +131,37 @@
     init_rc: ["android.hardware.drm@1.4-service-lazy.clearkey.rc"],
     vintf_fragments: ["manifest_android.hardware.drm@1.4-service.clearkey.xml"],
 }
+
+cc_fuzz {
+    name: "clearkeyV1.4_fuzzer",
+    vendor: true,
+    srcs: [
+        "fuzzer/clearkeyV1.4_fuzzer.cpp",
+    ],
+    static_libs: [
+        "libclearkeyhidl",
+        "libclearkeycommon",
+        "libclearkeydevicefiles-protos",
+        "libjsmn",
+        "libprotobuf-cpp-lite",
+        "libutils",
+    ],
+    shared_libs: [
+        "android.hidl.allocator@1.0",
+        "android.hardware.drm@1.0",
+        "android.hardware.drm@1.1",
+        "android.hardware.drm@1.2",
+        "android.hardware.drm@1.3",
+        "android.hardware.drm@1.4",
+        "libcrypto",
+        "libhidlbase",
+        "libhidlmemory",
+        "liblog",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index 6a374f9..32d7723 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -187,7 +187,7 @@
         return Status_V1_2::ERROR_DRM_CANNOT_HANDLE;
     }
 
-    *defaultUrl = "";
+    *defaultUrl = "https://default.url";
     *keyRequestType = KeyRequestType_V1_1::UNKNOWN;
     *request = std::vector<uint8_t>();
 
@@ -221,7 +221,6 @@
         if (requestString.find(kOfflineLicense) != std::string::npos) {
             std::string emptyResponse;
             std::string keySetIdString(keySetId.begin(), keySetId.end());
-            Mutex::Autolock lock(mFileHandleLock);
             if (!mFileHandle.StoreLicense(keySetIdString,
                     DeviceFiles::kLicenseStateReleasing,
                     emptyResponse)) {
@@ -337,7 +336,6 @@
         }
         *keySetId = kKeySetIdPrefix + ByteArrayToHexString(
                 reinterpret_cast<const uint8_t*>(randomData.data()), randomData.size());
-        Mutex::Autolock lock(mFileHandleLock);
         if (mFileHandle.LicenseExists(*keySetId)) {
             // collision, regenerate
             ALOGV("Retry generating KeySetId");
@@ -395,7 +393,6 @@
     if (status == Status::OK) {
         if (isOfflineLicense) {
             if (isRelease) {
-                Mutex::Autolock lock(mFileHandleLock);
                 mFileHandle.DeleteLicense(keySetId);
                 mSessionLibrary->destroySession(session);
             } else {
@@ -404,7 +401,6 @@
                     return Void();
                 }
 
-                Mutex::Autolock lock(mFileHandleLock);
                 bool ok = mFileHandle.StoreLicense(
                         keySetId,
                         DeviceFiles::kLicenseStateActive,
@@ -459,7 +455,6 @@
         DeviceFiles::LicenseState licenseState;
         std::string offlineLicense;
         Status status = Status::OK;
-        Mutex::Autolock lock(mFileHandleLock);
         if (!mFileHandle.RetrieveLicense(std::string(keySetId.begin(), keySetId.end()),
                 &licenseState, &offlineLicense)) {
             ALOGE("Failed to restore offline license");
@@ -769,8 +764,6 @@
 }
 
 Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
-    Mutex::Autolock lock(mFileHandleLock);
-
     std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
     std::vector<KeySetId> keySetIds;
     if (mMockError != Status_V1_2::OK) {
@@ -791,7 +784,6 @@
         return toStatus_1_0(mMockError);
     }
     std::string licenseName(keySetId.begin(), keySetId.end());
-    Mutex::Autolock lock(mFileHandleLock);
     if (mFileHandle.DeleteLicense(licenseName)) {
         return Status::OK;
     }
@@ -800,8 +792,6 @@
 
 Return<void> DrmPlugin::getOfflineLicenseState(const KeySetId& keySetId,
         getOfflineLicenseState_cb _hidl_cb) {
-    Mutex::Autolock lock(mFileHandleLock);
-
     std::string licenseName(keySetId.begin(), keySetId.end());
     DeviceFiles::LicenseState state;
     std::string license;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
index e61db3f..56910be 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
@@ -24,13 +24,11 @@
 }
 
 bool MemoryFileSystem::FileExists(const std::string& fileName) const {
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     return result != mMemoryFileSystem.end();
 }
 
 ssize_t MemoryFileSystem::GetFileSize(const std::string& fileName) const {
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     if (result != mMemoryFileSystem.end()) {
         return static_cast<ssize_t>(result->second.getFileSize());
@@ -42,7 +40,6 @@
 
 std::vector<std::string> MemoryFileSystem::ListFiles() const {
     std::vector<std::string> list;
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     for (const auto& filename : mMemoryFileSystem) {
         list.push_back(filename.first);
     }
@@ -51,7 +48,6 @@
 
 size_t MemoryFileSystem::Read(const std::string& path, std::string* buffer) {
     std::string key = GetFileName(path);
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(key);
     if (result != mMemoryFileSystem.end()) {
         std::string serializedHashFile = result->second.getContent();
@@ -65,7 +61,6 @@
 
 size_t MemoryFileSystem::Write(const std::string& path, const MemoryFile& memoryFile) {
     std::string key = GetFileName(path);
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(key);
     if (result != mMemoryFileSystem.end()) {
         mMemoryFileSystem.erase(key);
@@ -75,7 +70,6 @@
 }
 
 bool MemoryFileSystem::RemoveFile(const std::string& fileName) {
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     auto result = mMemoryFileSystem.find(fileName);
     if (result != mMemoryFileSystem.end()) {
         mMemoryFileSystem.erase(result);
@@ -87,7 +81,6 @@
 }
 
 bool MemoryFileSystem::RemoveAllFiles() {
-    std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
     mMemoryFileSystem.clear();
     return mMemoryFileSystem.empty();
 }
diff --git a/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md
new file mode 100644
index 0000000..cb45460
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md
@@ -0,0 +1,52 @@
+# Fuzzer for android.hardware.drm@1.4-service.clearkey
+
+## Plugin Design Considerations
+The fuzzer plugin for android.hardware.drm@1.4-service.clearkey is designed based on the understanding of the
+source code and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+android.hardware.drm@1.4-service.clearkey supports the following parameters:
+1. Security Level (parameter name: `securityLevel`)
+2. Mime Type (parameter name: `mimeType`)
+3. Key Type (parameter name: `keyType`)
+4. Crypto Mode (parameter name: `cryptoMode`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `securityLevel` | 0.`SecurityLevel::UNKNOWN` 1.`SecurityLevel::SW_SECURE_CRYPTO` 2.`SecurityLevel::SW_SECURE_DECODE` 3.`SecurityLevel::HW_SECURE_CRYPTO`  4.`SecurityLevel::HW_SECURE_DECODE` 5.`SecurityLevel::HW_SECURE_ALL`| Value obtained from FuzzedDataProvider in the range 0 to 5|
+| `mimeType` | 0.`video/mp4` 1.`video/mpeg` 2.`video/x-flv` 3.`video/mj2` 4.`video/3gp2` 5.`video/3gpp` 6.`video/3gpp2` 7.`audio/mp4` 8.`audio/mpeg` 9.`audio/aac` 10.`audio/3gp2` 11.`audio/3gpp` 12.`audio/3gpp2` 13.`audio/webm` 14.`video/webm` 15.`webm` 16.`cenc` 17.`video/unknown` 18.`audio/unknown`| Value obtained from FuzzedDataProvider in the range 0 to 18|
+| `keyType` | 0.`KeyType::OFFLINE` 1.`KeyType::STREAMING` 2.`KeyType::RELEASE` | Value obtained from FuzzedDataProvider in the range 0 to 2|
+| `cryptoMode` | 0.`Mode::UNENCRYPTED` 1.`Mode::AES_CTR` 2.`Mode::AES_CBC_CTS` 3.`Mode::AES_CBC` | Value obtained from FuzzedDataProvider in the range 0 to 3|
+
+This also ensures that the plugin is always deterministic for any given input.
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the clearkeyV1.4_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) clearkeyV1.4_fuzzer
+```
+#### Steps to run
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/${TARGET_ARCH}/clearkeyV1.4_fuzzer/vendor/hw/clearkeyV1.4_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp
new file mode 100644
index 0000000..afe0e6c
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp
@@ -0,0 +1,719 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <include/CreatePluginFactories.h>
+
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <hidlmemory/mapping.h>
+#include <include/ClearKeyDrmProperties.h>
+#include <include/CryptoFactory.h>
+#include <include/CryptoPlugin.h>
+#include <include/DrmPlugin.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+namespace drm = ::android::hardware::drm;
+using namespace std;
+using namespace android;
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hidl::allocator::V1_0::IAllocator;
+using ::android::hidl::memory::V1_0::IMemory;
+using drm::V1_0::BufferType;
+using drm::V1_0::DestinationBuffer;
+using drm::V1_0::EventType;
+using drm::V1_0::ICryptoPlugin;
+using drm::V1_0::IDrmPlugin;
+using drm::V1_0::IDrmPluginListener;
+using drm::V1_0::KeyedVector;
+using drm::V1_0::KeyStatus;
+using drm::V1_0::KeyStatusType;
+using drm::V1_0::KeyType;
+using drm::V1_0::Mode;
+using drm::V1_0::Pattern;
+using drm::V1_0::SecureStop;
+using drm::V1_0::SharedBuffer;
+using drm::V1_0::Status;
+using drm::V1_0::SubSample;
+using drm::V1_1::DrmMetricGroup;
+using drm::V1_1::HdcpLevel;
+using drm::V1_1::SecureStopRelease;
+using drm::V1_1::SecurityLevel;
+using drm::V1_2::KeySetId;
+using drm::V1_2::OfflineLicenseState;
+using drm::V1_4::clearkey::ICryptoFactory;
+using drm::V1_4::clearkey::IDrmFactory;
+using drm::V1_4::clearkey::kAlgorithmsKey;
+using drm::V1_4::clearkey::kClientIdKey;
+using drm::V1_4::clearkey::kDeviceIdKey;
+using drm::V1_4::clearkey::kDrmErrorTestKey;
+using drm::V1_4::clearkey::kListenerTestSupportKey;
+using drm::V1_4::clearkey::kMetricsKey;
+using drm::V1_4::clearkey::kPluginDescriptionKey;
+using drm::V1_4::clearkey::kVendorKey;
+using drm::V1_4::clearkey::kVersionKey;
+
+typedef ::android::hardware::hidl_vec<uint8_t> SessionId;
+typedef ::android::hardware::hidl_vec<uint8_t> SecureStopId;
+
+static const uint8_t kInvalidUUID[] = {0x10, 0x20, 0x30, 0x40, 0x50, 0x60,
+                                       0x70, 0x80, 0x10, 0x20, 0x30, 0x40,
+                                       0x50, 0x60, 0x70, 0x80};
+
+static const uint8_t kClearKeyUUID[] = {0xE2, 0x71, 0x9D, 0x58, 0xA9, 0x85,
+                                        0xB3, 0xC9, 0x78, 0x1A, 0xB0, 0x30,
+                                        0xAF, 0x78, 0xD3, 0x0E};
+
+const SecurityLevel kSecurityLevel[] = {
+    SecurityLevel::UNKNOWN,          SecurityLevel::SW_SECURE_CRYPTO,
+    SecurityLevel::SW_SECURE_DECODE, SecurityLevel::HW_SECURE_CRYPTO,
+    SecurityLevel::HW_SECURE_DECODE, SecurityLevel::HW_SECURE_ALL};
+
+const char *kMimeType[] = {
+    "video/mp4",  "video/mpeg",  "video/x-flv",   "video/mj2",    "video/3gp2",
+    "video/3gpp", "video/3gpp2", "audio/mp4",     "audio/mpeg",   "audio/aac",
+    "audio/3gp2", "audio/3gpp",  "audio/3gpp2",   "audio/webm",   "video/webm",
+    "webm",       "cenc",        "video/unknown", "audio/unknown"};
+
+const char *kCipherAlgorithm[] = {"AES/CBC/NoPadding", ""};
+
+const char *kMacAlgorithm[] = {"HmacSHA256", ""};
+
+const char *kRSAAlgorithm[] = {"RSASSA-PSS-SHA1", ""};
+
+const std::string kProperty[] = {kVendorKey,
+                                 kVersionKey,
+                                 kPluginDescriptionKey,
+                                 kAlgorithmsKey,
+                                 kListenerTestSupportKey,
+                                 kDrmErrorTestKey,
+                                 kDeviceIdKey,
+                                 kClientIdKey,
+                                 kMetricsKey,
+                                 "placeholder"};
+
+const KeyType kKeyType[] = {KeyType::OFFLINE, KeyType::STREAMING,
+                            KeyType::RELEASE};
+
+const Mode kCryptoMode[] = {Mode::UNENCRYPTED, Mode::AES_CTR, Mode::AES_CBC_CTS,
+                            Mode::AES_CBC};
+
+const hidl_vec<uint8_t> validInitData = {
+    // BMFF box header (4 bytes size + 'pssh')
+    0x00, 0x00, 0x00, 0x34, 0x70, 0x73, 0x73, 0x68,
+    // full box header (version = 1 flags = 0)
+    0x01, 0x00, 0x00, 0x00,
+    // system id
+    0x10, 0x77, 0xef, 0xec, 0xc0, 0xb2, 0x4d, 0x02, 0xac, 0xe3, 0x3c, 0x1e,
+    0x52, 0xe2, 0xfb, 0x4b,
+    // number of key ids
+    0x00, 0x00, 0x00, 0x01,
+    // key id
+    0x60, 0x06, 0x1e, 0x01, 0x7e, 0x47, 0x7e, 0x87, 0x7e, 0x57, 0xd0, 0x0d,
+    0x1e, 0xd0, 0x0d, 0x1e,
+    // size of data, must be zero
+    0x00, 0x00, 0x00, 0x00};
+
+const hidl_vec<uint8_t> validKeyResponse = {
+    0x7b, 0x22, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x3a, 0x5b, 0x7b, 0x22,
+    0x6b, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x6f, 0x63, 0x74, 0x22, 0x2c,
+    0x22, 0x6b, 0x69, 0x64, 0x22, 0x3a, 0x22, 0x59, 0x41, 0x59, 0x65,
+    0x41, 0x58, 0x35, 0x48, 0x66, 0x6f, 0x64, 0x2d, 0x56, 0x39, 0x41,
+    0x4e, 0x48, 0x74, 0x41, 0x4e, 0x48, 0x67, 0x22, 0x2c, 0x22, 0x6b,
+    0x22, 0x3a, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x54, 0x65,
+    0x73, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x73, 0x65, 0x36, 0x34,
+    0x67, 0x67, 0x67, 0x22, 0x7d, 0x5d, 0x7d, 0x0a};
+
+const size_t kAESBlockSize = 16;
+const size_t kMaxStringLength = 100;
+const size_t kMaxSubSamples = 10;
+const size_t kMaxNumBytes = 1000;
+const size_t kSegmentIndex = 0;
+
+template <typename T, size_t size>
+T getValueFromArray(FuzzedDataProvider *fdp, const T (&arr)[size]) {
+  return arr[fdp->ConsumeIntegralInRange<int32_t>(0, size - 1)];
+}
+
+class TestDrmPluginListener : public IDrmPluginListener {
+public:
+  TestDrmPluginListener() {}
+  virtual ~TestDrmPluginListener() {}
+
+  virtual Return<void> sendEvent(EventType /*eventType*/,
+                                 const hidl_vec<uint8_t> & /*sessionId*/,
+                                 const hidl_vec<uint8_t> & /*data*/) override {
+    return Return<void>();
+  }
+
+  virtual Return<void>
+  sendExpirationUpdate(const hidl_vec<uint8_t> & /*sessionId*/,
+                       int64_t /*expiryTimeInMS*/) override {
+    return Return<void>();
+  }
+
+  virtual Return<void>
+  sendKeysChange(const hidl_vec<uint8_t> & /*sessionId*/,
+                 const hidl_vec<KeyStatus> & /*keyStatusList*/,
+                 bool /*hasNewUsableKey*/) override {
+    return Return<void>();
+  }
+};
+
+class ClearKeyFuzzer {
+public:
+  ~ClearKeyFuzzer() { deInit(); }
+  bool init();
+  void process(const uint8_t *data, size_t size);
+
+private:
+  void deInit();
+  void invokeDrmPlugin(const uint8_t *data, size_t size);
+  void invokeCryptoPlugin(const uint8_t *data);
+  void invokeDrm(const uint8_t *data, size_t size);
+  void invokeCrypto(const uint8_t *data);
+  void invokeDrmDecryptEncryptAPI(const uint8_t *data, size_t size);
+  bool invokeDrmFactory();
+  bool invokeCryptoFactory();
+  void invokeDrmV1_4API();
+  void invokeDrmSetAlgorithmAPI();
+  void invokeDrmPropertyAPI();
+  void invokeDrmSecureStopAPI();
+  void invokeDrmOfflineLicenseAPI(const uint8_t *data, size_t size);
+  SessionId getSessionId();
+  SecureStopRelease makeSecureRelease(const SecureStop &stop);
+  sp<IDrmFactory> mDrmFactory = nullptr;
+  sp<ICryptoFactory> mCryptoFactory = nullptr;
+  sp<IDrmPlugin> mDrmPlugin = nullptr;
+  sp<drm::V1_1::IDrmPlugin> mDrmPluginV1_1 = nullptr;
+  sp<drm::V1_2::IDrmPlugin> mDrmPluginV1_2 = nullptr;
+  sp<drm::V1_4::IDrmPlugin> mDrmPluginV1_4 = nullptr;
+  sp<drm::V1_4::ICryptoPlugin> mCryptoPluginV1_4 = nullptr;
+  sp<ICryptoPlugin> mCryptoPlugin = nullptr;
+  FuzzedDataProvider *mFDP = nullptr;
+  SessionId mSessionId = {};
+  SessionId mSessionIdV1 = {};
+};
+
+void ClearKeyFuzzer::deInit() {
+  if (mDrmPluginV1_1) {
+    mDrmPluginV1_1->closeSession(mSessionIdV1);
+  }
+  if (mDrmPluginV1_2) {
+    mDrmPluginV1_2->closeSession(mSessionId);
+  }
+  mDrmFactory.clear();
+  mCryptoFactory.clear();
+  mDrmPlugin.clear();
+  mDrmPluginV1_1.clear();
+  mDrmPluginV1_2.clear();
+  mDrmPluginV1_4.clear();
+  mCryptoPlugin.clear();
+  mCryptoPluginV1_4.clear();
+  mSessionId = {};
+  mSessionIdV1 = {};
+}
+
+void ClearKeyFuzzer::invokeDrmV1_4API() {
+  mDrmPluginV1_4->requiresSecureDecoderDefault(
+      getValueFromArray(mFDP, kMimeType));
+  mDrmPluginV1_4->requiresSecureDecoder(
+      getValueFromArray(mFDP, kMimeType),
+      getValueFromArray(mFDP, kSecurityLevel));
+  mDrmPluginV1_4->setPlaybackId(
+      mSessionId, mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str());
+  drm::V1_4::IDrmPlugin::getLogMessages_cb cb =
+      [&]([[maybe_unused]] drm::V1_4::Status status,
+          [[maybe_unused]] hidl_vec<drm::V1_4::LogMessage> logs) {};
+  mDrmPluginV1_4->getLogMessages(cb);
+}
+
+void ClearKeyFuzzer::invokeDrmSetAlgorithmAPI() {
+  const hidl_string cipherAlgo =
+      mFDP->ConsumeBool()
+          ? mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str()
+          : hidl_string(kCipherAlgorithm[mFDP->ConsumeBool()]);
+  mDrmPluginV1_2->setCipherAlgorithm(mSessionId, cipherAlgo);
+
+  const hidl_string macAlgo =
+      mFDP->ConsumeBool()
+          ? mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str()
+          : hidl_string(kMacAlgorithm[mFDP->ConsumeBool()]);
+  mDrmPluginV1_2->setMacAlgorithm(mSessionId, macAlgo);
+}
+
+void ClearKeyFuzzer::invokeDrmPropertyAPI() {
+  mDrmPluginV1_2->setPropertyString(
+      hidl_string(getValueFromArray(mFDP, kProperty)), hidl_string("value"));
+
+  hidl_string stringValue;
+  mDrmPluginV1_2->getPropertyString(
+      getValueFromArray(mFDP, kProperty),
+      [&](Status status, const hidl_string &hValue) {
+        if (status == Status::OK) {
+          stringValue = hValue;
+        }
+      });
+
+  hidl_vec<uint8_t> value = {};
+  mDrmPluginV1_2->setPropertyByteArray(
+      hidl_string(getValueFromArray(mFDP, kProperty)), value);
+
+  hidl_vec<uint8_t> byteValue;
+  mDrmPluginV1_2->getPropertyByteArray(
+      getValueFromArray(mFDP, kProperty),
+      [&](Status status, const hidl_vec<uint8_t> &hValue) {
+        if (status == Status::OK) {
+          byteValue = hValue;
+        }
+      });
+}
+
+SessionId ClearKeyFuzzer::getSessionId() {
+  SessionId emptySessionId = {};
+  return mFDP->ConsumeBool() ? mSessionId : emptySessionId;
+}
+
+void ClearKeyFuzzer::invokeDrmDecryptEncryptAPI(const uint8_t *data,
+                                                size_t size) {
+  uint32_t currSessions, maximumSessions;
+  mDrmPluginV1_2->getNumberOfSessions(
+      [&](Status status, uint32_t hCurrentSessions, uint32_t hMaxSessions) {
+        if (status == Status::OK) {
+          currSessions = hCurrentSessions;
+          maximumSessions = hMaxSessions;
+        }
+      });
+
+  HdcpLevel connected, maximum;
+  mDrmPluginV1_2->getHdcpLevels([&](Status status,
+                                    const HdcpLevel &hConnectedLevel,
+                                    const HdcpLevel &hMaxLevel) {
+    if (status == Status::OK) {
+      connected = hConnectedLevel;
+      maximum = hMaxLevel;
+    }
+  });
+
+  drm::V1_2::HdcpLevel connectedV1_2, maximumV1_2;
+  mDrmPluginV1_2->getHdcpLevels_1_2(
+      [&](drm::V1_2::Status status, const drm::V1_2::HdcpLevel &connectedLevel,
+          const drm::V1_2::HdcpLevel &maxLevel) {
+        if (status == drm::V1_2::Status::OK) {
+          connectedV1_2 = connectedLevel;
+          maximumV1_2 = maxLevel;
+        }
+      });
+
+  SecurityLevel securityLevel;
+  mDrmPluginV1_2->getSecurityLevel(mSessionId,
+                                   [&](Status status, SecurityLevel hLevel) {
+                                     if (status == Status::OK) {
+                                       securityLevel = hLevel;
+                                     }
+                                   });
+
+  hidl_vec<DrmMetricGroup> metrics;
+  mDrmPluginV1_2->getMetrics(
+      [&](Status status, hidl_vec<DrmMetricGroup> hMetricGroups) {
+        if (status == Status::OK) {
+          metrics = hMetricGroups;
+        }
+      });
+
+  hidl_string certificateType;
+  hidl_string certificateAuthority;
+  mDrmPluginV1_2->getProvisionRequest(certificateType, certificateAuthority,
+                                      [&]([[maybe_unused]] Status status,
+                                          const hidl_vec<uint8_t> &,
+                                          const hidl_string &) {});
+
+  mDrmPluginV1_2->getProvisionRequest_1_2(
+      certificateType, certificateAuthority,
+      [&]([[maybe_unused]] drm::V1_2::Status status, const hidl_vec<uint8_t> &,
+          const hidl_string &) {});
+
+  hidl_vec<uint8_t> response;
+  mDrmPluginV1_2->provideProvisionResponse(
+      response, [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &,
+                    const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> initData = {};
+  if (mFDP->ConsumeBool()) {
+    initData = validInitData;
+  } else {
+    initData.setToExternal(const_cast<uint8_t *>(data), kAESBlockSize);
+  }
+  hidl_string mimeType = getValueFromArray(mFDP, kMimeType);
+  KeyType keyType = mFDP->ConsumeBool()
+                        ? static_cast<KeyType>(mFDP->ConsumeIntegral<size_t>())
+                        : getValueFromArray(mFDP, kKeyType);
+  KeyedVector optionalParameters;
+  mDrmPluginV1_2->getKeyRequest_1_2(
+      mSessionId, initData, mimeType, keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_2::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_1::KeyRequestType, const hidl_string &) {});
+  mDrmPluginV1_1->getKeyRequest_1_1(
+      mSessionIdV1, initData, mimeType, keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_0::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_1::KeyRequestType, const hidl_string &) {});
+  hidl_vec<uint8_t> emptyInitData = {};
+  mDrmPlugin->getKeyRequest(
+      mSessionId, mFDP->ConsumeBool() ? initData : emptyInitData, mimeType,
+      keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_0::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_0::KeyRequestType, const hidl_string &) {});
+
+  hidl_vec<uint8_t> keyResponse = {};
+  if (mFDP->ConsumeBool()) {
+    keyResponse = validKeyResponse;
+  } else {
+    keyResponse.setToExternal(const_cast<uint8_t *>(data), size);
+  }
+  hidl_vec<uint8_t> keySetId;
+  hidl_vec<uint8_t> emptyKeyResponse = {};
+  mDrmPluginV1_2->provideKeyResponse(
+      getSessionId(), mFDP->ConsumeBool() ? keyResponse : emptyKeyResponse,
+      [&](Status status, const hidl_vec<uint8_t> &hKeySetId) {
+        if (status == Status::OK) {
+          keySetId = hKeySetId;
+        }
+      });
+
+  mDrmPluginV1_2->restoreKeys(getSessionId(), keySetId);
+
+  mDrmPluginV1_2->queryKeyStatus(
+      getSessionId(),
+      [&]([[maybe_unused]] Status status, KeyedVector /* info */) {});
+
+  hidl_vec<uint8_t> keyId, input, iv;
+  keyId.setToExternal(const_cast<uint8_t *>(data), size);
+  input.setToExternal(const_cast<uint8_t *>(data), size);
+  iv.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->encrypt(
+      getSessionId(), keyId, input, iv,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  mDrmPluginV1_2->decrypt(
+      getSessionId(), keyId, input, iv,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> message;
+  message.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->sign(
+      getSessionId(), keyId, message,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> signature;
+  signature.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->verify(getSessionId(), keyId, message, signature,
+                         [&]([[maybe_unused]] Status status, bool) {});
+
+  hidl_vec<uint8_t> wrappedKey;
+  wrappedKey.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->signRSA(
+      getSessionId(), kRSAAlgorithm[mFDP->ConsumeBool()], message, wrappedKey,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  mDrmPluginV1_2->removeKeys(getSessionId());
+}
+
+/**
+ * Helper function to create a secure release message for
+ * a secure stop. The clearkey secure stop release format
+ * is just a count followed by the secure stop opaque data.
+ */
+SecureStopRelease ClearKeyFuzzer::makeSecureRelease(const SecureStop &stop) {
+  std::vector<uint8_t> stopData = stop.opaqueData;
+  std::vector<uint8_t> buffer;
+  std::string count = "0001";
+
+  auto it = buffer.insert(buffer.begin(), count.begin(), count.end());
+  buffer.insert(it + count.size(), stopData.begin(), stopData.end());
+  SecureStopRelease release = {.opaqueData = hidl_vec<uint8_t>(buffer)};
+  return release;
+}
+
+void ClearKeyFuzzer::invokeDrmSecureStopAPI() {
+  SecureStopId ssid;
+  mDrmPluginV1_2->getSecureStop(
+      ssid, [&]([[maybe_unused]] Status status, const SecureStop &) {});
+
+  mDrmPluginV1_2->getSecureStopIds(
+      [&]([[maybe_unused]] Status status,
+          [[maybe_unused]] const hidl_vec<SecureStopId> &secureStopIds) {});
+
+  SecureStopRelease release;
+  mDrmPluginV1_2->getSecureStops(
+      [&]([[maybe_unused]] Status status, const hidl_vec<SecureStop> &stops) {
+        if (stops.size() > 0) {
+          release = makeSecureRelease(
+              stops[mFDP->ConsumeIntegralInRange<size_t>(0, stops.size() - 1)]);
+        }
+      });
+
+  mDrmPluginV1_2->releaseSecureStops(release);
+
+  mDrmPluginV1_2->removeSecureStop(ssid);
+
+  mDrmPluginV1_2->removeAllSecureStops();
+
+  mDrmPluginV1_2->releaseSecureStop(ssid);
+
+  mDrmPluginV1_2->releaseAllSecureStops();
+}
+
+void ClearKeyFuzzer::invokeDrmOfflineLicenseAPI(const uint8_t *data,
+                                                size_t size) {
+  hidl_vec<KeySetId> keySetIds = {};
+  mDrmPluginV1_2->getOfflineLicenseKeySetIds(
+      [&](Status status, const hidl_vec<KeySetId> &hKeySetIds) {
+        if (status == Status::OK) {
+          keySetIds = hKeySetIds;
+        }
+      });
+
+  OfflineLicenseState licenseState;
+  KeySetId keySetId = {};
+  if (keySetIds.size() > 0) {
+    keySetId = keySetIds[mFDP->ConsumeIntegralInRange<size_t>(
+        0, keySetIds.size() - 1)];
+  } else {
+    keySetId.setToExternal(const_cast<uint8_t *>(data), size);
+  }
+  mDrmPluginV1_2->getOfflineLicenseState(
+      keySetId, [&](Status status, OfflineLicenseState hLicenseState) {
+        if (status == Status::OK) {
+          licenseState = hLicenseState;
+        }
+      });
+
+  mDrmPluginV1_2->removeOfflineLicense(keySetId);
+}
+
+void ClearKeyFuzzer::invokeDrmPlugin(const uint8_t *data, size_t size) {
+  SecurityLevel secLevel =
+      mFDP->ConsumeBool()
+          ? getValueFromArray(mFDP, kSecurityLevel)
+          : static_cast<SecurityLevel>(mFDP->ConsumeIntegral<uint32_t>());
+  mDrmPluginV1_1->openSession_1_1(
+      secLevel, [&]([[maybe_unused]] Status status, const SessionId &id) {
+        mSessionIdV1 = id;
+      });
+  mDrmPluginV1_2->openSession([&]([[maybe_unused]] Status status,
+                                  const SessionId &id) { mSessionId = id; });
+
+  sp<TestDrmPluginListener> listener = new TestDrmPluginListener();
+  mDrmPluginV1_2->setListener(listener);
+  const hidl_vec<KeyStatus> keyStatusList = {
+      {{1}, KeyStatusType::USABLE},
+      {{2}, KeyStatusType::EXPIRED},
+      {{3}, KeyStatusType::OUTPUTNOTALLOWED},
+      {{4}, KeyStatusType::STATUSPENDING},
+      {{5}, KeyStatusType::INTERNALERROR},
+  };
+  mDrmPluginV1_2->sendKeysChange(mSessionId, keyStatusList, true);
+
+  invokeDrmV1_4API();
+  invokeDrmSetAlgorithmAPI();
+  invokeDrmPropertyAPI();
+  invokeDrmDecryptEncryptAPI(data, size);
+  invokeDrmSecureStopAPI();
+  invokeDrmOfflineLicenseAPI(data, size);
+}
+
+void ClearKeyFuzzer::invokeCryptoPlugin(const uint8_t *data) {
+  mCryptoPlugin->requiresSecureDecoderComponent(
+      getValueFromArray(mFDP, kMimeType));
+
+  const uint32_t width = mFDP->ConsumeIntegral<uint32_t>();
+  const uint32_t height = mFDP->ConsumeIntegral<uint32_t>();
+  mCryptoPlugin->notifyResolution(width, height);
+
+  mCryptoPlugin->setMediaDrmSession(mSessionId);
+
+  size_t totalSize = 0;
+  const size_t numSubSamples =
+      mFDP->ConsumeIntegralInRange<size_t>(1, kMaxSubSamples);
+
+  const Pattern pattern = {0, 0};
+  hidl_vec<SubSample> subSamples;
+  subSamples.resize(numSubSamples);
+
+  for (size_t i = 0; i < numSubSamples; ++i) {
+    const uint32_t clearBytes =
+        mFDP->ConsumeIntegralInRange<uint32_t>(0, kMaxNumBytes);
+    const uint32_t encryptedBytes =
+        mFDP->ConsumeIntegralInRange<uint32_t>(0, kMaxNumBytes);
+    subSamples[i].numBytesOfClearData = clearBytes;
+    subSamples[i].numBytesOfEncryptedData = encryptedBytes;
+    totalSize += subSamples[i].numBytesOfClearData;
+    totalSize += subSamples[i].numBytesOfEncryptedData;
+  }
+
+  // The first totalSize bytes of shared memory hold the encrypted
+  // input; the second totalSize bytes hold the decrypted output.
+  size_t memoryBytes = totalSize * 2;
+
+  sp<IAllocator> ashmemAllocator = IAllocator::getService("ashmem");
+  if (!ashmemAllocator.get()) {
+    return;
+  }
+
+  hidl_memory hidlMemory;
+  ashmemAllocator->allocate(memoryBytes, [&]([[maybe_unused]] bool success,
+                                             const hidl_memory &memory) {
+    mCryptoPlugin->setSharedBufferBase(memory, kSegmentIndex);
+    hidlMemory = memory;
+  });
+
+  sp<IMemory> mappedMemory = mapMemory(hidlMemory);
+  if (!mappedMemory.get()) {
+    return;
+  }
+  mCryptoPlugin->setSharedBufferBase(hidlMemory, kSegmentIndex);
+
+  uint32_t srcBufferId =
+      mFDP->ConsumeBool() ? kSegmentIndex : mFDP->ConsumeIntegral<uint32_t>();
+  const SharedBuffer sourceBuffer = {
+      .bufferId = srcBufferId, .offset = 0, .size = totalSize};
+
+  BufferType type = mFDP->ConsumeBool() ? BufferType::SHARED_MEMORY
+                                        : BufferType::NATIVE_HANDLE;
+  uint32_t destBufferId =
+      mFDP->ConsumeBool() ? kSegmentIndex : mFDP->ConsumeIntegral<uint32_t>();
+  const DestinationBuffer destBuffer = {
+      .type = type,
+      {.bufferId = destBufferId, .offset = totalSize, .size = totalSize},
+      .secureMemory = nullptr};
+
+  const uint64_t offset = 0;
+  uint32_t bytesWritten = 0;
+  hidl_array<uint8_t, kAESBlockSize> keyId =
+      hidl_array<uint8_t, kAESBlockSize>(data);
+  hidl_array<uint8_t, kAESBlockSize> iv =
+      hidl_array<uint8_t, kAESBlockSize>(data);
+  Mode mode = getValueFromArray(mFDP, kCryptoMode);
+  mCryptoPlugin->decrypt(
+      mFDP->ConsumeBool(), keyId, iv, mode, pattern, subSamples, sourceBuffer,
+      offset, destBuffer,
+      [&]([[maybe_unused]] Status status, uint32_t count,
+          [[maybe_unused]] string detailedError) { bytesWritten = count; });
+  drm::V1_4::IDrmPlugin::getLogMessages_cb cb =
+      [&]([[maybe_unused]] drm::V1_4::Status status,
+          [[maybe_unused]] hidl_vec<drm::V1_4::LogMessage> logs) {};
+  mCryptoPluginV1_4->getLogMessages(cb);
+}
+
+bool ClearKeyFuzzer::invokeDrmFactory() {
+  hidl_string packageName(
+      mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str());
+  hidl_string mimeType(getValueFromArray(mFDP, kMimeType));
+  SecurityLevel securityLevel =
+      mFDP->ConsumeBool()
+          ? getValueFromArray(mFDP, kSecurityLevel)
+          : static_cast<SecurityLevel>(mFDP->ConsumeIntegral<uint32_t>());
+  const hidl_array<uint8_t, 16> uuid =
+      mFDP->ConsumeBool() ? kClearKeyUUID : kInvalidUUID;
+  mDrmFactory->isCryptoSchemeSupported_1_2(uuid, mimeType, securityLevel);
+  mDrmFactory->createPlugin(
+      uuid, packageName, [&](Status status, const sp<IDrmPlugin> &plugin) {
+        if (status == Status::OK) {
+          mDrmPlugin = plugin.get();
+          mDrmPluginV1_1 = drm::V1_1::IDrmPlugin::castFrom(mDrmPlugin);
+          mDrmPluginV1_2 = drm::V1_2::IDrmPlugin::castFrom(mDrmPlugin);
+          mDrmPluginV1_4 = drm::V1_4::IDrmPlugin::castFrom(mDrmPlugin);
+        }
+      });
+
+  std::vector<hidl_array<uint8_t, 16>> supportedSchemes;
+  mDrmFactory->getSupportedCryptoSchemes(
+      [&](const hidl_vec<hidl_array<uint8_t, 16>> &schemes) {
+        for (const auto &scheme : schemes) {
+          supportedSchemes.push_back(scheme);
+        }
+      });
+
+  if (!(mDrmPlugin && mDrmPluginV1_1 && mDrmPluginV1_2 && mDrmPluginV1_4)) {
+    return false;
+  }
+  return true;
+}
+
+bool ClearKeyFuzzer::invokeCryptoFactory() {
+  const hidl_array<uint8_t, 16> uuid =
+      mFDP->ConsumeBool() ? kClearKeyUUID : kInvalidUUID;
+  mCryptoFactory->createPlugin(
+      uuid, mSessionId, [this](Status status, const sp<ICryptoPlugin> &plugin) {
+        if (status == Status::OK) {
+          mCryptoPlugin = plugin;
+          mCryptoPluginV1_4 = drm::V1_4::ICryptoPlugin::castFrom(mCryptoPlugin);
+        }
+      });
+
+  if (!mCryptoPlugin && !mCryptoPluginV1_4) {
+    return false;
+  }
+  return true;
+}
+
+void ClearKeyFuzzer::invokeDrm(const uint8_t *data, size_t size) {
+  if (!invokeDrmFactory()) {
+    return;
+  }
+  invokeDrmPlugin(data, size);
+}
+
+void ClearKeyFuzzer::invokeCrypto(const uint8_t *data) {
+  if (!invokeCryptoFactory()) {
+    return;
+  }
+  invokeCryptoPlugin(data);
+}
+
+void ClearKeyFuzzer::process(const uint8_t *data, size_t size) {
+  mFDP = new FuzzedDataProvider(data, size);
+  invokeDrm(data, size);
+  invokeCrypto(data);
+  delete mFDP;
+}
+
+bool ClearKeyFuzzer::init() {
+  mCryptoFactory =
+      android::hardware::drm::V1_4::clearkey::createCryptoFactory();
+  mDrmFactory = android::hardware::drm::V1_4::clearkey::createDrmFactory();
+  if (!mDrmFactory && !mCryptoFactory) {
+    return false;
+  }
+  return true;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  if (size < kAESBlockSize) {
+    return 0;
+  }
+  ClearKeyFuzzer clearKeyFuzzer;
+  if (clearKeyFuzzer.init()) {
+    clearKeyFuzzer.process(data, size);
+  }
+  return 0;
+}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index 5d6e3da..cb5c9fe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -432,8 +432,7 @@
         mMockError = Status_V1_2::OK;
     }
 
-    DeviceFiles mFileHandle GUARDED_BY(mFileHandleLock);
-    Mutex mFileHandleLock;
+    DeviceFiles mFileHandle;
     Mutex mSecureStopLock;
 
     CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
index a90d818..1d98860 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
@@ -5,9 +5,7 @@
 #ifndef CLEARKEY_MEMORY_FILE_SYSTEM_H_
 #define CLEARKEY_MEMORY_FILE_SYSTEM_H_
 
-#include <android-base/thread_annotations.h>
 #include <map>
-#include <mutex>
 #include <string>
 
 #include "ClearKeyTypes.h"
@@ -51,12 +49,10 @@
     size_t Write(const std::string& pathName, const MemoryFile& memoryFile);
 
  private:
-    mutable std::mutex mMemoryFileSystemLock;
-
     // License file name is made up of a unique keySetId, therefore,
     // the filename can be used as the key to locate licenses in the
     // memory file system.
-    std::map<std::string, MemoryFile> mMemoryFileSystem GUARDED_BY(mMemoryFileSystemLock);
+    std::map<std::string, MemoryFile> mMemoryFileSystem;
 
     std::string GetFileName(const std::string& path);
 
diff --git a/media/audioserver/Android.bp b/media/audioserver/Android.bp
index e5f9907..0b44700 100644
--- a/media/audioserver/Android.bp
+++ b/media/audioserver/Android.bp
@@ -27,6 +27,7 @@
     shared_libs: [
         "packagemanager_aidl-cpp",
         "libaaudioservice",
+        "libaudioclient",
         "libaudioflinger",
         "libaudiopolicyservice",
         "libaudioprocessing",
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index c5ac7f9..e3db5b4 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -17,11 +17,17 @@
 #define LOG_TAG "audioserver"
 //#define LOG_NDEBUG 0
 
+#include <algorithm>
+
 #include <fcntl.h>
 #include <sys/prctl.h>
 #include <sys/wait.h>
 #include <cutils/properties.h>
 
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <android/media/IAudioFlingerService.h>
 #include <binder/IPCThreadState.h>
 #include <binder/ProcessState.h>
 #include <binder/IServiceManager.h>
@@ -30,7 +36,6 @@
 #include <utils/Log.h>
 
 // from include_dirs
-#include "aaudio/AAudioTesting.h" // aaudio_policy_t, AAUDIO_PROP_MMAP_POLICY, AAUDIO_POLICY_*
 #include "AudioFlinger.h"
 #include "AudioPolicyService.h"
 #include "AAudioService.h"
@@ -39,6 +44,10 @@
 
 using namespace android;
 
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
 int main(int argc __unused, char **argv)
 {
     // TODO: update with refined parameters
@@ -144,10 +153,24 @@
         // AAudioService should only be used in OC-MR1 and later.
         // And only enable the AAudioService if the system MMAP policy explicitly allows it.
         // This prevents a client from misusing AAudioService when it is not supported.
-        aaudio_policy_t mmapPolicy = property_get_int32(AAUDIO_PROP_MMAP_POLICY,
-                                                        AAUDIO_POLICY_NEVER);
-        if (mmapPolicy == AAUDIO_POLICY_AUTO || mmapPolicy == AAUDIO_POLICY_ALWAYS) {
+        // If we cannot get audio flinger here, there must be a serious problem. In that case,
+        // attempting to call audio flinger on a null pointer could make the process crash
+        // and draw attention to the failure.
+        sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+        std::vector<AudioMMapPolicyInfo> policyInfos;
+        status_t status = af->getMmapPolicyInfos(
+                AudioMMapPolicyType::DEFAULT, &policyInfos);
+        // Initialize aaudio service when querying mmap policy succeeds and
+        // any of the policies supports MMAP.
+        if (status == NO_ERROR &&
+            std::any_of(policyInfos.begin(), policyInfos.end(), [](const auto& info) {
+                    return info.mmapPolicy == AudioMMapPolicy::AUTO ||
+                           info.mmapPolicy == AudioMMapPolicy::ALWAYS;
+            })) {
             AAudioService::instantiate();
+        } else {
+            ALOGD("Do not init aaudio service, status %d, policy info size %zu",
+                  status, policyInfos.size());
         }
 
         ProcessState::self()->startThreadPool();
diff --git a/media/bufferpool/1.0/vts/OWNERS b/media/bufferpool/1.0/vts/OWNERS
index 6733e0c..db54d45 100644
--- a/media/bufferpool/1.0/vts/OWNERS
+++ b/media/bufferpool/1.0/vts/OWNERS
@@ -1,6 +1,5 @@
 # Media team
 lajos@google.com
-pawin@google.com
 taklee@google.com
 wonsik@google.com
 
diff --git a/media/bufferpool/2.0/tests/OWNERS b/media/bufferpool/2.0/tests/OWNERS
index 6733e0c..db54d45 100644
--- a/media/bufferpool/2.0/tests/OWNERS
+++ b/media/bufferpool/2.0/tests/OWNERS
@@ -1,6 +1,5 @@
 # Media team
 lajos@google.com
-pawin@google.com
 taklee@google.com
 wonsik@google.com
 
diff --git a/media/codec2/OWNERS b/media/codec2/OWNERS
index 46a9fca..7d40041 100644
--- a/media/codec2/OWNERS
+++ b/media/codec2/OWNERS
@@ -1,5 +1,4 @@
 set noparent
 wonsik@google.com
 lajos@google.com
-pawin@google.com
 taklee@google.com
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 97e1a01..20f2ecf 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -34,7 +34,7 @@
 
 namespace {
 
-sp<GraphicBlockBuffer> AllocateGraphicBuffer(
+sp<GraphicBlockBuffer> AllocateInputGraphicBuffer(
         const std::shared_ptr<C2BlockPool> &pool,
         const sp<AMessage> &format,
         uint32_t pixelFormat,
@@ -46,9 +46,13 @@
         return nullptr;
     }
 
+    int64_t usageValue = 0;
+    (void)format->findInt64("android._C2MemoryUsage", &usageValue);
+    C2MemoryUsage fullUsage{usageValue | usage.expected};
+
     std::shared_ptr<C2GraphicBlock> block;
     c2_status_t err = pool->fetchGraphicBlock(
-            width, height, pixelFormat, usage, &block);
+            width, height, pixelFormat, fullUsage, &block);
     if (err != C2_OK) {
         ALOGD("fetch graphic block failed: %d", err);
         return nullptr;
@@ -939,6 +943,10 @@
         return nullptr;
     }
 
+    int64_t usageValue = 0;
+    (void)format->findInt64("android._C2MemoryUsage", &usageValue);
+    usage = C2MemoryUsage(usage.expected | usageValue);
+
     std::shared_ptr<C2LinearBlock> block;
     c2_status_t err = pool->fetchLinearBlock(capacity, usage, &block);
     if (err != C2_OK || block == nullptr) {
@@ -1083,7 +1091,7 @@
             [pool = mPool, format = mFormat, lbp = mLocalBufferPool, pixelFormat]()
                     -> sp<Codec2Buffer> {
                 C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
-                return AllocateGraphicBuffer(
+                return AllocateInputGraphicBuffer(
                         pool, format, pixelFormat, usage, lbp);
             });
     return std::move(array);
@@ -1094,10 +1102,8 @@
 }
 
 sp<Codec2Buffer> GraphicInputBuffers::createNewBuffer() {
-    int64_t usageValue = 0;
-    (void)mFormat->findInt64("android._C2MemoryUsage", &usageValue);
-    C2MemoryUsage usage{usageValue | C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE};
-    return AllocateGraphicBuffer(
+    C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+    return AllocateInputGraphicBuffer(
             mPool, mFormat, extractPixelFormat(mFormat), usage, mLocalBufferPool);
 }
 
diff --git a/media/codec2/vndk/C2AllocatorIon.cpp b/media/codec2/vndk/C2AllocatorIon.cpp
index 77b265a..7b593ee 100644
--- a/media/codec2/vndk/C2AllocatorIon.cpp
+++ b/media/codec2/vndk/C2AllocatorIon.cpp
@@ -207,6 +207,7 @@
 
         c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
         if (map.addr) {
+            std::lock_guard<std::mutex> guard(mMutexMappings);
             mMappings.push_back(map);
         }
         return err;
@@ -217,22 +218,26 @@
             ALOGD("tried to unmap unmapped buffer");
             return C2_NOT_FOUND;
         }
-        for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
-            if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
-                    size + it->alignmentBytes != it->size) {
-                continue;
+        { // Scope for the lock_guard of mMutexMappings.
+            std::lock_guard<std::mutex> guard(mMutexMappings);
+            for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
+                if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
+                        size + it->alignmentBytes != it->size) {
+                    continue;
+                }
+                int err = munmap(it->addr, it->size);
+                if (err != 0) {
+                    ALOGD("munmap failed");
+                    return c2_map_errno<EINVAL>(errno);
+                }
+                if (fence) {
+                    *fence = C2Fence(); // not using fences
+                }
+                (void)mMappings.erase(it);
+                ALOGV("successfully unmapped: addr=%p size=%zu fd=%d", addr, size,
+                          mHandle.bufferFd());
+                return C2_OK;
             }
-            int err = munmap(it->addr, it->size);
-            if (err != 0) {
-                ALOGD("munmap failed");
-                return c2_map_errno<EINVAL>(errno);
-            }
-            if (fence) {
-                *fence = C2Fence(); // not using fences
-            }
-            (void)mMappings.erase(it);
-            ALOGV("successfully unmapped: addr=%p size=%zu fd=%d", addr, size, mHandle.bufferFd());
-            return C2_OK;
         }
         ALOGD("unmap failed to find specified map");
         return C2_BAD_VALUE;
@@ -241,6 +246,7 @@
     virtual ~Impl() {
         if (!mMappings.empty()) {
             ALOGD("Dangling mappings!");
+            std::lock_guard<std::mutex> guard(mMutexMappings);
             for (const Mapping &map : mMappings) {
                 (void)munmap(map.addr, map.size);
             }
@@ -320,6 +326,7 @@
         size_t size;
     };
     std::list<Mapping> mMappings;
+    std::mutex mMutexMappings;
 };
 
 class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
diff --git a/media/codecs/amrwb/enc/Android.bp b/media/codecs/amrwb/enc/Android.bp
index cc72eb7..d945531 100644
--- a/media/codecs/amrwb/enc/Android.bp
+++ b/media/codecs/amrwb/enc/Android.bp
@@ -139,11 +139,6 @@
         },
     },
 
-    include_dirs: [
-        "frameworks/av/include",
-        "frameworks/av/media/libstagefright/include",
-    ],
-
     local_include_dirs: ["src"],
     export_include_dirs: ["inc"],
 
diff --git a/media/codecs/mp3dec/Android.bp b/media/codecs/mp3dec/Android.bp
index 015b8b6..1ab0511 100644
--- a/media/codecs/mp3dec/Android.bp
+++ b/media/codecs/mp3dec/Android.bp
@@ -108,8 +108,6 @@
         cfi: true,
     },
 
-    include_dirs: ["frameworks/av/media/libstagefright/include"],
-
     header_libs: ["libstagefright_mp3dec_headers"],
     export_header_lib_headers: ["libstagefright_mp3dec_headers"],
 
diff --git a/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp b/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
index c04f7f3..b3ecc77 100644
--- a/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
+++ b/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
@@ -219,6 +219,7 @@
 ; FUNCTION CODE
 ----------------------------------------------------------------------------*/
 
+// deliberately plays near overflow points of int32
 #if __has_attribute(no_sanitize)
 __attribute__((no_sanitize("integer")))
 #endif
diff --git a/media/extractors/Android.bp b/media/extractors/Android.bp
index 7513cb1..66585da 100644
--- a/media/extractors/Android.bp
+++ b/media/extractors/Android.bp
@@ -24,10 +24,6 @@
 cc_defaults {
     name: "extractor-defaults",
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/include",
-    ],
-
     shared_libs: [
         "liblog",
     ],
diff --git a/media/extractors/TEST_MAPPING b/media/extractors/TEST_MAPPING
index 4984b8f..a7c2cfe 100644
--- a/media/extractors/TEST_MAPPING
+++ b/media/extractors/TEST_MAPPING
@@ -1,6 +1,9 @@
 {
   "presubmit": [
 
+        {
+            "name": "CtsMediaTranscodingTestCases"
+        }
     // TODO(b/153661591) enable test once the bug is fixed
     // This tests the extractor path
     // {
diff --git a/media/extractors/aac/Android.bp b/media/extractors/aac/Android.bp
index 7bf3a13..a926422 100644
--- a/media/extractors/aac/Android.bp
+++ b/media/extractors/aac/Android.bp
@@ -21,6 +21,10 @@
 
     srcs: ["AACExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_foundation",
         "libstagefright_metadatautils",
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/include/AACExtractor.h
similarity index 100%
rename from media/extractors/aac/AACExtractor.h
rename to media/extractors/aac/include/AACExtractor.h
diff --git a/media/extractors/amr/Android.bp b/media/extractors/amr/Android.bp
index 712360d..121b7a3 100644
--- a/media/extractors/amr/Android.bp
+++ b/media/extractors/amr/Android.bp
@@ -21,6 +21,10 @@
 
     srcs: ["AMRExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_foundation",
     ],
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/include/AMRExtractor.h
similarity index 100%
rename from media/extractors/amr/AMRExtractor.h
rename to media/extractors/amr/include/AMRExtractor.h
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index f6ce969..834f4ad 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -23,8 +23,8 @@
 
     srcs: ["FLACExtractor.cpp"],
 
-    include_dirs: [
-        "external/flac/include",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/include/FLACExtractor.h
similarity index 100%
rename from media/extractors/flac/FLACExtractor.h
rename to media/extractors/flac/include/FLACExtractor.h
diff --git a/media/extractors/fuzzers/Android.bp b/media/extractors/fuzzers/Android.bp
index 0e54b58..490e195 100644
--- a/media/extractors/fuzzers/Android.bp
+++ b/media/extractors/fuzzers/Android.bp
@@ -80,11 +80,6 @@
     defaults: ["extractor-fuzzer-defaults"],
     host_supported: true,
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mpeg2",
-        "frameworks/av/media/libstagefright",
-    ],
-
     static_libs: [
         "libstagefright_foundation_without_imemory",
         "libstagefright_mpeg2support",
@@ -124,14 +119,6 @@
         "mp4_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mp4",
-    ],
-
-    header_libs: [
-        "libaudioclient_headers",
-    ],
-
     static_libs: [
         "libstagefright_id3",
         "libstagefright_esds",
@@ -150,10 +137,6 @@
         "wav_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/wav",
-    ],
-
     static_libs: [
         "libfifo",
         "libwavextractor",
@@ -173,10 +156,6 @@
         "amr_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/amr",
-    ],
-
     static_libs: [
         "libamrextractor",
     ],
@@ -193,10 +172,6 @@
         "mkv_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mkv",
-    ],
-
     static_libs: [
         "libwebm",
         "libstagefright_flacdec",
@@ -217,9 +192,6 @@
         "ogg_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/ogg",
-    ],
 
     static_libs: [
         "libstagefright_metadatautils",
@@ -265,10 +237,6 @@
         "mp3_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mp3",
-    ],
-
     static_libs: [
         "libfifo",
         "libmp3extractor",
@@ -285,10 +253,6 @@
         "aac_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/aac",
-    ],
-
     static_libs: [
         "libaacextractor",
         "libstagefright_metadatautils",
@@ -304,10 +268,6 @@
         "flac_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/flac",
-    ],
-
     static_libs: [
         "libstagefright_metadatautils",
         "libFLAC",
@@ -329,10 +289,6 @@
         "midi_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/midi",
-    ],
-
     static_libs: [
         "libsonivox",
         "libmedia_midiiowrapper",
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index 08a6fa0..feabf9e 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -23,6 +23,10 @@
 
     srcs: ["MidiExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     header_libs: [
         "libmedia_datasource_headers",
     ],
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/include/MidiExtractor.h
similarity index 100%
rename from media/extractors/midi/MidiExtractor.h
rename to media/extractors/midi/include/MidiExtractor.h
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index 54c5b27..98ce305 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -21,10 +21,8 @@
 
     srcs: ["MatroskaExtractor.cpp"],
 
-    include_dirs: [
-        "external/flac/include",
-        "external/libvpx/libwebm",
-        "frameworks/av/media/libstagefright/flac/dec",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/include/MatroskaExtractor.h
similarity index 100%
rename from media/extractors/mkv/MatroskaExtractor.h
rename to media/extractors/mkv/include/MatroskaExtractor.h
diff --git a/media/extractors/mp3/Android.bp b/media/extractors/mp3/Android.bp
index 75b9b7b..396a13a 100644
--- a/media/extractors/mp3/Android.bp
+++ b/media/extractors/mp3/Android.bp
@@ -16,6 +16,10 @@
             "XINGSeeker.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libutils",
         "libstagefright_id3",
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/include/MP3Extractor.h
similarity index 100%
rename from media/extractors/mp3/MP3Extractor.h
rename to media/extractors/mp3/include/MP3Extractor.h
diff --git a/media/extractors/mp3/MP3Seeker.h b/media/extractors/mp3/include/MP3Seeker.h
similarity index 100%
rename from media/extractors/mp3/MP3Seeker.h
rename to media/extractors/mp3/include/MP3Seeker.h
diff --git a/media/extractors/mp3/VBRISeeker.h b/media/extractors/mp3/include/VBRISeeker.h
similarity index 100%
rename from media/extractors/mp3/VBRISeeker.h
rename to media/extractors/mp3/include/VBRISeeker.h
diff --git a/media/extractors/mp3/XINGSeeker.h b/media/extractors/mp3/include/XINGSeeker.h
similarity index 100%
rename from media/extractors/mp3/XINGSeeker.h
rename to media/extractors/mp3/include/XINGSeeker.h
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index 7fa6bfd..540d75d 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -15,6 +15,15 @@
     ],
 }
 
+cc_library_headers {
+    name: "libmp4extractor_headers",
+    host_supported: true,
+
+    export_include_dirs: [
+        "include",
+    ],
+}
+
 cc_library {
     name: "libmp4extractor",
     defaults: ["extractor-defaults"],
@@ -27,6 +36,10 @@
         "SampleTable.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_esds",
         "libstagefright_foundation",
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/include/AC4Parser.h
similarity index 100%
rename from media/extractors/mp4/AC4Parser.h
rename to media/extractors/mp4/include/AC4Parser.h
diff --git a/media/extractors/mp4/ItemTable.h b/media/extractors/mp4/include/ItemTable.h
similarity index 100%
rename from media/extractors/mp4/ItemTable.h
rename to media/extractors/mp4/include/ItemTable.h
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/include/MPEG4Extractor.h
similarity index 100%
rename from media/extractors/mp4/MPEG4Extractor.h
rename to media/extractors/mp4/include/MPEG4Extractor.h
diff --git a/media/extractors/mp4/SampleIterator.h b/media/extractors/mp4/include/SampleIterator.h
similarity index 100%
rename from media/extractors/mp4/SampleIterator.h
rename to media/extractors/mp4/include/SampleIterator.h
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/include/SampleTable.h
similarity index 100%
rename from media/extractors/mp4/SampleTable.h
rename to media/extractors/mp4/include/SampleTable.h
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 7e6247b..8faecae 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -38,6 +38,10 @@
         "MPEG2TSExtractor.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     shared_libs: [
         "libbase",
         "libcgrouprc#29",
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.cpp b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
index d431b05..afd28ef 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
@@ -20,9 +20,6 @@
 
 #include "MPEG2PSExtractor.h"
 
-#include <AnotherPacketSource.h>
-#include <ESQueue.h>
-
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -33,6 +30,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
+#include <mpeg2ts/ESQueue.h>
 #include <utils/String8.h>
 
 #include <inttypes.h>
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index 2e68809..9a3cd92 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -35,10 +35,9 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 #include <utils/String8.h>
 
-#include <AnotherPacketSource.h>
-
 #include <hidl/HybridInterface.h>
 #include <android/hardware/cas/1.0/ICas.h>
 
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.h b/media/extractors/mpeg2/include/MPEG2PSExtractor.h
similarity index 100%
rename from media/extractors/mpeg2/MPEG2PSExtractor.h
rename to media/extractors/mpeg2/include/MPEG2PSExtractor.h
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/include/MPEG2TSExtractor.h
similarity index 98%
rename from media/extractors/mpeg2/MPEG2TSExtractor.h
rename to media/extractors/mpeg2/include/MPEG2TSExtractor.h
index fd77b08..0e3e484 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.h
+++ b/media/extractors/mpeg2/include/MPEG2TSExtractor.h
@@ -23,12 +23,11 @@
 #include <media/MediaExtractorPluginApi.h>
 #include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
+#include <mpeg2ts/ATSParser.h>
 #include <utils/threads.h>
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
 
-#include <ATSParser.h>
-
 namespace android {
 
 struct AMessage;
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index d7540c4..dc3c25c 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -22,8 +22,8 @@
 
     srcs: ["OggExtractor.cpp"],
 
-    include_dirs: [
-        "external/tremolo",
+    export_include_dirs: [
+        "include",
     ],
 
     header_libs: [
diff --git a/media/extractors/ogg/OggExtractor.h b/media/extractors/ogg/include/OggExtractor.h
similarity index 100%
rename from media/extractors/ogg/OggExtractor.h
rename to media/extractors/ogg/include/OggExtractor.h
diff --git a/media/extractors/tests/Android.bp b/media/extractors/tests/Android.bp
index 23c74f7..3c3bbdc 100644
--- a/media/extractors/tests/Android.bp
+++ b/media/extractors/tests/Android.bp
@@ -79,11 +79,6 @@
         "libbase",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/",
-        "frameworks/av/media/libstagefright/",
-    ],
-
     compile_multilib: "first",
 
     cflags: [
diff --git a/media/extractors/tests/ExtractorUnitTest.cpp b/media/extractors/tests/ExtractorUnitTest.cpp
index 84ec1f2..2bd9c6a 100644
--- a/media/extractors/tests/ExtractorUnitTest.cpp
+++ b/media/extractors/tests/ExtractorUnitTest.cpp
@@ -27,18 +27,18 @@
 #include <media/stagefright/MetaDataUtils.h>
 #include <media/stagefright/foundation/OpusHeader.h>
 
-#include "aac/AACExtractor.h"
-#include "amr/AMRExtractor.h"
-#include "flac/FLACExtractor.h"
-#include "midi/MidiExtractor.h"
-#include "mkv/MatroskaExtractor.h"
-#include "mp3/MP3Extractor.h"
-#include "mp4/MPEG4Extractor.h"
-#include "mp4/SampleTable.h"
-#include "mpeg2/MPEG2PSExtractor.h"
-#include "mpeg2/MPEG2TSExtractor.h"
-#include "ogg/OggExtractor.h"
-#include "wav/WAVExtractor.h"
+#include <AACExtractor.h>
+#include <AMRExtractor.h>
+#include <FLACExtractor.h>
+#include <MidiExtractor.h>
+#include <MatroskaExtractor.h>
+#include <MP3Extractor.h>
+#include <MPEG4Extractor.h>
+#include <SampleTable.h>
+#include <MPEG2PSExtractor.h>
+#include <MPEG2TSExtractor.h>
+#include <OggExtractor.h>
+#include <WAVExtractor.h>
 
 #include "ExtractorUnitTestEnvironment.h"
 
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index 76546b8..cdf587c 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -22,8 +22,8 @@
 
     srcs: ["WAVExtractor.cpp"],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/include",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/include/WAVExtractor.h
similarity index 100%
rename from media/extractors/wav/WAVExtractor.h
rename to media/extractors/wav/include/WAVExtractor.h
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 7daac20..956b3cd 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -40,6 +40,31 @@
     int64_t nanoseconds;
 } Timestamp;
 
+static constexpr int32_t   kWorkloadScaler = 500;
+
+// Linear congruential random number generator.
+static uint32_t s_random16() {
+    static uint32_t seed = 1234;
+    seed = ((seed * 31421) + 6927) & 0x0FFFF;
+    return seed;
+}
+
+/**
+ * The random number generator is good for burning CPU because the compiler cannot
+ * easily optimize away the computation.
+ * @param workload number of times to execute the loop
+ * @return a white noise value between -1.0 and +1.0
+ */
+static float s_burnCPU(int32_t workload) {
+    uint32_t random = 0;
+    for (int32_t i = 0; i < workload; i++) {
+        for (int32_t j = 0; j < 10; j++) {
+            random = random ^ s_random16();
+        }
+    }
+    return ((int32_t) random - 32768) * (1.0f / 32768);
+}
+
 /**
  * Simple wrapper for AAudio that opens an output stream either in callback or blocking write mode.
  */
@@ -268,11 +293,13 @@
     int32_t            timestampCount = 0; // in timestamps
     int32_t            sampleRate = 48000;
     int32_t            prefixToneFrames = 0;
+    double             workload = 0.0;
     bool               sweepSetup = false;
 
     int                scheduler = 0;
     bool               schedulerChecked = false;
     int32_t            hangTimeMSec = 0;
+    int                cpuAffinity = -1;
 
     AAudioSimplePlayer simplePlayer;
     int32_t            callbackCount = 0;
@@ -304,6 +331,14 @@
 
 } SineThreadedData_t;
 
+int setCpuAffinity(int cpuIndex) {
+    cpu_set_t cpu_set;
+    CPU_ZERO(&cpu_set);
+    CPU_SET(cpuIndex, &cpu_set);
+    int err = sched_setaffinity((pid_t) 0, sizeof(cpu_set_t), &cpu_set);
+    return err == 0 ? 0 : -errno;
+}
+
 // Callback function that fills the audio output buffer.
 aaudio_data_callback_result_t SimplePlayerDataCallbackProc(
         AAudioStream *stream,
@@ -319,6 +354,10 @@
     }
     SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
 
+    if (sineData->cpuAffinity >= 0) {
+        setCpuAffinity(sineData->cpuAffinity);
+        sineData->cpuAffinity = -1;
+    }
     // Play an initial high tone so we can tell whether the beginning was truncated.
     if (!sineData->sweepSetup && sineData->framesTotal >= sineData->prefixToneFrames) {
         sineData->setupSineSweeps();
@@ -398,6 +437,8 @@
             return AAUDIO_CALLBACK_RESULT_STOP;
     }
 
+    s_burnCPU((int32_t)(sineData->workload * kWorkloadScaler * numFrames));
+
     sineData->callbackCount++;
     sineData->framesTotal += numFrames;
     return AAUDIO_CALLBACK_RESULT_CONTINUE;
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index cdc987b..400fc7c 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -31,10 +31,10 @@
 #include "AAudioSimplePlayer.h"
 #include "AAudioArgsParser.h"
 
-#define APP_VERSION  "0.1.8"
+#define APP_VERSION  "0.2.1"
 
-constexpr int32_t kDefaultHangTimeMSec = 10;
-
+static constexpr int32_t kDefaultHangTimeMSec = 10;
+static constexpr int32_t kWorkPeriodSeconds = 6;
+
 /**
  * Open stream, play some sine waves, then close the stream.
  *
@@ -44,7 +44,11 @@
 static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser,
                                          int32_t loopCount,
                                          int32_t prefixToneMsec,
-                                         int32_t hangTimeMSec)
+                                         int32_t hangTimeMSec,
+                                         int     cpuAffinity,
+                                         double  lowWorkLoad,
+                                         double  highWorkLoad,
+                                         int32_t workPeriodSeconds)
 {
     SineThreadedData_t myData;
     AAudioSimplePlayer &player = myData.simplePlayer;
@@ -57,6 +61,7 @@
     myData.schedulerChecked = false;
     myData.callbackCount = 0;
     myData.hangTimeMSec = hangTimeMSec; // test AAudioStream_getXRunCount()
+    myData.cpuAffinity = cpuAffinity;
 
     result = player.open(argParser,
                          SimplePlayerDataCallbackProc,
@@ -111,8 +116,8 @@
         }
 
         // Play a sine wave in the background.
-        printf("Sleep for %d seconds while audio plays in a callback thread. %d of %d\n",
-               argParser.getDurationSeconds(), (loopIndex + 1), loopCount);
+        printf("Monitor for %d seconds while audio plays in a callback thread. %d of %d, %d\n",
+               argParser.getDurationSeconds(), (loopIndex + 1), loopCount, workPeriodSeconds);
         startedAtNanos = getNanoseconds(CLOCK_MONOTONIC);
         for (int second = 0; second < durationSeconds; second++) {
             // Sleep a while. Wake up early if there is an error, for example a DISCONNECT.
@@ -123,13 +128,17 @@
             const int32_t framesWritten = (int32_t) AAudioStream_getFramesWritten(player.getStream());
             const int32_t framesRead = (int32_t) AAudioStream_getFramesRead(player.getStream());
             const int32_t xruns = AAudioStream_getXRunCount(player.getStream());
+            myData.workload = ((second % (2 * workPeriodSeconds)) < workPeriodSeconds)
+                    ? lowWorkLoad : highWorkLoad;
             printf(" waker result = %d, at %6d millis"
-                           ", second = %3d, frames written %8d - read %8d = %8d, underruns = %d\n",
+                   ", second = %3d, frames written %8d - read %8d = %8d"
+                   ", work = %5.1f, underruns = %d\n",
                    result, (int) millis,
                    second,
                    framesWritten,
                    framesRead,
                    framesWritten - framesRead,
+                   myData.workload,
                    xruns);
             if (result != AAUDIO_OK) {
                 disconnected = (result == AAUDIO_ERROR_DISCONNECTED);
@@ -220,6 +229,11 @@
     AAudioArgsParser::usage();
     printf("      -l{count} loopCount start/stop, every other one is silent\n");
     printf("      -t{msec}  play a high pitched tone at the beginning\n");
+    printf("      -w{workload}  set base workload, default 0.0\n");
+    printf("      -W{workload}  alternate between this higher workload and base workload\n");
+    printf("      -Z{duration}  number of seconds to spend at each workload, default = %d\n",
+           kWorkPeriodSeconds);
+    printf("      -a{cpu}   set CPU affinity, default none\n");
     printf("      -h{msec}  force periodic underruns by hanging in callback\n");
     printf("                If no value specified then %d used.\n",
             kDefaultHangTimeMSec);
@@ -232,6 +246,10 @@
     int32_t            loopCount = 1;
     int32_t            prefixToneMsec = 0;
     int32_t            hangTimeMSec = 0;
+    int                cpuAffinity = -1;
+    double             lowWorkLoad = 0.0;
+    double             highWorkLoad = -1.0;
+    int32_t            workPeriodSeconds = kWorkPeriodSeconds;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
@@ -247,6 +265,9 @@
             if (arg[0] == '-') {
                 char option = arg[1];
                 switch (option) {
+                    case 'a':
+                        cpuAffinity = atoi(&arg[2]);
+                        break;
                     case 'l':
                         loopCount = atoi(&arg[2]);
                         break;
@@ -258,6 +279,15 @@
                                 ? atoi(&arg[2])
                                 : kDefaultHangTimeMSec;
                         break;
+                    case 'w':
+                        lowWorkLoad = atof(&arg[2]);
+                        break;
+                    case 'W':
+                        highWorkLoad = atof(&arg[2]);
+                        break;
+                    case 'Z':
+                        workPeriodSeconds = atoi(&arg[2]);
+                        break;
                     default:
                         usage();
                         exit(EXIT_FAILURE);
@@ -271,9 +301,21 @@
         }
     }
 
+    if (highWorkLoad > 0) {
+        if (highWorkLoad < lowWorkLoad) {
+            printf("ERROR - -W%f workload lower than -w%f workload", highWorkLoad, lowWorkLoad);
+            return EXIT_FAILURE;
+        }
+    } else {
+        highWorkLoad = lowWorkLoad; // high not specified so use low
+    }
+
     // Keep looping until we can complete the test without disconnecting.
     while((result = testOpenPlayClose(argParser, loopCount,
-            prefixToneMsec, hangTimeMSec))
+            prefixToneMsec, hangTimeMSec,
+            cpuAffinity,
+            lowWorkLoad, highWorkLoad,
+            workPeriodSeconds))
             == AAUDIO_ERROR_DISCONNECTED);
 
     return (result) ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/media/libaaudio/fuzzer/Android.bp b/media/libaaudio/fuzzer/Android.bp
new file mode 100644
index 0000000..e2eec7a
--- /dev/null
+++ b/media/libaaudio/fuzzer/Android.bp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_fuzz {
+    name: "libaaudio_fuzzer",
+    srcs: [
+        "libaaudio_fuzzer.cpp",
+    ],
+    header_libs: [
+        "libaaudio_headers",
+    ],
+    shared_libs: [
+        "libbinder",
+        "libaudiomanager",
+        "libaudiopolicy",
+        "libaudioclient_aidl_conversion",
+    ],
+    static_libs: [
+        "android.media.audio.common.types-V1-cpp",
+        "liblog",
+        "libutils",
+        "libcutils",
+        "libaaudio",
+        "libjsoncpp",
+        "libbase_ndk",
+        "libcgrouprc",
+        "libaudioutils",
+        "libaudioclient",
+        "aaudio-aidl-cpp",
+        "libmedia_helper",
+        "libmediametrics",
+        "libprocessgroup",
+        "av-types-aidl-cpp",
+        "libaaudio_internal",
+        "libcgrouprc_format",
+        "audiopolicy-aidl-cpp",
+        "audioflinger-aidl-cpp",
+        "audiopolicy-types-aidl-cpp",
+        "audioclient-types-aidl-cpp",
+        "shared-file-region-aidl-cpp",
+        "framework-permission-aidl-cpp",
+        "mediametricsservice-aidl-cpp",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/media/libaaudio/fuzzer/README.md b/media/libaaudio/fuzzer/README.md
new file mode 100644
index 0000000..4ba15c5
--- /dev/null
+++ b/media/libaaudio/fuzzer/README.md
@@ -0,0 +1,77 @@
+# Fuzzer for libaaudio
+
+## Plugin Design Considerations
+The fuzzer plugin for `libaaudio` is designed based on an understanding of the
+source code and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+The fuzzer assigns values to the following parameters and passes them on to libaaudio:
+1. Device Id (parameter name: `deviceId`)
+2. Sampling Rate (parameter name: `sampleRate`)
+3. Number of channels (parameter name: `channelCount`)
+4. Audio Travel Direction (parameter name: `direction`)
+5. Audio Format (parameter name: `format`)
+6. Audio Sharing Mode (parameter name: `sharingMode`)
+7. Audio Usage (parameter name: `usage`)
+8. Audio Content type (parameter name: `contentType`)
+9. Audio Input Preset (parameter name: `inputPreset`)
+10. Audio Privacy Sensitivity (parameter name: `privacySensitive`)
+11. Buffer Capacity In Frames (parameter name: `frames`)
+12. Performance Mode (parameter name: `mode`)
+13. Allowed Capture Policy (parameter name: `allowedCapturePolicy`)
+14. Session Id (parameter name: `sessionId`)
+15. Frames per Data Callback (parameter name: `framesPerDataCallback`)
+16. MMap Policy (parameter name: `policy`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `deviceId` | Any value of type `int32_t`  | Value obtained from FuzzedDataProvider |
+| `sampleRate` | Any value of type `int32_t`  | Value obtained from FuzzedDataProvider |
+| `channelCount` |  Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `direction` | 0. `AAUDIO_DIRECTION_OUTPUT` 1. `AAUDIO_DIRECTION_INPUT` | Value obtained from FuzzedDataProvider |
+| `format` | 0. `AAUDIO_FORMAT_INVALID` 1. `AAUDIO_FORMAT_UNSPECIFIED` 2. `AAUDIO_FORMAT_PCM_I16` 3. `AAUDIO_FORMAT_PCM_FLOAT` | Value obtained from FuzzedDataProvider |
+| `sharingMode` | 0. `AAUDIO_SHARING_MODE_EXCLUSIVE` 1. `AAUDIO_SHARING_MODE_SHARED` | Value obtained from FuzzedDataProvider |
+| `usage` | 0. `AAUDIO_USAGE_MEDIA` 1. `AAUDIO_USAGE_VOICE_COMMUNICATION` 2. `AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING` 3. `AAUDIO_USAGE_ALARM` 4. `AAUDIO_USAGE_NOTIFICATION` 5. `AAUDIO_USAGE_NOTIFICATION_RINGTONE` 6. `AAUDIO_USAGE_NOTIFICATION_EVENT` 7. `AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY` 8. `AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE` 9. `AAUDIO_USAGE_ASSISTANCE_SONIFICATION` 10. `AAUDIO_USAGE_GAME` 11. `AAUDIO_USAGE_ASSISTANT` 12. `AAUDIO_SYSTEM_USAGE_EMERGENCY` 13. `AAUDIO_SYSTEM_USAGE_SAFETY` 14. `AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS` 15. `AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT` | Value obtained from FuzzedDataProvider |
+| `contentType` | 0. `AAUDIO_CONTENT_TYPE_SPEECH` 1. `AAUDIO_CONTENT_TYPE_MUSIC` 2. `AAUDIO_CONTENT_TYPE_MOVIE` 3. `AAUDIO_CONTENT_TYPE_SONIFICATION` | Value obtained from FuzzedDataProvider |
+| `inputPreset` | 0. `AAUDIO_INPUT_PRESET_GENERIC` 1. `AAUDIO_INPUT_PRESET_CAMCORDER` 2. `AAUDIO_INPUT_PRESET_VOICE_RECOGNITION` 3. `AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION` 4. `AAUDIO_INPUT_PRESET_UNPROCESSED` 5. `AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE` | Value obtained from FuzzedDataProvider |
+| `privacySensitive` | 0. `true` 1. `false` | Value obtained from FuzzedDataProvider |
+| `frames` | Any value of type `int32_t`  | Value obtained from FuzzedDataProvider |
+| `mode` | 0. `AAUDIO_PERFORMANCE_MODE_NONE` 1. `AAUDIO_PERFORMANCE_MODE_POWER_SAVING` 2. `AAUDIO_PERFORMANCE_MODE_LOW_LATENCY` | Value obtained from FuzzedDataProvider |
+| `allowedCapturePolicy` | 0. `AAUDIO_ALLOW_CAPTURE_BY_ALL` 1. `AAUDIO_ALLOW_CAPTURE_BY_SYSTEM` 2. `AAUDIO_ALLOW_CAPTURE_BY_NONE` | Value obtained from FuzzedDataProvider |
+| `sessionId` | 0. `AAUDIO_SESSION_ID_NONE` 1. `AAUDIO_SESSION_ID_ALLOCATE` | Value obtained from FuzzedDataProvider |
+| `framesPerDataCallback` | Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `policy` | 0. `AAUDIO_POLICY_NEVER` 1. `AAUDIO_POLICY_AUTO` 2. `AAUDIO_POLICY_ALWAYS` | Value obtained from FuzzedDataProvider |
+
+This also ensures that the plugin is always deterministic for any given input.
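+
+Below is a minimal sketch of the selection pattern used in
+`libaaudio_fuzzer.cpp` (the `pickFormat` helper is only illustrative and not
+part of the fuzzer): each parameter is either a known-valid value or an
+arbitrary integer, both derived from the fuzzer input via `FuzzedDataProvider`.
+```
+#include <aaudio/AAudio.h>
+#include <fuzzer/FuzzedDataProvider.h>
+
+aaudio_format_t pickFormat(FuzzedDataProvider &fdp) {
+  constexpr aaudio_format_t kFormats[] = {AAUDIO_FORMAT_UNSPECIFIED,
+                                          AAUDIO_FORMAT_PCM_I16,
+                                          AAUDIO_FORMAT_PCM_FLOAT};
+  // Pick either a valid format or a fully random int32_t from the input bytes.
+  return fdp.PickValueInArray(
+      {fdp.PickValueInArray(kFormats), fdp.ConsumeIntegral<int32_t>()});
+}
+```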
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.), doesn't `exit()` on any input, and thereby increases the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the libaaudio_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) libaaudio_fuzzer
+```
+### Steps to run
+
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/libaaudio_fuzzer/libaaudio_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp b/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp
new file mode 100644
index 0000000..1167bb0
--- /dev/null
+++ b/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "aaudio/AAudio.h"
+#include "aaudio/AAudioTesting.h"
+#include <fuzzer/FuzzedDataProvider.h>
+
+constexpr int32_t kRandomStringLength = 256;
+
+constexpr int64_t kNanosPerMillisecond = 1000 * 1000;
+
+constexpr aaudio_direction_t kDirections[] = {
+    AAUDIO_DIRECTION_OUTPUT, AAUDIO_DIRECTION_INPUT, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_performance_mode_t kPerformanceModes[] = {
+    AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
+    AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_format_t kFormats[] = {
+    AAUDIO_FORMAT_INVALID,        AAUDIO_FORMAT_UNSPECIFIED,
+    AAUDIO_FORMAT_PCM_I16,        AAUDIO_FORMAT_PCM_FLOAT,
+    AAUDIO_FORMAT_PCM_I24_PACKED, AAUDIO_FORMAT_PCM_I32};
+
+constexpr aaudio_sharing_mode_t kSharingModes[] = {
+    AAUDIO_SHARING_MODE_EXCLUSIVE, AAUDIO_SHARING_MODE_SHARED};
+
+constexpr int32_t kSampleRates[] = {AAUDIO_UNSPECIFIED,
+                                    8000,
+                                    11025,
+                                    16000,
+                                    22050,
+                                    32000,
+                                    44100,
+                                    48000,
+                                    88200,
+                                    96000};
+
+constexpr aaudio_usage_t kUsages[] = {
+    AAUDIO_USAGE_MEDIA,
+    AAUDIO_USAGE_VOICE_COMMUNICATION,
+    AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+    AAUDIO_USAGE_ALARM,
+    AAUDIO_USAGE_NOTIFICATION,
+    AAUDIO_USAGE_NOTIFICATION_RINGTONE,
+    AAUDIO_USAGE_NOTIFICATION_EVENT,
+    AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+    AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+    AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
+    AAUDIO_USAGE_GAME,
+    AAUDIO_USAGE_ASSISTANT,
+    AAUDIO_SYSTEM_USAGE_EMERGENCY,
+    AAUDIO_SYSTEM_USAGE_SAFETY,
+    AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS,
+    AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT,
+    AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_content_type_t kContentTypes[] = {
+    AAUDIO_CONTENT_TYPE_SPEECH, AAUDIO_CONTENT_TYPE_MUSIC,
+    AAUDIO_CONTENT_TYPE_MOVIE, AAUDIO_CONTENT_TYPE_SONIFICATION,
+    AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_input_preset_t kInputPresets[] = {
+    AAUDIO_INPUT_PRESET_GENERIC,
+    AAUDIO_INPUT_PRESET_CAMCORDER,
+    AAUDIO_INPUT_PRESET_VOICE_RECOGNITION,
+    AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION,
+    AAUDIO_INPUT_PRESET_UNPROCESSED,
+    AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE,
+    AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_allowed_capture_policy_t kAllowedCapturePolicies[] = {
+    AAUDIO_ALLOW_CAPTURE_BY_ALL, AAUDIO_ALLOW_CAPTURE_BY_SYSTEM,
+    AAUDIO_ALLOW_CAPTURE_BY_NONE, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_session_id_t kSessionIds[] = {
+    AAUDIO_SESSION_ID_NONE, AAUDIO_SESSION_ID_ALLOCATE, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_policy_t kPolicies[] = {
+    AAUDIO_POLICY_NEVER, AAUDIO_POLICY_AUTO, AAUDIO_POLICY_ALWAYS,
+    AAUDIO_UNSPECIFIED};
+
+class LibAaudioFuzzer {
+public:
+  ~LibAaudioFuzzer() { deInit(); }
+  bool init();
+  void process(const uint8_t *data, size_t size);
+  void deInit();
+
+private:
+  AAudioStreamBuilder *mAaudioBuilder = nullptr;
+  AAudioStream *mAaudioStream = nullptr;
+};
+
+bool LibAaudioFuzzer::init() {
+  aaudio_result_t result = AAudio_createStreamBuilder(&mAaudioBuilder);
+  if ((result != AAUDIO_OK) || (!mAaudioBuilder)) {
+    return false;
+  }
+  return true;
+}
+
+void LibAaudioFuzzer::process(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp(data, size);
+  aaudio_performance_mode_t mode =
+      fdp.PickValueInArray({fdp.PickValueInArray(kPerformanceModes),
+                            fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setPerformanceMode(mAaudioBuilder, mode);
+
+  int32_t deviceId = fdp.PickValueInArray(
+      {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setDeviceId(mAaudioBuilder, deviceId);
+
+  std::string packageName = fdp.PickValueInArray<std::string>(
+      {"android.nativemedia.aaudio", "android.app.appops.cts",
+       fdp.ConsumeRandomLengthString(kRandomStringLength)});
+  AAudioStreamBuilder_setPackageName(mAaudioBuilder, packageName.c_str());
+
+  std::string attributionTag =
+      fdp.ConsumeRandomLengthString(kRandomStringLength);
+  AAudioStreamBuilder_setAttributionTag(mAaudioBuilder, attributionTag.c_str());
+
+  int32_t sampleRate = fdp.PickValueInArray(kSampleRates);
+  AAudioStreamBuilder_setSampleRate(mAaudioBuilder, sampleRate);
+
+  int32_t channelCount = fdp.PickValueInArray(
+      {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setChannelCount(mAaudioBuilder, channelCount);
+
+  aaudio_direction_t direction = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kDirections), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setDirection(mAaudioBuilder, direction);
+
+  aaudio_format_t format = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kFormats), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setFormat(mAaudioBuilder, format);
+
+  aaudio_sharing_mode_t sharingMode = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kSharingModes), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setSharingMode(mAaudioBuilder, sharingMode);
+
+  aaudio_usage_t usage = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kUsages), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setUsage(mAaudioBuilder, usage);
+
+  aaudio_content_type_t contentType = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kContentTypes), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setContentType(mAaudioBuilder, contentType);
+
+  aaudio_input_preset_t inputPreset = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kInputPresets), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setInputPreset(mAaudioBuilder, inputPreset);
+
+  bool privacySensitive = fdp.ConsumeBool();
+  AAudioStreamBuilder_setPrivacySensitive(mAaudioBuilder, privacySensitive);
+
+  int32_t frames = fdp.PickValueInArray(
+      {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setBufferCapacityInFrames(mAaudioBuilder, frames);
+
+  aaudio_allowed_capture_policy_t allowedCapturePolicy =
+      fdp.PickValueInArray({fdp.PickValueInArray(kAllowedCapturePolicies),
+                            fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setAllowedCapturePolicy(mAaudioBuilder,
+                                              allowedCapturePolicy);
+
+  aaudio_session_id_t sessionId = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kSessionIds), fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setSessionId(mAaudioBuilder, sessionId);
+
+  AAudioStreamBuilder_setDataCallback(mAaudioBuilder, nullptr, nullptr);
+  AAudioStreamBuilder_setErrorCallback(mAaudioBuilder, nullptr, nullptr);
+
+  int32_t framesPerDataCallback = fdp.PickValueInArray(
+      {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+  AAudioStreamBuilder_setFramesPerDataCallback(mAaudioBuilder,
+                                               framesPerDataCallback);
+
+  aaudio_policy_t policy = fdp.PickValueInArray(
+      {fdp.PickValueInArray(kPolicies), fdp.ConsumeIntegral<int32_t>()});
+  AAudio_setMMapPolicy(policy);
+  (void)AAudio_getMMapPolicy();
+
+  aaudio_result_t result =
+      AAudioStreamBuilder_openStream(mAaudioBuilder, &mAaudioStream);
+  if ((result != AAUDIO_OK) || (!mAaudioStream)) {
+    return;
+  }
+
+  int32_t framesPerBurst = AAudioStream_getFramesPerBurst(mAaudioStream);
+  uint8_t numberOfBursts = fdp.ConsumeIntegral<uint8_t>();
+  int32_t maxInputFrames = numberOfBursts * framesPerBurst;
+  int32_t requestedBufferSize =
+      fdp.ConsumeIntegral<uint16_t>() * framesPerBurst;
+  AAudioStream_setBufferSizeInFrames(mAaudioStream, requestedBufferSize);
+
+  int64_t position = 0, nanoseconds = 0;
+  AAudioStream_getTimestamp(mAaudioStream, CLOCK_MONOTONIC, &position,
+                            &nanoseconds);
+
+  AAudioStream_requestStart(mAaudioStream);
+
+  aaudio_format_t actualFormat = AAudioStream_getFormat(mAaudioStream);
+  int32_t actualChannelCount = AAudioStream_getChannelCount(mAaudioStream);
+
+  int32_t count = fdp.ConsumeIntegral<int32_t>();
+  direction = AAudioStream_getDirection(mAaudioStream);
+  framesPerDataCallback = AAudioStream_getFramesPerDataCallback(mAaudioStream);
+
+  if (actualFormat == AAUDIO_FORMAT_PCM_I16) {
+    std::vector<int16_t> inputShortData(maxInputFrames * actualChannelCount,
+                                        0x0);
+    if (direction == AAUDIO_DIRECTION_INPUT) {
+      AAudioStream_read(mAaudioStream, inputShortData.data(),
+                        framesPerDataCallback, count * kNanosPerMillisecond);
+    } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
+      AAudioStream_write(mAaudioStream, inputShortData.data(),
+                         framesPerDataCallback, count * kNanosPerMillisecond);
+    }
+  } else if (actualFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+    std::vector<float> inputFloatData(maxInputFrames * actualChannelCount, 0x0);
+    if (direction == AAUDIO_DIRECTION_INPUT) {
+      AAudioStream_read(mAaudioStream, inputFloatData.data(),
+                        framesPerDataCallback, count * kNanosPerMillisecond);
+    } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
+      AAudioStream_write(mAaudioStream, inputFloatData.data(),
+                         framesPerDataCallback, count * kNanosPerMillisecond);
+    }
+  }
+
+  aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+  AAudioStream_waitForStateChange(mAaudioStream, AAUDIO_STREAM_STATE_UNKNOWN,
+                                  &state, count * kNanosPerMillisecond);
+  (void)AAudio_convertStreamStateToText(state);
+
+  (void)AAudioStream_getUsage(mAaudioStream);
+  (void)AAudioStream_getSampleRate(mAaudioStream);
+  (void)AAudioStream_getState(mAaudioStream);
+  (void)AAudioStream_getSamplesPerFrame(mAaudioStream);
+  (void)AAudioStream_getContentType(mAaudioStream);
+  (void)AAudioStream_getInputPreset(mAaudioStream);
+  (void)AAudioStream_isPrivacySensitive(mAaudioStream);
+  (void)AAudioStream_getAllowedCapturePolicy(mAaudioStream);
+  (void)AAudioStream_getPerformanceMode(mAaudioStream);
+  (void)AAudioStream_getDeviceId(mAaudioStream);
+  (void)AAudioStream_getSharingMode(mAaudioStream);
+  (void)AAudioStream_getSessionId(mAaudioStream);
+  (void)AAudioStream_getFramesRead(mAaudioStream);
+  (void)AAudioStream_getFramesWritten(mAaudioStream);
+  (void)AAudioStream_getXRunCount(mAaudioStream);
+  (void)AAudioStream_getBufferCapacityInFrames(mAaudioStream);
+  (void)AAudioStream_getBufferSizeInFrames(mAaudioStream);
+  (void)AAudioStream_isMMapUsed(mAaudioStream);
+
+  AAudioStream_requestPause(mAaudioStream);
+  AAudioStream_requestFlush(mAaudioStream);
+  AAudioStream_release(mAaudioStream);
+  AAudioStream_requestStop(mAaudioStream);
+}
+
+void LibAaudioFuzzer::deInit() {
+  if (mAaudioBuilder) {
+    AAudioStreamBuilder_delete(mAaudioBuilder);
+  }
+  if (mAaudioStream) {
+    AAudioStream_close(mAaudioStream);
+  }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  LibAaudioFuzzer libAaudioFuzzer;
+  if (libAaudioFuzzer.init()) {
+    libAaudioFuzzer.process(data, size);
+  }
+  return 0;
+}
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 212a787..efa9941 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -20,7 +20,7 @@
  */
 
 /**
- * @file AAudio.h
+ * @file aaudio/AAudio.h
  */
 
 /**
@@ -798,8 +798,11 @@
  * This is usually {@code Context#getPackageName()}.
  *
  * The default, if you do not call this function, is a random package in the calling uid.
- * The vast majority of apps have only one package per calling UID. If the package
- * name does not match the calling UID, then requests will be rejected.
+ * The vast majority of apps have only one package per calling UID.
+ * If an invalid package name is set, input streams may not be given permission to
+ * record when started.
+ *
+ * The package name is usually the applicationId in your app's build.gradle file.
  *
  * Available since API level 31.
  *
diff --git a/media/libaaudio/include/aaudio/AAudioTesting.h b/media/libaaudio/include/aaudio/AAudioTesting.h
index 02ec411..0f2d7a2 100644
--- a/media/libaaudio/include/aaudio/AAudioTesting.h
+++ b/media/libaaudio/include/aaudio/AAudioTesting.h
@@ -49,6 +49,12 @@
 };
 typedef int32_t aaudio_policy_t;
 
+// Internal error codes. Only used by the framework.
+enum {
+    AAUDIO_INTERNAL_ERROR_BASE = -1000,
+    AAUDIO_ERROR_STANDBY,
+};
+
 /**
  * Control whether AAudioStreamBuilder_openStream() will use the new MMAP data path
  * or the older "Legacy" data path.
diff --git a/media/libaaudio/scripts/measure_device_power.py b/media/libaaudio/scripts/measure_device_power.py
new file mode 100755
index 0000000..9603f88
--- /dev/null
+++ b/media/libaaudio/scripts/measure_device_power.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python3
+"""
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+"""
+
+'''
+Measure CPU-related power on Pixel 6 or later devices using ODPM,
+the On Device Power Measurement tool.
+Generate a CSV report suitable for pasting into a spreadsheet.
+'''
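+#
+# Typical invocations (illustrative; the flags are defined in main() below):
+#   ./measure_device_power.py -s 10                # measure total energy over 10 seconds
+#   ./measure_device_power.py energy_commands.txt  # measure each command listed in the file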
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+import time
+
+# defaults
+PRE_DELAY_SECONDS = 0.5 # time to sleep before command to avoid adb unroot error
+DEFAULT_NUM_ITERATIONS = 5
+DEFAULT_FILE_NAME = 'energy_commands.txt'
+
+'''
+Default rail assignments
+philburk-macbookpro3:expt philburk$ adb shell cat /sys/bus/iio/devices/iio\:device0/energy_value
+t=349894
+CH0(T=349894)[S10M_VDD_TPU], 5578756
+CH1(T=349894)[VSYS_PWR_MODEM], 29110940
+CH2(T=349894)[VSYS_PWR_RFFE], 3166046
+CH3(T=349894)[S2M_VDD_CPUCL2], 30203502
+CH4(T=349894)[S3M_VDD_CPUCL1], 23377533
+CH5(T=349894)[S4M_VDD_CPUCL0], 46356942
+CH6(T=349894)[S5M_VDD_INT], 10771876
+CH7(T=349894)[S1M_VDD_MIF], 21091363
+philburk-macbookpro3:expt philburk$ adb shell cat /sys/bus/iio/devices/iio\:device1/energy_value
+t=359458
+CH0(T=359458)[VSYS_PWR_WLAN_BT], 45993209
+CH1(T=359458)[L2S_VDD_AOC_RET], 2822928
+CH2(T=359458)[S9S_VDD_AOC], 6923706
+CH3(T=359458)[S5S_VDDQ_MEM], 4658202
+CH4(T=359458)[S10S_VDD2L], 5506273
+CH5(T=359458)[S4S_VDD2H_MEM], 14254574
+CH6(T=359458)[S2S_VDD_G3D], 5315420
+CH7(T=359458)[VSYS_PWR_DISPLAY], 81221665
+'''
+
+'''
+LDO2M(L2M_ALIVE):DDR  -> DRAM Array Core Power
+BUCK4S(S4S_VDD2H_MEM):DDR -> Normal operation data and control path logic circuits
+BUCK5S(S5S_VDDQ_MEM):DDR -> LPDDR I/O interface
+BUCK10S(S10S_VDD2L):DDR  -> DVFSC (1600Mbps or lower) operation data and control path logic circuits
+BUCK1M (S1M_VDD_MIF):  SoC side Memory InterFace and Controller
+'''
+
+# Map between rail name and human readable name.
+ENERGY_DICTIONARY = { \
+        'S4M_VDD_CPUCL0': 'CPU0', \
+        'S3M_VDD_CPUCL1': 'CPU1', \
+        'S2M_VDD_CPUCL2': 'CPU2', \
+        'S1M_VDD_MIF': 'MIF', \
+        'L2M_ALIVE': 'DDRAC', \
+        'S4S_VDD2H_MEM': 'DDRNO', \
+        'S10S_VDD2L': 'DDR16', \
+        'S5S_VDDQ_MEM': 'DDRIO', \
+        'VSYS_PWR_DISPLAY': 'SCREEN'}
+
+SORTED_ENERGY_LIST = sorted(ENERGY_DICTIONARY, key=ENERGY_DICTIONARY.get)
+
+# Sometimes "adb unroot" returns 1!
+# So try several times.
+# @return 0 on success
+def adbUnroot():
+    returnCode = 1
+    count = 0
+    limit = 5
+    while count < limit and returnCode != 0:
+        print(('Try to adb unroot {} of {}'.format(count, limit)))
+        subprocess.call(["adb", "wait-for-device"])
+        time.sleep(PRE_DELAY_SECONDS)
+        returnCode = subprocess.call(["adb", "unroot"])
+        print(('returnCode = {}'.format(returnCode)))
+        count += 1
+    return returnCode
+
+# @param commandString String containing shell command
+# @return The captured stdout of the command, or the return code for "adb unroot"
+def runCommand(commandString):
+    print(commandString)
+    if commandString == "adb unroot":
+        result = adbUnroot()
+    else:
+        commandArray = commandString.split(' ')
+        result = subprocess.run(commandArray, check=True, capture_output=True).stdout
+    return result
+
+# @param commandString String containing ADB command
+# @return The captured stdout of the command, or the return code for "unroot"
+def adbCommand(commandString):
+    if commandString == "unroot":
+        result = adbUnroot()
+    else:
+        print(("adb " + commandString))
+        commandArray = ["adb"] + commandString.split(' ')
+        subprocess.call(["adb", "wait-for-device"])
+        result = subprocess.run(commandArray, check=True, capture_output=True).stdout
+    return result
+
+# Parse a line that looks like "CH3(T=10697635)[S2M_VDD_CPUCL2], 116655335"
+# Return the tag (e.g. "S2M_VDD_CPUCL2") and its value as a (tag, value) tuple;
+# the caller stores the value in the report dictionary.
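+# For example, parseEnergyValue('CH3(T=10697635)[S2M_VDD_CPUCL2], 116655335')
+# returns ('S2M_VDD_CPUCL2', '116655335').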
+def parseEnergyValue(string):
+    return tuple(re.split(r'\[|\], +', string)[1:])
+
+# Read accumulated energy into a dictionary.
+def measureEnergyForDevice(deviceIndex, report):
+    # print("measureEnergyForDevice " + str(deviceIndex))
+    tableBytes = adbCommand( \
+            'shell cat /sys/bus/iio/devices/iio\:device{}/energy_value'\
+            .format(deviceIndex))
+    table = tableBytes.decode("utf-8")
+    # print(table)
+    for count, line in enumerate(table.splitlines()):
+        if count > 0:
+            tagEnergy = parseEnergyValue(line)
+            report[tagEnergy[0]] = int(tagEnergy[1].strip())
+    # print(report)
+
+def measureEnergyOnce():
+    adbCommand("root")
+    report = {}
+    measureEnergyForDevice(0, report)
+    measureEnergyForDevice(1, report)
+    adbUnroot()
+    return report
+
+# Subtract numeric values for matching keys.
+def subtractReports(A, B):
+    return {x: A[x] - B[x] for x in A if x in B}
+
+# Add numeric values for matching keys.
+def addReports(A, B):
+    return {x: A[x] + B[x] for x in A if x in B}
+
+# Divide numeric values by divisor.
+# @return Modified copy of report.
+def divideReport(report, divisor):
+    return {key: val / divisor for key, val in list(report.items())}
+
+# Generate a dictionary that is the difference between two measurements over time.
+def measureEnergyOverTime(duration):
+    report1 = measureEnergyOnce()
+    print(("Measure energy for " + str(duration) + " seconds."))
+    time.sleep(duration)
+    report2 = measureEnergyOnce()
+    return subtractReports(report2, report1)
+
+# Generate a CSV string containing the human readable headers.
+def formatEnergyHeader():
+    header = ""
+    for tag in SORTED_ENERGY_LIST:
+        header += ENERGY_DICTIONARY[tag] + ", "
+    return header
+
+# Generate a CSV string containing the numeric values.
+def formatEnergyData(report):
+    data = ""
+    for tag in SORTED_ENERGY_LIST:
+        if tag in list(report.keys()):
+            data += str(report[tag]) + ", "
+        else:
+            data += "-1,"
+    return data
+
+def printEnergyReport(report):
+    s = "\n"
+    s += "Values are in microWattSeconds\n"
+    s += "Report below is CSV format for pasting into a spreadsheet:\n"
+    s += formatEnergyHeader() + "\n"
+    s += formatEnergyData(report) + "\n"
+    print(s)
+
+# Generate a dictionary that is the difference between two measurements
+# before and after executing the command.
+def measureEnergyForCommand(command):
+    report1 = measureEnergyOnce()
+    print(("Measure energy for:  " + command))
+    result = runCommand(command)
+    report2 = measureEnergyOnce()
+    # print(result)
+    return subtractReports(report2, report1)
+
+# Average the results of several measurements for one command.
+def averageEnergyForCommand(command, count):
+    print("=================== #0\n")
+    sumReport = measureEnergyForCommand(command)
+    for i in range(1, count):
+        print(("=================== #" + str(i) + "\n"))
+        report = measureEnergyForCommand(command)
+        sumReport = addReports(sumReport, report)
+    print(sumReport)
+    return divideReport(sumReport, count)
+
+# Parse a list of commands in a file.
+# Lines ending in "\" are continuation lines.
+# Lines beginning with "#" are comments.
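+# Hypothetical example of such a file (the commands shown are only illustrative):
+#   # idle baseline
+#   sleep 10
+#   # tone playback
+#   adb shell /data/local/tmp/play_tone --seconds 10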
+def measureEnergyForCommands(fileName):
+    finalReport = "------------------------------------\n"
+    finalReport += "comment, command, " + formatEnergyHeader() + "\n"
+    comment = ""
+    with open(fileName) as fp:
+        for line in fp:
+            command = line.strip()
+            if command.endswith('\\'):
+                command = command[:-1].strip() # remove the trailing backslash
+                runCommand(command)
+            elif command.startswith("#"):
+                # ignore comment
+                print((command + "\n"))
+                comment = command
+            elif command:
+                report = averageEnergyForCommand(command, DEFAULT_NUM_ITERATIONS)
+                finalReport += comment + ", " + command + ", " + formatEnergyData(report) + "\n"
+                print(finalReport)
+    return finalReport
+
+def main():
+    # parse command line args
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-s', '--seconds',
+            help="Measure power for N seconds. Ignore scriptFile.",
+            type=float)
+    parser.add_argument("fileName",
+            nargs = '?',
+            help="Path to file containing commands to be measured."
+                    + " Default path = " + DEFAULT_FILE_NAME + "."
+                    + " Lines ending in '\' are continuation lines."
+                    + " Lines beginning with '#' are comments.",
+                    default=DEFAULT_FILE_NAME)
+    args = parser.parse_args()
+
+    print(("seconds  = " + str(args.seconds)))
+    print(("fileName = " + str(args.fileName)))
+    # Process command line
+    if args.seconds:
+        report = measureEnergyOverTime(args.seconds)
+        printEnergyReport(report)
+    else:
+        report = measureEnergyForCommands(args.fileName)
+        print(report)
+    print("Finished.\n")
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/media/libaaudio/scripts/setup_odpm_cpu_rails.sh b/media/libaaudio/scripts/setup_odpm_cpu_rails.sh
new file mode 100755
index 0000000..e9241b9
--- /dev/null
+++ b/media/libaaudio/scripts/setup_odpm_cpu_rails.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Configure ODPM rails to measure CPU specific power.
+# See go/odpm-p21-userguide
+
+adb root
+
+# LDO2M(L2M_ALIVE) - DRAM Array Core Power
+adb shell 'echo "CH0=LDO2M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+
+# These are the defaults.
+# BUCK2M(S2M_VDD_CPUCL2):CPU(BIG)
+# adb shell 'echo "CH3=BUCK2M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK3M(S3M_VDD_CPUCL1):CPU(MID)
+# adb shell 'echo "CH4=BUCK3M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK4M(S4M_VDD_CPUCL0):CPU(LITTLE)
+# adb shell 'echo "CH5=BUCK4M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK1M(S1M_VDD_MIF):MIF
+# adb shell 'echo "CH7=BUCK1M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+
+# These are default on device1.
+# BUCK5S(S5S_VDDQ_MEM):DDR
+# adb shell 'echo "CH3=BUCK5S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+# BUCK10S(S10S_VDD2L):DDR
+# adb shell 'echo "CH4=BUCK10S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+# BUCK4S(S4S_VDD2H_MEM):DDR
+# adb shell 'echo "CH5=BUCK4S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+
+adb shell 'cat /sys/bus/iio/devices/iio\:device0/enabled_rails'
+adb shell 'cat /sys/bus/iio/devices/iio\:device1/enabled_rails'
+
+adb unroot
+
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 33a5c7f..f50b53a 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -7,6 +7,65 @@
     default_applicable_licenses: ["frameworks_av_license"],
 }
 
+tidy_errors = [
+    // https://clang.llvm.org/extra/clang-tidy/checks/list.html
+    // For many categories, the checks are too many to specify individually.
+    // Feel free to disable as needed - as warnings are generally ignored,
+    // we treat warnings as errors.
+    "android-*",
+    "bugprone-*",
+    "cert-*",
+    "clang-analyzer-security*",
+    "google-*",
+    "misc-*",
+    //"modernize-*",  // explicitly list the modernize as they can be subjective.
+    "modernize-avoid-bind",
+    //"modernize-avoid-c-arrays", // std::array<> can be verbose
+    "modernize-concat-nested-namespaces",
+    //"modernize-deprecated-headers", // C headers still ok even if there is C++ equivalent.
+    "modernize-deprecated-ios-base-aliases",
+    "modernize-loop-convert",
+    "modernize-make-shared",
+    "modernize-make-unique",
+    "modernize-pass-by-value",
+    "modernize-raw-string-literal",
+    "modernize-redundant-void-arg",
+    "modernize-replace-auto-ptr",
+    "modernize-replace-random-shuffle",
+    "modernize-return-braced-init-list",
+    "modernize-shrink-to-fit",
+    "modernize-unary-static-assert",
+    // "modernize-use-auto", // found in AAudioAudio.cpp
+    "modernize-use-bool-literals",
+    "modernize-use-default-member-init",
+    "modernize-use-emplace",
+    "modernize-use-equals-default",
+    "modernize-use-equals-delete",
+    // "modernize-use-nodiscard", // found in aidl generated files
+    "modernize-use-noexcept",
+    "modernize-use-nullptr",
+    // "modernize-use-override", // found in aidl generated files
+    // "modernize-use-trailing-return-type", // not necessarily more readable
+    "modernize-use-transparent-functors",
+    "modernize-use-uncaught-exceptions",
+    // "modernize-use-using", // found typedef in several files
+    "performance-*",
+
+    // Remove some pedantic stylistic requirements.
+    "-android-cloexec-dup", // found in SharedMemoryParcelable.cpp
+    "-bugprone-macro-parentheses", // found in SharedMemoryParcelable.h
+    "-bugprone-narrowing-conversions", // found in several interface from size_t to int32_t
+
+    "-google-readability-casting", // C++ casts not always necessary and may be verbose
+    "-google-readability-todo", // do not require TODO(info)
+    "-google-build-using-namespace", // Reenable and fix later.
+    "-google-global-names-in-headers", // found in several files
+
+    "-misc-non-private-member-variables-in-classes", // found in aidl generated files
+
+    "-performance-no-int-to-ptr", // found in SharedMemoryParcelable.h
+]
+
 cc_library {
     name: "libaaudio",
 
@@ -52,7 +111,7 @@
         "libcutils",
         "libutils",
         "libbinder",
-        "libpermission",
+        "framework-permission-aidl-cpp",
     ],
 
     sanitize: {
@@ -64,6 +123,13 @@
         symbol_file: "libaaudio.map.txt",
         versions: ["28"],
     },
+
+    tidy: true,
+    tidy_checks: tidy_errors,
+    tidy_checks_as_errors: tidy_errors,
+    tidy_flags: [
+        "-format-style=file",
+    ]
 }
 
 cc_library {
@@ -102,6 +168,8 @@
         "libbinder",
         "framework-permission-aidl-cpp",
         "aaudio-aidl-cpp",
+        "android.media.audio.common.types-V1-cpp",
+        "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
     ],
 
@@ -139,10 +207,16 @@
         "binding/RingBufferParcelable.cpp",
         "binding/SharedMemoryParcelable.cpp",
         "binding/SharedRegionParcelable.cpp",
-        "flowgraph/AudioProcessorBase.cpp",
+        "flowgraph/ChannelCountConverter.cpp",
         "flowgraph/ClipToRange.cpp",
+        "flowgraph/FlowGraphNode.cpp",
+        "flowgraph/ManyToMultiConverter.cpp",
+        "flowgraph/MonoBlend.cpp",
         "flowgraph/MonoToMultiConverter.cpp",
+        "flowgraph/MultiToMonoConverter.cpp",
+        "flowgraph/MultiToManyConverter.cpp",
         "flowgraph/RampLinear.cpp",
+        "flowgraph/SampleRateConverter.cpp",
         "flowgraph/SinkFloat.cpp",
         "flowgraph/SinkI16.cpp",
         "flowgraph/SinkI24.cpp",
@@ -151,11 +225,26 @@
         "flowgraph/SourceI16.cpp",
         "flowgraph/SourceI24.cpp",
         "flowgraph/SourceI32.cpp",
+        "flowgraph/resampler/IntegerRatio.cpp",
+        "flowgraph/resampler/LinearResampler.cpp",
+        "flowgraph/resampler/MultiChannelResampler.cpp",
+        "flowgraph/resampler/PolyphaseResampler.cpp",
+        "flowgraph/resampler/PolyphaseResamplerMono.cpp",
+        "flowgraph/resampler/PolyphaseResamplerStereo.cpp",
+        "flowgraph/resampler/SincResampler.cpp",
+        "flowgraph/resampler/SincResamplerStereo.cpp",
     ],
     sanitize: {
         integer_overflow: true,
         misc_undefined: ["bounds"],
     },
+
+    tidy: true,
+    tidy_checks: tidy_errors,
+    tidy_checks_as_errors: tidy_errors,
+    tidy_flags: [
+        "-format-style=file",
+    ]
 }
 
 aidl_interface {
@@ -172,19 +261,15 @@
         "binding/aidl/aaudio/IAAudioService.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
+        "audioclient-types-aidl",
         "shared-file-region-aidl",
-        "framework-permission-aidl"
+        "framework-permission-aidl",
     ],
     backend:
     {
-        cpp: {
-            enabled: true,
-        },
         java: {
-            // TODO: need to have audio_common-aidl available in Java to enable
-            //       this.
-            enabled: false,
+            sdk_version: "module_current",
         },
     },
 }
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
index 6e3a1c8..42d81ca 100644
--- a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
@@ -124,4 +124,16 @@
     return result;
 }
 
+aaudio_result_t AAudioBinderAdapter::exitStandby(aaudio_handle_t streamHandle,
+                                                 AudioEndpointParcelable &endpointOut) {
+    aaudio_result_t result;
+    Endpoint endpoint;
+    Status status = mDelegate->exitStandby(streamHandle, &endpoint, &result);
+    if (!status.isOk()) {
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
+    }
+    endpointOut = std::move(endpoint);
+    return result;
+}
+
 }  // namespace aaudio
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.h b/media/libaaudio/src/binding/AAudioBinderAdapter.h
index 5e9ab57..d170783 100644
--- a/media/libaaudio/src/binding/AAudioBinderAdapter.h
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.h
@@ -57,6 +57,9 @@
     aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
                                           pid_t clientThreadId) override;
 
+    aaudio_result_t exitStandby(aaudio_handle_t streamHandle,
+                                AudioEndpointParcelable &parcelable) override;
+
 private:
     IAAudioService* const mDelegate;
 };
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index fa5a2da..8e5facc 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -36,13 +36,10 @@
 using android::IServiceManager;
 using android::defaultServiceManager;
 using android::interface_cast;
-using android::IInterface;
 using android::Mutex;
 using android::ProcessState;
 using android::sp;
 using android::status_t;
-using android::wp;
-using android::binder::Status;
 
 using namespace aaudio;
 
@@ -93,7 +90,7 @@
                     ALOGE("%s() - linkToDeath() returned %d", __func__, status);
                 }
                 aaudioService = interface_cast<IAAudioService>(binder);
-                mAdapter.reset(new Adapter(aaudioService, mAAudioClient));
+                mAdapter = std::make_shared<Adapter>(aaudioService, mAAudioClient);
                 needToRegister = true;
                 // Make sure callbacks can be received by mAAudioClient
                 ProcessState::self()->startThreadPool();
@@ -204,3 +201,11 @@
 
     return service->unregisterAudioThread(streamHandle, clientThreadId);
 }
+
+aaudio_result_t AAudioBinderClient::exitStandby(aaudio_handle_t streamHandle,
+                                                AudioEndpointParcelable &endpointOut) {
+    std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
+    if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
+    return service->exitStandby(streamHandle, endpointOut);
+}
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index 6a7b639..0968f4c 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -108,7 +108,10 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
-    void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) {
+    aaudio_result_t exitStandby(aaudio_handle_t streamHandle,
+                                AudioEndpointParcelable &endpointOut) override;
+
+    void onStreamChange(aaudio_handle_t /*handle*/, int32_t /*opcode*/, int32_t /*value*/) {
         // TODO This is just a stub so we can have a client Binder to pass to the service.
         // TODO Implemented in a later CL.
         ALOGW("onStreamChange called!");
@@ -116,7 +119,7 @@
 
     class AAudioClient : public android::IBinder::DeathRecipient, public BnAAudioClient {
     public:
-        AAudioClient(android::wp<AAudioBinderClient> aaudioBinderClient)
+        explicit AAudioClient(const android::wp<AAudioBinderClient>& aaudioBinderClient)
                 : mBinderClient(aaudioBinderClient) {
         }
 
@@ -150,10 +153,10 @@
     class Adapter : public AAudioBinderAdapter {
     public:
         Adapter(const android::sp<IAAudioService>& delegate,
-                const android::sp<AAudioClient>& aaudioClient)
+                android::sp<AAudioClient> aaudioClient)
                 : AAudioBinderAdapter(delegate.get()),
                   mDelegate(delegate),
-                  mAAudioClient(aaudioClient) {}
+                  mAAudioClient(std::move(aaudioClient)) {}
 
         virtual ~Adapter() {
             if (mDelegate != nullptr) {
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
index 5d11512..e901767 100644
--- a/media/libaaudio/src/binding/AAudioServiceInterface.h
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -37,7 +37,7 @@
 class AAudioServiceInterface {
 public:
 
-    AAudioServiceInterface() {};
+    AAudioServiceInterface() = default;
     virtual ~AAudioServiceInterface() = default;
 
     virtual void registerClient(const android::sp<IAAudioClient>& client) = 0;
@@ -95,6 +95,16 @@
 
     virtual aaudio_result_t stopClient(aaudio_handle_t streamHandle,
                                        audio_port_handle_t clientHandle) = 0;
+
+    /**
+     * Exit the standby mode.
+     *
+     * @param streamHandle the stream handle
+     * @param parcelable contains new data queue information
+     * @return the result of the execution
+     */
+    virtual aaudio_result_t exitStandby(aaudio_handle_t streamHandle,
+                                        AudioEndpointParcelable &parcelable) = 0;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index bec4393..b60bac2 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -23,11 +23,13 @@
 #include <sys/mman.h>
 #include <aaudio/AAudio.h>
 
+#include <media/AidlConversion.h>
+
 #include "binding/AAudioStreamConfiguration.h"
 
 using namespace aaudio;
 
-using android::media::audio::common::AudioFormat;
+using android::media::audio::common::AudioFormatDescription;
 
 AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
     setChannelMask(parcelable.channelMask);
@@ -35,8 +37,9 @@
     setDeviceId(parcelable.deviceId);
     static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(parcelable.sharingMode));
     setSharingMode(parcelable.sharingMode);
-    static_assert(sizeof(audio_format_t) == sizeof(parcelable.audioFormat));
-    setFormat(static_cast<audio_format_t>(parcelable.audioFormat));
+    auto convFormat = android::aidl2legacy_AudioFormatDescription_audio_format_t(
+            parcelable.audioFormat);
+    setFormat(convFormat.ok() ? convFormat.value() : AUDIO_FORMAT_INVALID);
     static_assert(sizeof(aaudio_direction_t) == sizeof(parcelable.direction));
     setDirection(parcelable.direction);
     static_assert(sizeof(audio_usage_t) == sizeof(parcelable.usage));
@@ -75,8 +78,14 @@
     result.deviceId = getDeviceId();
     static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(result.sharingMode));
     result.sharingMode = getSharingMode();
-    static_assert(sizeof(audio_format_t) == sizeof(result.audioFormat));
-    result.audioFormat = static_cast<AudioFormat>(getFormat());
+    auto convAudioFormat = android::legacy2aidl_audio_format_t_AudioFormatDescription(getFormat());
+    if (convAudioFormat.ok()) {
+        result.audioFormat = convAudioFormat.value();
+    } else {
+        result.audioFormat = AudioFormatDescription{};
+        result.audioFormat.type =
+                android::media::audio::common::AudioFormatType::SYS_RESERVED_INVALID;
+    }
     static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
     result.direction = getDirection();
     static_assert(sizeof(audio_usage_t) == sizeof(result.usage));
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 8d90034..a4cc2bd 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -30,7 +30,7 @@
 using namespace aaudio;
 
 AAudioStreamRequest::AAudioStreamRequest(const StreamRequest& parcelable) :
-        mConfiguration(std::move(parcelable.params)),
+        mConfiguration(parcelable.params),
         mAttributionSource(parcelable.attributionSource),
         mSharingModeMatchRequired(parcelable.sharingModeMatchRequired),
         mInService(parcelable.inService) {
@@ -38,7 +38,7 @@
 
 StreamRequest AAudioStreamRequest::parcelable() const {
     StreamRequest result;
-    result.params = std::move(mConfiguration).parcelable();
+    result.params = mConfiguration.parcelable();
     result.attributionSource = mAttributionSource;
     result.sharingModeMatchRequired = mSharingModeMatchRequired;
     result.inService = mInService;
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index aa4ac27..b1262df 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -29,17 +29,15 @@
 #include "binding/AudioEndpointParcelable.h"
 
 using android::base::unique_fd;
-using android::media::SharedFileRegion;
-using android::NO_ERROR;
 using android::status_t;
 
 using namespace aaudio;
 
 AudioEndpointParcelable::AudioEndpointParcelable(Endpoint&& parcelable)
-        : mUpMessageQueueParcelable(std::move(parcelable.upMessageQueueParcelable)),
-          mDownMessageQueueParcelable(std::move(parcelable.downMessageQueueParcelable)),
-          mUpDataQueueParcelable(std::move(parcelable.upDataQueueParcelable)),
-          mDownDataQueueParcelable(std::move(parcelable.downDataQueueParcelable)),
+        : mUpMessageQueueParcelable(parcelable.upMessageQueueParcelable),
+          mDownMessageQueueParcelable(parcelable.downMessageQueueParcelable),
+          mUpDataQueueParcelable(parcelable.upDataQueueParcelable),
+          mDownDataQueueParcelable(parcelable.downDataQueueParcelable),
           mNumSharedMemories(parcelable.sharedMemories.size()) {
     for (size_t i = 0; i < parcelable.sharedMemories.size() && i < MAX_SHARED_MEMORIES; ++i) {
         // Re-construct.
@@ -56,10 +54,10 @@
 
 Endpoint AudioEndpointParcelable::parcelable()&& {
     Endpoint result;
-    result.upMessageQueueParcelable = std::move(mUpMessageQueueParcelable).parcelable();
-    result.downMessageQueueParcelable = std::move(mDownMessageQueueParcelable).parcelable();
-    result.upDataQueueParcelable = std::move(mUpDataQueueParcelable).parcelable();
-    result.downDataQueueParcelable = std::move(mDownDataQueueParcelable).parcelable();
+    result.upMessageQueueParcelable = mUpMessageQueueParcelable.parcelable();
+    result.downMessageQueueParcelable = mDownMessageQueueParcelable.parcelable();
+    result.upDataQueueParcelable = mUpDataQueueParcelable.parcelable();
+    result.downDataQueueParcelable = mDownDataQueueParcelable.parcelable();
     result.sharedMemories.reserve(std::min(mNumSharedMemories, MAX_SHARED_MEMORIES));
     for (size_t i = 0; i < mNumSharedMemories && i < MAX_SHARED_MEMORIES; ++i) {
         result.sharedMemories.emplace_back(std::move(mSharedMemories[i]).parcelable());
@@ -81,6 +79,22 @@
     return index;
 }
 
+void AudioEndpointParcelable::closeDataFileDescriptor() {
+    const int32_t curDataMemoryIndex = mDownDataQueueParcelable.getSharedMemoryIndex();
+    mSharedMemories[curDataMemoryIndex].closeAndReleaseFd();
+}
+
+void AudioEndpointParcelable::updateDataFileDescriptor(
+        AudioEndpointParcelable* endpointParcelable) {
+    const int32_t curDataMemoryIndex = mDownDataQueueParcelable.getSharedMemoryIndex();
+    const int32_t newDataMemoryIndex =
+            endpointParcelable->mDownDataQueueParcelable.getSharedMemoryIndex();
+    mSharedMemories[curDataMemoryIndex].close();
+    mSharedMemories[curDataMemoryIndex].setup(
+            endpointParcelable->mSharedMemories[newDataMemoryIndex]);
+    mDownDataQueueParcelable.updateMemory(endpointParcelable->mDownDataQueueParcelable);
+}
+
 aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
     aaudio_result_t result = mUpMessageQueueParcelable.resolve(mSharedMemories,
                                                            &descriptor->upMessageQueueDescriptor);
@@ -94,6 +108,10 @@
     return result;
 }
 
+aaudio_result_t AudioEndpointParcelable::resolveDataQueue(RingBufferDescriptor *descriptor) {
+    return mDownDataQueueParcelable.resolve(mSharedMemories, descriptor);
+}
+
 aaudio_result_t AudioEndpointParcelable::close() {
     int err = 0;
     for (int i = 0; i < mNumSharedMemories; i++) {
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index 5237a1a..5d2c38f 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -43,7 +43,7 @@
     // Ctor/assignment from a parcelable representation.
     // Since the parcelable object owns unique FDs (for shared memory blocks), move semantics are
     // provided to avoid the need to dupe.
-    AudioEndpointParcelable(Endpoint&& parcelable);
+    explicit AudioEndpointParcelable(Endpoint&& parcelable);
     AudioEndpointParcelable& operator=(Endpoint&& parcelable);
 
     /**
@@ -52,7 +52,20 @@
      */
     int32_t addFileDescriptor(const android::base::unique_fd& fd, int32_t sizeInBytes);
 
+    /**
+     * Close the current data file descriptor. The duplicated file descriptor will be closed.
+     */
+    void closeDataFileDescriptor();
+
+    /**
+     * Update the current data file descriptor with the given endpoint parcelable.
+     * @param endpointParcelable an endpoint parcelable that contains new data file
+     *                           descriptor information
+     */
+    void updateDataFileDescriptor(AudioEndpointParcelable* endpointParcelable);
+
     aaudio_result_t resolve(EndpointDescriptor *descriptor);
+    aaudio_result_t resolveDataQueue(RingBufferDescriptor *descriptor);
 
     aaudio_result_t close();
 
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index a4b3cec..3bc51d0 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -30,9 +30,10 @@
 using namespace aaudio;
 
 RingBufferParcelable::RingBufferParcelable(const RingBuffer& parcelable)
-        : mReadCounterParcelable(std::move(parcelable.readCounterParcelable)),
-          mWriteCounterParcelable(std::move(parcelable.writeCounterParcelable)),
-          mDataParcelable(std::move(parcelable.dataParcelable)),
+        : mReadCounterParcelable(parcelable.readCounterParcelable),
+          mWriteCounterParcelable(parcelable.writeCounterParcelable),
+          mDataParcelable(parcelable.dataParcelable),
+          mSharedMemoryIndex(parcelable.sharedMemoryIndex),
           mBytesPerFrame(parcelable.bytesPerFrame),
           mFramesPerBurst(parcelable.framesPerBurst),
           mCapacityInFrames(parcelable.capacityInFrames),
@@ -42,9 +43,10 @@
 
 RingBuffer RingBufferParcelable::parcelable() const {
     RingBuffer result;
-    result.readCounterParcelable = std::move(mReadCounterParcelable).parcelable();
-    result.writeCounterParcelable = std::move(mWriteCounterParcelable).parcelable();
-    result.dataParcelable = std::move(mDataParcelable).parcelable();
+    result.readCounterParcelable = mReadCounterParcelable.parcelable();
+    result.writeCounterParcelable = mWriteCounterParcelable.parcelable();
+    result.dataParcelable = mDataParcelable.parcelable();
+    result.sharedMemoryIndex = mSharedMemoryIndex;
     result.bytesPerFrame = mBytesPerFrame;
     result.framesPerBurst = mFramesPerBurst;
     result.capacityInFrames = mCapacityInFrames;
@@ -60,6 +62,7 @@
                  int32_t readCounterOffset,
                  int32_t writeCounterOffset,
                  int32_t counterSizeBytes) {
+    mSharedMemoryIndex = sharedMemoryIndex;
     mReadCounterParcelable.setup(sharedMemoryIndex, readCounterOffset, counterSizeBytes);
     mWriteCounterParcelable.setup(sharedMemoryIndex, writeCounterOffset, counterSizeBytes);
     mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
@@ -68,12 +71,13 @@
 void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
                  int32_t dataMemoryOffset,
                  int32_t dataSizeInBytes) {
+    mSharedMemoryIndex = sharedMemoryIndex;
     mReadCounterParcelable.setup(sharedMemoryIndex, 0, 0);
     mWriteCounterParcelable.setup(sharedMemoryIndex, 0, 0);
     mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
 }
 
-int32_t RingBufferParcelable::getBytesPerFrame() {
+int32_t RingBufferParcelable::getBytesPerFrame() const {
     return mBytesPerFrame;
 }
 
@@ -81,7 +85,7 @@
     mBytesPerFrame = bytesPerFrame;
 }
 
-int32_t RingBufferParcelable::getFramesPerBurst() {
+int32_t RingBufferParcelable::getFramesPerBurst() const {
     return mFramesPerBurst;
 }
 
@@ -89,7 +93,7 @@
     mFramesPerBurst = framesPerBurst;
 }
 
-int32_t RingBufferParcelable::getCapacityInFrames() {
+int32_t RingBufferParcelable::getCapacityInFrames() const {
     return mCapacityInFrames;
 }
 
@@ -124,6 +128,14 @@
     return AAUDIO_OK;
 }
 
+void RingBufferParcelable::updateMemory(const RingBufferParcelable& parcelable) {
+    setupMemory(mSharedMemoryIndex, 0,
+                parcelable.getCapacityInFrames() * parcelable.getBytesPerFrame());
+    setBytesPerFrame(parcelable.getBytesPerFrame());
+    setFramesPerBurst(parcelable.getFramesPerBurst());
+    setCapacityInFrames(parcelable.getCapacityInFrames());
+}
+
 aaudio_result_t RingBufferParcelable::validate() const {
     if (mCapacityInFrames < 0 || mCapacityInFrames >= 32 * 1024) {
         ALOGE("invalid mCapacityInFrames = %d", mCapacityInFrames);
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
index 2508cea..29d0d86 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.h
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -46,15 +46,15 @@
                      int32_t dataMemoryOffset,
                      int32_t dataSizeInBytes);
 
-    int32_t getBytesPerFrame();
+    int32_t getBytesPerFrame() const;
 
     void setBytesPerFrame(int32_t bytesPerFrame);
 
-    int32_t getFramesPerBurst();
+    int32_t getFramesPerBurst() const;
 
     void setFramesPerBurst(int32_t framesPerBurst);
 
-    int32_t getCapacityInFrames();
+    int32_t getCapacityInFrames() const;
 
     void setCapacityInFrames(int32_t capacityInFrames);
 
@@ -62,6 +62,12 @@
 
     aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
 
+    void updateMemory(const RingBufferParcelable& parcelable);
+
+    int32_t getSharedMemoryIndex() const {
+        return mSharedMemoryIndex;
+    }
+
     void dump();
 
     // Extract a parcelable representation of this object.
@@ -71,6 +77,7 @@
     SharedRegionParcelable  mReadCounterParcelable;
     SharedRegionParcelable  mWriteCounterParcelable;
     SharedRegionParcelable  mDataParcelable;
+    int32_t                 mSharedMemoryIndex = -1;
     int32_t                 mBytesPerFrame = 0;     // index is in frames
     int32_t                 mFramesPerBurst = 0;    // for ISOCHRONOUS queues
     int32_t                 mCapacityInFrames = 0;  // zero if unused
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index eef238f..741aefc 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -32,7 +32,6 @@
 #include "binding/SharedMemoryParcelable.h"
 
 using android::base::unique_fd;
-using android::NO_ERROR;
 using android::status_t;
 using android::media::SharedFileRegion;
 
@@ -65,6 +64,10 @@
     mSizeInBytes = sizeInBytes;
 }
 
+void SharedMemoryParcelable::setup(const SharedMemoryParcelable &sharedMemoryParcelable) {
+    setup(sharedMemoryParcelable.mFd, sharedMemoryParcelable.mSizeInBytes);
+}
+
 aaudio_result_t SharedMemoryParcelable::close() {
     if (mResolvedAddress != MMAP_UNRESOLVED_ADDRESS) {
         int err = munmap(mResolvedAddress, mSizeInBytes);
@@ -77,8 +80,16 @@
     return AAUDIO_OK;
 }
 
+aaudio_result_t SharedMemoryParcelable::closeAndReleaseFd() {
+    aaudio_result_t result = close();
+    if (result == AAUDIO_OK) {
+        mFd.reset();
+    }
+    return result;
+}
+
 aaudio_result_t SharedMemoryParcelable::resolveSharedMemory(const unique_fd& fd) {
-    mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ | PROT_WRITE,
+    mResolvedAddress = (uint8_t *) mmap(nullptr, mSizeInBytes, PROT_READ | PROT_WRITE,
                                         MAP_SHARED, fd.get(), 0);
     if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
         ALOGE("mmap() failed for fd = %d, nBytes = %" PRId64 ", errno = %s",
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
index 1f2c335..7762fef 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -52,12 +52,16 @@
      */
     void setup(const android::base::unique_fd& fd, int32_t sizeInBytes);
 
+    void setup(const SharedMemoryParcelable& sharedMemoryParcelable);
+
     // mmap() shared memory
     aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
 
     // munmap() any mapped memory
     aaudio_result_t close();
 
+    aaudio_result_t closeAndReleaseFd();
+
     int32_t getSizeInBytes();
 
     void dump();
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index 56b99c0..6fa109b 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -29,10 +29,7 @@
 #include "binding/SharedMemoryParcelable.h"
 #include "binding/SharedRegionParcelable.h"
 
-using android::NO_ERROR;
 using android::status_t;
-using android::Parcel;
-using android::Parcelable;
 
 using namespace aaudio;
 
diff --git a/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl b/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
index 44d2211..485c2e2 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
@@ -78,4 +78,6 @@
 
     int unregisterAudioThread(int streamHandle,
                               int clientThreadId);
+
+    int exitStandby(int streamHandle, out Endpoint endpoint);
 }
diff --git a/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl b/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
index a58b33a..dd64493 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
@@ -26,4 +26,5 @@
     int                 framesPerBurst;    // for ISOCHRONOUS queues
     int                 capacityInFrames;  // zero if unused
     int /* RingbufferFlags */ flags;  // = RingbufferFlags::NONE;
+    int                 sharedMemoryIndex;
 }
\ No newline at end of file
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
index a6541e1..983e193 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -16,14 +16,14 @@
 
 package aaudio;
 
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioFormatDescription;
 
 parcelable StreamParameters {
     int                                       channelMask;  //          = AAUDIO_UNSPECIFIED;
     int                                       sampleRate;  //           = AAUDIO_UNSPECIFIED;
     int                                       deviceId;  //             = AAUDIO_UNSPECIFIED;
     int /* aaudio_sharing_mode_t */           sharingMode;  //          = AAUDIO_SHARING_MODE_SHARED;
-    AudioFormat                               audioFormat;  //          = AUDIO_FORMAT_DEFAULT;
+    AudioFormatDescription                    audioFormat;  //          = AUDIO_FORMAT_DEFAULT;
     int /* aaudio_direction_t */              direction;  //            = AAUDIO_DIRECTION_OUTPUT;
     int /* aaudio_usage_t */                  usage;  //                = AAUDIO_UNSPECIFIED;
     int /* aaudio_content_type_t */           contentType;  //          = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.cpp b/media/libaaudio/src/client/AAudioFlowGraph.cpp
index 61b50f3..d0c3238 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.cpp
+++ b/media/libaaudio/src/client/AAudioFlowGraph.cpp
@@ -21,7 +21,10 @@
 #include "AAudioFlowGraph.h"
 
 #include <flowgraph/ClipToRange.h>
+#include <flowgraph/ManyToMultiConverter.h>
+#include <flowgraph/MonoBlend.h>
 #include <flowgraph/MonoToMultiConverter.h>
+#include <flowgraph/MultiToManyConverter.h>
 #include <flowgraph/RampLinear.h>
 #include <flowgraph/SinkFloat.h>
 #include <flowgraph/SinkI16.h>
@@ -37,12 +40,17 @@
 aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
                           int32_t sourceChannelCount,
                           audio_format_t sinkFormat,
-                          int32_t sinkChannelCount) {
-    AudioFloatOutputPort *lastOutput = nullptr;
+                          int32_t sinkChannelCount,
+                          bool useMonoBlend,
+                          float audioBalance,
+                          bool isExclusive) {
+    FlowGraphPortFloatOutput *lastOutput = nullptr;
 
     // TODO change back to ALOGD
-    ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d",
-          __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount);
+    ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d, "
+          "useMonoBlend = %d, audioBalance = %f, isExclusive %d",
+          __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount,
+          useMonoBlend, audioBalance, isExclusive);
 
     switch (sourceFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
@@ -63,10 +71,11 @@
     }
     lastOutput = &mSource->output;
 
-    // Apply volume as a ramp to avoid pops.
-    mVolumeRamp = std::make_unique<RampLinear>(sourceChannelCount);
-    lastOutput->connect(&mVolumeRamp->input);
-    lastOutput = &mVolumeRamp->output;
+    if (useMonoBlend) {
+        mMonoBlend = std::make_unique<MonoBlend>(sourceChannelCount);
+        lastOutput->connect(&mMonoBlend->input);
+        lastOutput = &mMonoBlend->output;
+    }
 
     // For a pure float graph, there is chance that the data range may be very large.
     // So we should clip to a reasonable value that allows a little headroom.
@@ -86,6 +95,26 @@
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
+    // Apply volume ramps for only exclusive streams.
+    if (isExclusive) {
+        // Apply volume ramps to set the left/right audio balance and target volumes.
+        // The signals will be decoupled and volume ramps applied before the signals
+        // are combined again.
+        mMultiToManyConverter = std::make_unique<MultiToManyConverter>(sinkChannelCount);
+        mManyToMultiConverter = std::make_unique<ManyToMultiConverter>(sinkChannelCount);
+        lastOutput->connect(&mMultiToManyConverter->input);
+        for (int i = 0; i < sinkChannelCount; i++) {
+            mVolumeRamps.emplace_back(std::make_unique<RampLinear>(1));
+            mPanningVolumes.emplace_back(1.0f);
+            lastOutput = mMultiToManyConverter->outputs[i].get();
+            lastOutput->connect(&(mVolumeRamps[i].get()->input));
+            lastOutput = &(mVolumeRamps[i].get()->output);
+            lastOutput->connect(mManyToMultiConverter->inputs[i].get());
+        }
+        lastOutput = &mManyToMultiConverter->output;
+        setAudioBalance(audioBalance);
+    }
+
     switch (sinkFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             mSink = std::make_unique<SinkFloat>(sinkChannelCount);
@@ -117,9 +146,32 @@
  * @param volume between 0.0 and 1.0
  */
 void AAudioFlowGraph::setTargetVolume(float volume) {
-    mVolumeRamp->setTarget(volume);
+    for (int i = 0; i < mVolumeRamps.size(); i++) {
+        mVolumeRamps[i]->setTarget(volume * mPanningVolumes[i]);
+    }
+    mTargetVolume = volume;
 }
 
+/**
+ * @param audioBalance between -1.0 and 1.0
+ */
+void AAudioFlowGraph::setAudioBalance(float audioBalance) {
+    if (mPanningVolumes.size() >= 2) {
+        float leftMultiplier = 0;
+        float rightMultiplier = 0;
+        mBalance.computeStereoBalance(audioBalance, &leftMultiplier, &rightMultiplier);
+        mPanningVolumes[0] = leftMultiplier;
+        mPanningVolumes[1] = rightMultiplier;
+        mVolumeRamps[0]->setTarget(mTargetVolume * leftMultiplier);
+        mVolumeRamps[1]->setTarget(mTargetVolume * rightMultiplier);
+    }
+}
+
+/**
+ * @param numFrames number of frames over which volume changes are ramped
+ */
 void AAudioFlowGraph::setRampLengthInFrames(int32_t numFrames) {
-    mVolumeRamp->setLengthInFrames(numFrames);
+    for (auto& ramp : mVolumeRamps) {
+        ramp->setLengthInFrames(numFrames);
+    }
 }
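(Editor's note, illustrative only.) setAudioBalance() above relies on android::audio_utils::Balance to turn one balance value in [-1.0, 1.0] into left/right gain multipliers. As a rough illustration of the idea, here is a simple linear mapping; the real Balance class may use a different curve:

#include <algorithm>
#include <cstdio>

// Illustrative only: map balance in [-1, 1] to per-channel multipliers.
// balance = -1 -> full left, 0 -> both channels at 1.0, +1 -> full right.
static void computeLinearStereoBalance(float balance, float* left, float* right) {
    balance = std::clamp(balance, -1.0f, 1.0f);
    *left  = (balance > 0.0f) ? 1.0f - balance : 1.0f;
    *right = (balance < 0.0f) ? 1.0f + balance : 1.0f;
}

int main() {
    for (float b : {-1.0f, -0.5f, 0.0f, 0.5f, 1.0f}) {
        float l, r;
        computeLinearStereoBalance(b, &l, &r);
        std::printf("balance=%5.2f -> left=%.2f right=%.2f\n", b, l, r);
    }
    return 0;
}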
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index a49f64e..00b6575 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -23,8 +23,12 @@
 #include <system/audio.h>
 
 #include <aaudio/AAudio.h>
+#include <audio_utils/Balance.h>
 #include <flowgraph/ClipToRange.h>
+#include <flowgraph/ManyToMultiConverter.h>
+#include <flowgraph/MonoBlend.h>
 #include <flowgraph/MonoToMultiConverter.h>
+#include <flowgraph/MultiToManyConverter.h>
 #include <flowgraph/RampLinear.h>
 
 class AAudioFlowGraph {
@@ -36,12 +40,19 @@
      * @param sourceChannelCount
      * @param sinkFormat
      * @param sinkChannelCount
+     * @param useMonoBlend
+     * @param audioBalance
+     * @param channelMask
+     * @param isExclusive
      * @return
      */
     aaudio_result_t configure(audio_format_t sourceFormat,
                               int32_t sourceChannelCount,
                               audio_format_t sinkFormat,
-                              int32_t sinkChannelCount);
+                              int32_t sinkChannelCount,
+                              bool useMonoBlend,
+                              float audioBalance,
+                              bool isExclusive);
 
     void process(const void *source, void *destination, int32_t numFrames);
 
@@ -50,14 +61,28 @@
      */
     void setTargetVolume(float volume);
 
+    /**
+     * @param audioBalance between -1.0 and 1.0
+     */
+    void setAudioBalance(float audioBalance);
+
+    /**
+     * @param numFrames number of frames over which volume changes are ramped
+     */
     void setRampLengthInFrames(int32_t numFrames);
 
 private:
-    std::unique_ptr<flowgraph::AudioSource>          mSource;
-    std::unique_ptr<flowgraph::RampLinear>           mVolumeRamp;
-    std::unique_ptr<flowgraph::ClipToRange>          mClipper;
-    std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
-    std::unique_ptr<flowgraph::AudioSink>            mSink;
+    std::unique_ptr<flowgraph::FlowGraphSourceBuffered>     mSource;
+    std::unique_ptr<flowgraph::MonoBlend>                   mMonoBlend;
+    std::unique_ptr<flowgraph::ClipToRange>                 mClipper;
+    std::unique_ptr<flowgraph::MonoToMultiConverter>        mChannelConverter;
+    std::unique_ptr<flowgraph::ManyToMultiConverter>        mManyToMultiConverter;
+    std::unique_ptr<flowgraph::MultiToManyConverter>        mMultiToManyConverter;
+    std::vector<std::unique_ptr<flowgraph::RampLinear>>     mVolumeRamps;
+    std::vector<float>                                      mPanningVolumes;
+    float                                                   mTargetVolume = 1.0f;
+    android::audio_utils::Balance                           mBalance;
+    std::unique_ptr<flowgraph::FlowGraphSink>               mSink;
 };
 
 
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index ebc9f2b..e780f4f 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -31,13 +31,6 @@
 #define RIDICULOUSLY_LARGE_BUFFER_CAPACITY   (256 * 1024)
 #define RIDICULOUSLY_LARGE_FRAME_SIZE        4096
 
-AudioEndpoint::AudioEndpoint()
-    : mFreeRunning(false)
-    , mDataReadCounter(0)
-    , mDataWriteCounter(0)
-{
-}
-
 // TODO Consider moving to a method in RingBufferDescriptor
 static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
                                                   const RingBufferDescriptor *descriptor) {
@@ -146,38 +139,49 @@
     );
 
     // ============================ data queue =============================
-    descriptor = &pEndpointDescriptor->dataQueueDescriptor;
-    ALOGV("configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+    result = configureDataQueue(pEndpointDescriptor->dataQueueDescriptor, direction);
+
+    return result;
+}
+
+aaudio_result_t AudioEndpoint::configureDataQueue(const RingBufferDescriptor& descriptor,
+                                                  aaudio_direction_t direction) {
+    aaudio_result_t result = AudioEndpoint_validateQueueDescriptor("data", &descriptor);
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
+    ALOGV("configure() data framesPerBurst = %d", descriptor.framesPerBurst);
     ALOGV("configure() data readCounterAddress = %p",
-          descriptor->readCounterAddress);
+          descriptor.readCounterAddress);
 
     // An example of free running is when the other side is read or written by hardware DMA
     // or a DSP. It does not update its counter so we have to update it.
     int64_t *remoteCounter = (direction == AAUDIO_DIRECTION_OUTPUT)
-                             ? descriptor->readCounterAddress // read by other side
-                             : descriptor->writeCounterAddress; // written by other side
+                             ? descriptor.readCounterAddress // read by other side
+                             : descriptor.writeCounterAddress; // written by other side
     mFreeRunning = (remoteCounter == nullptr);
     ALOGV("configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
 
-    int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
+    int64_t *readCounterAddress = (descriptor.readCounterAddress == nullptr)
                                   ? &mDataReadCounter
-                                  : descriptor->readCounterAddress;
-    int64_t *writeCounterAddress = (descriptor->writeCounterAddress == nullptr)
+                                  : descriptor.readCounterAddress;
+    int64_t *writeCounterAddress = (descriptor.writeCounterAddress == nullptr)
                                   ? &mDataWriteCounter
-                                  : descriptor->writeCounterAddress;
+                                  : descriptor.writeCounterAddress;
 
     // Clear buffer to avoid an initial glitch on some devices.
-    size_t bufferSizeBytes = descriptor->capacityInFrames * descriptor->bytesPerFrame;
-    memset(descriptor->dataAddress, 0, bufferSizeBytes);
+    size_t bufferSizeBytes = descriptor.capacityInFrames * descriptor.bytesPerFrame;
+    memset(descriptor.dataAddress, 0, bufferSizeBytes);
 
     mDataQueue = std::make_unique<FifoBufferIndirect>(
-            descriptor->bytesPerFrame,
-            descriptor->capacityInFrames,
+            descriptor.bytesPerFrame,
+            descriptor.capacityInFrames,
             readCounterAddress,
             writeCounterAddress,
-            descriptor->dataAddress
+            descriptor.dataAddress
     );
-    uint32_t threshold = descriptor->capacityInFrames / 2;
+    uint32_t threshold = descriptor.capacityInFrames / 2;
     mDataQueue->setThreshold(threshold);
     return result;
 }
@@ -188,47 +192,66 @@
 }
 
 int32_t AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
-    return mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+    return mDataQueue == nullptr ? 0 : mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
 }
 
 int32_t AudioEndpoint::getEmptyFramesAvailable() {
-    return mDataQueue->getEmptyFramesAvailable();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getEmptyFramesAvailable();
 }
 
 int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer) {
-    return mDataQueue->getFullDataAvailable(wrappingBuffer);
+    return mDataQueue == nullptr ? 0 : mDataQueue->getFullDataAvailable(wrappingBuffer);
 }
 
 int32_t AudioEndpoint::getFullFramesAvailable() {
-    return mDataQueue->getFullFramesAvailable();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getFullFramesAvailable();
+}
+
+android::fifo_frames_t AudioEndpoint::read(void *buffer, android::fifo_frames_t numFrames) {
+    return mDataQueue == nullptr ? 0 : mDataQueue->read(buffer, numFrames);
+}
+
+android::fifo_frames_t AudioEndpoint::write(void *buffer, android::fifo_frames_t numFrames) {
+    return mDataQueue == nullptr ? 0 : mDataQueue->write(buffer, numFrames);
 }
 
 void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
-    mDataQueue->advanceWriteIndex(deltaFrames);
+    if (mDataQueue != nullptr) {
+        mDataQueue->advanceWriteIndex(deltaFrames);
+    }
 }
 
 void AudioEndpoint::advanceReadIndex(int32_t deltaFrames) {
-    mDataQueue->advanceReadIndex(deltaFrames);
+    if (mDataQueue != nullptr) {
+        mDataQueue->advanceReadIndex(deltaFrames);
+    }
 }
 
 void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead) {
-    mDataQueue->setReadCounter(framesRead);
+    if (mDataQueue != nullptr) {
+        mDataQueue->setReadCounter(framesRead);
+    }
 }
 
 fifo_counter_t AudioEndpoint::getDataReadCounter() const {
-    return mDataQueue->getReadCounter();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getReadCounter();
 }
 
 void AudioEndpoint::setDataWriteCounter(fifo_counter_t framesRead) {
-    mDataQueue->setWriteCounter(framesRead);
+    if (mDataQueue != nullptr) {
+        mDataQueue->setWriteCounter(framesRead);
+    }
 }
 
 fifo_counter_t AudioEndpoint::getDataWriteCounter() const {
-    return mDataQueue->getWriteCounter();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getWriteCounter();
 }
 
 int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
                                             int32_t *actualFrames) {
+    if (mDataQueue == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
     if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
         requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
     }
@@ -238,11 +261,11 @@
 }
 
 int32_t AudioEndpoint::getBufferSizeInFrames() const {
-    return mDataQueue->getThreshold();
+    return mDataQueue == nullptr ? 0 : mDataQueue->getThreshold();
 }
 
 int32_t AudioEndpoint::getBufferCapacityInFrames() const {
-    return (int32_t)mDataQueue->getBufferCapacityInFrames();
+    return mDataQueue == nullptr ? 0 : (int32_t)mDataQueue->getBufferCapacityInFrames();
 }
 
 void AudioEndpoint::dump() const {
@@ -251,5 +274,7 @@
 }
 
 void AudioEndpoint::eraseDataMemory() {
-    mDataQueue->eraseMemory();
+    if (mDataQueue != nullptr) {
+        mDataQueue->eraseMemory();
+    }
 }
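(Editor's note, illustrative only.) configureDataQueue() above falls back to locally owned counters whenever the other side does not publish a counter address, the free-running case described in the comment. A tiny self-contained sketch of that fallback, with made-up counter values:

#include <cstdint>
#include <cstdio>

int main() {
    // Local fallback counters, analogous to mDataReadCounter / mDataWriteCounter.
    int64_t localRead = 0;
    int64_t localWrite = 0;

    // Counter addresses published by the other side of the FIFO. A null address
    // means that side is free running (e.g. hardware DMA or a DSP) and never
    // updates its counter, so this side has to maintain it locally.
    int64_t* remoteRead = nullptr;
    int64_t* remoteWrite = nullptr;

    int64_t* readCounter  = (remoteRead  != nullptr) ? remoteRead  : &localRead;
    int64_t* writeCounter = (remoteWrite != nullptr) ? remoteWrite : &localWrite;
    const bool freeRunning = (remoteWrite == nullptr);

    *readCounter  += 48;   // pretend one burst was consumed
    *writeCounter += 96;   // and two bursts were produced
    std::printf("freeRunning=%d read=%lld write=%lld\n", freeRunning,
                static_cast<long long>(localRead), static_cast<long long>(localWrite));
    return 0;
}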
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 4c8d60f..01dd05a 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_AAUDIO_AUDIO_ENDPOINT_H
 #define ANDROID_AAUDIO_AUDIO_ENDPOINT_H
 
+#include <mutex>
+
 #include <aaudio/AAudio.h>
 
 #include "binding/AAudioServiceMessage.h"
@@ -34,7 +36,7 @@
 class AudioEndpoint {
 
 public:
-    AudioEndpoint();
+    AudioEndpoint() = default;
 
     /**
      * Configure based on the EndPointDescriptor_t.
@@ -42,6 +44,9 @@
     aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor,
                               aaudio_direction_t direction);
 
+    aaudio_result_t configureDataQueue(const RingBufferDescriptor &descriptor,
+                            aaudio_direction_t direction);
+
     /**
      * Read from a command passed up from the Server.
      * @return 1 if command received, 0 for no command, or negative error.
@@ -56,6 +61,10 @@
 
     int32_t getFullFramesAvailable();
 
+    android::fifo_frames_t read(void* buffer, android::fifo_frames_t numFrames);
+
+    android::fifo_frames_t write(void* buffer, android::fifo_frames_t numFrames);
+
     void advanceReadIndex(int32_t deltaFrames);
 
     void advanceWriteIndex(int32_t deltaFrames);
@@ -85,19 +94,31 @@
 
     int32_t getBufferCapacityInFrames() const;
 
+    void setThreshold(int32_t frames) {
+        mDataQueue->setThreshold(frames);
+    }
+
+    int32_t getThreshold() {
+        return mDataQueue->getThreshold();
+    }
+
     /**
      * Write zeros to the data queue memory.
      */
     void eraseDataMemory();
 
+    void freeDataQueue();
+
     void dump() const;
 
 private:
     std::unique_ptr<android::FifoBufferIndirect> mUpCommandQueue;
     std::unique_ptr<android::FifoBufferIndirect> mDataQueue;
-    bool                    mFreeRunning;
-    android::fifo_counter_t mDataReadCounter; // only used if free-running
-    android::fifo_counter_t mDataWriteCounter; // only used if free-running
+    bool                    mFreeRunning{false};
+    android::fifo_counter_t mDataReadCounter{0}; // only used if free-running
+    android::fifo_counter_t mDataWriteCounter{0}; // only used if free-running
+
+    std::mutex mDataQueueLock;
 };
 
 } // namespace aaudio
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index f933b29..9f0564f 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -27,6 +27,8 @@
 #include <aaudio/AAudio.h>
 #include <cutils/properties.h>
 
+#include <media/AudioParameter.h>
+#include <media/AudioSystem.h>
 #include <media/MediaMetricsItem.h>
 #include <utils/Trace.h>
 
@@ -49,8 +51,6 @@
 // This is needed to make sense of the logs more easily.
 #define LOG_TAG (mInService ? "AudioStreamInternal_Service" : "AudioStreamInternal_Client")
 
-using android::Mutex;
-using android::WrappingBuffer;
 using android::content::AttributionSourceState;
 
 using namespace aaudio;
@@ -81,8 +81,6 @@
 aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
 
     aaudio_result_t result = AAUDIO_OK;
-    int32_t framesPerBurst;
-    int32_t framesPerHardwareBurst;
     AAudioStreamRequest request;
     AAudioStreamConfiguration configurationOutput;
 
@@ -97,9 +95,6 @@
         return result;
     }
 
-    const int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
-    int32_t burstMicros = 0;
-
     const audio_format_t requestedFormat = getFormat();
     // We have to do volume scaling. So we prefer FLOAT format.
     if (requestedFormat == AUDIO_FORMAT_DEFAULT) {
@@ -215,12 +210,28 @@
         goto error;
     }
 
-    framesPerHardwareBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+    if ((result = configureDataInformation(builder.getFramesPerDataCallback())) != AAUDIO_OK) {
+        goto error;
+    }
+
+    setState(AAUDIO_STREAM_STATE_OPEN);
+
+    return result;
+
+error:
+    safeReleaseClose();
+    return result;
+}
+
+aaudio_result_t AudioStreamInternal::configureDataInformation(int32_t callbackFrames) {
+    int32_t framesPerHardwareBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
 
     // Scale up the burst size to meet the minimum equivalent in microseconds.
     // This is to avoid waking the CPU too often when the HW burst is very small
     // or at high sample rates.
-    framesPerBurst = framesPerHardwareBurst;
+    int32_t framesPerBurst = framesPerHardwareBurst;
+    int32_t burstMicros = 0;
+    const int32_t burstMinMicros = android::AudioSystem::getAAudioHardwareBurstMinUsec();
     do {
         if (burstMicros > 0) {  // skip first loop
             framesPerBurst *= 2;
@@ -233,8 +244,7 @@
     // Validate final burst size.
     if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
         ALOGE("%s - framesPerBurst out of range = %d", __func__, framesPerBurst);
-        result = AAUDIO_ERROR_OUT_OF_RANGE;
-        goto error;
+        return AAUDIO_ERROR_OUT_OF_RANGE;
     }
     setFramesPerBurst(framesPerBurst); // only save good value
 
@@ -242,26 +252,21 @@
     if (mBufferCapacityInFrames < getFramesPerBurst()
             || mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
         ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
-        result = AAUDIO_ERROR_OUT_OF_RANGE;
-        goto error;
+        return AAUDIO_ERROR_OUT_OF_RANGE;
     }
 
     mClockModel.setSampleRate(getSampleRate());
     mClockModel.setFramesPerBurst(framesPerHardwareBurst);
 
     if (isDataCallbackSet()) {
-        mCallbackFrames = builder.getFramesPerDataCallback();
+        mCallbackFrames = callbackFrames;
         if (mCallbackFrames > getBufferCapacity() / 2) {
             ALOGW("%s - framesPerCallback too big = %d, capacity = %d",
                   __func__, mCallbackFrames, getBufferCapacity());
-            result = AAUDIO_ERROR_OUT_OF_RANGE;
-            goto error;
-
+            return AAUDIO_ERROR_OUT_OF_RANGE;
         } else if (mCallbackFrames < 0) {
             ALOGW("%s - framesPerCallback negative", __func__);
-            result = AAUDIO_ERROR_OUT_OF_RANGE;
-            goto error;
-
+            return AAUDIO_ERROR_OUT_OF_RANGE;
         }
         if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
             mCallbackFrames = getFramesPerBurst();
@@ -271,6 +276,18 @@
         mCallbackBuffer = std::make_unique<uint8_t[]>(callbackBufferSize);
     }
 
+    // Exclusive output streams should combine channels when mono audio adjustment
+    // is enabled. They should also adjust for audio balance.
+    if ((getDirection() == AAUDIO_DIRECTION_OUTPUT) &&
+        (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE)) {
+        bool isMasterMono = false;
+        android::AudioSystem::getMasterMono(&isMasterMono);
+        setRequireMonoBlend(isMasterMono);
+        float audioBalance = 0;
+        android::AudioSystem::getMasterBalance(&audioBalance);
+        setAudioBalance(audioBalance);
+    }
+
     // For debugging and analyzing the distribution of MMAP timestamps.
     // For OUTPUT, use a NEGATIVE offset to move the CPU writes further BEFORE the HW reads.
     // For INPUT, use a POSITIVE offset to move the CPU reads further AFTER the HW writes.
@@ -290,14 +307,7 @@
     }
 
     setBufferSize(mBufferCapacityInFrames / 2); // Default buffer size to match Q
-
-    setState(AAUDIO_STREAM_STATE_OPEN);
-
-    return result;
-
-error:
-    safeReleaseClose();
-    return result;
+    return AAUDIO_OK;
 }
 
 // This must be called under mStreamLock.
@@ -338,13 +348,67 @@
 {
     AudioStreamInternal *stream = (AudioStreamInternal *)context;
     //LOGD("oboe_callback_thread, stream = %p", stream);
-    if (stream != NULL) {
+    if (stream != nullptr) {
         return stream->callbackLoop();
     } else {
-        return NULL;
+        return nullptr;
     }
 }
 
+aaudio_result_t AudioStreamInternal::exitStandby_l() {
+    AudioEndpointParcelable endpointParcelable;
+    // The stream is in standby mode, copy all available data and then close the duplicated
+    // shared file descriptor so that it won't cause issue when the HAL try to reallocate new
+    // shared file descriptor when exiting from standby.
+    // Cache current read counter, which will be reset to new read and write counter
+    // when the new data queue and endpoint are reconfigured.
+    const android::fifo_counter_t readCounter = mAudioEndpoint->getDataReadCounter();
+    // Cache the buffer size which may be from client.
+    const int32_t previousBufferSize = mBufferSizeInFrames;
+    // Copy all available data from current data queue.
+    uint8_t buffer[getBufferCapacity() * getBytesPerFrame()];
+    android::fifo_frames_t fullFramesAvailable =
+            mAudioEndpoint->read(buffer, getBufferCapacity());
+    mEndPointParcelable.closeDataFileDescriptor();
+    aaudio_result_t result = mServiceInterface.exitStandby(
+            mServiceStreamHandle, endpointParcelable);
+    if (result != AAUDIO_OK) {
+        ALOGE("Failed to exit standby, error=%d", result);
+        goto exit;
+    }
+    // Reconstruct data queue descriptor using new shared file descriptor.
+    mEndPointParcelable.updateDataFileDescriptor(&endpointParcelable);
+    result = mEndPointParcelable.resolveDataQueue(&mEndpointDescriptor.dataQueueDescriptor);
+    if (result != AAUDIO_OK) {
+        ALOGE("Failed to resolve data queue after exiting standby, error=%d", result);
+        goto exit;
+    }
+    // Reconfigure audio endpoint with new data queue descriptor.
+    mAudioEndpoint->configureDataQueue(
+            mEndpointDescriptor.dataQueueDescriptor, getDirection());
+    // Set the read and write counters to the previous read counter; the write below
+    // will advance the write counter to the correct position.
+    mAudioEndpoint->setDataReadCounter(readCounter);
+    mAudioEndpoint->setDataWriteCounter(readCounter);
+    result = configureDataInformation(mCallbackFrames);
+    if (result != AAUDIO_OK) {
+        ALOGE("Failed to configure data information after exiting standby, error=%d", result);
+        goto exit;
+    }
+    // Write data from previous data buffer to new endpoint.
+    if (android::fifo_frames_t framesWritten =
+                mAudioEndpoint->write(buffer, fullFramesAvailable);
+            framesWritten != fullFramesAvailable) {
+        ALOGW("Some data lost after exiting standby, frames written: %d, "
+              "frames to write: %d", framesWritten, fullFramesAvailable);
+    }
+    // Restore the previous buffer size as it may have been requested by the client.
+    setBufferSize(previousBufferSize);
+
+exit:
+    return result;
+}
+
 /*
  * It normally takes about 20-30 msec to start a stream on the server.
  * But the first time can take as much as 200-300 msec. The HW
@@ -381,8 +445,15 @@
     prepareBuffersForStart(); // tell subclasses to get ready
 
     aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);
-    if (result == AAUDIO_ERROR_INVALID_HANDLE) {
-        ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
+    if (result == AAUDIO_ERROR_STANDBY) {
+        // The stream is in standby mode. Exit standby before starting the stream.
+        result = exitStandby_l();
+        if (result == AAUDIO_OK) {
+            result = mServiceInterface.startStream(mServiceStreamHandle);
+        }
+    }
+    if (result != AAUDIO_OK) {
+        ALOGD("%s() error = %d, stream was probably stolen", __func__, result);
         // Stealing was added in R. Coerce result to improve backward compatibility.
         result = AAUDIO_ERROR_DISCONNECTED;
         setState(AAUDIO_STREAM_STATE_DISCONNECTED);
@@ -402,6 +473,7 @@
         result = createThread_l(periodNanos, aaudio_callback_thread_proc, this);
     }
     if (result != AAUDIO_OK) {
+        // TODO(b/214607638): Do we want to roll back to original state or keep as disconnected?
         setState(originalState);
     }
     return result;
@@ -430,7 +502,7 @@
     if (isDataCallbackSet()
             && (isActive() || getState() == AAUDIO_STREAM_STATE_DISCONNECTED)) {
         mCallbackEnabled.store(false);
-        aaudio_result_t result = joinThread_l(NULL); // may temporarily unlock mStreamLock
+        aaudio_result_t result = joinThread_l(nullptr); // may temporarily unlock mStreamLock
         if (result == AAUDIO_ERROR_INVALID_HANDLE) {
             ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
             result = AAUDIO_OK;
@@ -517,7 +589,7 @@
     return result;
 }
 
-aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t /*clockId*/,
                            int64_t *framePosition,
                            int64_t *timeNanoseconds) {
     // Generated in server and passed to client. Return latest.
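(Editor's note, illustrative only.) configureDataInformation() keeps doubling the hardware burst until its duration reaches a minimum number of microseconds, so tiny HW bursts at high sample rates do not wake the CPU too often. A standalone restatement of that loop; the 2000 us minimum is just an example, not necessarily what AudioSystem::getAAudioHardwareBurstMinUsec() returns:

#include <cstdint>
#include <cstdio>

// Illustrative version of the burst scaling in configureDataInformation().
static int32_t scaleBurst(int32_t hwBurstFrames, int32_t sampleRate, int32_t minMicros) {
    int32_t framesPerBurst = hwBurstFrames;
    int32_t burstMicros = 0;
    do {
        if (burstMicros > 0) {          // skip the first pass, as in the original
            framesPerBurst *= 2;
        }
        burstMicros = static_cast<int32_t>(
                (static_cast<int64_t>(framesPerBurst) * 1000000) / sampleRate);
    } while (burstMicros < minMicros);
    return framesPerBurst;
}

int main() {
    // Example: a 48-frame hardware burst at 48 kHz lasts only 1000 us,
    // so with a hypothetical 2000 us minimum it gets doubled once to 96.
    std::printf("scaled burst = %d frames\n", scaleBurst(48, 48000, 2000));
    return 0;
}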
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index fbe4c13..2367572 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -116,7 +116,7 @@
 
     virtual void prepareBuffersForStart() {}
 
-    virtual void advanceClientToMatchServerPosition(int32_t serverMargin = 0) = 0;
+    virtual void advanceClientToMatchServerPosition(int32_t serverMargin) = 0;
 
     virtual void onFlushFromServer() {}
 
@@ -184,9 +184,14 @@
     aaudio_result_t writeNowWithConversion(const void *buffer,
                                      int32_t numFrames);
 
+    // Exit the stream from standby; this will reconstruct the data path.
+    aaudio_result_t exitStandby_l() REQUIRES(mStreamLock);
+
     // Adjust timing model based on timestamp from service.
     void processTimestamp(uint64_t position, int64_t time);
 
+    aaudio_result_t configureDataInformation(int32_t callbackFrames);
+
     // Thread on other side of FIFO will have wakeup jitter.
     // By delaying slightly we can avoid waking up before other side is ready.
     const int32_t            mWakeupDelayNanos; // delay past typical wakeup jitter
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 2da5406..1efccb1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -46,8 +46,6 @@
 
 }
 
-AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
-
 void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
     int64_t readCounter = mAudioEndpoint->getDataReadCounter();
     int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
@@ -109,7 +107,7 @@
     if (mNeedCatchUp.isRequested()) {
         // Catch an MMAP pointer that is already advancing.
         // This will avoid initial underruns caused by a slow cold start.
-        advanceClientToMatchServerPosition();
+        advanceClientToMatchServerPosition(0 /*serverMargin*/);
         mNeedCatchUp.acknowledge();
     }
 
@@ -228,7 +226,7 @@
 void *AudioStreamInternalCapture::callbackLoop() {
     aaudio_result_t result = AAUDIO_OK;
     aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
-    if (!isDataCallbackSet()) return NULL;
+    if (!isDataCallbackSet()) return nullptr;
 
     // result might be a frame count
     while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
@@ -260,5 +258,5 @@
 
     ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
           result, (int) isActive());
-    return NULL;
+    return nullptr;
 }
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 251a7f2..87017de 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -28,8 +28,9 @@
 
 class AudioStreamInternalCapture : public AudioStreamInternal {
 public:
-    AudioStreamInternalCapture(AAudioServiceInterface  &serviceInterface, bool inService = false);
-    virtual ~AudioStreamInternalCapture();
+    explicit AudioStreamInternalCapture(AAudioServiceInterface  &serviceInterface,
+                                        bool inService = false);
+    virtual ~AudioStreamInternalCapture() = default;
 
     aaudio_result_t read(void *buffer,
                          int32_t numFrames,
@@ -45,7 +46,7 @@
     }
 protected:
 
-    void advanceClientToMatchServerPosition(int32_t serverOffset = 0) override;
+    void advanceClientToMatchServerPosition(int32_t serverOffset) override;
 
 /**
  * Low level data processing that will not block. It will just read or write as much as it can.
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 71bde90..450d390 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -44,8 +44,6 @@
 
 }
 
-AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
-
 constexpr int kRampMSec = 10; // time to apply a change in volume
 
 aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
@@ -54,7 +52,10 @@
         result = mFlowGraph.configure(getFormat(),
                              getSamplesPerFrame(),
                              getDeviceFormat(),
-                             getDeviceChannelCount());
+                             getDeviceChannelCount(),
+                             getRequireMonoBlend(),
+                             getAudioBalance(),
+                             (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE));
 
         if (result != AAUDIO_OK) {
             safeReleaseClose();
@@ -115,7 +116,7 @@
 }
 
 void AudioStreamInternalPlay::onFlushFromServer() {
-    advanceClientToMatchServerPosition();
+    advanceClientToMatchServerPosition(0 /*serverMargin*/);
 }
 
 // Write the data, block if needed and timeoutMillis > 0
@@ -281,7 +282,7 @@
     ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
     aaudio_result_t result = AAUDIO_OK;
     aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
-    if (!isDataCallbackSet()) return NULL;
+    if (!isDataCallbackSet()) return nullptr;
     int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
 
     // result might be a frame count
@@ -309,7 +310,7 @@
 
     ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
           __func__, result, (int) isActive());
-    return NULL;
+    return nullptr;
 }
 
 //------------------------------------------------------------------------------
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index 03c957d..e761807 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -30,8 +30,9 @@
 
 class AudioStreamInternalPlay : public AudioStreamInternal {
 public:
-    AudioStreamInternalPlay(AAudioServiceInterface  &serviceInterface, bool inService = false);
-    virtual ~AudioStreamInternalPlay();
+    explicit AudioStreamInternalPlay(AAudioServiceInterface  &serviceInterface,
+                                     bool inService = false);
+    virtual ~AudioStreamInternalPlay() = default;
 
     aaudio_result_t open(const AudioStreamBuilder &builder) override;
 
@@ -66,7 +67,7 @@
 
     void prepareBuffersForStart() override;
 
-    void advanceClientToMatchServerPosition(int32_t serverMargin = 0) override;
+    void advanceClientToMatchServerPosition(int32_t serverMargin) override;
 
     void onFlushFromServer() override;
 
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index f0dcd44..6921271 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -43,14 +43,7 @@
 // and dumped to the log when the stream is stopped.
 
 IsochronousClockModel::IsochronousClockModel()
-        : mMarkerFramePosition(0)
-        , mMarkerNanoTime(0)
-        , mSampleRate(48000)
-        , mFramesPerBurst(48)
-        , mBurstPeriodNanos(0) // this will be updated before use
-        , mMaxMeasuredLatenessNanos(0)
-        , mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
-        , mState(STATE_STOPPED)
+        : mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
 {
     if ((AAudioProperty_getLogMask() & AAUDIO_LOG_CLOCK_MODEL_HISTOGRAM) != 0) {
         mHistogramMicros = std::make_unique<Histogram>(kHistogramBinCount,
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 6280013..3007237 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -149,16 +149,16 @@
     static constexpr int32_t   kHistogramBinWidthMicros = 50;
     static constexpr int32_t   kHistogramBinCount = 128;
 
-    int64_t             mMarkerFramePosition; // Estimated HW position.
-    int64_t             mMarkerNanoTime;      // Estimated HW time.
-    int32_t             mSampleRate;
-    int32_t             mFramesPerBurst;      // number of frames transferred at one time.
-    int32_t             mBurstPeriodNanos;    // Time between HW bursts.
+    int64_t             mMarkerFramePosition{0}; // Estimated HW position.
+    int64_t             mMarkerNanoTime{0};      // Estimated HW time.
+    int32_t             mSampleRate{48000};
+    int32_t             mFramesPerBurst{48};     // number of frames transferred at one time.
+    int32_t             mBurstPeriodNanos{0};    // Time between HW bursts.
     // Includes mBurstPeriodNanos because we sample randomly over time.
-    int32_t             mMaxMeasuredLatenessNanos;
+    int32_t             mMaxMeasuredLatenessNanos{0};
     // Threshold for lateness that triggers a drift later in time.
     int32_t             mLatenessForDriftNanos;
-    clock_model_state_t mState;               // State machine handles startup sequence.
+    clock_model_state_t mState{STATE_STOPPED};   // State machine handles startup sequence.
 
     int32_t             mTimestampCount = 0;  // For logging.
 
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index f07e66e..90ff4a5 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -352,7 +352,8 @@
 {
 
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
+    android::sp<AudioStream> spAudioStream(audioStream);
+    return spAudioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
 }
 
 // ============================================================
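(Editor's note, illustrative only.) The change above wraps the raw AudioStream pointer in an sp<> so the stream keeps a strong reference for the whole blocking wait. A rough analogue using std::shared_ptr and a made-up Stream type, just to show why the strong reference matters during a long call:

#include <chrono>
#include <cstdio>
#include <memory>
#include <thread>

// Made-up stand-in for AudioStream.
struct Stream {
    void waitForStateChange() {
        // Stand-in for a potentially long blocking wait.
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        std::printf("state change observed\n");
    }
};

int main() {
    auto owner = std::make_shared<Stream>();
    // Analogous to sp<AudioStream> spAudioStream(audioStream): take a strong
    // reference before the blocking call.
    std::shared_ptr<Stream> keepAlive = owner;
    owner.reset();                    // even if the original owner lets go...
    keepAlive->waitForStateChange();  // ...the object outlives the wait
    return 0;
}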
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index dc242b8..8b7b75e 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -30,9 +30,6 @@
 // HDMI supports up to 32 channels at 1536000 Hz.
 #define SAMPLE_RATE_HZ_MAX           1600000
 
-AAudioStreamParameters::AAudioStreamParameters() {}
-AAudioStreamParameters::~AAudioStreamParameters() {}
-
 void AAudioStreamParameters::copyFrom(const AAudioStreamParameters &other) {
     mSamplesPerFrame      = other.mSamplesPerFrame;
     mSampleRate           = other.mSampleRate;
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index fed036b..cb998bf 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -26,8 +26,8 @@
 
 class AAudioStreamParameters {
 public:
-    AAudioStreamParameters();
-    virtual ~AAudioStreamParameters();
+    AAudioStreamParameters() = default;
+    virtual ~AAudioStreamParameters() = default;
 
     int32_t getDeviceId() const {
         return mDeviceId;
@@ -150,7 +150,7 @@
     }
 
     // TODO b/182392769: reexamine if Identity can be used
-    void setOpPackageName(const std::optional<std::string> opPackageName) {
+    void setOpPackageName(const std::optional<std::string>& opPackageName) {
         mOpPackageName = opPackageName;
     }
 
@@ -158,7 +158,7 @@
         return mAttributionTag;
     }
 
-    void setAttributionTag(const std::optional<std::string> attributionTag) {
+    void setAttributionTag(const std::optional<std::string>& attributionTag) {
         mAttributionTag = attributionTag;
     }
 
diff --git a/media/libaaudio/src/core/AudioGlobal.h b/media/libaaudio/src/core/AudioGlobal.h
index 1e88d15..6c22744 100644
--- a/media/libaaudio/src/core/AudioGlobal.h
+++ b/media/libaaudio/src/core/AudioGlobal.h
@@ -31,7 +31,8 @@
 const char* AudioGlobal_convertResultToText(aaudio_result_t returnCode);
 const char* AudioGlobal_convertSharingModeToText(aaudio_sharing_mode_t mode);
 const char* AudioGlobal_convertStreamStateToText(aaudio_stream_state_t state);
-}
+
+} // namespace aaudio
 
 #endif  // AAUDIO_AUDIOGLOBAL_H
 
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 73432af..06f05b0 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -602,6 +602,7 @@
 
 void AudioStream::setDuckAndMuteVolume(float duckAndMuteVolume) {
     ALOGD("%s() to %f", __func__, duckAndMuteVolume);
+    std::lock_guard<std::mutex> lock(mStreamLock);
     mDuckAndMuteVolume = duckAndMuteVolume;
     doSetVolume(); // apply this change
 }
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 7896e63..5fb4528 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -277,6 +277,14 @@
         return mIsPrivacySensitive;
     }
 
+    bool getRequireMonoBlend() const {
+        return mRequireMonoBlend;
+    }
+
+    float getAudioBalance() const {
+        return mAudioBalance;
+    }
+
     /**
      * This is only valid after setChannelMask() and setFormat()
      * have been called.
@@ -447,7 +455,7 @@
     // PlayerBase allows the system to control the stream volume.
     class MyPlayerBase : public android::PlayerBase {
     public:
-        MyPlayerBase() {};
+        MyPlayerBase() = default;
 
         virtual ~MyPlayerBase() = default;
 
@@ -576,7 +584,7 @@
      * @param numFrames
      * @return original pointer or the conversion buffer
      */
-    virtual const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+    virtual const void * maybeConvertDeviceData(const void *audioData, int32_t /*numFrames*/) {
         return audioData;
     }
 
@@ -631,6 +639,20 @@
         mIsPrivacySensitive = privacySensitive;
     }
 
+    /**
+     * This should not be called after the open() call.
+     */
+    void setRequireMonoBlend(bool requireMonoBlend) {
+        mRequireMonoBlend = requireMonoBlend;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setAudioBalance(float audioBalance) {
+        mAudioBalance = audioBalance;
+    }
+
     std::string mMetricsId; // set once during open()
 
     std::mutex                 mStreamLock;
@@ -672,6 +694,8 @@
     aaudio_input_preset_t       mInputPreset     = AAUDIO_UNSPECIFIED;
     aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
     bool                        mIsPrivacySensitive = false;
+    bool                        mRequireMonoBlend = false;
+    float                       mAudioBalance = 0;
 
     int32_t                     mSessionId = AAUDIO_UNSPECIFIED;
 
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 5e1e007..2be3d65 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -20,9 +20,14 @@
 
 #include <new>
 #include <stdint.h>
+#include <vector>
 
 #include <aaudio/AAudio.h>
 #include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <media/AudioSystem.h>
 
 #include "binding/AAudioBinderClient.h"
 #include "client/AudioStreamInternalCapture.h"
@@ -35,6 +40,10 @@
 
 using namespace aaudio;
 
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
 #define AAUDIO_MMAP_POLICY_DEFAULT             AAUDIO_POLICY_NEVER
 #define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT   AAUDIO_POLICY_NEVER
 
@@ -53,16 +62,10 @@
 /*
  * AudioStreamBuilder
  */
-AudioStreamBuilder::AudioStreamBuilder() {
-}
-
-AudioStreamBuilder::~AudioStreamBuilder() {
-}
-
 static aaudio_result_t builder_createStream(aaudio_direction_t direction,
-                                         aaudio_sharing_mode_t sharingMode,
-                                         bool tryMMap,
-                                         android::sp<AudioStream> &stream) {
+                                            aaudio_sharing_mode_t /*sharingMode*/,
+                                            bool tryMMap,
+                                            android::sp<AudioStream> &stream) {
     aaudio_result_t result = AAUDIO_OK;
 
     switch (direction) {
@@ -92,6 +95,37 @@
     return result;
 }
 
+namespace {
+
+aaudio_policy_t aidl2legacy_aaudio_policy(AudioMMapPolicy aidl) {
+    switch (aidl) {
+        case AudioMMapPolicy::NEVER:
+            return AAUDIO_POLICY_NEVER;
+        case AudioMMapPolicy::AUTO:
+            return AAUDIO_POLICY_AUTO;
+        case AudioMMapPolicy::ALWAYS:
+            return AAUDIO_POLICY_ALWAYS;
+        case AudioMMapPolicy::UNSPECIFIED:
+        default:
+            return AAUDIO_UNSPECIFIED;
+    }
+}
+
+// The aaudio policy will be ALWAYS, NEVER or UNSPECIFIED only when all policy infos
+// share that same policy. Otherwise, the aaudio policy will be AUTO.
+aaudio_policy_t getAAudioPolicy(
+        const std::vector<AudioMMapPolicyInfo>& policyInfos) {
+    if (policyInfos.empty()) return AAUDIO_POLICY_AUTO;
+    for (size_t i = 1; i < policyInfos.size(); ++i) {
+        if (policyInfos.at(i).mmapPolicy != policyInfos.at(0).mmapPolicy) {
+            return AAUDIO_POLICY_AUTO;
+        }
+    }
+    return aidl2legacy_aaudio_policy(policyInfos.at(0).mmapPolicy);
+}
+
+} // namespace
+
 // Try to open using MMAP path if that is allowed.
 // Fall back to Legacy path if MMAP not available.
 // Exact behavior is controlled by MMapPolicy.
@@ -110,25 +144,32 @@
         return result;
     }
 
+    std::vector<AudioMMapPolicyInfo> policyInfos;
     // The API setting is the highest priority.
     aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
     // If not specified then get from a system property.
-    if (mmapPolicy == AAUDIO_UNSPECIFIED) {
-        mmapPolicy = AAudioProperty_getMMapPolicy();
+    if (mmapPolicy == AAUDIO_UNSPECIFIED && android::AudioSystem::getMmapPolicyInfo(
+                AudioMMapPolicyType::DEFAULT, &policyInfos) == NO_ERROR) {
+        mmapPolicy = getAAudioPolicy(policyInfos);
     }
     // If still not specified then use the default.
     if (mmapPolicy == AAUDIO_UNSPECIFIED) {
         mmapPolicy = AAUDIO_MMAP_POLICY_DEFAULT;
     }
 
-    int32_t mapExclusivePolicy = AAudioProperty_getMMapExclusivePolicy();
-    if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
-        mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
+    policyInfos.clear();
+    aaudio_policy_t mmapExclusivePolicy = AAUDIO_UNSPECIFIED;
+    if (android::AudioSystem::getMmapPolicyInfo(
+            AudioMMapPolicyType::EXCLUSIVE, &policyInfos) == NO_ERROR) {
+        mmapExclusivePolicy = getAAudioPolicy(policyInfos);
+    }
+    if (mmapExclusivePolicy == AAUDIO_UNSPECIFIED) {
+        mmapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
     }
 
     aaudio_sharing_mode_t sharingMode = getSharingMode();
     if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
-        && (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
+        && (mmapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
         ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
         sharingMode = AAUDIO_SHARING_MODE_SHARED;
         setSharingMode(sharingMode);
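(Editor's note, illustrative only.) The helper getAAudioPolicy() above collapses the per-HAL policy list into one value: an empty list or any disagreement yields AUTO, while a unanimous answer is used as-is. A self-contained restatement with a stand-in enum; the generated AudioMMapPolicy type is not used here:

#include <cstdio>
#include <vector>

// Stand-in for the generated AudioMMapPolicy enum.
enum class Policy { UNSPECIFIED, NEVER, AUTO, ALWAYS };

static Policy aggregate(const std::vector<Policy>& infos) {
    if (infos.empty()) return Policy::AUTO;
    for (size_t i = 1; i < infos.size(); ++i) {
        if (infos[i] != infos[0]) return Policy::AUTO;  // mixed answers -> AUTO
    }
    return infos[0];                                    // unanimous answer wins
}

int main() {
    std::printf("%d\n", static_cast<int>(aggregate({Policy::ALWAYS, Policy::ALWAYS}))); // ALWAYS
    std::printf("%d\n", static_cast<int>(aggregate({Policy::ALWAYS, Policy::NEVER})));  // AUTO
    std::printf("%d\n", static_cast<int>(aggregate({})));                               // AUTO
    return 0;
}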
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 9f93341..f91c25a 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -31,9 +31,9 @@
  */
 class AudioStreamBuilder : public AAudioStreamParameters {
 public:
-    AudioStreamBuilder();
+    AudioStreamBuilder() = default;
 
-    ~AudioStreamBuilder();
+    ~AudioStreamBuilder() = default;
 
     bool isSharingModeMatchRequired() const {
         return mSharingModeMatchRequired;
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 37548f0..7b0aca1 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -38,7 +38,7 @@
 
 class FifoBuffer {
 public:
-    FifoBuffer(int32_t bytesPerFrame);
+    explicit FifoBuffer(int32_t bytesPerFrame);
 
     virtual ~FifoBuffer() = default;
 
@@ -162,6 +162,6 @@
     uint8_t *mExternalStorage = nullptr;
 };
 
-}  // android
+}  // namespace android
 
 #endif //FIFO_FIFO_BUFFER_H
diff --git a/media/libaaudio/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
index 057a94e..e15d444 100644
--- a/media/libaaudio/src/fifo/FifoController.h
+++ b/media/libaaudio/src/fifo/FifoController.h
@@ -36,7 +36,7 @@
     , mWriteCounter(0)
     {}
 
-    virtual ~FifoController() {}
+    virtual ~FifoController() = default;
 
     // TODO review use of memory barriers, probably incorrect
     virtual fifo_counter_t getReadCounter() override {
@@ -57,6 +57,6 @@
     std::atomic<fifo_counter_t> mWriteCounter;
 };
 
-}  // android
+}  // namespace android
 
 #endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
index 1dece0e..ad6d041 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.cpp
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -29,9 +29,6 @@
 {
 }
 
-FifoControllerBase::~FifoControllerBase() {
-}
-
 fifo_frames_t FifoControllerBase::getFullFramesAvailable() {
     fifo_frames_t temp = 0;
     __builtin_sub_overflow(getWriteCounter(), getReadCounter(), &temp);
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.h b/media/libaaudio/src/fifo/FifoControllerBase.h
index 1edb8a3..2a6173b 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.h
+++ b/media/libaaudio/src/fifo/FifoControllerBase.h
@@ -43,7 +43,7 @@
      */
     FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold);
 
-    virtual ~FifoControllerBase();
+    virtual ~FifoControllerBase() = default;
 
     // Abstract methods to be implemented in subclasses.
     /**
@@ -123,6 +123,6 @@
     fifo_frames_t mThreshold;
 };
 
-}  // android
+}  // namespace android
 
 #endif // FIFO_FIFO_CONTROLLER_BASE_H
diff --git a/media/libaaudio/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
index ec48e57..a59225a 100644
--- a/media/libaaudio/src/fifo/FifoControllerIndirect.h
+++ b/media/libaaudio/src/fifo/FifoControllerIndirect.h
@@ -44,7 +44,7 @@
         setReadCounter(0);
         setWriteCounter(0);
     }
-    virtual ~FifoControllerIndirect() {};
+    virtual ~FifoControllerIndirect() = default;
 
     // TODO review use of memory barriers, probably incorrect
     virtual fifo_counter_t getReadCounter() override {
@@ -68,6 +68,6 @@
     std::atomic<fifo_counter_t> * mWriteCounterAddress;
 };
 
-}  // android
+}  // namespace android
 
 #endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp b/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
deleted file mode 100644
index 5667fdb..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <sys/types.h>
-#include "AudioProcessorBase.h"
-
-using namespace flowgraph;
-
-/***************************************************************************/
-int32_t AudioProcessorBase::pullData(int64_t framePosition, int32_t numFrames) {
-    if (framePosition > mLastFramePosition) {
-        mLastFramePosition = framePosition;
-        mFramesValid = onProcess(framePosition, numFrames);
-    }
-    return mFramesValid;
-}
-
-/***************************************************************************/
-AudioFloatBlockPort::AudioFloatBlockPort(AudioProcessorBase &parent,
-                               int32_t samplesPerFrame,
-                               int32_t framesPerBlock)
-        : AudioPort(parent, samplesPerFrame)
-        , mFramesPerBlock(framesPerBlock)
-        , mSampleBlock(NULL) {
-    int32_t numFloats = framesPerBlock * getSamplesPerFrame();
-    mSampleBlock = new float[numFloats]{0.0f};
-}
-
-AudioFloatBlockPort::~AudioFloatBlockPort() {
-    delete[] mSampleBlock;
-}
-
-/***************************************************************************/
-int32_t AudioFloatOutputPort::pullData(int64_t framePosition, int32_t numFrames) {
-    numFrames = std::min(getFramesPerBlock(), numFrames);
-    return mParent.pullData(framePosition, numFrames);
-}
-
-// These need to be in the .cpp file because of forward cross references.
-void AudioFloatOutputPort::connect(AudioFloatInputPort *port) {
-    port->connect(this);
-}
-
-void AudioFloatOutputPort::disconnect(AudioFloatInputPort *port) {
-    port->disconnect(this);
-}
-
-/***************************************************************************/
-int32_t AudioFloatInputPort::pullData(int64_t framePosition, int32_t numFrames) {
-    return (mConnected == NULL)
-            ? std::min(getFramesPerBlock(), numFrames)
-            : mConnected->pullData(framePosition, numFrames);
-}
-
-float *AudioFloatInputPort::getBlock() {
-    if (mConnected == NULL) {
-        return AudioFloatBlockPort::getBlock(); // loaded using setValue()
-    } else {
-        return mConnected->getBlock();
-    }
-}
-
-/***************************************************************************/
-int32_t AudioSink::pull(int32_t numFrames) {
-    int32_t actualFrames = input.pullData(mFramePosition, numFrames);
-    mFramePosition += actualFrames;
-    return actualFrames;
-}
\ No newline at end of file
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.h b/media/libaaudio/src/flowgraph/AudioProcessorBase.h
deleted file mode 100644
index 972932f..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * AudioProcessorBase.h
- *
- * Audio processing node and ports that can be used in a simple data flow graph.
- */
-
-#ifndef FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-#define FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-
-#include <cassert>
-#include <cstring>
-#include <math.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-// TODO consider publishing all header files under "include/libaaudio/FlowGraph.h"
-
-namespace flowgraph {
-
-// Default block size that can be overridden when the AudioFloatBlockPort is created.
-// If it is too small then we will have too much overhead from switching between nodes.
-// If it is too high then we will thrash the caches.
-constexpr int kDefaultBlockSize = 8; // arbitrary
-
-class AudioFloatInputPort;
-
-/***************************************************************************/
-class AudioProcessorBase {
-public:
-    virtual ~AudioProcessorBase() = default;
-
-    /**
-     * Perform custom function.
-     *
-     * @param framePosition index of first frame to be processed
-     * @param numFrames maximum number of frames requested for processing
-     * @return number of frames actually processed
-     */
-    virtual int32_t onProcess(int64_t framePosition, int32_t numFrames) = 0;
-
-    /**
-     * If the framePosition is at or after the last frame position then call onProcess().
-     * This prevents infinite recursion in case of cyclic graphs.
-     * It also prevents nodes upstream from a branch from being executed twice.
-     *
-     * @param framePosition
-     * @param numFrames
-     * @return
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-protected:
-    int64_t  mLastFramePosition = -1; // Start at -1 so that the first pull works.
-
-private:
-    int32_t  mFramesValid = 0; // num valid frames in the block
-};
-
-/***************************************************************************/
-/**
-  * This is a connector that allows data to flow between modules.
-  */
-class AudioPort {
-public:
-    AudioPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : mParent(parent)
-            , mSamplesPerFrame(samplesPerFrame) {
-    }
-
-    // Ports are often declared public. So let's make them non-copyable.
-    AudioPort(const AudioPort&) = delete;
-    AudioPort& operator=(const AudioPort&) = delete;
-
-    int32_t getSamplesPerFrame() const {
-        return mSamplesPerFrame;
-    }
-
-protected:
-    AudioProcessorBase &mParent;
-
-private:
-    const int32_t    mSamplesPerFrame = 1;
-};
-
-/***************************************************************************/
-/**
- * This port contains a float type buffer.
- * The size is framesPerBlock * samplesPerFrame).
- */
-class AudioFloatBlockPort  : public AudioPort {
-public:
-    AudioFloatBlockPort(AudioProcessorBase &mParent,
-                   int32_t samplesPerFrame,
-                   int32_t framesPerBlock = kDefaultBlockSize
-                );
-
-    virtual ~AudioFloatBlockPort();
-
-    int32_t getFramesPerBlock() const {
-        return mFramesPerBlock;
-    }
-
-protected:
-
-    /**
-     * @return buffer internal to the port or from a connected port
-     */
-    virtual float *getBlock() {
-        return mSampleBlock;
-    }
-
-
-private:
-    const int32_t    mFramesPerBlock = 1;
-    float           *mSampleBlock = nullptr; // allocated in constructor
-};
-
-/***************************************************************************/
-/**
-  * The results of a module are stored in the buffer of the output ports.
-  */
-class AudioFloatOutputPort : public AudioFloatBlockPort {
-public:
-    AudioFloatOutputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : AudioFloatBlockPort(parent, samplesPerFrame) {
-    }
-
-    virtual ~AudioFloatOutputPort() = default;
-
-    using AudioFloatBlockPort::getBlock;
-
-    /**
-     * Call the parent module's onProcess() method.
-     * That may pull data from its inputs and recursively
-     * process the entire graph.
-     * @return number of frames actually pulled
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-    /**
-     * Connect to the input of another module.
-     * An input port can only have one connection.
-     * An output port can have multiple connections.
-     * If you connect a second output port to an input port
-     * then it overwrites the previous connection.
-     *
-     * This not thread safe. Do not modify the graph topology form another thread while running.
-     */
-    void connect(AudioFloatInputPort *port);
-
-    /**
-     * Disconnect from the input of another module.
-     * This not thread safe.
-     */
-    void disconnect(AudioFloatInputPort *port);
-};
-
-/***************************************************************************/
-class AudioFloatInputPort : public AudioFloatBlockPort {
-public:
-    AudioFloatInputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : AudioFloatBlockPort(parent, samplesPerFrame) {
-    }
-
-    virtual ~AudioFloatInputPort() = default;
-
-    /**
-     * If connected to an output port then this will return
-     * that output ports buffers.
-     * If not connected then it returns the input ports own buffer
-     * which can be loaded using setValue().
-     */
-    float *getBlock() override;
-
-    /**
-     * Pull data from any output port that is connected.
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-    /**
-     * Write every value of the float buffer.
-     * This value will be ignored if an output port is connected
-     * to this port.
-     */
-    void setValue(float value) {
-        int numFloats = kDefaultBlockSize * getSamplesPerFrame();
-        float *buffer = getBlock();
-        for (int i = 0; i < numFloats; i++) {
-            *buffer++ = value;
-        }
-    }
-
-    /**
-     * Connect to the output of another module.
-     * An input port can only have one connection.
-     * An output port can have multiple connections.
-     * This not thread safe.
-     */
-    void connect(AudioFloatOutputPort *port) {
-        assert(getSamplesPerFrame() == port->getSamplesPerFrame());
-        mConnected = port;
-    }
-
-    void disconnect(AudioFloatOutputPort *port) {
-        assert(mConnected == port);
-        (void) port;
-        mConnected = nullptr;
-    }
-
-    void disconnect() {
-        mConnected = nullptr;
-    }
-
-private:
-    AudioFloatOutputPort *mConnected = nullptr;
-};
-
-/***************************************************************************/
-class AudioSource : public AudioProcessorBase {
-public:
-    explicit AudioSource(int32_t channelCount)
-            : output(*this, channelCount) {
-    }
-
-    virtual ~AudioSource() = default;
-
-    AudioFloatOutputPort output;
-
-    void setData(const void *data, int32_t numFrames) {
-        mData = data;
-        mSizeInFrames = numFrames;
-        mFrameIndex = 0;
-    }
-
-protected:
-    const void *mData = nullptr;
-    int32_t     mSizeInFrames = 0; // number of frames in mData
-    int32_t     mFrameIndex = 0; // index of next frame to be processed
-};
-
-/***************************************************************************/
-class AudioSink : public AudioProcessorBase {
-public:
-    explicit AudioSink(int32_t channelCount)
-            : input(*this, channelCount) {
-    }
-
-    virtual ~AudioSink() = default;
-
-    AudioFloatInputPort input;
-
-    /**
-     * Do nothing. The work happens in the read() method.
-     *
-     * @param framePosition index of first frame to be processed
-     * @param numFrames
-     * @return number of frames actually processed
-     */
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override {
-        (void) framePosition;
-        (void) numFrames;
-        return 0;
-    };
-
-    virtual int32_t read(void *data, int32_t numFrames) = 0;
-
-protected:
-    int32_t pull(int32_t numFrames);
-
-private:
-    int64_t mFramePosition = 0;
-};
-
-} /* namespace flowgraph */
-
-#endif /* FLOWGRAPH_AUDIO_PROCESSOR_BASE_H */
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
new file mode 100644
index 0000000..351def2
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "ChannelCountConverter.h"
+
+using namespace flowgraph;
+
+ChannelCountConverter::ChannelCountConverter(
+        int32_t inputChannelCount,
+        int32_t outputChannelCount)
+        : input(*this, inputChannelCount)
+        , output(*this, outputChannelCount) {
+}
+
+ChannelCountConverter::~ChannelCountConverter() = default;
+
+int32_t ChannelCountConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+    int32_t inputChannelCount = input.getSamplesPerFrame();
+    int32_t outputChannelCount = output.getSamplesPerFrame();
+    for (int i = 0; i < numFrames; i++) {
+        int inputChannel = 0;
+        for (int outputChannel = 0; outputChannel < outputChannelCount; outputChannel++) {
+            // Copy input channels to output channels.
+            // Wrap if we run out of inputs.
+            // Discard if we run out of outputs.
+            outputBuffer[outputChannel] = inputBuffer[inputChannel];
+            inputChannel = (inputChannel == inputChannelCount - 1)
+                    ? 0 : inputChannel + 1;
+        }
+        inputBuffer += inputChannelCount;
+        outputBuffer += outputChannelCount;
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.h b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
new file mode 100644
index 0000000..e4b6f4e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+#define FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Change the number of channels without mixing.
+ * When increasing the channel count, duplicate input channels.
+ * When decreasing the channel count, drop input channels.
+ */
+    class ChannelCountConverter : public FlowGraphNode {
+    public:
+        explicit ChannelCountConverter(
+                int32_t inputChannelCount,
+                int32_t outputChannelCount);
+
+        virtual ~ChannelCountConverter();
+
+        int32_t onProcess(int32_t numFrames) override;
+
+        const char *getName() override {
+            return "ChannelCountConverter";
+        }
+
+        FlowGraphPortFloatInput input;
+        FlowGraphPortFloatOutput output;
+    };
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
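
Editor's note: a minimal usage sketch of the converter above (illustration only, not part of the patch). It relies only on the FlowGraphNode/port API introduced later in this change, and loads the unconnected input with setValue():

    using namespace flowgraph;

    ChannelCountConverter stereoToQuad(2 /*input channels*/, 4 /*output channels*/);
    stereoToQuad.input.setValue(0.25f);                   // unconnected input uses its own buffer
    stereoToQuad.pullData(kDefaultBufferSize, /*callCount*/ 0);
    const float *quad = stereoToQuad.output.getBuffer();  // 4 interleaved samples per output frame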
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.cpp b/media/libaaudio/src/flowgraph/ClipToRange.cpp
index bd9c22a..d2f8a02 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.cpp
+++ b/media/libaaudio/src/flowgraph/ClipToRange.cpp
@@ -16,25 +16,23 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "ClipToRange.h"
 
 using namespace flowgraph;
 
 ClipToRange::ClipToRange(int32_t channelCount)
-        : input(*this, channelCount)
-        , output(*this, channelCount) {
+        : FlowGraphFilter(channelCount) {
 }
 
-int32_t ClipToRange::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t ClipToRange::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
 
-    int32_t numSamples = framesToProcess * output.getSamplesPerFrame();
+    int32_t numSamples = numFrames * output.getSamplesPerFrame();
     for (int32_t i = 0; i < numSamples; i++) {
         *outputBuffer++ = std::min(mMaximum, std::max(mMinimum, *inputBuffer++));
     }
 
-    return framesToProcess;
+    return numFrames;
 }
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.h b/media/libaaudio/src/flowgraph/ClipToRange.h
index 9eef254..22b7804 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.h
+++ b/media/libaaudio/src/flowgraph/ClipToRange.h
@@ -21,7 +21,7 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
@@ -30,13 +30,13 @@
 constexpr float kDefaultMaxHeadroom = 1.41253754f;
 constexpr float kDefaultMinHeadroom = -kDefaultMaxHeadroom;
 
-class ClipToRange : public AudioProcessorBase {
+class ClipToRange : public FlowGraphFilter {
 public:
     explicit ClipToRange(int32_t channelCount);
 
     virtual ~ClipToRange() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
     void setMinimum(float min) {
         mMinimum = min;
@@ -54,8 +54,9 @@
         return mMaximum;
     }
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "ClipToRange";
+    }
 
 private:
     float mMinimum = kDefaultMinHeadroom;
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.cpp b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
new file mode 100644
index 0000000..4c76e77
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stdio.h"
+#include <algorithm>
+#include <sys/types.h>
+#include "FlowGraphNode.h"
+
+using namespace flowgraph;
+
+/***************************************************************************/
+int32_t FlowGraphNode::pullData(int32_t numFrames, int64_t callCount) {
+    int32_t frameCount = numFrames;
+    // Prevent recursion and multiple execution of nodes.
+    if (callCount > mLastCallCount) {
+        mLastCallCount = callCount;
+        if (mDataPulledAutomatically) {
+            // Pull from all the upstream nodes.
+            for (auto &port : mInputPorts) {
+                // TODO fix bug of leaving unused data in some ports if using multiple AudioSource
+                frameCount = port.get().pullData(callCount, frameCount);
+            }
+        }
+        if (frameCount > 0) {
+            frameCount = onProcess(frameCount);
+        }
+        mLastFrameCount = frameCount;
+    } else {
+        frameCount = mLastFrameCount;
+    }
+    return frameCount;
+}
+
+void FlowGraphNode::pullReset() {
+    if (!mBlockRecursion) {
+        mBlockRecursion = true; // for cyclic graphs
+        // Pull reset from all the upstream nodes.
+        for (auto &port : mInputPorts) {
+            port.get().pullReset();
+        }
+        mBlockRecursion = false;
+        reset();
+    }
+}
+
+void FlowGraphNode::reset() {
+    mLastFrameCount = 0;
+    mLastCallCount = kInitialCallCount;
+}
+
+/***************************************************************************/
+FlowGraphPortFloat::FlowGraphPortFloat(FlowGraphNode &parent,
+                               int32_t samplesPerFrame,
+                               int32_t framesPerBuffer)
+        : FlowGraphPort(parent, samplesPerFrame)
+        , mFramesPerBuffer(framesPerBuffer)
+        , mBuffer(nullptr) {
+    size_t numFloats = framesPerBuffer * getSamplesPerFrame();
+    mBuffer = std::make_unique<float[]>(numFloats);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatOutput::pullData(int64_t callCount, int32_t numFrames) {
+    numFrames = std::min(getFramesPerBuffer(), numFrames);
+    return mContainingNode.pullData(numFrames, callCount);
+}
+
+void FlowGraphPortFloatOutput::pullReset() {
+    mContainingNode.pullReset();
+}
+
+// These need to be in the .cpp file because of forward cross references.
+void FlowGraphPortFloatOutput::connect(FlowGraphPortFloatInput *port) {
+    port->connect(this);
+}
+
+void FlowGraphPortFloatOutput::disconnect(FlowGraphPortFloatInput *port) {
+    port->disconnect(this);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatInput::pullData(int64_t callCount, int32_t numFrames) {
+    return (mConnected == nullptr)
+            ? std::min(getFramesPerBuffer(), numFrames)
+            : mConnected->pullData(callCount, numFrames);
+}
+void FlowGraphPortFloatInput::pullReset() {
+    if (mConnected != nullptr) mConnected->pullReset();
+}
+
+float *FlowGraphPortFloatInput::getBuffer() {
+    if (mConnected == nullptr) {
+        return FlowGraphPortFloat::getBuffer(); // loaded using setValue()
+    } else {
+        return mConnected->getBuffer();
+    }
+}
+
+int32_t FlowGraphSink::pullData(int32_t numFrames) {
+    return FlowGraphNode::pullData(numFrames, getLastCallCount() + 1);
+}
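
Editor's note: the callCount argument is what prevents a node from running twice in a single graph pass. A quick illustration of the contract implemented above ("GainNode" stands in for any FlowGraphNode subclass and is not part of this patch):

    GainNode node(2);                                // hypothetical FlowGraphFilter subclass
    int32_t a = node.pullData(8, /*callCount*/ 5);   // runs onProcess()
    int32_t b = node.pullData(8, /*callCount*/ 5);   // same callCount: returns the cached frame count
    int32_t c = node.pullData(8, /*callCount*/ 6);   // larger callCount: runs onProcess() again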
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.h b/media/libaaudio/src/flowgraph/FlowGraphNode.h
new file mode 100644
index 0000000..69c83dd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * FlowGraphNode.h
+ *
+ * Processing node and ports that can be used in a simple data flow graph.
+ * This was designed to work with audio but could be used for other
+ * types of data.
+ */
+
+#ifndef FLOWGRAPH_FLOW_GRAPH_NODE_H
+#define FLOWGRAPH_FLOW_GRAPH_NODE_H
+
+#include <cassert>
+#include <cstring>
+#include <math.h>
+#include <memory>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <vector>
+
+// TODO Move these classes into separate files.
+// TODO Review use of raw pointers for connect(). Maybe use smart pointers but need to avoid
+//      run-time deallocation in audio thread.
+
+// Set this to 1 if using it inside the Android framework.
+// This code is kept here so that it can be moved easily between Oboe and AAudio.
+#ifndef FLOWGRAPH_ANDROID_INTERNAL
+#define FLOWGRAPH_ANDROID_INTERNAL 0
+#endif
+
+namespace flowgraph {
+
+// Default block size that can be overridden when the FlowGraphPortFloat is created.
+// If it is too small then we will have too much overhead from switching between nodes.
+// If it is too high then we will thrash the caches.
+constexpr int kDefaultBufferSize = 8; // arbitrary
+
+class FlowGraphPort;
+class FlowGraphPortFloatInput;
+
+/***************************************************************************/
+/**
+ * Base class for all nodes in the flowgraph.
+ */
+class FlowGraphNode {
+public:
+    FlowGraphNode() = default;
+    virtual ~FlowGraphNode() = default;
+
+    /**
+     * Read from the input ports,
+     * generate multiple frames of data then write the results to the output ports.
+     *
+     * @param numFrames maximum number of frames requested for processing
+     * @return number of frames actually processed
+     */
+    virtual int32_t onProcess(int32_t numFrames) = 0;
+
+    /**
+     * If the callCount is greater than the previous callCount then call
+     * pullData on all of the upstream nodes.
+     * Then call onProcess().
+     * This prevents infinite recursion in case of cyclic graphs.
+     * It also prevents nodes upstream from a branch from being executed twice.
+     *
+     * @param callCount
+     * @param numFrames
+     * @return number of valid frames
+     */
+    int32_t pullData(int32_t numFrames, int64_t callCount);
+
+    /**
+     * Recursively reset all the nodes in the graph, starting from a Sink.
+     *
+     * This must not be called at the same time as pullData!
+     */
+    void pullReset();
+
+    /**
+     * Reset framePosition counters.
+     */
+    virtual void reset();
+
+    void addInputPort(FlowGraphPort &port) {
+        mInputPorts.emplace_back(port);
+    }
+
+    bool isDataPulledAutomatically() const {
+        return mDataPulledAutomatically;
+    }
+
+    /**
+     * Set true if you want the data pulled through the graph automatically.
+     * This is the default.
+     *
+     * Set false if you want to pull the data from the input ports in the onProcess() method.
+     * You might do this, for example, in a sample rate converting node.
+     *
+     * @param automatic
+     */
+    void setDataPulledAutomatically(bool automatic) {
+        mDataPulledAutomatically = automatic;
+    }
+
+    virtual const char *getName() {
+        return "FlowGraph";
+    }
+
+    int64_t getLastCallCount() {
+        return mLastCallCount;
+    }
+
+protected:
+
+    static constexpr int64_t  kInitialCallCount = -1;
+    int64_t  mLastCallCount = kInitialCallCount;
+
+    std::vector<std::reference_wrapper<FlowGraphPort>> mInputPorts;
+
+private:
+    bool     mDataPulledAutomatically = true;
+    bool     mBlockRecursion = false;
+    int32_t  mLastFrameCount = 0;
+
+};
+
+/***************************************************************************/
+/**
+  * This is a connector that allows data to flow between modules.
+  *
+  * The ports are the primary means of interacting with a module.
+  * So they are generally declared as public.
+  *
+  */
+class FlowGraphPort {
+public:
+    FlowGraphPort(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : mContainingNode(parent)
+            , mSamplesPerFrame(samplesPerFrame) {
+    }
+
+    virtual ~FlowGraphPort() = default;
+
+    // Ports are often declared public. So let's make them non-copyable.
+    FlowGraphPort(const FlowGraphPort&) = delete;
+    FlowGraphPort& operator=(const FlowGraphPort&) = delete;
+
+    int32_t getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    virtual int32_t pullData(int64_t callCount, int32_t numFrames) = 0;
+
+    virtual void pullReset() {}
+
+protected:
+    FlowGraphNode &mContainingNode;
+
+private:
+    const int32_t    mSamplesPerFrame = 1;
+};
+
+/***************************************************************************/
+/**
+ * This port contains a 32-bit float buffer that can contain several frames of data.
+ * Processing the data in a block improves performance.
+ *
+ * The buffer size is framesPerBuffer * samplesPerFrame.
+ */
+class FlowGraphPortFloat  : public FlowGraphPort {
+public:
+    FlowGraphPortFloat(FlowGraphNode &parent,
+                   int32_t samplesPerFrame,
+                   int32_t framesPerBuffer = kDefaultBufferSize
+                );
+
+    virtual ~FlowGraphPortFloat() = default;
+
+    int32_t getFramesPerBuffer() const {
+        return mFramesPerBuffer;
+    }
+
+protected:
+
+    /**
+     * @return buffer internal to the port or from a connected port
+     */
+    virtual float *getBuffer() {
+        return mBuffer.get();
+    }
+
+private:
+    const int32_t    mFramesPerBuffer = 1;
+    std::unique_ptr<float[]> mBuffer; // allocated in constructor
+};
+
+/***************************************************************************/
+/**
+  * The results of a node's processing are stored in the buffers of the output ports.
+  */
+class FlowGraphPortFloatOutput : public FlowGraphPortFloat {
+public:
+    FlowGraphPortFloatOutput(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : FlowGraphPortFloat(parent, samplesPerFrame) {
+    }
+
+    virtual ~FlowGraphPortFloatOutput() = default;
+
+    using FlowGraphPortFloat::getBuffer;
+
+    /**
+     * Connect to the input of another module.
+     * An input port can only have one connection.
+     * An output port can have multiple connections.
+     * If you connect a second output port to an input port
+     * then it overwrites the previous connection.
+     *
+     * This is not thread safe. Do not modify the graph topology from another thread while running.
+     * Also do not delete a module while it is connected to another port if the graph is running.
+     */
+    void connect(FlowGraphPortFloatInput *port);
+
+    /**
+     * Disconnect from the input of another module.
+     * This not thread safe.
+     */
+    void disconnect(FlowGraphPortFloatInput *port);
+
+    /**
+     * Call the parent module's onProcess() method.
+     * That may pull data from its inputs and recursively
+     * process the entire graph.
+     * @return number of frames actually pulled
+     */
+    int32_t pullData(int64_t callCount, int32_t numFrames) override;
+
+
+    void pullReset() override;
+
+};
+
+/***************************************************************************/
+
+/**
+ * An input port for streaming audio data.
+ * You can set a value that will be used for processing.
+ * If you connect an output port to this port then its value will be used instead.
+ */
+class FlowGraphPortFloatInput : public FlowGraphPortFloat {
+public:
+    FlowGraphPortFloatInput(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : FlowGraphPortFloat(parent, samplesPerFrame) {
+        // Add to parent so it can pull data from each input.
+        parent.addInputPort(*this);
+    }
+
+    virtual ~FlowGraphPortFloatInput() = default;
+
+    /**
+     * If connected to an output port then this will return
+     * that output port's buffer.
+     * If not connected then it returns the input port's own buffer,
+     * which can be loaded using setValue().
+     */
+    float *getBuffer() override;
+
+    /**
+     * Write every value of the float buffer.
+     * This value will be ignored if an output port is connected
+     * to this port.
+     */
+    void setValue(float value) {
+        int numFloats = getFramesPerBuffer() * getSamplesPerFrame();
+        float *buffer = getBuffer();
+        for (int i = 0; i < numFloats; i++) {
+            *buffer++ = value;
+        }
+    }
+
+    /**
+     * Connect to the output of another module.
+     * An input port can only have one connection.
+     * An output port can have multiple connections.
+     * This is not thread safe.
+     */
+    void connect(FlowGraphPortFloatOutput *port) {
+        assert(getSamplesPerFrame() == port->getSamplesPerFrame());
+        mConnected = port;
+    }
+
+    void disconnect(FlowGraphPortFloatOutput *port) {
+        assert(mConnected == port);
+        (void) port;
+        mConnected = nullptr;
+    }
+
+    void disconnect() {
+        mConnected = nullptr;
+    }
+
+    /**
+     * Pull data from any output port that is connected.
+     */
+    int32_t pullData(int64_t callCount, int32_t numFrames) override;
+
+    void pullReset() override;
+
+private:
+    FlowGraphPortFloatOutput *mConnected = nullptr;
+};
+
+/***************************************************************************/
+
+/**
+ * Base class for an edge node in a graph that has no upstream nodes.
+ * It outputs data but does not consume data.
+ * Subclasses provide the output data, e.g. FlowGraphSourceBuffered reads it from an external buffer.
+ */
+class FlowGraphSource : public FlowGraphNode {
+public:
+    explicit FlowGraphSource(int32_t channelCount)
+            : output(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphSource() = default;
+
+    FlowGraphPortFloatOutput output;
+};
+
+/***************************************************************************/
+
+/**
+ * A source with no upstream nodes that outputs data
+ * but does not consume any.
+ * It reads its data from an external buffer provided via setData().
+ */
+class FlowGraphSourceBuffered : public FlowGraphSource {
+public:
+    explicit FlowGraphSourceBuffered(int32_t channelCount)
+            : FlowGraphSource(channelCount) {}
+
+    virtual ~FlowGraphSourceBuffered() = default;
+
+    /**
+     * Specify buffer that the node will read from.
+     *
+     * @param data TODO Consider using std::shared_ptr.
+     * @param numFrames
+     */
+    void setData(const void *data, int32_t numFrames) {
+        mData = data;
+        mSizeInFrames = numFrames;
+        mFrameIndex = 0;
+    }
+
+protected:
+    const void *mData = nullptr;
+    int32_t     mSizeInFrames = 0; // number of frames in mData
+    int32_t     mFrameIndex = 0; // index of next frame to be processed
+};
+
+/***************************************************************************/
+/**
+ * Base class for an edge node in a graph that has no downstream nodes.
+ * It consumes data but does not output data.
+ * The graph is executed when data is read() from this node,
+ * which pulls data from the upstream nodes.
+ */
+class FlowGraphSink : public FlowGraphNode {
+public:
+    explicit FlowGraphSink(int32_t channelCount)
+            : input(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphSink() = default;
+
+    FlowGraphPortFloatInput input;
+
+    /**
+     * Do nothing. The work happens in the read() method.
+     *
+     * @param numFrames
+     * @return number of frames actually processed
+     */
+    int32_t onProcess(int32_t numFrames) override {
+        return numFrames;
+    }
+
+    virtual int32_t read(void *data, int32_t numFrames) = 0;
+
+protected:
+    /**
+     * Pull data through the graph using a callCount one greater than this node's last callCount.
+     * @param numFrames
+     * @return
+     */
+    int32_t pullData(int32_t numFrames);
+};
+
+/***************************************************************************/
+/**
+ * Base class for a node that has an input and an output with the same number of channels.
+ * This includes traditional filters, e.g. FIR, but also
+ * any processing node that converts input to output.
+ */
+class FlowGraphFilter : public FlowGraphNode {
+public:
+    explicit FlowGraphFilter(int32_t channelCount)
+            : input(*this, channelCount)
+            , output(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphFilter() = default;
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
+};
+
+} /* namespace flowgraph */
+
+#endif /* FLOWGRAPH_FLOW_GRAPH_NODE_H */
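
Editor's note: to make the migration pattern concrete, here is a small sketch of a node written against the new API (a constant-gain filter; "GainFilter" and "mGain" are illustrative names, not part of the patch). Every converted node in this change follows the same shape: derive from FlowGraphFilter, implement onProcess(numFrames), and read/write the port buffers directly instead of calling pullData() and getBlock() as the old AudioProcessorBase nodes did:

    #include "FlowGraphNode.h"

    using namespace flowgraph;

    class GainFilter : public FlowGraphFilter {
    public:
        GainFilter(int32_t channelCount, float gain)
                : FlowGraphFilter(channelCount), mGain(gain) {}

        int32_t onProcess(int32_t numFrames) override {
            const float *in = input.getBuffer();   // already filled by the framework
            float *out = output.getBuffer();
            const int32_t numSamples = numFrames * output.getSamplesPerFrame();
            for (int32_t i = 0; i < numSamples; i++) {
                *out++ = *in++ * mGain;
            }
            return numFrames;
        }

        const char *getName() override { return "GainFilter"; }

    private:
        const float mGain;
    };

Wiring is the same as with the old port API, e.g. someOutput.connect(&gainFilter.input);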
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
new file mode 100644
index 0000000..879685e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "ManyToMultiConverter.h"
+
+using namespace flowgraph;
+
+ManyToMultiConverter::ManyToMultiConverter(int32_t channelCount)
+        : inputs(channelCount)
+        , output(*this, channelCount) {
+    for (int i = 0; i < channelCount; i++) {
+        inputs[i] = std::make_unique<FlowGraphPortFloatInput>(*this, 1);
+    }
+}
+
+int32_t ManyToMultiConverter::onProcess(int32_t numFrames) {
+    int32_t channelCount = output.getSamplesPerFrame();
+
+    for (int ch = 0; ch < channelCount; ch++) {
+        const float *inputBuffer = inputs[ch]->getBuffer();
+        float *outputBuffer = output.getBuffer() + ch;
+
+        for (int i = 0; i < numFrames; i++) {
+            // read one, write into the proper interleaved output channel
+            float sample = *inputBuffer++;
+            *outputBuffer = sample;
+            outputBuffer += channelCount; // advance to next multichannel frame
+        }
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.h b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
new file mode 100644
index 0000000..c7460ff
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+#define FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Combine multiple mono inputs into one interleaved multi-channel output.
+ */
+class ManyToMultiConverter : public flowgraph::FlowGraphNode {
+public:
+    explicit ManyToMultiConverter(int32_t channelCount);
+
+    virtual ~ManyToMultiConverter() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    void setEnabled(bool /*enabled*/) {}
+
+    std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatInput>> inputs;
+    flowgraph::FlowGraphPortFloatOutput output;
+
+    const char *getName() override {
+        return "ManyToMultiConverter";
+    }
+
+private:
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
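
Editor's note: a brief usage sketch (illustration only, not part of the patch): interleaving two mono signals into one stereo stream by loading the unconnected inputs with setValue():

    using namespace flowgraph;

    ManyToMultiConverter interleaver(2);
    interleaver.inputs[0]->setValue(-1.0f);                 // left channel, constant for the example
    interleaver.inputs[1]->setValue(+1.0f);                 // right channel
    interleaver.pullData(kDefaultBufferSize, /*callCount*/ 0);
    const float *stereo = interleaver.output.getBuffer();   // interleaved as L,R,L,R,...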
diff --git a/media/libaaudio/src/flowgraph/MonoBlend.cpp b/media/libaaudio/src/flowgraph/MonoBlend.cpp
new file mode 100644
index 0000000..62e2809
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MonoBlend.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "MonoBlend.h"
+
+using namespace flowgraph;
+
+MonoBlend::MonoBlend(int32_t channelCount)
+        : FlowGraphFilter(channelCount)
+        , mInvChannelCount(1. / channelCount)
+{
+}
+
+int32_t MonoBlend::onProcess(int32_t numFrames) {
+    int32_t channelCount = output.getSamplesPerFrame();
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+
+    for (int32_t i = 0; i < numFrames; ++i) {
+        float accum = 0;
+        for (int32_t j = 0; j < channelCount; ++j) {
+            accum += *inputBuffer++;
+        }
+        accum *= mInvChannelCount;
+        for (int32_t j = 0; j < channelCount; ++j) {
+            *outputBuffer++ = accum;
+        }
+    }
+
+    return numFrames;
+}
\ No newline at end of file
diff --git a/media/libaaudio/src/flowgraph/MonoBlend.h b/media/libaaudio/src/flowgraph/MonoBlend.h
new file mode 100644
index 0000000..7e3c35b
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MonoBlend.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MONO_BLEND_H
+#define FLOWGRAPH_MONO_BLEND_H
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Combine data between multiple channels so each channel is an average
+ * of all channels.
+ */
+class MonoBlend : public FlowGraphFilter {
+public:
+    explicit MonoBlend(int32_t channelCount);
+
+    virtual ~MonoBlend() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "MonoBlend";
+    }
+private:
+    const float mInvChannelCount;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MONO_BLEND_H
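
Editor's note: a worked example of the blend computed by MonoBlend::onProcess() above, for channelCount == 2:

    // input frame  : { 1.0f, 0.0f }
    // accum        : (1.0f + 0.0f) * mInvChannelCount = 1.0f * 0.5f = 0.5f
    // output frame : { 0.5f, 0.5f }   // the same average is written to every channel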
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
index 78aad52..c8d60b9 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
@@ -14,34 +14,28 @@
  * limitations under the License.
  */
 
-
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "MonoToMultiConverter.h"
 
 using namespace flowgraph;
 
-MonoToMultiConverter::MonoToMultiConverter(int32_t channelCount)
+MonoToMultiConverter::MonoToMultiConverter(int32_t outputChannelCount)
         : input(*this, 1)
-        , output(*this, channelCount) {
+        , output(*this, outputChannelCount) {
 }
 
-MonoToMultiConverter::~MonoToMultiConverter() { }
-
-int32_t MonoToMultiConverter::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t MonoToMultiConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
-    // TODO maybe move to audio_util as audio_mono_to_multi()
-    for (int i = 0; i < framesToProcess; i++) {
+    for (int i = 0; i < numFrames; i++) {
         // read one, write many
         float sample = *inputBuffer++;
         for (int channel = 0; channel < channelCount; channel++) {
             *outputBuffer++ = sample;
         }
     }
-    return framesToProcess;
+    return numFrames;
 }
 
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
index 34d53c7..6e87ccb 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
@@ -14,27 +14,34 @@
  * limitations under the License.
  */
 
-
 #ifndef FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
 #define FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
 
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class MonoToMultiConverter : public AudioProcessorBase {
+/**
+ * Convert a monophonic stream to a multi-channel interleaved stream
+ * with the same signal on each channel.
+ */
+class MonoToMultiConverter : public FlowGraphNode {
 public:
-    explicit MonoToMultiConverter(int32_t channelCount);
+    explicit MonoToMultiConverter(int32_t outputChannelCount);
 
-    virtual ~MonoToMultiConverter();
+    virtual ~MonoToMultiConverter() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "MonoToMultiConverter";
+    }
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp b/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp
new file mode 100644
index 0000000..f074364
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "MultiToManyConverter.h"
+
+using namespace flowgraph;
+
+MultiToManyConverter::MultiToManyConverter(int32_t channelCount)
+        : outputs(channelCount)
+        , input(*this, channelCount) {
+    for (int i = 0; i < channelCount; i++) {
+        outputs[i] = std::make_unique<FlowGraphPortFloatOutput>(*this, 1);
+    }
+}
+
+MultiToManyConverter::~MultiToManyConverter() = default;
+
+int32_t MultiToManyConverter::onProcess(int32_t numFrames) {
+    int32_t channelCount = input.getSamplesPerFrame();
+
+    for (int ch = 0; ch < channelCount; ch++) {
+        const float *inputBuffer = input.getBuffer() + ch;
+        float *outputBuffer = outputs[ch]->getBuffer();
+
+        for (int i = 0; i < numFrames; i++) {
+            *outputBuffer++ = *inputBuffer;
+            inputBuffer += channelCount;
+        }
+    }
+
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/MultiToManyConverter.h b/media/libaaudio/src/flowgraph/MultiToManyConverter.h
new file mode 100644
index 0000000..de31475
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToManyConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
+#define FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Convert a multi-channel interleaved stream to multiple mono-channel
+ * outputs.
+ */
+    class MultiToManyConverter : public FlowGraphNode {
+    public:
+        explicit MultiToManyConverter(int32_t channelCount);
+
+        virtual ~MultiToManyConverter();
+
+        int32_t onProcess(int32_t numFrames) override;
+
+        const char *getName() override {
+            return "MultiToManyConverter";
+        }
+
+        std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatOutput>> outputs;
+        flowgraph::FlowGraphPortFloatInput input;
+    };
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
new file mode 100644
index 0000000..c745108
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "MultiToMonoConverter.h"
+
+using namespace flowgraph;
+
+MultiToMonoConverter::MultiToMonoConverter(int32_t inputChannelCount)
+        : input(*this, inputChannelCount)
+        , output(*this, 1) {
+}
+
+MultiToMonoConverter::~MultiToMonoConverter() = default;
+
+int32_t MultiToMonoConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+    int32_t channelCount = input.getSamplesPerFrame();
+    for (int i = 0; i < numFrames; i++) {
+        // read the first channel of each interleaved input frame, write one mono sample
+        *outputBuffer++ = *inputBuffer;
+        inputBuffer += channelCount;
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.h b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
new file mode 100644
index 0000000..37c53bd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+#define FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Convert a multi-channel interleaved stream to a monophonic stream
+ * by extracting channel[0].
+ */
+    class MultiToMonoConverter : public FlowGraphNode {
+    public:
+        explicit MultiToMonoConverter(int32_t inputChannelCount);
+
+        virtual ~MultiToMonoConverter();
+
+        int32_t onProcess(int32_t numFrames) override;
+
+        const char *getName() override {
+            return "MultiToMonoConverter";
+        }
+
+        FlowGraphPortFloatInput input;
+        FlowGraphPortFloatOutput output;
+    };
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/RampLinear.cpp b/media/libaaudio/src/flowgraph/RampLinear.cpp
index a260828..905ae07 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.cpp
+++ b/media/libaaudio/src/flowgraph/RampLinear.cpp
@@ -14,20 +14,15 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "RampLinear"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "RampLinear.h"
 
 using namespace flowgraph;
 
 RampLinear::RampLinear(int32_t channelCount)
-        : input(*this, channelCount)
-        , output(*this, channelCount) {
+        : FlowGraphFilter(channelCount) {
     mTarget.store(1.0f);
 }
 
@@ -37,16 +32,19 @@
 
 void RampLinear::setTarget(float target) {
     mTarget.store(target);
+    // If the ramp has not been used then start immediately at this level.
+    if (mLastCallCount == kInitialCallCount) {
+        forceCurrent(target);
+    }
 }
 
 float RampLinear::interpolateCurrent() {
     return mLevelTo - (mRemaining * mScaler);
 }
 
-int32_t RampLinear::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t RampLinear::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     float target = getTarget();
@@ -55,12 +53,10 @@
         mLevelFrom = interpolateCurrent();
         mLevelTo = target;
         mRemaining = mLengthInFrames;
-        ALOGV("%s() mLevelFrom = %f, mLevelTo = %f, mRemaining = %d, mScaler = %f",
-              __func__, mLevelFrom, mLevelTo, mRemaining, mScaler);
         mScaler = (mLevelTo - mLevelFrom) / mLengthInFrames; // for interpolation
     }
 
-    int32_t framesLeft = framesToProcess;
+    int32_t framesLeft = numFrames;
 
     if (mRemaining > 0) { // Ramping? This doesn't happen very often.
         int32_t framesToRamp = std::min(framesLeft, mRemaining);
@@ -81,5 +77,5 @@
         *outputBuffer++ = *inputBuffer++ * mLevelTo;
     }
 
-    return framesToProcess;
+    return numFrames;
 }
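
Editor's note: a usage sketch of the setTarget() behavior added above (illustration only, not part of the patch):

    RampLinear ramp(2);
    ramp.setTarget(0.0f);   // node has never processed (mLastCallCount == kInitialCallCount): jumps straight to 0
    // ... the graph starts running ...
    ramp.setTarget(1.0f);   // ramps linearly from the current level to 1.0 over mLengthInFrames frames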
diff --git a/media/libaaudio/src/flowgraph/RampLinear.h b/media/libaaudio/src/flowgraph/RampLinear.h
index bdc8f41..f285704 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.h
+++ b/media/libaaudio/src/flowgraph/RampLinear.h
@@ -21,17 +21,25 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class RampLinear : public AudioProcessorBase {
+/**
+ * When the target is modified then the output will ramp smoothly
+ * between the original and the new target value.
+ * This can be used to smooth out control values and reduce pops.
+ *
+ * The target may be updated while a ramp is in progress, which will trigger
+ * a new ramp from the current value.
+ */
+class RampLinear : public FlowGraphFilter {
 public:
     explicit RampLinear(int32_t channelCount);
 
     virtual ~RampLinear() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
     /**
      * This is used for the next ramp.
@@ -66,8 +74,9 @@
         mLevelTo = level;
     }
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "RampLinear";
+    }
 
 private:
 
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.cpp b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
new file mode 100644
index 0000000..5c3ed1f
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SampleRateConverter.h"
+
+using namespace flowgraph;
+using namespace resampler;
+
+SampleRateConverter::SampleRateConverter(int32_t channelCount, MultiChannelResampler &resampler)
+        : FlowGraphFilter(channelCount)
+        , mResampler(resampler) {
+    setDataPulledAutomatically(false);
+}
+
+void SampleRateConverter::reset() {
+    FlowGraphNode::reset();
+    mInputCursor = kInitialCallCount;
+}
+
+// Return true if there is a sample available.
+bool SampleRateConverter::isInputAvailable() {
+    // If we have consumed all of the input data then go out and get some more.
+    if (mInputCursor >= mNumValidInputFrames) {
+        mInputCallCount++;
+        mNumValidInputFrames = input.pullData(mInputCallCount, input.getFramesPerBuffer());
+        mInputCursor = 0;
+    }
+    return (mInputCursor < mNumValidInputFrames);
+}
+
+const float *SampleRateConverter::getNextInputFrame() {
+    const float *inputBuffer = input.getBuffer();
+    return &inputBuffer[mInputCursor++ * input.getSamplesPerFrame()];
+}
+
+int32_t SampleRateConverter::onProcess(int32_t numFrames) {
+    float *outputBuffer = output.getBuffer();
+    int32_t channelCount = output.getSamplesPerFrame();
+    int framesLeft = numFrames;
+    while (framesLeft > 0) {
+        // Gather input samples as needed.
+        if (mResampler.isWriteNeeded()) {
+            if (isInputAvailable()) {
+                const float *frame = getNextInputFrame();
+                mResampler.writeNextFrame(frame);
+            } else {
+                break;
+            }
+        } else {
+            // Output frame is interpolated from input samples.
+            mResampler.readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            framesLeft--;
+        }
+    }
+    return numFrames - framesLeft;
+}
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.h b/media/libaaudio/src/flowgraph/SampleRateConverter.h
new file mode 100644
index 0000000..57d76a4
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SAMPLE_RATE_CONVERTER_H
+#define OBOE_SAMPLE_RATE_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+#include "resampler/MultiChannelResampler.h"
+
+namespace flowgraph {
+
+class SampleRateConverter : public FlowGraphFilter {
+public:
+    explicit SampleRateConverter(int32_t channelCount,
+                                 resampler::MultiChannelResampler &mResampler);
+
+    virtual ~SampleRateConverter() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SampleRateConverter";
+    }
+
+    void reset() override;
+
+private:
+
+    // Return true if there is a sample available.
+    bool isInputAvailable();
+
+    // This assumes data is available. Only call after calling isInputAvailable().
+    const float *getNextInputFrame();
+
+    resampler::MultiChannelResampler &mResampler;
+
+    int32_t mInputCursor = 0;         // offset into the input port buffer
+    int32_t mNumValidInputFrames = 0; // number of valid frames currently in the input port buffer
+    // We need our own callCount for upstream calls because calls occur at a different rate.
+    // This means we cannot have cyclic graphs or merges that contain an SRC.
+    int64_t mInputCallCount = 0;
+
+};
+
+} /* namespace flowgraph */
+
+#endif //OBOE_SAMPLE_RATE_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.cpp b/media/libaaudio/src/flowgraph/SinkFloat.cpp
index fb3dcbc..0588848 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SinkFloat.cpp
@@ -16,31 +16,31 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SinkFloat.h"
 
 using namespace flowgraph;
 
 SinkFloat::SinkFloat(int32_t channelCount)
-        : AudioSink(channelCount) {
+        : FlowGraphSink(channelCount) {
 }
 
 int32_t SinkFloat::read(void *data, int32_t numFrames) {
     float *floatData = (float *) data;
-    int32_t channelCount = input.getSamplesPerFrame();
+    const int32_t channelCount = input.getSamplesPerFrame();
 
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
-        if (framesRead <= 0) {
+        int32_t framesPulled = pullData(framesLeft);
+        if (framesPulled <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
-        int32_t numSamples = framesRead * channelCount;
+        const float *signal = input.getBuffer();
+        int32_t numSamples = framesPulled * channelCount;
         memcpy(floatData, signal, numSamples * sizeof(float));
         floatData += numSamples;
-        framesLeft -= framesRead;
+        framesLeft -= framesPulled;
     }
     return numFrames - framesLeft;
 }
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.h b/media/libaaudio/src/flowgraph/SinkFloat.h
index 7775c08..c812373 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.h
+++ b/media/libaaudio/src/flowgraph/SinkFloat.h
@@ -21,16 +21,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkFloat : public AudioSink {
+/**
+ * AudioSink that lets you read data as 32-bit floats.
+ */
+class SinkFloat : public FlowGraphSink {
 public:
     explicit SinkFloat(int32_t channelCount);
+    ~SinkFloat() override = default;
 
     int32_t read(void *data, int32_t numFrames) override;
 
+    const char *getName() override {
+        return "SinkFloat";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI16.cpp b/media/libaaudio/src/flowgraph/SinkI16.cpp
index ffec8f5..da7fd6b 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI16.cpp
@@ -17,17 +17,16 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#include "SinkI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SinkI16.h"
-
 using namespace flowgraph;
 
 SinkI16::SinkI16(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI16::read(void *data, int32_t numFrames) {
     int16_t *shortData = (int16_t *) data;
@@ -36,13 +35,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
+        const float *signal = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_i16_from_float(shortData, signal, numSamples);
         shortData += numSamples;
         signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI16.h b/media/libaaudio/src/flowgraph/SinkI16.h
index 6d86266..1e1ce3a 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.h
+++ b/media/libaaudio/src/flowgraph/SinkI16.h
@@ -20,15 +20,22 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI16 : public AudioSink {
+/**
+ * AudioSink that lets you read data as 16-bit signed integers.
+ */
+class SinkI16 : public FlowGraphSink {
 public:
     explicit SinkI16(int32_t channelCount);
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI16";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI24.cpp b/media/libaaudio/src/flowgraph/SinkI24.cpp
index 0cb077d..a9fb5d2 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI24.cpp
@@ -15,19 +15,20 @@
  */
 
 #include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
 
-#ifdef __ANDROID__
+
+#include "FlowGraphNode.h"
+#include "SinkI24.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SinkI24.h"
-
 using namespace flowgraph;
 
 SinkI24::SinkI24(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI24::read(void *data, int32_t numFrames) {
     uint8_t *byteData = (uint8_t *) data;
@@ -36,13 +37,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *floatData = input.getBlock();
+        const float *floatData = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_p24_from_float(byteData, floatData, numSamples);
         static const int kBytesPerI24Packed = 3;
         byteData += numSamples * kBytesPerI24Packed;
diff --git a/media/libaaudio/src/flowgraph/SinkI24.h b/media/libaaudio/src/flowgraph/SinkI24.h
index 5b9b505..44078a9 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.h
+++ b/media/libaaudio/src/flowgraph/SinkI24.h
@@ -20,15 +20,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI24 : public AudioSink {
+/**
+ * AudioSink that lets you read data as packed 24-bit signed integers.
+ * The sample size is 3 bytes.
+ */
+class SinkI24 : public FlowGraphSink {
 public:
     explicit SinkI24(int32_t channelCount);
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI24";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI32.cpp b/media/libaaudio/src/flowgraph/SinkI32.cpp
index eab863d..9fd4e96 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI32.cpp
@@ -14,18 +14,18 @@
  * limitations under the License.
  */
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "FlowgraphUtilities.h"
 #include "SinkI32.h"
 
 using namespace flowgraph;
 
 SinkI32::SinkI32(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI32::read(void *data, int32_t numFrames) {
     int32_t *intData = (int32_t *) data;
@@ -34,13 +34,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
+        const float *signal = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_i32_from_float(intData, signal, numSamples);
         intData += numSamples;
         signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI32.h b/media/libaaudio/src/flowgraph/SinkI32.h
index 09d23b7..7456d5f 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.h
+++ b/media/libaaudio/src/flowgraph/SinkI32.h
@@ -19,16 +19,20 @@
 
 #include <stdint.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI32 : public AudioSink {
+class SinkI32 : public FlowGraphSink {
 public:
     explicit SinkI32(int32_t channelCount);
     ~SinkI32() override = default;
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI32";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.cpp b/media/libaaudio/src/flowgraph/SourceFloat.cpp
index 4bb674f..1b3daf1 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SourceFloat.cpp
@@ -16,23 +16,22 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceFloat.h"
 
 using namespace flowgraph;
 
 SourceFloat::SourceFloat(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceFloat::onProcess(int64_t framePosition, int32_t numFrames) {
+int32_t SourceFloat::onProcess(int32_t numFrames) {
+    float *outputBuffer = output.getBuffer();
+    const int32_t channelCount = output.getSamplesPerFrame();
 
-    float *outputBuffer = output.getBlock();
-    int32_t channelCount = output.getSamplesPerFrame();
-
-    int32_t framesLeft = mSizeInFrames - mFrameIndex;
-    int32_t framesToProcess = std::min(numFrames, framesLeft);
-    int32_t numSamples = framesToProcess * channelCount;
+    const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+    const int32_t framesToProcess = std::min(numFrames, framesLeft);
+    const int32_t numSamples = framesToProcess * channelCount;
 
     const float *floatBase = (float *) mData;
     const float *floatData = &floatBase[mFrameIndex * channelCount];
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.h b/media/libaaudio/src/flowgraph/SourceFloat.h
index e6eed9f..4719669 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.h
+++ b/media/libaaudio/src/flowgraph/SourceFloat.h
@@ -20,15 +20,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceFloat : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined float data.
+ */
+class SourceFloat : public FlowGraphSourceBuffered {
 public:
     explicit SourceFloat(int32_t channelCount);
+    ~SourceFloat() override = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceFloat";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI16.cpp b/media/libaaudio/src/flowgraph/SourceI16.cpp
index c3fcec2..8813023 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI16.cpp
@@ -17,21 +17,21 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#include "FlowGraphNode.h"
+#include "SourceI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SourceI16.h"
-
 using namespace flowgraph;
 
 SourceI16::SourceI16(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI16::onProcess(int64_t framePosition, int32_t numFrames) {
-    float *floatData = output.getBlock();
+int32_t SourceI16::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -41,7 +41,7 @@
     const int16_t *shortBase = static_cast<const int16_t *>(mData);
     const int16_t *shortData = &shortBase[mFrameIndex * channelCount];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_i16(floatData, shortData, numSamples);
 #else
     for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI16.h b/media/libaaudio/src/flowgraph/SourceI16.h
index 2b116cf..fe440b2 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.h
+++ b/media/libaaudio/src/flowgraph/SourceI16.h
@@ -20,15 +20,21 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
-
-class SourceI16 : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined 16-bit integer data.
+ */
+class SourceI16 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI16(int32_t channelCount);
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceI16";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI24.cpp b/media/libaaudio/src/flowgraph/SourceI24.cpp
index 097954e..1975878 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI24.cpp
@@ -15,13 +15,13 @@
  */
 
 #include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceI24.h"
 
 using namespace flowgraph;
@@ -29,11 +29,11 @@
 constexpr int kBytesPerI24Packed = 3;
 
 SourceI24::SourceI24(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI24::onProcess(int64_t framePosition, int32_t numFrames) {
-    float *floatData = output.getBlock();
+int32_t SourceI24::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -43,7 +43,7 @@
     const uint8_t *byteBase = (uint8_t *) mData;
     const uint8_t *byteData = &byteBase[mFrameIndex * channelCount * kBytesPerI24Packed];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_p24(floatData, byteData, numSamples);
 #else
     static const float scale = 1. / (float)(1UL << 31);
diff --git a/media/libaaudio/src/flowgraph/SourceI24.h b/media/libaaudio/src/flowgraph/SourceI24.h
index 2ed6f18..3779534 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.h
+++ b/media/libaaudio/src/flowgraph/SourceI24.h
@@ -17,17 +17,25 @@
 #ifndef FLOWGRAPH_SOURCE_I24_H
 #define FLOWGRAPH_SOURCE_I24_H
 
-#include <stdint.h>
+#include <unistd.h>
+#include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceI24 : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined 24-bit packed integer data.
+ */
+class SourceI24 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI24(int32_t channelCount);
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceI24";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI32.cpp b/media/libaaudio/src/flowgraph/SourceI32.cpp
index e8177ad..4b2e8c4 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI32.cpp
@@ -17,31 +17,31 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceI32.h"
 
 using namespace flowgraph;
 
 SourceI32::SourceI32(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI32::onProcess(int64_t framePosition, int32_t numFrames) {
-    float *floatData = output.getBlock();
-    int32_t channelCount = output.getSamplesPerFrame();
+int32_t SourceI32::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
+    const int32_t channelCount = output.getSamplesPerFrame();
 
-    int32_t framesLeft = mSizeInFrames - mFrameIndex;
-    int32_t framesToProcess = std::min(numFrames, framesLeft);
-    int32_t numSamples = framesToProcess * channelCount;
+    const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+    const int32_t framesToProcess = std::min(numFrames, framesLeft);
+    const int32_t numSamples = framesToProcess * channelCount;
 
     const int32_t *intBase = static_cast<const int32_t *>(mData);
     const int32_t *intData = &intBase[mFrameIndex * channelCount];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_i32(floatData, intData, numSamples);
 #else
     for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI32.h b/media/libaaudio/src/flowgraph/SourceI32.h
index e50f9be..b4e0d7b 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.h
+++ b/media/libaaudio/src/flowgraph/SourceI32.h
@@ -19,17 +19,20 @@
 
 #include <stdint.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceI32 : public AudioSource {
+class SourceI32 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI32(int32_t channelCount);
     ~SourceI32() override = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
+    const char *getName() override {
+        return "SourceI32";
+    }
 private:
     static constexpr float kScale = 1.0 / (1UL << 31);
 };
diff --git a/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
new file mode 100644
index 0000000..f6479ae
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+#define RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a HyperbolicCosineWindow window centered at 0.
+ * This can be used in place of a Kaiser window.
+ *
+ * The code is based on an anonymous contribution by "a concerned citizen":
+ * https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+ */
+class HyperbolicCosineWindow {
+public:
+    HyperbolicCosineWindow() {
+        setStopBandAttenuation(60);
+    }
+
+    /**
+     * @param attenuation typical values range from 30 to 90 dB
+     * @return beta
+     */
+    double setStopBandAttenuation(double attenuation) {
+        double alpha = ((-325.1e-6 * attenuation + 0.1677) * attenuation) - 3.149;
+        setAlpha(alpha);
+        return alpha;
+    }
+
+    void setAlpha(double alpha) {
+        mAlpha = alpha;
+        mInverseCoshAlpha = 1.0 / cosh(alpha);
+    }
+
+    /**
+     * @param x ranges from -1.0 to +1.0
+     */
+    double operator()(double x) {
+        double x2 = x * x;
+        if (x2 >= 1.0) return 0.0;
+        double w = mAlpha * sqrt(1.0 - x2);
+        return cosh(w) * mInverseCoshAlpha;
+    }
+
+private:
+    double mAlpha = 0.0;
+    double mInverseCoshAlpha = 1.0;
+};
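+
+// Usage sketch (illustrative only): evaluate the window across its support.
+//
+//     HyperbolicCosineWindow window;
+//     window.setStopBandAttenuation(75.0);   // attenuation in dB, typically 30 to 90
+//     double w = window(0.5);                // x ranges from -1.0 to +1.0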
+
+} // namespace resampler
+#endif //RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
new file mode 100644
index 0000000..4bd75b3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IntegerRatio.h"
+
+using namespace resampler;
+
+// Enough primes to cover the common sample rates.
+static const int kPrimes[] = {
+        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
+        43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
+        101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+        151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199};
+
+void IntegerRatio::reduce() {
+    for (int prime : kPrimes) {
+        if (mNumerator < prime || mDenominator < prime) {
+            break;
+        }
+
+        // Divide both numerator and denominator by this prime as long as it divides them evenly.
+        while (true) {
+            int top = mNumerator / prime;
+            int bottom = mDenominator / prime;
+            if ((top >= 1)
+                && (bottom >= 1)
+                && (top * prime == mNumerator) // divided evenly?
+                && (bottom * prime == mDenominator)) {
+                mNumerator = top;
+                mDenominator = bottom;
+            } else {
+                break;
+            }
+        }
+
+    }
+}
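+
+// Example (illustrative sketch, not exercised by this file): reducing a typical
+// 44.1 kHz to 48 kHz conversion.
+//
+//     IntegerRatio ratio(44100, 48000);
+//     ratio.reduce();
+//     // numerator/denominator are now 147/160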
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
new file mode 100644
index 0000000..8c044d8
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_INTEGER_RATIO_H
+#define OBOE_INTEGER_RATIO_H
+
+#include <sys/types.h>
+
+namespace resampler {
+
+/**
+ * Represent the ratio of two integers.
+ */
+class IntegerRatio {
+public:
+    IntegerRatio(int32_t numerator, int32_t denominator)
+            : mNumerator(numerator), mDenominator(denominator) {}
+
+    /**
+     * Reduce by removing common prime factors.
+     */
+    void reduce();
+
+    int32_t getNumerator() {
+        return mNumerator;
+    }
+
+    int32_t getDenominator() {
+        return mDenominator;
+    }
+
+private:
+    int32_t mNumerator;
+    int32_t mDenominator;
+};
+
+} // namespace resampler
+
+#endif //OBOE_INTEGER_RATIO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
new file mode 100644
index 0000000..73dbc41
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_KAISER_WINDOW_H
+#define RESAMPLER_KAISER_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a Kaiser window centered at 0.
+ */
+class KaiserWindow {
+public:
+    KaiserWindow() {
+        setStopBandAttenuation(60);
+    }
+
+    /**
+     * @param attenuation typical values range from 30 to 90 dB
+     * @return beta
+     */
+    double setStopBandAttenuation(double attenuation) {
+        double beta = 0.0;
+        if (attenuation > 50) {
+            beta = 0.1102 * (attenuation - 8.7);
+        } else if (attenuation >= 21) {
+            double a21 = attenuation - 21;
+            beta = 0.5842 * pow(a21, 0.4) + (0.07886 * a21);
+        }
+        setBeta(beta);
+        return beta;
+    }
+
+    void setBeta(double beta) {
+        mBeta = beta;
+        mInverseBesselBeta = 1.0 / bessel(beta);
+    }
+
+    /**
+     * @param x ranges from -1.0 to +1.0
+     */
+    double operator()(double x) {
+        double x2 = x * x;
+        if (x2 >= 1.0) return 0.0;
+        double w = mBeta * sqrt(1.0 - x2);
+        return bessel(w) * mInverseBesselBeta;
+    }
+
+    // Approximation of a modified zero-order Bessel function
+    // of the first kind.
+    // Based on a discussion at:
+    // https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+    static double bessel(double x) {
+        double y = cosh(0.970941817426052 * x);
+        y += cosh(0.8854560256532099 * x);
+        y += cosh(0.7485107481711011 * x);
+        y += cosh(0.5680647467311558 * x);
+        y += cosh(0.3546048870425356 * x);
+        y += cosh(0.120536680255323 * x);
+        y *= 2;
+        y += cosh(x);
+        y /= 13;
+        return y;
+    }
+
+private:
+    double mBeta = 0.0;
+    double mInverseBesselBeta = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_KAISER_WINDOW_H
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
new file mode 100644
index 0000000..a7748c1
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LinearResampler.h"
+
+using namespace resampler;
+
+LinearResampler::LinearResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder) {
+    mPreviousFrame = std::make_unique<float[]>(getChannelCount());
+    mCurrentFrame = std::make_unique<float[]>(getChannelCount());
+}
+
+void LinearResampler::writeFrame(const float *frame) {
+    memcpy(mPreviousFrame.get(), mCurrentFrame.get(), sizeof(float) * getChannelCount());
+    memcpy(mCurrentFrame.get(), frame, sizeof(float) * getChannelCount());
+}
+
+void LinearResampler::readFrame(float *frame) {
+    float *previous = mPreviousFrame.get();
+    float *current = mCurrentFrame.get();
+    float phase = (float) getIntegerPhase() / mDenominator;
+    // iterate across samples in the frame
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float f0 = *previous++;
+        float f1 = *current++;
+        *frame++ = f0 + (phase * (f1 - f0));
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.h b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
new file mode 100644
index 0000000..6bde81d
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_LINEAR_RESAMPLER_H
+#define OBOE_LINEAR_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Simple resampler that uses bi-linear interpolation.
+ */
+class LinearResampler : public MultiChannelResampler {
+public:
+    explicit LinearResampler(const MultiChannelResampler::Builder &builder);
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+
+private:
+    std::unique_ptr<float[]> mPreviousFrame;
+    std::unique_ptr<float[]> mCurrentFrame;
+};
+
+} // namespace resampler
+#endif //OBOE_LINEAR_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
new file mode 100644
index 0000000..d630520
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+
+#include "IntegerRatio.h"
+#include "LinearResampler.h"
+#include "MultiChannelResampler.h"
+#include "PolyphaseResampler.h"
+#include "PolyphaseResamplerMono.h"
+#include "PolyphaseResamplerStereo.h"
+#include "SincResampler.h"
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+MultiChannelResampler::MultiChannelResampler(const MultiChannelResampler::Builder &builder)
+        : mNumTaps(builder.getNumTaps())
+        , mX(builder.getChannelCount() * builder.getNumTaps() * 2)
+        , mSingleFrame(builder.getChannelCount())
+        , mChannelCount(builder.getChannelCount())
+        {
+    // Reduce sample rates to the smallest ratio.
+    // For example 44100/48000 would become 147/160.
+    IntegerRatio ratio(builder.getInputRate(), builder.getOutputRate());
+    ratio.reduce();
+    mNumerator = ratio.getNumerator();
+    mDenominator = ratio.getDenominator();
+    mIntegerPhase = mDenominator;
+}
+
+// static factory method
+MultiChannelResampler *MultiChannelResampler::make(int32_t channelCount,
+                                                   int32_t inputRate,
+                                                   int32_t outputRate,
+                                                   Quality quality) {
+    Builder builder;
+    builder.setInputRate(inputRate);
+    builder.setOutputRate(outputRate);
+    builder.setChannelCount(channelCount);
+
+    switch (quality) {
+        case Quality::Fastest:
+            builder.setNumTaps(2);
+            break;
+        case Quality::Low:
+            builder.setNumTaps(4);
+            break;
+        case Quality::Medium:
+        default:
+            builder.setNumTaps(8);
+            break;
+        case Quality::High:
+            builder.setNumTaps(16);
+            break;
+        case Quality::Best:
+            builder.setNumTaps(32);
+            break;
+    }
+
+    // Set the cutoff frequency so that we do not get aliasing when down-sampling.
+    if (inputRate > outputRate) {
+        builder.setNormalizedCutoff(kDefaultNormalizedCutoff);
+    }
+    return builder.build();
+}
+
+MultiChannelResampler *MultiChannelResampler::Builder::build() {
+    if (getNumTaps() == 2) {
+        // Note that this does not do low pass filtering.
+        return new LinearResampler(*this);
+    }
+    IntegerRatio ratio(getInputRate(), getOutputRate());
+    ratio.reduce();
+    bool usePolyphase = (getNumTaps() * ratio.getDenominator()) <= kMaxCoefficients;
+    if (usePolyphase) {
+        if (getChannelCount() == 1) {
+            return new PolyphaseResamplerMono(*this);
+        } else if (getChannelCount() == 2) {
+            return new PolyphaseResamplerStereo(*this);
+        } else {
+            return new PolyphaseResampler(*this);
+        }
+    } else {
+        // Use a less optimized resampler that uses a float phaseIncrement.
+        // TODO mono resampler
+        if (getChannelCount() == 2) {
+            return new SincResamplerStereo(*this);
+        } else {
+            return new SincResampler(*this);
+        }
+    }
+}
+
+void MultiChannelResampler::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * getChannelCount()];
+    int offset = getNumTaps() * getChannelCount();
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        // Write twice so we avoid having to wrap when reading.
+        dest[channel] = dest[channel + offset] = frame[channel];
+    }
+}
+
+float MultiChannelResampler::sinc(float radians) {
+    if (abs(radians) < 1.0e-9) return 1.0f;   // avoid divide by zero
+    return sinf(radians) / radians;   // Sinc function
+}
+
+// Generate coefficients in the order they will be used by readFrame().
+// This is more complicated but readFrame() is called repeatedly and should be optimized.
+void MultiChannelResampler::generateCoefficients(int32_t inputRate,
+                                              int32_t outputRate,
+                                              int32_t numRows,
+                                              double phaseIncrement,
+                                              float normalizedCutoff) {
+    mCoefficients.resize(getNumTaps() * numRows);
+    int coefficientIndex = 0;
+    double phase = 0.0; // ranges from 0.0 to 1.0, fraction between samples
+    // Stretch the sinc function for low pass filtering.
+    const float cutoffScaler = normalizedCutoff *
+            ((outputRate < inputRate)
+             ? ((float)outputRate / inputRate)
+             : ((float)inputRate / outputRate));
+    const int numTapsHalf = getNumTaps() / 2; // numTaps must be even.
+    const float numTapsHalfInverse = 1.0f / numTapsHalf;
+    for (int i = 0; i < numRows; i++) {
+        float tapPhase = phase - numTapsHalf;
+        float gain = 0.0; // sum of raw coefficients
+        int gainCursor = coefficientIndex;
+        for (int tap = 0; tap < getNumTaps(); tap++) {
+            float radians = tapPhase * M_PI;
+
+#if MCR_USE_KAISER
+            float window = mKaiserWindow(tapPhase * numTapsHalfInverse);
+#else
+            float window = mCoshWindow(tapPhase * numTapsHalfInverse);
+#endif
+            float coefficient = sinc(radians * cutoffScaler) * window;
+            mCoefficients.at(coefficientIndex++) = coefficient;
+            gain += coefficient;
+            tapPhase += 1.0;
+        }
+        phase += phaseIncrement;
+        while (phase >= 1.0) {
+            phase -= 1.0;
+        }
+
+        // Correct for gain variations.
+        float gainCorrection = 1.0 / gain; // normalize the gain
+        for (int tap = 0; tap < getNumTaps(); tap++) {
+            mCoefficients.at(gainCursor + tap) *= gainCorrection;
+        }
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
new file mode 100644
index 0000000..da79cad
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_MULTICHANNEL_RESAMPLER_H
+#define OBOE_MULTICHANNEL_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef MCR_USE_KAISER
+// It appears from the spectrogram that the HyperbolicCosine window leads to fewer artifacts.
+// And it is faster to calculate.
+#define MCR_USE_KAISER 0
+#endif
+
+#if MCR_USE_KAISER
+#include "KaiserWindow.h"
+#else
+#include "HyperbolicCosineWindow.h"
+#endif
+
+namespace resampler {
+
+class MultiChannelResampler {
+
+public:
+
+    enum class Quality : int32_t {
+        Fastest,
+        Low,
+        Medium,
+        High,
+        Best,
+    };
+
+    class Builder {
+    public:
+        /**
+         * Construct an optimal resampler based on the specified parameters.
+         * @return address of a resampler
+         */
+        MultiChannelResampler *build();
+
+        /**
+         * The number of taps in the resampling filter.
+         * More taps gives better quality but uses more CPU time.
+         * This typically ranges from 4 to 64. Default is 16.
+         *
+         * For polyphase filters, numTaps must be a multiple of four for loop unrolling.
+         * @param numTaps number of taps for the filter
+         * @return address of this builder for chaining calls
+         */
+        Builder *setNumTaps(int32_t numTaps) {
+            mNumTaps = numTaps;
+            return this;
+        }
+
+        /**
+         * Use 1 for mono, 2 for stereo, etc. Default is 1.
+         *
+         * @param channelCount number of channels
+         * @return address of this builder for chaining calls
+         */
+        Builder *setChannelCount(int32_t channelCount) {
+            mChannelCount = channelCount;
+            return this;
+        }
+
+        /**
+         * Default is 48000.
+         *
+         * @param inputRate sample rate of the input stream
+         * @return address of this builder for chaining calls
+         */
+        Builder *setInputRate(int32_t inputRate) {
+            mInputRate = inputRate;
+            return this;
+        }
+
+        /**
+         * Default is 48000.
+         *
+         * @param outputRate sample rate of the output stream
+         * @return address of this builder for chaining calls
+         */
+        Builder *setOutputRate(int32_t outputRate) {
+            mOutputRate = outputRate;
+            return this;
+        }
+
+        /**
+         * Set cutoff frequency relative to the Nyquist rate of the output sample rate.
+         * Set to 1.0 to match the Nyquist frequency.
+         * Set lower to reduce aliasing.
+         * Default is 0.70.
+         *
+         * @param normalizedCutoff anti-aliasing filter cutoff
+         * @return address of this builder for chaining calls
+         */
+        Builder *setNormalizedCutoff(float normalizedCutoff) {
+            mNormalizedCutoff = normalizedCutoff;
+            return this;
+        }
+
+        int32_t getNumTaps() const {
+            return mNumTaps;
+        }
+
+        int32_t getChannelCount() const {
+            return mChannelCount;
+        }
+
+        int32_t getInputRate() const {
+            return mInputRate;
+        }
+
+        int32_t getOutputRate() const {
+            return mOutputRate;
+        }
+
+        float getNormalizedCutoff() const {
+            return mNormalizedCutoff;
+        }
+
+    protected:
+        int32_t mChannelCount = 1;
+        int32_t mNumTaps = 16;
+        int32_t mInputRate = 48000;
+        int32_t mOutputRate = 48000;
+        float   mNormalizedCutoff = kDefaultNormalizedCutoff;
+    };
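+
+    // Builder usage sketch (illustrative; roughly what make() does internally):
+    //
+    //     MultiChannelResampler::Builder builder;
+    //     builder.setChannelCount(2)->setInputRate(44100)
+    //             ->setOutputRate(48000)->setNumTaps(16);
+    //     MultiChannelResampler *resampler = builder.build();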
+
+    virtual ~MultiChannelResampler() = default;
+
+    /**
+     * Factory method for making a resampler that is optimal for the given inputs.
+     *
+     * @param channelCount number of channels, 2 for stereo
+     * @param inputRate sample rate of the input stream
+     * @param outputRate  sample rate of the output stream
+     * @param quality higher quality sounds better but uses more CPU
+     * @return an optimal resampler
+     */
+    static MultiChannelResampler *make(int32_t channelCount,
+                                       int32_t inputRate,
+                                       int32_t outputRate,
+                                       Quality quality);
+
+    bool isWriteNeeded() const {
+        return mIntegerPhase >= mDenominator;
+    }
+
+    /**
+     * Write a frame containing N samples.
+     *
+     * @param frame pointer to the first sample in a frame
+     */
+    void writeNextFrame(const float *frame) {
+        writeFrame(frame);
+        advanceWrite();
+    }
+
+    /**
+     * Read a frame containing N samples.
+     *
+     * @param frame pointer to the first sample in a frame
+     */
+    void readNextFrame(float *frame) {
+        readFrame(frame);
+        advanceRead();
+    }
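+
+    // Typical pull-model usage, mirroring the loop in SampleRateConverter::onProcess()
+    // earlier in this change (illustrative sketch only; framesLeft, inputFrame and
+    // outputFrame are placeholder names):
+    //
+    //     while (framesLeft > 0) {
+    //         if (resampler->isWriteNeeded()) {
+    //             resampler->writeNextFrame(inputFrame);    // feed one input frame
+    //         } else {
+    //             resampler->readNextFrame(outputFrame);    // produce one output frame
+    //             framesLeft--;
+    //         }
+    //     }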
+
+    int getNumTaps() const {
+        return mNumTaps;
+    }
+
+    int getChannelCount() const {
+        return mChannelCount;
+    }
+
+    static float hammingWindow(float radians, float spread);
+
+    static float sinc(float radians);
+
+protected:
+
+    explicit MultiChannelResampler(const MultiChannelResampler::Builder &builder);
+
+    /**
+     * Write a frame containing N samples.
+     * Call advanceWrite() after calling this.
+     * @param frame pointer to the first sample in a frame
+     */
+    virtual void writeFrame(const float *frame);
+
+    /**
+     * Read a frame containing N samples using interpolation.
+     * Call advanceRead() after calling this.
+     * @param frame pointer to the first sample in a frame
+     */
+    virtual void readFrame(float *frame) = 0;
+
+    void advanceWrite() {
+        mIntegerPhase -= mDenominator;
+    }
+
+    void advanceRead() {
+        mIntegerPhase += mNumerator;
+    }
+
+    /**
+     * Generate the filter coefficients in optimal order.
+     * @param inputRate sample rate of the input stream
+     * @param outputRate  sample rate of the output stream
+     * @param numRows number of rows in the array that contain a set of tap coefficients
+     * @param phaseIncrement how much to increment the phase between rows
+     * @param normalizedCutoff filter cutoff frequency normalized to Nyquist rate of output
+     */
+    void generateCoefficients(int32_t inputRate,
+                              int32_t outputRate,
+                              int32_t numRows,
+                              double phaseIncrement,
+                              float normalizedCutoff);
+
+
+    int32_t getIntegerPhase() {
+        return mIntegerPhase;
+    }
+
+    static constexpr int kMaxCoefficients = 8 * 1024;
+    std::vector<float>   mCoefficients;
+
+    const int            mNumTaps;
+    int                  mCursor = 0;
+    std::vector<float>   mX;           // delayed input values for the FIR
+    std::vector<float>   mSingleFrame; // one frame for temporary use
+    int32_t              mIntegerPhase = 0;
+    int32_t              mNumerator = 0;
+    int32_t              mDenominator = 0;
+
+
+private:
+
+#if MCR_USE_KAISER
+    KaiserWindow           mKaiserWindow;
+#else
+    HyperbolicCosineWindow mCoshWindow;
+#endif
+
+    static constexpr float kDefaultNormalizedCutoff = 0.70f;
+
+    const int              mChannelCount;
+};
+
+} // namespace resampler
+#endif //OBOE_MULTICHANNEL_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
new file mode 100644
index 0000000..aa4ffd9
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "IntegerRatio.h"
+#include "PolyphaseResampler.h"
+
+using namespace resampler;
+
+PolyphaseResampler::PolyphaseResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder)
+        {
+    assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+
+    int32_t inputRate = builder.getInputRate();
+    int32_t outputRate = builder.getOutputRate();
+
+    int32_t numRows = mDenominator;
+    double phaseIncrement = (double) inputRate / (double) outputRate;
+    generateCoefficients(inputRate, outputRate,
+                         numRows, phaseIncrement,
+                         builder.getNormalizedCutoff());
+}
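+
+// Sizing example (illustrative): for a 44100 -> 48000 conversion the reduced ratio is
+// 147/160, so numRows == 160 phases. With the default 16 taps, generateCoefficients()
+// builds a table of 160 * 16 = 2560 floats, well under kMaxCoefficients.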
+
+void PolyphaseResampler::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+
+    // Multiply input times windowed sinc function.
+    float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient = *coefficients++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            mSingleFrame[channel] += *xFrame++ * coefficient;
+        }
+    }
+
+    // Advance and wrap through coefficients.
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulator to output.
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        frame[channel] = mSingleFrame[channel];
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
new file mode 100644
index 0000000..1aeb680
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_H
+#define OBOE_POLYPHASE_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+/**
+ * Resampler that is optimized for a reduced ratio of sample rates.
+ * All of the coefficients for each possible phase value are pre-calculated.
+ */
+class PolyphaseResampler : public MultiChannelResampler {
+public:
+    /**
+     *
+     * @param builder containing lots of parameters
+     */
+    explicit PolyphaseResampler(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResampler() = default;
+
+    void readFrame(float *frame) override;
+
+protected:
+
+    int32_t                mCoefficientCursor = 0;
+
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
new file mode 100644
index 0000000..c0e29b7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerMono.h"
+
+using namespace resampler;
+
+#define MONO  1
+
+PolyphaseResamplerMono::PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder)
+        : PolyphaseResampler(builder) {
+    assert(builder.getChannelCount() == MONO);
+}
+
+void PolyphaseResamplerMono::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * MONO];
+    const int offset = mNumTaps * MONO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float sample =  frame[0];
+    // Put ordered writes together.
+    dest[0] = sample;
+    dest[offset] = sample;
+}
+
+void PolyphaseResamplerMono::readFrame(float *frame) {
+    // Clear accumulator.
+    float sum = 0.0;
+
+    // Multiply input times precomputed windowed sinc function.
+    const float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * MONO];
+    const int numLoops = mNumTaps >> 2; // n/4
+    for (int i = 0; i < numLoops; i++) {
+        // Manual loop unrolling, might get converted to SIMD.
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+    }
+
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulator to output.
+    frame[0] = sum;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
new file mode 100644
index 0000000..0a691a3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_MONO_H
+#define OBOE_POLYPHASE_RESAMPLER_MONO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerMono : public PolyphaseResampler {
+public:
+    explicit PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResamplerMono() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_MONO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
new file mode 100644
index 0000000..e4bef74
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO  2
+
+PolyphaseResamplerStereo::PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder)
+        : PolyphaseResampler(builder) {
+    assert(builder.getChannelCount() == STEREO);
+}
+
+void PolyphaseResamplerStereo::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * STEREO];
+    const int offset = mNumTaps * STEREO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float left =  frame[0];
+    const float right = frame[1];
+    // Put ordered writes together.
+    dest[0] = left;
+    dest[1] = right;
+    dest[offset] = left;
+    dest[1 + offset] = right;
+}
+
+void PolyphaseResamplerStereo::readFrame(float *frame) {
+    // Clear accumulators.
+    float left = 0.0;
+    float right = 0.0;
+
+    // Multiply input times precomputed windowed sinc function.
+    const float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * STEREO];
+    const int numLoops = mNumTaps >> 2; // n/4
+    for (int i = 0; i < numLoops; i++) {
+        // Manual loop unrolling, might get converted to SIMD.
+        float coefficient = *coefficients++;
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++; // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++;  // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++;  // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+    }
+
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulators to output.
+    frame[0] = left;
+    frame[1] = right;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
new file mode 100644
index 0000000..e608483
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_STEREO_H
+#define OBOE_POLYPHASE_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerStereo : public PolyphaseResampler {
+public:
+    explicit PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResamplerStereo() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/README.md b/media/libaaudio/src/flowgraph/resampler/README.md
new file mode 100644
index 0000000..05d8a89
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/README.md
@@ -0,0 +1,91 @@
+# Sample Rate Converter
+
+This folder contains a sample rate converter, or "resampler".
+
+The converter is based on a sinc function that has been windowed by a hyperbolic cosine.
+We found this had fewer artifacts than the more traditional Kaiser window.
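+
+The window is conceptually similar to a Kaiser window, but with the modified Bessel
+function replaced by a hyperbolic cosine. The sketch below is illustrative only; the
+function name and parameters (halfWidth, beta) are hypothetical, and the real
+coefficients are generated inside MultiChannelResampler and may use a different form.
+
+    #include <cmath>
+
+    // Hypothetical sketch of a single cosh-windowed sinc tap, NOT the library's code.
+    // x is the position in zero-crossing units and must satisfy |x| <= halfWidth.
+    static float windowedSincTap(float x, float halfWidth, float beta) {
+        const float kPi = 3.14159265358979323846f;
+        const float sinc = (x == 0.0f) ? 1.0f : std::sin(kPi * x) / (kPi * x);
+        const float t = x / halfWidth;  // normalized position, -1.0 .. +1.0
+        const float window = std::cosh(beta * std::sqrt(1.0f - t * t)) / std::cosh(beta);
+        return sinc * window;
+    }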
+
+## Creating a Resampler
+
+Include the [main header](MultiChannelResampler.h) for the resampler.
+
+    #include "resampler/MultiChannelResampler.h"
+
+Here is an example of creating a stereo resampler that will convert from 44100 to 48000 Hz.
+Only do this once, when you open your stream. Then use the same resampler to process multiple buffers.
+
+    MultiChannelResampler *resampler = MultiChannelResampler::make(
+            2, // channel count
+            44100, // input sampleRate
+            48000, // output sampleRate
+            MultiChannelResampler::Quality::Medium); // conversion quality
+
+Possible values for quality include { Fastest, Low, Medium, High, Best }.
+Higher quality levels will sound better but consume more CPU because they have more taps in the filter.
+
+## Fractional Frame Counts
+
+Note that the number of output frames generated for a given number of input frames can vary.
+
+For example, suppose you are converting from 44100 Hz to 48000 Hz and using an input buffer with 960 frames. If you calculate the number of output frames, you get:
+
+    960 * 48000 / 44100 = 1044.897959...
+
+You cannot generate a fractional number of frames. So the resampler will sometimes generate 1044 frames and sometimes 1045 frames. On average it will generate 1044.897959 frames. The resampler stores the fraction internally and keeps track of when to consume or generate a frame.
+
+You can either use a fixed number of input frames or a fixed number of output frames. The other frame count will vary.
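+
+The bookkeeping behind this is plain integer arithmetic. The helper below is a sketch
+with hypothetical names, not the resampler's internal code; it shows how a running
+remainder decides when the extra frame appears:
+
+    // Hypothetical helper: predicts output frame counts for fixed-size input buffers.
+    struct OutputFrameCounter {
+        int64_t remainder = 0;  // leftover fraction of a frame, scaled by inputRate
+
+        int add(int inputFrames, int inputRate, int outputRate) {
+            remainder += static_cast<int64_t>(inputFrames) * outputRate;
+            int outputFrames = static_cast<int>(remainder / inputRate);
+            remainder -= static_cast<int64_t>(outputFrames) * inputRate;  // keep the fraction
+            return outputFrames;
+        }
+    };
+
+    // For repeated 960-frame buffers converted from 44100 to 48000 Hz this returns
+    // mostly 1045 with an occasional 1044, averaging 1044.897959 frames per buffer.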
+
+## Calling the Resampler with a fixed number of OUTPUT frames
+
+In this example, suppose we have a fixed number of output frames and a variable number of input frames.
+
+Assume you start with these variables and a method that returns the next input frame:
+
+    float *outputBuffer;     // multi-channel buffer to be filled
+    int    numOutputFrames;  // number of frames of output
+
+The resampler has a method isWriteNeeded() that tells you whether to write to or read from the resampler.
+
+    int outputFramesLeft = numOutputFrames;
+    while (outputFramesLeft > 0) {
+        if (resampler->isWriteNeeded()) {
+            const float *frame = getNextInputFrame(); // you provide this
+            resampler->writeNextFrame(frame);
+        } else {
+            resampler->readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            outputFramesLeft--;
+        }
+    }
+
+## Calling the Resampler with a fixed number of INPUT frames
+
+In this example, suppose we have a fixed number of input frames and a variable number of output frames.
+
+Assume you start with these variables:
+
+    float *inputBuffer;     // multi-channel buffer to be consumed
+    float *outputBuffer;    // multi-channel buffer to be filled
+    int    numInputFrames;  // number of frames of input
+    int    numOutputFrames = 0;
+    int    channelCount;    // 1 for mono, 2 for stereo
+
+    int inputFramesLeft = numInputFrames;
+    while (inputFramesLeft > 0) {
+        if (resampler->isWriteNeeded()) {
+            resampler->writeNextFrame(inputBuffer);
+            inputBuffer += channelCount;
+            inputFramesLeft--;
+        } else {
+            resampler->readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            numOutputFrames++;
+        }
+    }
+
+## Deleting the Resampler
+
+When you are done, you should delete the Resampler to avoid a memory leak.
+
+    delete resampler;
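+
+If your project uses smart pointers, a std::unique_ptr can manage the lifetime instead.
+This is just an alternative to the explicit delete above, reusing the same make() call
+shown earlier:
+
+    #include <memory>
+
+    std::unique_ptr<MultiChannelResampler> resampler(
+            MultiChannelResampler::make(2, 44100, 48000,
+                                        MultiChannelResampler::Quality::Medium));
+    // No explicit delete needed; the resampler is freed when the pointer goes out of scope.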
+
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
new file mode 100644
index 0000000..5e8a9e0
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "SincResampler.h"
+
+using namespace resampler;
+
+SincResampler::SincResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder)
+        , mSingleFrame2(builder.getChannelCount()) {
+    assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+    mNumRows = kMaxCoefficients / getNumTaps(); // no guard row needed
+    mPhaseScaler = (double) mNumRows / mDenominator;
+    double phaseIncrement = 1.0 / mNumRows;
+    generateCoefficients(builder.getInputRate(),
+                         builder.getOutputRate(),
+                         mNumRows,
+                         phaseIncrement,
+                         builder.getNormalizedCutoff());
+}
+
+void SincResampler::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+    std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+    // Determine indices into coefficients table.
+    double tablePhase = getIntegerPhase() * mPhaseScaler;
+    int index1 = static_cast<int>(floor(tablePhase));
+    if (index1 >= mNumRows) { // no guard row needed because we wrap the indices
+        tablePhase -= mNumRows;
+        index1 -= mNumRows;
+    }
+
+    int index2 = index1 + 1;
+    if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+        index2 -= mNumRows;
+    }
+
+    float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+    float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient1 = *coefficients1++;
+        float coefficient2 = *coefficients2++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            float sample = *xFrame++;
+            mSingleFrame[channel] +=  sample * coefficient1;
+            mSingleFrame2[channel] += sample * coefficient2;
+        }
+    }
+
+    // Interpolate and copy to output.
+    float fraction = tablePhase - index1;
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float low = mSingleFrame[channel];
+        float high = mSingleFrame2[channel];
+        frame[channel] = low + (fraction * (high - low));
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.h b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
new file mode 100644
index 0000000..b235188
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_H
+#define OBOE_SINC_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Resampler that can interpolate between coefficients.
+ * This can be used to support arbitrary ratios.
+ */
+class SincResampler : public MultiChannelResampler {
+public:
+    explicit SincResampler(const MultiChannelResampler::Builder &builder);
+
+    virtual ~SincResampler() = default;
+
+    void readFrame(float *frame) override;
+
+protected:
+
+    std::vector<float> mSingleFrame2; // for interpolation
+    int32_t            mNumRows = 0;
+    double             mPhaseScaler = 1.0;
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
new file mode 100644
index 0000000..ce00302
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO  2
+
+SincResamplerStereo::SincResamplerStereo(const MultiChannelResampler::Builder &builder)
+        : SincResampler(builder) {
+    assert(builder.getChannelCount() == STEREO);
+}
+
+void SincResamplerStereo::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * STEREO];
+    const int offset = mNumTaps * STEREO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float left =  frame[0];
+    const float right = frame[1];
+    // Put ordered writes together.
+    dest[0] = left;
+    dest[1] = right;
+    dest[offset] = left;
+    dest[1 + offset] = right;
+}
+
+// Multiply input times windowed sinc function.
+void SincResamplerStereo::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+    std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+    // Determine indices into coefficients table.
+    double tablePhase = getIntegerPhase() * mPhaseScaler;
+    int index1 = static_cast<int>(floor(tablePhase));
+    float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+    int index2 = (index1 + 1);
+    if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+        index2 = 0;
+    }
+    float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient1 = *coefficients1++;
+        float coefficient2 = *coefficients2++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            float sample = *xFrame++;
+            mSingleFrame[channel] +=  sample * coefficient1;
+            mSingleFrame2[channel] += sample * coefficient2;
+        }
+    }
+
+    // Interpolate and copy to output.
+    float fraction = tablePhase - index1;
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float low = mSingleFrame[channel];
+        float high = mSingleFrame2[channel];
+        frame[channel] = low + (fraction * (high - low));
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
new file mode 100644
index 0000000..7d49ec7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_STEREO_H
+#define OBOE_SINC_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "SincResampler.h"
+
+namespace resampler {
+
+class SincResamplerStereo : public SincResampler {
+public:
+    explicit SincResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+    virtual ~SincResamplerStereo() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index e96e134..38f3c24 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -37,18 +37,6 @@
         : AudioStream() {
 }
 
-AudioStreamLegacy::~AudioStreamLegacy() {
-}
-
-// Called from AudioTrack.cpp or AudioRecord.cpp
-static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
-    AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
-    streamLegacy->processCallback(event, info);
-}
-
-aaudio_legacy_callback_t AudioStreamLegacy::getLegacyCallback() {
-    return AudioStreamLegacy_callback;
-}
 
 aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer,
                                                                         int32_t numFrames) {
@@ -76,84 +64,77 @@
     return (int32_t) callDataCallbackFrames(buffer, numFrames);
 }
 
-void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
-    aaudio_data_callback_result_t callbackResult;
+
+void AudioStreamLegacy::onNewIAudioTrack() {
+    ALOGD("%s stream disconnected", __func__);
+    forceDisconnect();
+    mCallbackEnabled.store(false);
+}
+
+size_t AudioStreamLegacy::onMoreData(const android::AudioTrack::Buffer& buffer) {
     // This illegal size can be used to tell AudioRecord or AudioTrack to stop calling us.
     // This takes advantage of them killing the stream when they see a size out of range.
     // That is an undocumented behavior.
     // TODO add to API in AudioRecord and AudioTrack
     const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
+    aaudio_data_callback_result_t callbackResult;
+    (void) checkForDisconnectRequest(true);
 
-    switch (opcode) {
-        case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
-            (void) checkForDisconnectRequest(true);
-
-            // Note that this code assumes an AudioTrack::Buffer is the same as
-            // AudioRecord::Buffer
-            // TODO define our own AudioBuffer and pass it from the subclasses.
-            AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
-            if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
-                ALOGW("processCallbackCommon() data, stream disconnected");
-                // This will kill the stream and prevent it from being restarted.
-                // That is OK because the stream is disconnected.
-                audioBuffer->size = SIZE_STOP_CALLBACKS;
-            } else if (!mCallbackEnabled.load()) {
-                ALOGW("processCallbackCommon() no data because callback disabled, set size=0");
-                // Do NOT use SIZE_STOP_CALLBACKS here because that will kill the stream and
-                // prevent it from being restarted. This can occur because of a race condition
-                // caused by Legacy callbacks running after the track is "stopped".
-                audioBuffer->size = 0;
-            } else {
-                if (audioBuffer->frameCount == 0) {
-                    ALOGW("processCallbackCommon() data, frameCount is zero");
-                    return;
-                }
-
-                // If the caller specified an exact size then use a block size adapter.
-                if (mBlockAdapter != nullptr) {
-                    int32_t byteCount = audioBuffer->frameCount * getBytesPerDeviceFrame();
-                    callbackResult = mBlockAdapter->processVariableBlock(
-                            (uint8_t *) audioBuffer->raw, byteCount);
-                } else {
-                    // Call using the AAudio callback interface.
-                    callbackResult = callDataCallbackFrames((uint8_t *)audioBuffer->raw,
-                                                            audioBuffer->frameCount);
-                }
-                if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
-                    audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
-                } else {
-                    if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
-                        ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
-                    } else {
-                        ALOGW("%s() callback returned invalid result = %d",
-                              __func__, callbackResult);
-                    }
-                    audioBuffer->size = 0;
-                    systemStopInternal();
-                    // Disable the callback just in case the system keeps trying to call us.
-                    mCallbackEnabled.store(false);
-                }
-
-                if (updateStateMachine() != AAUDIO_OK) {
-                    forceDisconnect();
-                    mCallbackEnabled.store(false);
-                }
-            }
+    // Note that this code assumes an AudioTrack::Buffer is the same as
+    // AudioRecord::Buffer
+    // TODO define our own AudioBuffer and pass it from the subclasses.
+    size_t written = buffer.size;
+    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+        ALOGW("%s() data, stream disconnected", __func__);
+        // This will kill the stream and prevent it from being restarted.
+        // That is OK because the stream is disconnected.
+        written = SIZE_STOP_CALLBACKS;
+    } else if (!mCallbackEnabled.load()) {
+        ALOGW("%s() no data because callback disabled, set size=0", __func__);
+        // Do NOT use SIZE_STOP_CALLBACKS here because that will kill the stream and
+        // prevent it from being restarted. This can occur because of a race condition
+        // caused by Legacy callbacks running after the track is "stopped".
+        written = 0;
+    } else {
+        if (buffer.frameCount == 0) {
+            ALOGW("%s() data, frameCount is zero", __func__);
+            return written;
         }
-            break;
 
-        // Stream got rerouted so we disconnect.
-        case AAUDIO_CALLBACK_OPERATION_DISCONNECTED:
-            ALOGD("processCallbackCommon() stream disconnected");
+        // If the caller specified an exact size then use a block size adapter.
+        if (mBlockAdapter != nullptr) {
+            int32_t byteCount = buffer.frameCount * getBytesPerDeviceFrame();
+            callbackResult = mBlockAdapter->processVariableBlock(
+                    static_cast<uint8_t*>(buffer.raw), byteCount);
+        } else {
+            // Call using the AAudio callback interface.
+            callbackResult = callDataCallbackFrames(static_cast<uint8_t *>(buffer.raw),
+                                                    buffer.frameCount);
+        }
+        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+            written = buffer.frameCount * getBytesPerDeviceFrame();
+        } else {
+            if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+                ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+            } else {
+                ALOGW("%s() callback returned invalid result = %d",
+                      __func__, callbackResult);
+            }
+            written = 0;
+            systemStopInternal();
+            // Disable the callback just in case the system keeps trying to call us.
+            mCallbackEnabled.store(false);
+        }
+
+        if (updateStateMachine() != AAUDIO_OK) {
             forceDisconnect();
             mCallbackEnabled.store(false);
-            break;
-
-        default:
-            break;
+        }
     }
+    return written;
 }
 
+
 aaudio_result_t AudioStreamLegacy::checkForDisconnectRequest(bool errorCallbackEnabled) {
     if (mRequestDisconnect.isRequested()) {
         ALOGD("checkForDisconnectRequest() mRequestDisconnect acknowledged");
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 88ef270..c54d7e2 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -18,6 +18,7 @@
 #define LEGACY_AUDIO_STREAM_LEGACY_H
 
 #include <media/AudioTimestamp.h>
+#include <media/AudioTrack.h>
 #include <media/AudioSystem.h>
 
 #include <aaudio/AAudio.h>
@@ -30,8 +31,6 @@
 namespace aaudio {
 
 
-typedef void (*aaudio_legacy_callback_t)(int event, void* user, void *info);
-
 enum {
     /**
      * Request that the callback function should fill the data buffer of an output stream,
@@ -56,21 +55,17 @@
 typedef int32_t aaudio_callback_operation_t;
 
 
-class AudioStreamLegacy : public AudioStream, public FixedBlockProcessor {
+class AudioStreamLegacy : public AudioStream,
+                          public FixedBlockProcessor,
+                          protected android::AudioTrack::IAudioTrackCallback {
 public:
     AudioStreamLegacy();
 
-    virtual ~AudioStreamLegacy();
+    virtual ~AudioStreamLegacy() = default;
 
-    aaudio_legacy_callback_t getLegacyCallback();
 
     int32_t callDataCallbackFrames(uint8_t *buffer, int32_t numFrames);
 
-    // This is public so it can be called from the C callback function.
-    // This is called from the AudioTrack/AudioRecord client.
-    virtual void processCallback(int event, void *info) = 0;
-
-    void processCallbackCommon(aaudio_callback_operation_t opcode, void *info);
 
     // Implement FixedBlockProcessor
     int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override;
@@ -86,7 +81,8 @@
     }
 
 protected:
-
+    size_t onMoreData(const android::AudioTrack::Buffer& buffer) override;
+    void onNewIAudioTrack() override;
     aaudio_result_t getBestTimestamp(clockid_t clockId,
                                      int64_t *framePosition,
                                      int64_t *timeNanoseconds,
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index fe8fb19..df7d4cf 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -37,6 +37,10 @@
 using namespace android;
 using namespace aaudio;
 
+static void sCallbackWrapper(int event, void* userData, void* info) {
+    static_cast<AudioStreamRecord*>(userData)->processCallback(event, info);
+}
+
 AudioStreamRecord::AudioStreamRecord()
     : AudioStreamLegacy()
     , mFixedBlockWriter(*this)
@@ -124,12 +128,12 @@
     uint32_t notificationFrames = 0;
 
     // Setup the callback if there is one.
-    AudioRecord::callback_t callback = nullptr;
+    AudioRecord::legacy_callback_t callback = nullptr;
     void *callbackData = nullptr;
     AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
     if (builder.getDataCallbackProc() != nullptr) {
         streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
-        callback = getLegacyCallback();
+        callback = sCallbackWrapper;
         callbackData = this;
     }
     mCallbackBufferSize = builder.getFramesPerDataCallback();
@@ -353,14 +357,15 @@
 void AudioStreamRecord::processCallback(int event, void *info) {
     switch (event) {
         case AudioRecord::EVENT_MORE_DATA:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+        {
+            AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+            audioBuffer->size = onMoreData(*audioBuffer);
             break;
-
+        }
             // Stream got rerouted so we disconnect.
         case AudioRecord::EVENT_NEW_IAUDIORECORD:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+            onNewIAudioTrack();
             break;
-
         default:
             break;
     }
@@ -504,7 +509,7 @@
     return (aaudio_result_t) framesRead;
 }
 
-aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
+aaudio_result_t AudioStreamRecord::setBufferSize(int32_t /*requestedFrames*/)
 {
     return getBufferSize();
 }
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 692651d..5ce73f9 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -65,7 +65,9 @@
     }
 
     // This is public so it can be called from the C callback function.
-    void processCallback(int event, void *info) override;
+    void processCallback(int event, void *info);
+
+    void processCallbackRecord(aaudio_callback_operation_t opcode, void *info);
 
     int64_t incrementClientFrameCounter(int32_t frames) override {
         return incrementFramesRead(frames);
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 17736fc..17a6d0c 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -103,14 +103,12 @@
             : getFormat();
 
     // Setup the callback if there is one.
-    AudioTrack::callback_t callback = nullptr;
-    void *callbackData = nullptr;
+    wp<AudioTrack::IAudioTrackCallback> callback;
     // Note that TRANSFER_SYNC does not allow FAST track
     AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
     if (builder.getDataCallbackProc() != nullptr) {
         streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
-        callback = getLegacyCallback();
-        callbackData = this;
+        callback = wp<AudioTrack::IAudioTrackCallback>::fromExisting(this);
 
         // If the total buffer size is unspecified then base the size on the burst size.
         if (frameCount == 0
@@ -157,13 +155,12 @@
             frameCount,
             flags,
             callback,
-            callbackData,
             notificationFrames,
-            0,       // DEFAULT sharedBuffer*/,
+            nullptr,       // DEFAULT sharedBuffer*/,
             false,   // DEFAULT threadCanCallJava
             sessionId,
             streamTransferType,
-            NULL,    // DEFAULT audio_offload_info_t
+            nullptr,    // DEFAULT audio_offload_info_t
             AttributionSourceState(), // DEFAULT uid and pid
             &attributes,
             // WARNING - If doNotReconnect set true then audio stops after plugging and unplugging
@@ -217,7 +214,6 @@
         mBlockAdapter = nullptr;
     }
 
-    setState(AAUDIO_STREAM_STATE_OPEN);
     setDeviceId(mAudioTrack->getRoutedDeviceId());
 
     aaudio_session_id_t actualSessionId =
@@ -250,6 +246,19 @@
              "open() perfMode changed from %d to %d",
              perfMode, actualPerformanceMode);
 
+    if (getState() != AAUDIO_STREAM_STATE_UNINITIALIZED) {
+        ALOGE("%s - Open canceled since state = %d", __func__, getState());
+        if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+            ALOGE("%s - Opening while state is disconnected", __func__);
+            safeReleaseClose();
+            return AAUDIO_ERROR_DISCONNECTED;
+        }
+        safeReleaseClose();
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    setState(AAUDIO_STREAM_STATE_OPEN);
     return AAUDIO_OK;
 }
 
@@ -281,31 +290,19 @@
     AudioStream::close_l();
 }
 
-void AudioStreamTrack::processCallback(int event, void *info) {
 
-    switch (event) {
-        case AudioTrack::EVENT_MORE_DATA:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
-            break;
-
-            // Stream got rerouted so we disconnect.
-        case AudioTrack::EVENT_NEW_IAUDIOTRACK:
-            // request stream disconnect if the restored AudioTrack has properties not matching
-            // what was requested initially
-            if (mAudioTrack->channelCount() != getSamplesPerFrame()
-                    || mAudioTrack->format() != getFormat()
-                    || mAudioTrack->getSampleRate() != getSampleRate()
-                    || mAudioTrack->getRoutedDeviceId() != getDeviceId()
-                    || getBufferCapacityFromDevice() != getBufferCapacity()
-                    || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
-                processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
-            }
-            break;
-
-        default:
-            break;
+void AudioStreamTrack::onNewIAudioTrack() {
+    // Stream got rerouted so we disconnect.
+    // request stream disconnect if the restored AudioTrack has properties not matching
+    // what was requested initially
+    if (mAudioTrack->channelCount() != getSamplesPerFrame()
+          || mAudioTrack->format() != getFormat()
+          || mAudioTrack->getSampleRate() != getSampleRate()
+          || mAudioTrack->getRoutedDeviceId() != getDeviceId()
+          || getBufferCapacityFromDevice() != getBufferCapacity()
+          || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
+        AudioStreamLegacy::onNewIAudioTrack();
     }
-    return;
 }
 
 aaudio_result_t AudioStreamTrack::requestStart_l() {
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index f604871..0f4d72b 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -81,9 +81,6 @@
 
     aaudio_result_t updateStateMachine() override;
 
-    // This is public so it can be called from the C callback function.
-    void processCallback(int event, void *info) override;
-
     int64_t incrementClientFrameCounter(int32_t frames) override {
         return incrementFramesWritten(frames);
     }
@@ -100,6 +97,7 @@
 
     int32_t getFramesPerBurstFromDevice() const override;
     int32_t getBufferCapacityFromDevice() const override;
+    void onNewIAudioTrack() override;
 
 private:
 
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index e44ccee..a0952fe 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -544,45 +544,6 @@
     return AAUDIO_OK;
 }
 
-static int32_t AAudioProperty_getMMapProperty(const char *propName,
-                                              int32_t defaultValue,
-                                              const char * caller) {
-    int32_t prop = property_get_int32(propName, defaultValue);
-    switch (prop) {
-        case AAUDIO_UNSPECIFIED:
-        case AAUDIO_POLICY_NEVER:
-        case AAUDIO_POLICY_ALWAYS:
-        case AAUDIO_POLICY_AUTO:
-            break;
-        default:
-            ALOGE("%s: invalid = %d", caller, prop);
-            prop = defaultValue;
-            break;
-    }
-    return prop;
-}
-
-int32_t AAudioProperty_getMMapPolicy() {
-    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_POLICY,
-                                          AAUDIO_UNSPECIFIED, __func__);
-}
-
-int32_t AAudioProperty_getMMapExclusivePolicy() {
-    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY,
-                                          AAUDIO_UNSPECIFIED, __func__);
-}
-
-int32_t AAudioProperty_getMixerBursts() {
-    const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
-    const int32_t maxBursts = 1024; // arbitrary
-    int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
-    if (prop < 1 || prop > maxBursts) {
-        ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
-        prop = defaultBursts;
-    }
-    return prop;
-}
-
 int32_t AAudioProperty_getWakeupDelayMicros() {
     const int32_t minMicros = 0; // arbitrary
     const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
@@ -613,18 +574,6 @@
     return prop;
 }
 
-int32_t AAudioProperty_getHardwareBurstMinMicros() {
-    const int32_t defaultMicros = 1000; // arbitrary
-    const int32_t maxMicros = 1000 * 1000; // arbitrary
-    int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
-    if (prop < 1 || prop > maxMicros) {
-        ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
-              prop, defaultMicros);
-        prop = defaultMicros;
-    }
-    return prop;
-}
-
 static int32_t AAudioProperty_getMMapOffsetMicros(const char *functionName,
         const char *propertyName) {
     const int32_t minMicros = -20000; // arbitrary
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index f24df46..b59ce1c 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -128,27 +128,6 @@
 // Note that this code may be replaced by Settings or by some other system configuration tool.
 
 /**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
-int32_t AAudioProperty_getMMapPolicy();
-#define AAUDIO_PROP_MMAP_POLICY           "aaudio.mmap_policy"
-
-/**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
-int32_t AAudioProperty_getMMapExclusivePolicy();
-#define AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY "aaudio.mmap_exclusive_policy"
-
-/**
- * Read system property.
- * @return number of bursts per AAudio service mixer cycle
- */
-int32_t AAudioProperty_getMixerBursts();
-#define AAUDIO_PROP_MIXER_BURSTS           "aaudio.mixer_bursts"
-
-/**
  * Read a system property that specifies the number of extra microseconds that a thread
  * should sleep when waiting for another thread to service a FIFO. This is used
  * to avoid the waking thread from being overly optimistic about the other threads
@@ -169,19 +148,6 @@
 #define AAUDIO_PROP_MINIMUM_SLEEP_USEC      "aaudio.minimum_sleep_usec"
 
 /**
- * Read system property.
- * This is handy in case the DMA is bursting too quickly for the CPU to keep up.
- * For example, there may be a DMA burst every 100 usec but you only
- * want to feed the MMAP buffer every 2000 usec.
- *
- * This will affect the framesPerBurst for an MMAP stream.
- *
- * @return minimum number of microseconds for a MMAP HW burst
- */
-int32_t AAudioProperty_getHardwareBurstMinMicros();
-#define AAUDIO_PROP_HW_BURST_MIN_USEC      "aaudio.hw_burst_min_usec"
-
-/**
  * Read a system property that specifies an offset that will be added to MMAP timestamps.
  * This can be used to correct bias in the timestamp.
  * It can also be used to analyze the time distribution of the timestamp
@@ -227,7 +193,7 @@
  * @return true if f() eventually returns true.
  */
 static inline bool AAudio_tryUntilTrue(
-        std::function<bool()> f, int times, int sleepMs) {
+        const std::function<bool()>& f, int times, int sleepMs) {
     static const useconds_t US_PER_MS = 1000;
 
     sleepMs = std::max(sleepMs, 0);
@@ -299,9 +265,7 @@
 
 class Timestamp {
 public:
-    Timestamp()
-            : mPosition(0)
-            , mNanoseconds(0) {}
+    Timestamp() = default;
     Timestamp(int64_t position, int64_t nanoseconds)
             : mPosition(position)
             , mNanoseconds(nanoseconds) {}
@@ -312,8 +276,8 @@
 
 private:
     // These cannot be const because we need to implement the copy assignment operator.
-    int64_t mPosition;
-    int64_t mNanoseconds;
+    int64_t mPosition{0};
+    int64_t mNanoseconds{0};
 };
 
 
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.h b/media/libaaudio/src/utility/FixedBlockAdapter.h
index 4dc7e68..290e473 100644
--- a/media/libaaudio/src/utility/FixedBlockAdapter.h
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.h
@@ -35,7 +35,7 @@
 class FixedBlockAdapter
 {
 public:
-    FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
+    explicit FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
     : mFixedBlockProcessor(fixedBlockProcessor) {}
 
     virtual ~FixedBlockAdapter() = default;
diff --git a/media/libaaudio/src/utility/FixedBlockReader.h b/media/libaaudio/src/utility/FixedBlockReader.h
index 128dd52..dc82416 100644
--- a/media/libaaudio/src/utility/FixedBlockReader.h
+++ b/media/libaaudio/src/utility/FixedBlockReader.h
@@ -30,7 +30,7 @@
 class FixedBlockReader : public FixedBlockAdapter
 {
 public:
-    FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
+    explicit FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
 
     virtual ~FixedBlockReader() = default;
 
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.h b/media/libaaudio/src/utility/FixedBlockWriter.h
index f1d917c..3e89b5d 100644
--- a/media/libaaudio/src/utility/FixedBlockWriter.h
+++ b/media/libaaudio/src/utility/FixedBlockWriter.h
@@ -28,7 +28,7 @@
 class FixedBlockWriter : public FixedBlockAdapter
 {
 public:
-    FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
+    explicit FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
 
     virtual ~FixedBlockWriter() = default;
 
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 63add4e..313ccbd 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -30,8 +30,8 @@
 class MonotonicCounter {
 
 public:
-    MonotonicCounter() {};
-    virtual ~MonotonicCounter() {};
+    MonotonicCounter() = default;
+    virtual ~MonotonicCounter() = default;
 
     /**
      * @return current value of the counter
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 98e9727..ea00a5a 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -48,7 +48,7 @@
     shared_libs: ["libaaudio_internal"],
 }
 
-cc_test {
+cc_binary {
     name: "test_timestamps",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_timestamps.cpp"],
@@ -60,121 +60,71 @@
     name: "test_open_params",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_open_params.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_no_close",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_no_close.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_aaudio_recovery",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_recovery.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_n_streams",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_n_streams.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_bad_disconnect",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_bad_disconnect.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_various",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_various.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_session_id",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_session_id.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_aaudio_monkey",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_aaudio_monkey.cpp"],
     header_libs: ["libaaudio_example_utils"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_attributes",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_attributes.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_interference",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_interference.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
@@ -196,28 +146,18 @@
     ],
 }
 
-cc_test {
+cc_binary {
     name: "test_return_stop",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_return_stop.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_callback_race",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_callback_race.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
@@ -238,7 +178,7 @@
     ],
 }
 
-cc_test {
+cc_binary {
     name: "test_steal_exclusive",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_steal_exclusive.cpp"],
@@ -251,15 +191,9 @@
     ],
 }
 
-
-cc_test {
+cc_binary {
     name: "test_disconnect_race",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_disconnect_race.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
index d540866..b88d562 100644
--- a/media/libaaudio/tests/test_attributes.cpp
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -16,6 +16,10 @@
 
 // Test AAudio attributes such as Usage, ContentType and InputPreset.
 
+// TODO Many of these tests are duplicates of CTS tests in
+// "test_aaudio_attributes.cpp". That other file is more current.
+// So these tests could be deleted.
+
 #include <stdio.h>
 #include <unistd.h>
 
@@ -91,7 +95,7 @@
     aaudio_allowed_capture_policy_t expectedCapturePolicy =
             (capturePolicy == DONT_SET || capturePolicy == AAUDIO_UNSPECIFIED)
             ? AAUDIO_ALLOW_CAPTURE_BY_ALL // default
-            : preset;
+            : capturePolicy;
     EXPECT_EQ(expectedCapturePolicy, AAudioStream_getAllowedCapturePolicy(aaudioStream));
 
     bool expectedPrivacyMode =
@@ -132,10 +136,7 @@
     AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
     AAUDIO_USAGE_GAME,
     AAUDIO_USAGE_ASSISTANT,
-    AAUDIO_SYSTEM_USAGE_EMERGENCY,
-    AAUDIO_SYSTEM_USAGE_SAFETY,
-    AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS,
-    AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT
+    // Note that the AAUDIO_SYSTEM_USAGE_* values require special permission.
 };
 
 static const aaudio_content_type_t sContentypes[] = {
diff --git a/media/libaaudio/tests/test_flowgraph.cpp b/media/libaaudio/tests/test_flowgraph.cpp
index d563a7e..0792fc5 100644
--- a/media/libaaudio/tests/test_flowgraph.cpp
+++ b/media/libaaudio/tests/test_flowgraph.cpp
@@ -23,6 +23,7 @@
 #include <gtest/gtest.h>
 
 #include "flowgraph/ClipToRange.h"
+#include "flowgraph/MonoBlend.h"
 #include "flowgraph/MonoToMultiConverter.h"
 #include "flowgraph/SourceFloat.h"
 #include "flowgraph/RampLinear.h"
@@ -76,31 +77,40 @@
 }
 
 TEST(test_flowgraph, module_ramp_linear) {
+    constexpr int singleNumOutput = 1;
     constexpr int rampSize = 5;
     constexpr int numOutput = 100;
     constexpr float value = 1.0f;
-    constexpr float target = 100.0f;
+    constexpr float initialTarget = 10.0f;
+    constexpr float finalTarget = 100.0f;
+    constexpr float tolerance = 0.0001f; // arbitrary
     float output[numOutput] = {};
     RampLinear rampLinear{1};
     SinkFloat sinkFloat{1};
 
     rampLinear.input.setValue(value);
     rampLinear.setLengthInFrames(rampSize);
-    rampLinear.setTarget(target);
-    rampLinear.forceCurrent(0.0f);
-
     rampLinear.output.connect(&sinkFloat.input);
 
+    // Check that the values go to the initial target instantly.
+    rampLinear.setTarget(initialTarget);
+    int32_t singleNumRead = sinkFloat.read(output, singleNumOutput);
+    ASSERT_EQ(singleNumRead, singleNumOutput);
+    EXPECT_NEAR(value * initialTarget, output[0], tolerance);
+
+    // Now set target and check that the linear ramp works as expected.
+    rampLinear.setTarget(finalTarget);
     int32_t numRead = sinkFloat.read(output, numOutput);
+    const float incrementSize = (finalTarget - initialTarget) / rampSize;
     ASSERT_EQ(numOutput, numRead);
-    constexpr float tolerance = 0.0001f; // arbitrary
+
     int i = 0;
     for (; i < rampSize; i++) {
-        float expected = i * value * target / rampSize;
+        float expected = value * (initialTarget + i * incrementSize);
         EXPECT_NEAR(expected, output[i], tolerance);
     }
     for (; i < numOutput; i++) {
-        float expected = value * target;
+        float expected = value * finalTarget;
         EXPECT_NEAR(expected, output[i], tolerance);
     }
 }
@@ -155,3 +165,29 @@
         EXPECT_NEAR(expected[i], output[i], tolerance);
     }
 }
+
+TEST(test_flowgraph, module_mono_blend) {
+    // Two channel to two channel with 3 inputs and outputs.
+    constexpr int numChannels = 2;
+    constexpr int numFrames = 3;
+
+    static const float input[] = {-0.7, 0.5, -0.25, 1.25, 1000, 2000};
+    static const float expected[] = {-0.1, -0.1, 0.5, 0.5, 1500, 1500};
+    float output[100];
+    SourceFloat sourceFloat{numChannels};
+    MonoBlend monoBlend{numChannels};
+    SinkFloat sinkFloat{numChannels};
+
+    sourceFloat.setData(input, numFrames);
+
+    sourceFloat.output.connect(&monoBlend.input);
+    monoBlend.output.connect(&sinkFloat.input);
+
+    int32_t numRead = sinkFloat.read(output, numFrames);
+    ASSERT_EQ(numRead, numFrames);
+    constexpr float tolerance = 0.000001f; // arbitrary
+    for (int i = 0; i < numRead; i++) {
+        EXPECT_NEAR(expected[i], output[i], tolerance);
+    }
+}
+
diff --git a/media/libaaudio/tests/test_steal_exclusive.cpp b/media/libaaudio/tests/test_steal_exclusive.cpp
index 05c560d..ca4f3d6 100644
--- a/media/libaaudio/tests/test_steal_exclusive.cpp
+++ b/media/libaaudio/tests/test_steal_exclusive.cpp
@@ -110,7 +110,11 @@
         mOpenDelayMillis = openDelayMillis;
     }
 
-    void restartStream() {
+    void setCloseEnabled(bool enabled) {
+        mCloseEnabled = enabled;
+    }
+
+    aaudio_result_t restartStream() {
         int retriesLeft = mMaxRetries;
         aaudio_result_t result;
         do {
@@ -126,6 +130,7 @@
                     mName.c_str(),
                     AAudio_convertResultToText(result));
         } while (retriesLeft-- > 0 && result != AAUDIO_OK);
+        return result;
     }
 
     aaudio_data_callback_result_t onAudioReady(
@@ -189,10 +194,12 @@
         std::lock_guard<std::mutex> lock(mLock);
         aaudio_result_t result = AAUDIO_OK;
         if (mStream != nullptr) {
-            result = AAudioStream_close(mStream);
-            if (result != AAUDIO_OK) {
-                printf("AAudioStream_close returned %s\n",
-                       AAudio_convertResultToText(result));
+            if (mCloseEnabled) {
+                result = AAudioStream_close(mStream);
+                printf("AAudioStream_close() returned %s\n",
+                        AAudio_convertResultToText(result));
+            } else {
+                printf("AAudioStream_close() DISABLED!\n");
             }
             mStream = nullptr;
         }
@@ -232,6 +239,12 @@
         return AAudioStream_requestStart(mStream);
     }
 
+    aaudio_result_t pause() {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStream == nullptr) return 0;
+        return AAudioStream_requestPause(mStream);
+    }
+
     aaudio_result_t stop() {
         std::lock_guard<std::mutex> lock(mLock);
         if (mStream == nullptr) return 0;
@@ -287,6 +300,7 @@
     std::string         mName;
     int                 mMaxRetries = 1;
     int                 mOpenDelayMillis = 0;
+    bool                mCloseEnabled = true;
 };
 
 // Callback function that fills the audio output buffer.
@@ -319,11 +333,13 @@
 }
 
 static void s_usage() {
-    printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] -s\n");
+    printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] [-p{pausedTime}]-s -c{flag}\n");
     printf("     -i direction INPUT, otherwise OUTPUT\n");
-    printf("     -d delay open by milliseconds, default = 0\n");
-    printf("     -r max retries in the error callback, default = 1\n");
+    printf("     -d Delay open by milliseconds, default = 0\n");
+    printf("     -p Pause first stream then sleep for msec before opening second streams, default = 0\n");
+    printf("     -r max Retries in the error callback, default = 1\n");
     printf("     -s try to open in SHARED mode\n");
+    printf("     -c enable or disabling Closing of the stream with 0/1, default = 1\n");
 }
 
 int main(int argc, char ** argv) {
@@ -334,7 +350,9 @@
     int errorCount = 0;
     int maxRetries = 1;
     int openDelayMillis = 0;
+    bool closeEnabled = true;
     aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+    int pausedMillis = 0;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
@@ -348,12 +366,18 @@
         if (arg[0] == '-') {
             char option = arg[1];
             switch (option) {
+                case 'c':
+                    closeEnabled = atoi(&arg[2]) != 0;
+                    break;
                 case 'd':
                     openDelayMillis = atoi(&arg[2]);
                     break;
                 case 'i':
                     direction = AAUDIO_DIRECTION_INPUT;
                     break;
+                case 'p':
+                    pausedMillis = atoi(&arg[2]);
+                    break;
                 case 'r':
                     maxRetries = atoi(&arg[2]);
                     break;
@@ -376,6 +400,8 @@
     thief.setOpenDelayMillis(openDelayMillis);
     victim.setMaxRetries(maxRetries);
     thief.setMaxRetries(maxRetries);
+    victim.setCloseEnabled(closeEnabled);
+    thief.setCloseEnabled(closeEnabled);
 
     result = victim.openAudioStream(direction, requestedSharingMode);
     if (result != AAUDIO_OK) {
@@ -414,6 +440,12 @@
         }
     }
 
+    if (pausedMillis > 0) {
+        printf("Pausing the VICTIM for %d millis before starting THIEF -----\n", pausedMillis);
+        victim.pause();
+        usleep(pausedMillis * 1000);
+    }
+
     printf("Trying to start the THIEF stream, which may steal the VICTIM MMAP resource -----\n");
     result = thief.openAudioStream(direction, requestedSharingMode);
     if (result != AAUDIO_OK) {
@@ -429,6 +461,25 @@
         errorCount++;
     }
 
+    if (pausedMillis > 0) {
+        result = victim.start();
+        printf("Restarting VICTIM, AAudioStream_requestStart(VICTIM) returned %d "
+               ">>>>>>>>>>>>>>>>>>>>>>\n", result);
+        if (result == AAUDIO_ERROR_DISCONNECTED) {
+            // The stream was disconnected because the THIEF stole the resource.
+            printf("VICTIM was disconnected while paused because the THIEF "
+                   "stole the resource >>>>>>>>>>>>>>>>>>>>>>\n");
+            result = victim.restartStream();
+            printf("Restarting VICTIM, AAudioStream_requestStart(VICTIM) returned %d "
+                   ">>>>>>>>>>>>>>>>>>>>>>\n", result);
+            if (result != AAUDIO_OK) {
+                errorCount++;
+            }
+        } else {
+            errorCount++;
+        }
+    }
+
     // Give stream time to advance.
     usleep(SLEEP_DURATION_MSEC * 1000);
 
@@ -442,7 +493,7 @@
     }
 
     LOGI("Both streams running. Ask user to plug in headset. ====");
-    printf("\n====\nPlease PLUG IN A HEADSET now!\n====\n\n");
+    printf("\n====\nPlease PLUG IN A HEADSET now! - OPTIONAL\n====\n\n");
 
     if (result == AAUDIO_OK) {
         const int watchLoops = DUET_DURATION_MSEC / SLEEP_DURATION_MSEC;
diff --git a/media/libaaudio/tests/test_various.cpp b/media/libaaudio/tests/test_various.cpp
index cbf863f..b68fc7b 100644
--- a/media/libaaudio/tests/test_various.cpp
+++ b/media/libaaudio/tests/test_various.cpp
@@ -25,6 +25,7 @@
 
 #include <gtest/gtest.h>
 #include <unistd.h>
+#include <thread>
 
 // Callback function that does nothing.
 aaudio_data_callback_result_t NoopDataCallbackProc(
@@ -51,6 +52,7 @@
 }
 
 constexpr int64_t NANOS_PER_MILLISECOND = 1000 * 1000;
+constexpr int64_t MICROS_PER_MILLISECOND = 1000;
 
 void checkReleaseThenClose(aaudio_performance_mode_t perfMode,
         aaudio_sharing_mode_t sharingMode,
@@ -762,6 +764,58 @@
     checkCallbackOnce(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
 }
 
+void waitForStateChangeToClosingorClosed(AAudioStream **stream, std::atomic<bool>* isReady)
+{
+    *isReady = true;
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(*stream,
+                                                         AAUDIO_STREAM_STATE_OPEN, &state,
+                                                         10000 * NANOS_PER_MILLISECOND));
+    if ((state != AAUDIO_STREAM_STATE_CLOSING) && (state != AAUDIO_STREAM_STATE_CLOSED)) {
+        FAIL() << "ERROR - State not closing or closed. Current state: " <<
+                AAudio_convertStreamStateToText(state);
+    }
+}
+
+void testWaitForStateChangeClose(aaudio_performance_mode_t perfMode) {
+    AAudioStreamBuilder *aaudioBuilder = nullptr;
+    AAudioStream *aaudioStream = nullptr;
+
+    ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+    AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+    ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+    // Verify Open State
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+                                                         AAUDIO_STREAM_STATE_UNKNOWN, &state,
+                                                         1000 * NANOS_PER_MILLISECOND));
+    EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+    std::atomic<bool> isWaitThreadReady{false};
+
+    // Spawn a new thread to wait for the state change
+    std::thread waitThread (waitForStateChangeToClosingorClosed, &aaudioStream,
+                            &isWaitThreadReady);
+
+    // Wait for worker thread to be ready
+    while (!isWaitThreadReady) {
+        usleep(MICROS_PER_MILLISECOND);
+    }
+    // Sleep one more millisecond so the wait thread has entered AAudioStream_waitForStateChange.
+    usleep(MICROS_PER_MILLISECOND);
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+    waitThread.join();
+}
+
+TEST(test_various, wait_for_state_change_close_none) {
+    testWaitForStateChangeClose(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_various, wait_for_state_change_close_lowlat) {
+    testWaitForStateChangeClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
 // ************************************************************
 struct WakeUpCallbackData {
     void wakeOther() {
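
The AidlConversion.cpp diff that follows replaces the old static_cast / convertReinterpret conversions with table-driven lookups: a single vector of (legacy, AIDL) pairs is turned into direct and reverse unordered_maps. A minimal sketch of that pattern with hypothetical enums (not the real AIDL types) is:

#include <optional>
#include <unordered_map>
#include <utility>
#include <vector>

enum class AidlColor { RED, GREEN };                      // hypothetical AIDL-side enum
enum LegacyColor { LEGACY_RED = 10, LEGACY_GREEN = 20 };  // hypothetical legacy enum

// One table drives both conversion directions, mirroring the AudioDevicePairs /
// AudioFormatPairs approach in the diff below.
static const std::vector<std::pair<LegacyColor, AidlColor>>& getColorPairs() {
    static const std::vector<std::pair<LegacyColor, AidlColor>> pairs = {
        { LEGACY_RED,   AidlColor::RED },
        { LEGACY_GREEN, AidlColor::GREEN },
    };
    return pairs;
}

static std::optional<AidlColor> legacy2aidl(LegacyColor legacy) {
    static const auto m = [] {  // direct map, built once on first use
        std::unordered_map<LegacyColor, AidlColor> result;
        for (const auto& p : getColorPairs()) result.emplace(p.first, p.second);
        return result;
    }();
    if (const auto it = m.find(legacy); it != m.end()) return it->second;
    return std::nullopt;  // unknown value -> conversion error
}

static std::optional<LegacyColor> aidl2legacy(AidlColor aidl) {
    static const auto m = [] {  // reverse map, keys and values swapped
        std::unordered_map<AidlColor, LegacyColor> result;
        for (const auto& p : getColorPairs()) result.emplace(p.second, p.first);
        return result;
    }();
    if (const auto it = m.find(aidl); it != m.end()) return it->second;
    return std::nullopt;
}

Building each map inside a function-local static keeps initialization lazy and lets duplicate-key checks (LOG_ALWAYS_FATAL_IF in the helpers below) run exactly once.
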
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index acab774..b7be3ff 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -14,6 +14,11 @@
  * limitations under the License.
  */
 
+#include <algorithm>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
 #define LOG_TAG "AidlConversion"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
@@ -21,6 +26,7 @@
 #include "media/AidlConversion.h"
 
 #include <media/ShmemCompat.h>
+#include <media/stagefright/foundation/MediaDefs.h>
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // Utilities
@@ -28,6 +34,40 @@
 namespace android {
 
 using base::unexpected;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioContentType;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioEncapsulationMetadataType;
+using media::audio::common::AudioEncapsulationMode;
+using media::audio::common::AudioEncapsulationType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::AudioGain;
+using media::audio::common::AudioGainConfig;
+using media::audio::common::AudioGainMode;
+using media::audio::common::AudioInputFlags;
+using media::audio::common::AudioIoFlags;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioOutputFlags;
+using media::audio::common::AudioPortDeviceExt;
+using media::audio::common::AudioPortExt;
+using media::audio::common::AudioPortMixExt;
+using media::audio::common::AudioPortMixExtUseCase;
+using media::audio::common::AudioProfile;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStandard;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::ExtraAudioDescriptor;
+using media::audio::common::Int;
+using media::audio::common::PcmType;
 
 namespace {
 
@@ -219,75 +259,7 @@
     return std::string(legacy.c_str());
 }
 
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
-        media::AudioPortConfigType aidl) {
-    switch (aidl) {
-        case media::AudioPortConfigType::SAMPLE_RATE:
-            return AUDIO_PORT_CONFIG_SAMPLE_RATE;
-        case media::AudioPortConfigType::CHANNEL_MASK:
-            return AUDIO_PORT_CONFIG_CHANNEL_MASK;
-        case media::AudioPortConfigType::FORMAT:
-            return AUDIO_PORT_CONFIG_FORMAT;
-        case media::AudioPortConfigType::GAIN:
-            return AUDIO_PORT_CONFIG_GAIN;
-        case media::AudioPortConfigType::FLAGS:
-            return AUDIO_PORT_CONFIG_FLAGS;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
-        int32_t legacy) {
-    switch (legacy) {
-        case AUDIO_PORT_CONFIG_SAMPLE_RATE:
-            return media::AudioPortConfigType::SAMPLE_RATE;
-        case AUDIO_PORT_CONFIG_CHANNEL_MASK:
-            return media::AudioPortConfigType::CHANNEL_MASK;
-        case AUDIO_PORT_CONFIG_FORMAT:
-            return media::AudioPortConfigType::FORMAT;
-        case AUDIO_PORT_CONFIG_GAIN:
-            return media::AudioPortConfigType::GAIN;
-        case AUDIO_PORT_CONFIG_FLAGS:
-            return media::AudioPortConfigType::FLAGS;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl) {
-    return convertBitmask<unsigned int, int32_t, int, media::AudioPortConfigType>(
-            aidl, aidl2legacy_AudioPortConfigType_int32_t,
-            // AudioPortConfigType enum is index-based.
-            indexToEnum_index<media::AudioPortConfigType>,
-            // AUDIO_PORT_CONFIG_* flags are mask-based.
-            enumToMask_bitmask<unsigned int, int>);
-}
-
-ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy) {
-    return convertBitmask<int32_t, unsigned int, media::AudioPortConfigType, int>(
-            legacy, legacy2aidl_int32_t_AudioPortConfigType,
-            // AUDIO_PORT_CONFIG_* flags are mask-based.
-            indexToEnum_bitmask<unsigned>,
-            // AudioPortConfigType enum is index-based.
-            enumToMask_index<int32_t, media::AudioPortConfigType>);
-}
-
-ConversionResult<audio_channel_mask_t> aidl2legacy_int32_t_audio_channel_mask_t(int32_t aidl) {
-    // TODO(ytai): should we convert bit-by-bit?
-    // One problem here is that the representation is both opaque and is different based on the
-    // context (input vs. output). Can determine based on type and role, as per useInChannelMask().
-    return convertReinterpret<audio_channel_mask_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_channel_mask_t_int32_t(audio_channel_mask_t legacy) {
-    // TODO(ytai): should we convert bit-by-bit?
-    // One problem here is that the representation is both opaque and is different based on the
-    // context (input vs. output). Can determine based on type and role, as per useInChannelMask().
-    return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_io_config_event> aidl2legacy_AudioIoConfigEvent_audio_io_config_event(
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
         media::AudioIoConfigEvent aidl) {
     switch (aidl) {
         case media::AudioIoConfigEvent::OUTPUT_REGISTERED:
@@ -312,8 +284,8 @@
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
-        audio_io_config_event legacy) {
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+        audio_io_config_event_t legacy) {
     switch (legacy) {
         case AUDIO_OUTPUT_REGISTERED:
             return media::AudioIoConfigEvent::OUTPUT_REGISTERED;
@@ -393,81 +365,1024 @@
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
-        media::audio::common::AudioFormat aidl) {
-    // This relies on AudioFormat being kept in sync with audio_format_t.
-    static_assert(sizeof(media::audio::common::AudioFormat) == sizeof(audio_format_t));
-    return static_cast<audio_format_t>(aidl);
+namespace {
+
+namespace detail {
+using AudioChannelBitPair = std::pair<audio_channel_mask_t, int>;
+using AudioChannelBitPairs = std::vector<AudioChannelBitPair>;
+using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
+using AudioChannelPairs = std::vector<AudioChannelPair>;
+using AudioDevicePair = std::pair<audio_devices_t, AudioDeviceDescription>;
+using AudioDevicePairs = std::vector<AudioDevicePair>;
+using AudioFormatPair = std::pair<audio_format_t, AudioFormatDescription>;
+using AudioFormatPairs = std::vector<AudioFormatPair>;
 }
 
-ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
+const detail::AudioChannelBitPairs& getInAudioChannelBits() {
+    static const detail::AudioChannelBitPairs pairs = {
+        { AUDIO_CHANNEL_IN_LEFT, AudioChannelLayout::CHANNEL_FRONT_LEFT },
+        { AUDIO_CHANNEL_IN_RIGHT, AudioChannelLayout::CHANNEL_FRONT_RIGHT },
+        // AUDIO_CHANNEL_IN_FRONT is at the end
+        { AUDIO_CHANNEL_IN_BACK, AudioChannelLayout::CHANNEL_BACK_CENTER },
+        // AUDIO_CHANNEL_IN_*_PROCESSED not supported
+        // AUDIO_CHANNEL_IN_PRESSURE not supported
+        // AUDIO_CHANNEL_IN_*_AXIS not supported
+        // AUDIO_CHANNEL_IN_VOICE_* not supported
+        { AUDIO_CHANNEL_IN_BACK_LEFT, AudioChannelLayout::CHANNEL_BACK_LEFT },
+        { AUDIO_CHANNEL_IN_BACK_RIGHT, AudioChannelLayout::CHANNEL_BACK_RIGHT },
+        { AUDIO_CHANNEL_IN_CENTER, AudioChannelLayout::CHANNEL_FRONT_CENTER },
+        { AUDIO_CHANNEL_IN_LOW_FREQUENCY, AudioChannelLayout::CHANNEL_LOW_FREQUENCY },
+        { AUDIO_CHANNEL_IN_TOP_LEFT, AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT },
+        { AUDIO_CHANNEL_IN_TOP_RIGHT, AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT },
+        // When going from aidl to legacy, IN_CENTER is used
+        { AUDIO_CHANNEL_IN_FRONT, AudioChannelLayout::CHANNEL_FRONT_CENTER }
+    };
+    return pairs;
+}
+
+const detail::AudioChannelPairs& getInAudioChannelPairs() {
+    static const detail::AudioChannelPairs pairs = {
+#define DEFINE_INPUT_LAYOUT(n)                                                 \
+            {                                                                  \
+                AUDIO_CHANNEL_IN_##n,                                          \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+                        AudioChannelLayout::LAYOUT_##n)                        \
+            }
+
+        DEFINE_INPUT_LAYOUT(MONO),
+        DEFINE_INPUT_LAYOUT(STEREO),
+        DEFINE_INPUT_LAYOUT(FRONT_BACK),
+        // AUDIO_CHANNEL_IN_6 not supported
+        DEFINE_INPUT_LAYOUT(2POINT0POINT2),
+        DEFINE_INPUT_LAYOUT(2POINT1POINT2),
+        DEFINE_INPUT_LAYOUT(3POINT0POINT2),
+        DEFINE_INPUT_LAYOUT(3POINT1POINT2),
+        DEFINE_INPUT_LAYOUT(5POINT1)
+#undef DEFINE_INPUT_LAYOUT
+    };
+    return pairs;
+}
+
+const detail::AudioChannelBitPairs& getOutAudioChannelBits() {
+    static const detail::AudioChannelBitPairs pairs = {
+#define DEFINE_OUTPUT_BITS(n)                                                  \
+            { AUDIO_CHANNEL_OUT_##n, AudioChannelLayout::CHANNEL_##n }
+
+        DEFINE_OUTPUT_BITS(FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(LOW_FREQUENCY),
+        DEFINE_OUTPUT_BITS(BACK_LEFT),
+        DEFINE_OUTPUT_BITS(BACK_RIGHT),
+        DEFINE_OUTPUT_BITS(FRONT_LEFT_OF_CENTER),
+        DEFINE_OUTPUT_BITS(FRONT_RIGHT_OF_CENTER),
+        DEFINE_OUTPUT_BITS(BACK_CENTER),
+        DEFINE_OUTPUT_BITS(SIDE_LEFT),
+        DEFINE_OUTPUT_BITS(SIDE_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_BACK_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_BACK_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_BACK_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_SIDE_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_SIDE_RIGHT),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(LOW_FREQUENCY_2),
+        DEFINE_OUTPUT_BITS(FRONT_WIDE_LEFT),
+        DEFINE_OUTPUT_BITS(FRONT_WIDE_RIGHT),
+#undef DEFINE_OUTPUT_BITS
+        { AUDIO_CHANNEL_OUT_HAPTIC_A, AudioChannelLayout::CHANNEL_HAPTIC_A },
+        { AUDIO_CHANNEL_OUT_HAPTIC_B, AudioChannelLayout::CHANNEL_HAPTIC_B }
+    };
+    return pairs;
+}
+
+const detail::AudioChannelPairs& getOutAudioChannelPairs() {
+    static const detail::AudioChannelPairs pairs = {
+#define DEFINE_OUTPUT_LAYOUT(n)                                                \
+            {                                                                  \
+                AUDIO_CHANNEL_OUT_##n,                                         \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+                        AudioChannelLayout::LAYOUT_##n)                        \
+            }
+
+        DEFINE_OUTPUT_LAYOUT(MONO),
+        DEFINE_OUTPUT_LAYOUT(STEREO),
+        DEFINE_OUTPUT_LAYOUT(2POINT1),
+        DEFINE_OUTPUT_LAYOUT(TRI),
+        DEFINE_OUTPUT_LAYOUT(TRI_BACK),
+        DEFINE_OUTPUT_LAYOUT(3POINT1),
+        DEFINE_OUTPUT_LAYOUT(2POINT0POINT2),
+        DEFINE_OUTPUT_LAYOUT(2POINT1POINT2),
+        DEFINE_OUTPUT_LAYOUT(3POINT0POINT2),
+        DEFINE_OUTPUT_LAYOUT(3POINT1POINT2),
+        DEFINE_OUTPUT_LAYOUT(QUAD),
+        DEFINE_OUTPUT_LAYOUT(QUAD_SIDE),
+        DEFINE_OUTPUT_LAYOUT(SURROUND),
+        DEFINE_OUTPUT_LAYOUT(PENTA),
+        DEFINE_OUTPUT_LAYOUT(5POINT1),
+        DEFINE_OUTPUT_LAYOUT(5POINT1_SIDE),
+        DEFINE_OUTPUT_LAYOUT(5POINT1POINT2),
+        DEFINE_OUTPUT_LAYOUT(5POINT1POINT4),
+        DEFINE_OUTPUT_LAYOUT(6POINT1),
+        DEFINE_OUTPUT_LAYOUT(7POINT1),
+        DEFINE_OUTPUT_LAYOUT(7POINT1POINT2),
+        DEFINE_OUTPUT_LAYOUT(7POINT1POINT4),
+        DEFINE_OUTPUT_LAYOUT(13POINT_360RA),
+        DEFINE_OUTPUT_LAYOUT(22POINT2),
+        DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_A),
+        DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_A),
+        DEFINE_OUTPUT_LAYOUT(HAPTIC_AB),
+        DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_AB),
+        DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_AB)
+#undef DEFINE_OUTPUT_LAYOUT
+    };
+    return pairs;
+}
+
+const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
+    static const detail::AudioChannelPairs pairs = {
+#define DEFINE_VOICE_LAYOUT(n)                                                 \
+            {                                                                  \
+                AUDIO_CHANNEL_IN_VOICE_##n,                                    \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(  \
+                        AudioChannelLayout::VOICE_##n)                         \
+            }
+        DEFINE_VOICE_LAYOUT(UPLINK_MONO),
+        DEFINE_VOICE_LAYOUT(DNLINK_MONO),
+        DEFINE_VOICE_LAYOUT(CALL_MONO)
+#undef DEFINE_VOICE_LAYOUT
+    };
+    return pairs;
+}
+
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
+        const std::string& connection = "") {
+    AudioDeviceDescription result;
+    result.type = type;
+    result.connection = connection;
+    return result;
+}
+
+void append_AudioDeviceDescription(detail::AudioDevicePairs& pairs,
+        audio_devices_t inputType, audio_devices_t outputType,
+        AudioDeviceType inType, AudioDeviceType outType,
+        const std::string& connection = "") {
+    pairs.push_back(std::make_pair(inputType, make_AudioDeviceDescription(inType, connection)));
+    pairs.push_back(std::make_pair(outputType, make_AudioDeviceDescription(outType, connection)));
+}
+
+const detail::AudioDevicePairs& getAudioDevicePairs() {
+    static const detail::AudioDevicePairs pairs = []() {
+        detail::AudioDevicePairs pairs = {{
+            {
+                AUDIO_DEVICE_NONE, AudioDeviceDescription{}
+            },
+            {
+                AUDIO_DEVICE_OUT_EARPIECE, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER_EARPIECE)
+            },
+            {
+                AUDIO_DEVICE_OUT_SPEAKER, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER)
+            },
+            {
+                AUDIO_DEVICE_OUT_WIRED_HEADPHONE, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_HEADPHONE,
+                        AudioDeviceDescription::CONNECTION_ANALOG())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLUETOOTH_SCO, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_DEVICE,
+                        AudioDeviceDescription::CONNECTION_BT_SCO())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_CARKIT,
+                        AudioDeviceDescription::CONNECTION_BT_SCO())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_HEADPHONE,
+                        AudioDeviceDescription::CONNECTION_BT_A2DP())
+            },
+            {
+                AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER,
+                        AudioDeviceDescription::CONNECTION_BT_A2DP())
+            },
+            {
+                AUDIO_DEVICE_OUT_TELEPHONY_TX, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_TELEPHONY_TX)
+            },
+            {
+                AUDIO_DEVICE_OUT_AUX_LINE, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_LINE_AUX)
+            },
+            {
+                AUDIO_DEVICE_OUT_SPEAKER_SAFE, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER_SAFE)
+            },
+            {
+                AUDIO_DEVICE_OUT_HEARING_AID, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_HEARING_AID,
+                        AudioDeviceDescription::CONNECTION_WIRELESS())
+            },
+            {
+                AUDIO_DEVICE_OUT_ECHO_CANCELLER, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_ECHO_CANCELLER)
+            },
+            {
+                AUDIO_DEVICE_OUT_BLE_SPEAKER, make_AudioDeviceDescription(
+                        AudioDeviceType::OUT_SPEAKER,
+                        AudioDeviceDescription::CONNECTION_BT_LE())
+            },
+            // AUDIO_DEVICE_IN_AMBIENT and IN_COMMUNICATION are removed since they were deprecated.
+            {
+                AUDIO_DEVICE_IN_BUILTIN_MIC, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_MICROPHONE)
+            },
+            {
+                AUDIO_DEVICE_IN_BACK_MIC, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_MICROPHONE_BACK)
+            },
+            {
+                AUDIO_DEVICE_IN_TELEPHONY_RX, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_TELEPHONY_RX)
+            },
+            {
+                AUDIO_DEVICE_IN_TV_TUNER, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_TV_TUNER)
+            },
+            {
+                AUDIO_DEVICE_IN_LOOPBACK, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_LOOPBACK)
+            },
+            {
+                AUDIO_DEVICE_IN_BLUETOOTH_BLE, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_DEVICE,
+                        AudioDeviceDescription::CONNECTION_BT_LE())
+            },
+            {
+                AUDIO_DEVICE_IN_ECHO_REFERENCE, make_AudioDeviceDescription(
+                        AudioDeviceType::IN_ECHO_REFERENCE)
+            }
+        }};
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_DEFAULT, AUDIO_DEVICE_OUT_DEFAULT,
+                AudioDeviceType::IN_DEFAULT, AudioDeviceType::OUT_DEFAULT);
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_OUT_WIRED_HEADSET,
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_ANALOG());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_BT_SCO());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_HDMI, AUDIO_DEVICE_OUT_HDMI,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                AudioDeviceType::IN_SUBMIX, AudioDeviceType::OUT_SUBMIX);
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
+                AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+                AudioDeviceDescription::CONNECTION_ANALOG());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
+                AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+                AudioDeviceDescription::CONNECTION_USB());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_ACCESSORY,
+                AudioDeviceType::IN_ACCESSORY, AudioDeviceType::OUT_ACCESSORY,
+                AudioDeviceDescription::CONNECTION_USB());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_OUT_USB_DEVICE,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_USB());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_FM_TUNER, AUDIO_DEVICE_OUT_FM,
+                AudioDeviceType::IN_FM_TUNER, AudioDeviceType::OUT_FM);
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_LINE, AUDIO_DEVICE_OUT_LINE,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_ANALOG());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_SPDIF, AUDIO_DEVICE_OUT_SPDIF,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_SPDIF());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_BT_A2DP());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_IP, AUDIO_DEVICE_OUT_IP,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_IP_V4());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_OUT_BUS,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_BUS());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_PROXY, AUDIO_DEVICE_OUT_PROXY,
+                AudioDeviceType::IN_AFE_PROXY, AudioDeviceType::OUT_AFE_PROXY);
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_USB());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_ARC,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI_ARC());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_HDMI_EARC, AUDIO_DEVICE_OUT_HDMI_EARC,
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI_EARC());
+        append_AudioDeviceDescription(pairs,
+                AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_OUT_BLE_HEADSET,
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_BT_LE());
+        return pairs;
+    }();
+    return pairs;
+}
+
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+    AudioFormatDescription result;
+    result.type = type;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+    auto result = make_AudioFormatDescription(AudioFormatType::PCM);
+    result.pcm = pcm;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+    AudioFormatDescription result;
+    result.encoding = encoding;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
+        const std::string& encoding) {
+    auto result = make_AudioFormatDescription(encoding);
+    result.pcm = transport;
+    return result;
+}
+
+const detail::AudioFormatPairs& getAudioFormatPairs() {
+    static const detail::AudioFormatPairs pairs = {{
+        {
+            AUDIO_FORMAT_INVALID,
+            make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID)
+        },
+        {
+            AUDIO_FORMAT_DEFAULT, AudioFormatDescription{}
+        },
+        {
+            AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(PcmType::INT_16_BIT)
+        },
+        {
+            AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(PcmType::UINT_8_BIT)
+        },
+        {
+            AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(PcmType::INT_32_BIT)
+        },
+        {
+            AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(PcmType::FIXED_Q_8_24)
+        },
+        {
+            AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(PcmType::FLOAT_32_BIT)
+        },
+        {
+            AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(PcmType::INT_24_BIT)
+        },
+        {
+            AUDIO_FORMAT_MP3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG)
+        },
+        {
+            AUDIO_FORMAT_AMR_NB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_NB)
+        },
+        {
+            AUDIO_FORMAT_AMR_WB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB)
+        },
+        {
+            AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MP4)
+        },
+        {
+            AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MAIN)
+        },
+        {
+            AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LC)
+        },
+        {
+            AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SSR)
+        },
+        {
+            AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LTP)
+        },
+        {
+            AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V1)
+        },
+        {
+            AUDIO_FORMAT_AAC_SCALABLE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE)
+        },
+        {
+            AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ERLC)
+        },
+        {
+            AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LD)
+        },
+        {
+            AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V2)
+        },
+        {
+            AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ELD)
+        },
+        {
+            AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_XHE)
+        },
+        // AUDIO_FORMAT_HE_AAC_V1 and HE_AAC_V2 are removed since they were deprecated a long
+        // time ago.
+        {
+            AUDIO_FORMAT_VORBIS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_VORBIS)
+        },
+        {
+            AUDIO_FORMAT_OPUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_OPUS)
+        },
+        {
+            AUDIO_FORMAT_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC3)
+        },
+        {
+            AUDIO_FORMAT_E_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3)
+        },
+        {
+            AUDIO_FORMAT_E_AC3_JOC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3_JOC)
+        },
+        {
+            AUDIO_FORMAT_DTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS)
+        },
+        {
+            AUDIO_FORMAT_DTS_HD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_HD)
+        },
+        // In the future, we would like to represent encapsulated bitstreams as
+        // nested AudioFormatDescriptions. The legacy 'AUDIO_FORMAT_IEC61937' type doesn't
+        // specify the format of the encapsulated bitstream.
+        {
+            AUDIO_FORMAT_IEC61937,
+            make_AudioFormatDescription(PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937)
+        },
+        {
+            AUDIO_FORMAT_DOLBY_TRUEHD,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD)
+        },
+        {
+            AUDIO_FORMAT_EVRC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRC)
+        },
+        {
+            AUDIO_FORMAT_EVRCB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCB)
+        },
+        {
+            AUDIO_FORMAT_EVRCWB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCWB)
+        },
+        {
+            AUDIO_FORMAT_EVRCNW, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCNW)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADIF)
+        },
+        {
+            AUDIO_FORMAT_WMA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_WMA)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_WMA_PRO, make_AudioFormatDescription("audio/x-ms-wma.pro")
+        },
+        {
+            AUDIO_FORMAT_AMR_WB_PLUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)
+        },
+        {
+            AUDIO_FORMAT_MP2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)
+        },
+        {
+            AUDIO_FORMAT_QCELP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_QCELP)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_DSD, make_AudioFormatDescription("audio/vnd.sony.dsd")
+        },
+        {
+            AUDIO_FORMAT_FLAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_FLAC)
+        },
+        {
+            AUDIO_FORMAT_ALAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_ALAC)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APE, make_AudioFormatDescription("audio/x-ape")
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_MAIN,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_SSR,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_LTP,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_HE_V1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_SCALABLE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_ERLC,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_HE_V2,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_ELD,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD)
+        },
+        {
+            AUDIO_FORMAT_AAC_ADTS_XHE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE)
+        },
+        {
+            // Note: not in the IANA registry. "vnd.octel.sbc" is not BT SBC.
+            AUDIO_FORMAT_SBC, make_AudioFormatDescription("audio/x-sbc")
+        },
+        {
+            AUDIO_FORMAT_APTX, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_APTX)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APTX_HD, make_AudioFormatDescription("audio/vnd.qcom.aptx.hd")
+        },
+        {
+            AUDIO_FORMAT_AC4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC4)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_LDAC, make_AudioFormatDescription("audio/vnd.sony.ldac")
+        },
+        {
+            AUDIO_FORMAT_MAT, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_MAT_1_0,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".1.0"))
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_MAT_2_0,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.0"))
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_MAT_2_1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.1"))
+        },
+        {
+            AUDIO_FORMAT_AAC_LATM, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC)
+        },
+        {
+            AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC)
+        },
+        {
+            AUDIO_FORMAT_AAC_LATM_HE_V1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1)
+        },
+        {
+            AUDIO_FORMAT_AAC_LATM_HE_V2,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2)
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_CELT, make_AudioFormatDescription("audio/x-celt")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APTX_ADAPTIVE, make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_LHDC, make_AudioFormatDescription("audio/vnd.savitech.lhdc")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_LHDC_LL, make_AudioFormatDescription("audio/vnd.savitech.lhdc.ll")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_APTX_TWSP, make_AudioFormatDescription("audio/vnd.qcom.aptx.twsp")
+        },
+        {
+            // Note: not in the IANA registry.
+            AUDIO_FORMAT_LC3, make_AudioFormatDescription("audio/x-lc3")
+        },
+        {
+            AUDIO_FORMAT_MPEGH, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1)
+        },
+        {
+            AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3)
+        },
+        {
+            AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4)
+        },
+        {
+            AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3)
+        },
+        {
+            AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4)
+        },
+        {
+            AUDIO_FORMAT_IEC60958,
+            make_AudioFormatDescription(PcmType::INT_24_BIT, MEDIA_MIMETYPE_AUDIO_IEC60958)
+        },
+        {
+            AUDIO_FORMAT_DTS_UHD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_UHD)
+        },
+        {
+            AUDIO_FORMAT_DRA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DRA)
+        },
+    }};
+    return pairs;
+}
+
+template<typename S, typename T>
+std::unordered_map<S, T> make_DirectMap(const std::vector<std::pair<S, T>>& v) {
+    std::unordered_map<S, T> result(v.begin(), v.end());
+    LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+    return result;
+}
+
+template<typename S, typename T>
+std::unordered_map<S, T> make_DirectMap(
+        const std::vector<std::pair<S, T>>& v1, const std::vector<std::pair<S, T>>& v2) {
+    std::unordered_map<S, T> result(v1.begin(), v1.end());
+    LOG_ALWAYS_FATAL_IF(result.size() != v1.size(), "Duplicate key elements detected in v1");
+    result.insert(v2.begin(), v2.end());
+    LOG_ALWAYS_FATAL_IF(result.size() != v1.size() + v2.size(),
+            "Duplicate key elements detected in v1+v2");
+    return result;
+}
+
+template<typename S, typename T>
+std::unordered_map<T, S> make_ReverseMap(const std::vector<std::pair<S, T>>& v) {
+    std::unordered_map<T, S> result;
+    std::transform(v.begin(), v.end(), std::inserter(result, result.begin()),
+            [](const std::pair<S, T>& p) {
+                return std::make_pair(p.second, p.first);
+            });
+    LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+    return result;
+}
+
+}  // namespace
+
+audio_channel_mask_t aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+        int aidlLayout, bool isInput) {
+    auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+    const int aidlLayoutInitial = aidlLayout; // for error message
+    audio_channel_mask_t legacy = AUDIO_CHANNEL_NONE;
+    for (const auto& bitPair : bitMapping) {
+        if ((aidlLayout & bitPair.second) == bitPair.second) {
+            legacy = static_cast<audio_channel_mask_t>(legacy | bitPair.first);
+            aidlLayout &= ~bitPair.second;
+            if (aidlLayout == 0) {
+                return legacy;
+            }
+        }
+    }
+    ALOGE("%s: aidl layout 0x%x contains bits 0x%x that have no match to legacy %s bits",
+            __func__, aidlLayoutInitial, aidlLayout, isInput ? "input" : "output");
+    return AUDIO_CHANNEL_NONE;
+}
+
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+        const AudioChannelLayout& aidl, bool isInput) {
+    using ReverseMap = std::unordered_map<AudioChannelLayout, audio_channel_mask_t>;
+    using Tag = AudioChannelLayout::Tag;
+    static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
+    static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
+    static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
+
+    auto convert = [](const AudioChannelLayout& aidl, const ReverseMap& m,
+            const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
+        if (auto it = m.find(aidl); it != m.end()) {
+            return it->second;
+        } else {
+            ALOGW("%s: no legacy %s audio_channel_mask_t found for %s", func, type,
+                    aidl.toString().c_str());
+            return unexpected(BAD_VALUE);
+        }
+    };
+
+    switch (aidl.getTag()) {
+        case Tag::none:
+            return AUDIO_CHANNEL_NONE;
+        case Tag::invalid:
+            return AUDIO_CHANNEL_INVALID;
+        case Tag::indexMask:
+            // Index masks do not have pre-defined values.
+            if (const int bits = aidl.get<Tag::indexMask>();
+                    __builtin_popcount(bits) != 0 &&
+                    __builtin_popcount(bits) <= AUDIO_CHANNEL_COUNT_MAX) {
+                return audio_channel_mask_from_representation_and_bits(
+                        AUDIO_CHANNEL_REPRESENTATION_INDEX, bits);
+            } else {
+                ALOGE("%s: invalid indexMask value 0x%x in %s",
+                        __func__, bits, aidl.toString().c_str());
+                return unexpected(BAD_VALUE);
+            }
+        case Tag::layoutMask:
+            // The fast path is to find a direct match for some known layout mask.
+            if (const auto layoutMatch = convert(aidl, isInput ? mIn : mOut, __func__,
+                    isInput ? "input" : "output");
+                    layoutMatch.ok()) {
+                return layoutMatch;
+            }
+            // If a match for a predefined layout wasn't found, make a custom one from bits.
+            if (audio_channel_mask_t bitMask =
+                    aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+                            aidl.get<Tag::layoutMask>(), isInput);
+                    bitMask != AUDIO_CHANNEL_NONE) {
+                return bitMask;
+            }
+            return unexpected(BAD_VALUE);
+        case Tag::voiceMask:
+            return convert(aidl, mVoice, __func__, "voice");
+    }
+    ALOGE("%s: unexpected tag value %d", __func__, aidl.getTag());
+    return unexpected(BAD_VALUE);
+}
+
+int legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+        audio_channel_mask_t legacy, bool isInput) {
+    auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+    const int legacyInitial = legacy; // for error message
+    int aidlLayout = 0;
+    for (const auto& bitPair : bitMapping) {
+        if ((legacy & bitPair.first) == bitPair.first) {
+            aidlLayout |= bitPair.second;
+            legacy = static_cast<audio_channel_mask_t>(legacy & ~bitPair.first);
+            if (legacy == 0) {
+                return aidlLayout;
+            }
+        }
+    }
+    ALOGE("%s: legacy %s audio_channel_mask_t 0x%x contains unrecognized bits 0x%x",
+            __func__, isInput ? "input" : "output", legacyInitial, legacy);
+    return 0;
+}
+
+ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+        audio_channel_mask_t legacy, bool isInput) {
+    using DirectMap = std::unordered_map<audio_channel_mask_t, AudioChannelLayout>;
+    using Tag = AudioChannelLayout::Tag;
+    static const DirectMap mInAndVoice = make_DirectMap(
+            getInAudioChannelPairs(), getVoiceAudioChannelPairs());
+    static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
+
+    auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
+            const char* func, const char* type) -> ConversionResult<AudioChannelLayout> {
+        if (auto it = m.find(legacy); it != m.end()) {
+            return it->second;
+        } else {
+            ALOGW("%s: no AudioChannelLayout found for legacy %s audio_channel_mask_t value 0x%x",
+                    func, type, legacy);
+            return unexpected(BAD_VALUE);
+        }
+    };
+
+    if (legacy == AUDIO_CHANNEL_NONE) {
+        return AudioChannelLayout{};
+    } else if (legacy == AUDIO_CHANNEL_INVALID) {
+        return AudioChannelLayout::make<Tag::invalid>(0);
+    }
+
+    const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
+    if (repr == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+        if (audio_channel_mask_is_valid(legacy)) {
+            const int indexMask = VALUE_OR_RETURN(
+                    convertIntegral<int>(audio_channel_mask_get_bits(legacy)));
+            return AudioChannelLayout::make<Tag::indexMask>(indexMask);
+        } else {
+            ALOGE("%s: legacy audio_channel_mask_t value 0x%x is invalid", __func__, legacy);
+            return unexpected(BAD_VALUE);
+        }
+    } else if (repr == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+        // The fast path is to find a direct match for some known layout mask.
+        if (const auto layoutMatch = convert(legacy, isInput ? mInAndVoice : mOut, __func__,
+                isInput ? "input / voice" : "output");
+                layoutMatch.ok()) {
+            return layoutMatch;
+        }
+        // If a match for a predefined layout wasn't found, make a custom one from bits,
+        // rejecting those with voice channel bits.
+        if (!isInput ||
+                (legacy & (AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK)) == 0) {
+            if (int bitMaskLayout =
+                    legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+                            legacy, isInput);
+                    bitMaskLayout != 0) {
+                return AudioChannelLayout::make<Tag::layoutMask>(bitMaskLayout);
+            }
+        } else {
+            ALOGE("%s: legacy audio_channel_mask_t value 0x%x contains voice bits",
+                    __func__, legacy);
+        }
+        return unexpected(BAD_VALUE);
+    }
+
+    ALOGE("%s: unknown representation %d in audio_channel_mask_t value 0x%x",
+            __func__, repr, legacy);
+    return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+        const AudioDeviceDescription& aidl) {
+    static const std::unordered_map<AudioDeviceDescription, audio_devices_t> m =
+            make_ReverseMap(getAudioDevicePairs());
+    if (auto it = m.find(aidl); it != m.end()) {
+        return it->second;
+    } else {
+        ALOGE("%s: no legacy audio_devices_t found for %s", __func__, aidl.toString().c_str());
+        return unexpected(BAD_VALUE);
+    }
+}
+
+ConversionResult<AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
+        audio_devices_t legacy) {
+    static const std::unordered_map<audio_devices_t, AudioDeviceDescription> m =
+            make_DirectMap(getAudioDevicePairs());
+    if (auto it = m.find(legacy); it != m.end()) {
+        return it->second;
+    } else {
+        ALOGE("%s: no AudioDeviceDescription found for legacy audio_devices_t value 0x%x",
+                __func__, legacy);
+        return unexpected(BAD_VALUE);
+    }
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, char* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    return aidl2legacy_string(
+                    aidl.address.get<AudioDeviceAddress::id>(),
+                    legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN);
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, String8* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    *legacyAddress = VALUE_OR_RETURN_STATUS(aidl2legacy_string_view_String8(
+                    aidl.address.get<AudioDeviceAddress::id>()));
+    return OK;
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, std::string* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    *legacyAddress = aidl.address.get<AudioDeviceAddress::id>();
+    return OK;
+}
+
+ConversionResult<AudioDevice> legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const char* legacyAddress) {
+    AudioDevice aidl;
+    aidl.type = VALUE_OR_RETURN(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+    const std::string aidl_id = VALUE_OR_RETURN(
+            legacy2aidl_string(legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+    aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+    return aidl;
+}
+
+ConversionResult<AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const String8& legacyAddress) {
+    AudioDevice aidl;
+    aidl.type = VALUE_OR_RETURN(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+    const std::string aidl_id = VALUE_OR_RETURN(
+            legacy2aidl_String8_string(legacyAddress));
+    aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+    return aidl;
+}
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+        const AudioFormatDescription& aidl) {
+    static const std::unordered_map<AudioFormatDescription, audio_format_t> m =
+            make_ReverseMap(getAudioFormatPairs());
+    if (auto it = m.find(aidl); it != m.end()) {
+        return it->second;
+    } else {
+        ALOGE("%s: no legacy audio_format_t found for %s", __func__, aidl.toString().c_str());
+        return unexpected(BAD_VALUE);
+    }
+}
+
+ConversionResult<AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
         audio_format_t legacy) {
-    // This relies on AudioFormat being kept in sync with audio_format_t.
-    static_assert(sizeof(media::audio::common::AudioFormat) == sizeof(audio_format_t));
-    return static_cast<media::audio::common::AudioFormat>(legacy);
+    static const std::unordered_map<audio_format_t, AudioFormatDescription> m =
+            make_DirectMap(getAudioFormatPairs());
+    if (auto it = m.find(legacy); it != m.end()) {
+        return it->second;
+    } else {
+        ALOGE("%s: no AudioFormatDescription found for legacy audio_format_t value 0x%x",
+                __func__, legacy);
+        return unexpected(BAD_VALUE);
+    }
 }
 
-ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl) {
+ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(
+        AudioGainMode aidl) {
     switch (aidl) {
-        case media::AudioGainMode::JOINT:
+        case AudioGainMode::JOINT:
             return AUDIO_GAIN_MODE_JOINT;
-        case media::AudioGainMode::CHANNELS:
+        case AudioGainMode::CHANNELS:
             return AUDIO_GAIN_MODE_CHANNELS;
-        case media::AudioGainMode::RAMP:
+        case AudioGainMode::RAMP:
             return AUDIO_GAIN_MODE_RAMP;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy) {
+ConversionResult<AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(
+        audio_gain_mode_t legacy) {
     switch (legacy) {
         case AUDIO_GAIN_MODE_JOINT:
-            return media::AudioGainMode::JOINT;
+            return AudioGainMode::JOINT;
         case AUDIO_GAIN_MODE_CHANNELS:
-            return media::AudioGainMode::CHANNELS;
+            return AudioGainMode::CHANNELS;
         case AUDIO_GAIN_MODE_RAMP:
-            return media::AudioGainMode::RAMP;
+            return AudioGainMode::RAMP;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
-    return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, media::AudioGainMode>(
+    return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, AudioGainMode>(
             aidl, aidl2legacy_AudioGainMode_audio_gain_mode_t,
             // AudioGainMode is index-based.
-            indexToEnum_index<media::AudioGainMode>,
+            indexToEnum_index<AudioGainMode>,
             // AUDIO_GAIN_MODE_* constants are mask-based.
             enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
 }
 
 ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy) {
-    return convertBitmask<int32_t, audio_gain_mode_t, media::AudioGainMode, audio_gain_mode_t>(
+    return convertBitmask<int32_t, audio_gain_mode_t, AudioGainMode, audio_gain_mode_t>(
             legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
             // AUDIO_GAIN_MODE_* constants are mask-based.
             indexToEnum_bitmask<audio_gain_mode_t>,
             // AudioGainMode is index-based.
-            enumToMask_index<int32_t, media::AudioGainMode>);
-}
-
-ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl) {
-    // TODO(ytai): bitfield?
-    return convertReinterpret<audio_devices_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy) {
-    // TODO(ytai): bitfield?
-    return convertReinterpret<int32_t>(legacy);
+            enumToMask_index<int32_t, AudioGainMode>);
 }
 
 ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
-        const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type) {
+        const AudioGainConfig& aidl, bool isInput) {
     audio_gain_config legacy;
     legacy.index = VALUE_OR_RETURN(convertIntegral<int>(aidl.index));
     legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
-    legacy.channel_mask =
-            VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
-    const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
-    const bool isJoint = bitmaskIsSet(aidl.mode, media::AudioGainMode::JOINT);
+    legacy.channel_mask = VALUE_OR_RETURN(
+            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+    const bool isJoint = bitmaskIsSet(aidl.mode, AudioGainMode::JOINT);
     size_t numValues = isJoint ? 1
                                : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
                                          : audio_channel_count_from_out_mask(legacy.channel_mask);
@@ -481,14 +1396,13 @@
     return legacy;
 }
 
-ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
-        const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type) {
-    media::AudioGainConfig aidl;
+ConversionResult<AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
+        const audio_gain_config& legacy, bool isInput) {
+    AudioGainConfig aidl;
     aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
     aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
-    aidl.channelMask =
-            VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
-    const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
+    aidl.channelMask = VALUE_OR_RETURN(
+            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     const bool isJoint = (legacy.mode & AUDIO_GAIN_MODE_JOINT) != 0;
     size_t numValues = isJoint ? 1
                                : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
@@ -502,129 +1416,137 @@
 }
 
 ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
-        media::AudioInputFlags aidl) {
+        AudioInputFlags aidl) {
     switch (aidl) {
-        case media::AudioInputFlags::FAST:
+        case AudioInputFlags::FAST:
             return AUDIO_INPUT_FLAG_FAST;
-        case media::AudioInputFlags::HW_HOTWORD:
+        case AudioInputFlags::HW_HOTWORD:
             return AUDIO_INPUT_FLAG_HW_HOTWORD;
-        case media::AudioInputFlags::RAW:
+        case AudioInputFlags::RAW:
             return AUDIO_INPUT_FLAG_RAW;
-        case media::AudioInputFlags::SYNC:
+        case AudioInputFlags::SYNC:
             return AUDIO_INPUT_FLAG_SYNC;
-        case media::AudioInputFlags::MMAP_NOIRQ:
+        case AudioInputFlags::MMAP_NOIRQ:
             return AUDIO_INPUT_FLAG_MMAP_NOIRQ;
-        case media::AudioInputFlags::VOIP_TX:
+        case AudioInputFlags::VOIP_TX:
             return AUDIO_INPUT_FLAG_VOIP_TX;
-        case media::AudioInputFlags::HW_AV_SYNC:
+        case AudioInputFlags::HW_AV_SYNC:
             return AUDIO_INPUT_FLAG_HW_AV_SYNC;
-        case media::AudioInputFlags::DIRECT:
+        case AudioInputFlags::DIRECT:
             return AUDIO_INPUT_FLAG_DIRECT;
+        case AudioInputFlags::ULTRASOUND:
+            return AUDIO_INPUT_FLAG_ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
+ConversionResult<AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
         audio_input_flags_t legacy) {
     switch (legacy) {
         case AUDIO_INPUT_FLAG_NONE:
             break; // shouldn't get here; every value must be listed for -Werror,-Wswitch
         case AUDIO_INPUT_FLAG_FAST:
-            return media::AudioInputFlags::FAST;
+            return AudioInputFlags::FAST;
         case AUDIO_INPUT_FLAG_HW_HOTWORD:
-            return media::AudioInputFlags::HW_HOTWORD;
+            return AudioInputFlags::HW_HOTWORD;
         case AUDIO_INPUT_FLAG_RAW:
-            return media::AudioInputFlags::RAW;
+            return AudioInputFlags::RAW;
         case AUDIO_INPUT_FLAG_SYNC:
-            return media::AudioInputFlags::SYNC;
+            return AudioInputFlags::SYNC;
         case AUDIO_INPUT_FLAG_MMAP_NOIRQ:
-            return media::AudioInputFlags::MMAP_NOIRQ;
+            return AudioInputFlags::MMAP_NOIRQ;
         case AUDIO_INPUT_FLAG_VOIP_TX:
-            return media::AudioInputFlags::VOIP_TX;
+            return AudioInputFlags::VOIP_TX;
         case AUDIO_INPUT_FLAG_HW_AV_SYNC:
-            return media::AudioInputFlags::HW_AV_SYNC;
+            return AudioInputFlags::HW_AV_SYNC;
         case AUDIO_INPUT_FLAG_DIRECT:
-            return media::AudioInputFlags::DIRECT;
+            return AudioInputFlags::DIRECT;
+        case AUDIO_INPUT_FLAG_ULTRASOUND:
+            return AudioInputFlags::ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
-        media::AudioOutputFlags aidl) {
+        AudioOutputFlags aidl) {
     switch (aidl) {
-        case media::AudioOutputFlags::DIRECT:
+        case AudioOutputFlags::DIRECT:
             return AUDIO_OUTPUT_FLAG_DIRECT;
-        case media::AudioOutputFlags::PRIMARY:
+        case AudioOutputFlags::PRIMARY:
             return AUDIO_OUTPUT_FLAG_PRIMARY;
-        case media::AudioOutputFlags::FAST:
+        case AudioOutputFlags::FAST:
             return AUDIO_OUTPUT_FLAG_FAST;
-        case media::AudioOutputFlags::DEEP_BUFFER:
+        case AudioOutputFlags::DEEP_BUFFER:
             return AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
-        case media::AudioOutputFlags::COMPRESS_OFFLOAD:
+        case AudioOutputFlags::COMPRESS_OFFLOAD:
             return AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
-        case media::AudioOutputFlags::NON_BLOCKING:
+        case AudioOutputFlags::NON_BLOCKING:
             return AUDIO_OUTPUT_FLAG_NON_BLOCKING;
-        case media::AudioOutputFlags::HW_AV_SYNC:
+        case AudioOutputFlags::HW_AV_SYNC:
             return AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
-        case media::AudioOutputFlags::TTS:
+        case AudioOutputFlags::TTS:
             return AUDIO_OUTPUT_FLAG_TTS;
-        case media::AudioOutputFlags::RAW:
+        case AudioOutputFlags::RAW:
             return AUDIO_OUTPUT_FLAG_RAW;
-        case media::AudioOutputFlags::SYNC:
+        case AudioOutputFlags::SYNC:
             return AUDIO_OUTPUT_FLAG_SYNC;
-        case media::AudioOutputFlags::IEC958_NONAUDIO:
+        case AudioOutputFlags::IEC958_NONAUDIO:
             return AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
-        case media::AudioOutputFlags::DIRECT_PCM:
+        case AudioOutputFlags::DIRECT_PCM:
             return AUDIO_OUTPUT_FLAG_DIRECT_PCM;
-        case media::AudioOutputFlags::MMAP_NOIRQ:
+        case AudioOutputFlags::MMAP_NOIRQ:
             return AUDIO_OUTPUT_FLAG_MMAP_NOIRQ;
-        case media::AudioOutputFlags::VOIP_RX:
+        case AudioOutputFlags::VOIP_RX:
             return AUDIO_OUTPUT_FLAG_VOIP_RX;
-        case media::AudioOutputFlags::INCALL_MUSIC:
+        case AudioOutputFlags::INCALL_MUSIC:
             return AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
-        case media::AudioOutputFlags::GAPLESS_OFFLOAD:
+        case AudioOutputFlags::GAPLESS_OFFLOAD:
             return AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD;
+        case AudioOutputFlags::ULTRASOUND:
+            return AUDIO_OUTPUT_FLAG_ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
+ConversionResult<AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
         audio_output_flags_t legacy) {
     switch (legacy) {
         case AUDIO_OUTPUT_FLAG_NONE:
             break; // shouldn't get here; every value must be listed for -Werror,-Wswitch
         case AUDIO_OUTPUT_FLAG_DIRECT:
-            return media::AudioOutputFlags::DIRECT;
+            return AudioOutputFlags::DIRECT;
         case AUDIO_OUTPUT_FLAG_PRIMARY:
-            return media::AudioOutputFlags::PRIMARY;
+            return AudioOutputFlags::PRIMARY;
         case AUDIO_OUTPUT_FLAG_FAST:
-            return media::AudioOutputFlags::FAST;
+            return AudioOutputFlags::FAST;
         case AUDIO_OUTPUT_FLAG_DEEP_BUFFER:
-            return media::AudioOutputFlags::DEEP_BUFFER;
+            return AudioOutputFlags::DEEP_BUFFER;
         case AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD:
-            return media::AudioOutputFlags::COMPRESS_OFFLOAD;
+            return AudioOutputFlags::COMPRESS_OFFLOAD;
         case AUDIO_OUTPUT_FLAG_NON_BLOCKING:
-            return media::AudioOutputFlags::NON_BLOCKING;
+            return AudioOutputFlags::NON_BLOCKING;
         case AUDIO_OUTPUT_FLAG_HW_AV_SYNC:
-            return media::AudioOutputFlags::HW_AV_SYNC;
+            return AudioOutputFlags::HW_AV_SYNC;
         case AUDIO_OUTPUT_FLAG_TTS:
-            return media::AudioOutputFlags::TTS;
+            return AudioOutputFlags::TTS;
         case AUDIO_OUTPUT_FLAG_RAW:
-            return media::AudioOutputFlags::RAW;
+            return AudioOutputFlags::RAW;
         case AUDIO_OUTPUT_FLAG_SYNC:
-            return media::AudioOutputFlags::SYNC;
+            return AudioOutputFlags::SYNC;
         case AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO:
-            return media::AudioOutputFlags::IEC958_NONAUDIO;
+            return AudioOutputFlags::IEC958_NONAUDIO;
         case AUDIO_OUTPUT_FLAG_DIRECT_PCM:
-            return media::AudioOutputFlags::DIRECT_PCM;
+            return AudioOutputFlags::DIRECT_PCM;
         case AUDIO_OUTPUT_FLAG_MMAP_NOIRQ:
-            return media::AudioOutputFlags::MMAP_NOIRQ;
+            return AudioOutputFlags::MMAP_NOIRQ;
         case AUDIO_OUTPUT_FLAG_VOIP_RX:
-            return media::AudioOutputFlags::VOIP_RX;
+            return AudioOutputFlags::VOIP_RX;
         case AUDIO_OUTPUT_FLAG_INCALL_MUSIC:
-            return media::AudioOutputFlags::INCALL_MUSIC;
+            return AudioOutputFlags::INCALL_MUSIC;
         case AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD:
-            return media::AudioOutputFlags::GAPLESS_OFFLOAD;
+            return AudioOutputFlags::GAPLESS_OFFLOAD;
+        case AUDIO_OUTPUT_FLAG_ULTRASOUND:
+            return AudioOutputFlags::ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
@@ -634,9 +1556,9 @@
     using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
 
     LegacyMask converted = VALUE_OR_RETURN(
-            (convertBitmask<LegacyMask, int32_t, audio_input_flags_t, media::AudioInputFlags>(
+            (convertBitmask<LegacyMask, int32_t, audio_input_flags_t, AudioInputFlags>(
                     aidl, aidl2legacy_AudioInputFlags_audio_input_flags_t,
-                    indexToEnum_index<media::AudioInputFlags>,
+                    indexToEnum_index<AudioInputFlags>,
                     enumToMask_bitmask<LegacyMask, audio_input_flags_t>)));
     return static_cast<audio_input_flags_t>(converted);
 }
@@ -646,10 +1568,10 @@
     using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
 
     LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
-    return convertBitmask<int32_t, LegacyMask, media::AudioInputFlags, audio_input_flags_t>(
+    return convertBitmask<int32_t, LegacyMask, AudioInputFlags, audio_input_flags_t>(
             legacyMask, legacy2aidl_audio_input_flags_t_AudioInputFlags,
             indexToEnum_bitmask<audio_input_flags_t>,
-            enumToMask_index<int32_t, media::AudioInputFlags>);
+            enumToMask_index<int32_t, AudioInputFlags>);
 }
 
 ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
@@ -657,9 +1579,9 @@
     return convertBitmask<audio_output_flags_t,
             int32_t,
             audio_output_flags_t,
-            media::AudioOutputFlags>(
+            AudioOutputFlags>(
             aidl, aidl2legacy_AudioOutputFlags_audio_output_flags_t,
-            indexToEnum_index<media::AudioOutputFlags>,
+            indexToEnum_index<AudioOutputFlags>,
             enumToMask_bitmask<audio_output_flags_t, audio_output_flags_t>);
 }
 
@@ -668,225 +1590,215 @@
     using LegacyMask = std::underlying_type_t<audio_output_flags_t>;
 
     LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
-    return convertBitmask<int32_t, LegacyMask, media::AudioOutputFlags, audio_output_flags_t>(
+    return convertBitmask<int32_t, LegacyMask, AudioOutputFlags, audio_output_flags_t>(
             legacyMask, legacy2aidl_audio_output_flags_t_AudioOutputFlags,
             indexToEnum_bitmask<audio_output_flags_t>,
-            enumToMask_index<int32_t, media::AudioOutputFlags>);
+            enumToMask_index<int32_t, AudioOutputFlags>);
 }
 
 ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
-        const media::AudioIoFlags& aidl, media::AudioPortRole role, media::AudioPortType type) {
+        const AudioIoFlags& aidl, bool isInput) {
     audio_io_flags legacy;
-    Direction dir = VALUE_OR_RETURN(direction(role, type));
-    switch (dir) {
-        case Direction::INPUT: {
-            legacy.input = VALUE_OR_RETURN(
-                    aidl2legacy_int32_t_audio_input_flags_t_mask(
-                            VALUE_OR_RETURN(UNION_GET(aidl, input))));
-        }
-            break;
-
-        case Direction::OUTPUT: {
-            legacy.output = VALUE_OR_RETURN(
-                    aidl2legacy_int32_t_audio_output_flags_t_mask(
-                            VALUE_OR_RETURN(UNION_GET(aidl, output))));
-        }
-            break;
+    if (isInput) {
+        legacy.input = VALUE_OR_RETURN(
+                aidl2legacy_int32_t_audio_input_flags_t_mask(
+                        VALUE_OR_RETURN(UNION_GET(aidl, input))));
+    } else {
+        legacy.output = VALUE_OR_RETURN(
+                aidl2legacy_int32_t_audio_output_flags_t_mask(
+                        VALUE_OR_RETURN(UNION_GET(aidl, output))));
     }
-
     return legacy;
 }
 
-ConversionResult<media::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
-        const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type) {
-    media::AudioIoFlags aidl;
-
-    Direction dir = VALUE_OR_RETURN(direction(role, type));
-    switch (dir) {
-        case Direction::INPUT:
-            UNION_SET(aidl, input,
-                      VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(
-                              legacy.input)));
-            break;
-        case Direction::OUTPUT:
-            UNION_SET(aidl, output,
-                      VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(
-                              legacy.output)));
-            break;
+ConversionResult<AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
+        const audio_io_flags& legacy, bool isInput) {
+    AudioIoFlags aidl;
+    if (isInput) {
+        UNION_SET(aidl, input,
+                VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(legacy.input)));
+    } else {
+        UNION_SET(aidl, output,
+                VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(legacy.output)));
     }
     return aidl;
 }
 
 ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-        const media::AudioPortConfigDeviceExt& aidl) {
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+        const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlDeviceExt) {
     audio_port_config_device_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.type));
-    RETURN_IF_ERROR(aidl2legacy_string(aidl.address, legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlDeviceExt.hwModule));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+                    aidl.device, &legacy.type, legacy.address));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigDeviceExt>
-legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-        const audio_port_config_device_ext& legacy) {
-    media::AudioPortConfigDeviceExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
-    aidl.address = VALUE_OR_RETURN(
-            legacy2aidl_string(legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
-    return aidl;
+status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+        const audio_port_config_device_ext& legacy,
+        AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+    aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->device = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+    return OK;
 }
 
 ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
-        media::AudioStreamType aidl) {
+        AudioStreamType aidl) {
     switch (aidl) {
-        case media::AudioStreamType::DEFAULT:
+        case AudioStreamType::INVALID:
+            break;  // return error
+        case AudioStreamType::SYS_RESERVED_DEFAULT:
             return AUDIO_STREAM_DEFAULT;
-        case media::AudioStreamType::VOICE_CALL:
+        case AudioStreamType::VOICE_CALL:
             return AUDIO_STREAM_VOICE_CALL;
-        case media::AudioStreamType::SYSTEM:
+        case AudioStreamType::SYSTEM:
             return AUDIO_STREAM_SYSTEM;
-        case media::AudioStreamType::RING:
+        case AudioStreamType::RING:
             return AUDIO_STREAM_RING;
-        case media::AudioStreamType::MUSIC:
+        case AudioStreamType::MUSIC:
             return AUDIO_STREAM_MUSIC;
-        case media::AudioStreamType::ALARM:
+        case AudioStreamType::ALARM:
             return AUDIO_STREAM_ALARM;
-        case media::AudioStreamType::NOTIFICATION:
+        case AudioStreamType::NOTIFICATION:
             return AUDIO_STREAM_NOTIFICATION;
-        case media::AudioStreamType::BLUETOOTH_SCO:
+        case AudioStreamType::BLUETOOTH_SCO:
             return AUDIO_STREAM_BLUETOOTH_SCO;
-        case media::AudioStreamType::ENFORCED_AUDIBLE:
+        case AudioStreamType::ENFORCED_AUDIBLE:
             return AUDIO_STREAM_ENFORCED_AUDIBLE;
-        case media::AudioStreamType::DTMF:
+        case AudioStreamType::DTMF:
             return AUDIO_STREAM_DTMF;
-        case media::AudioStreamType::TTS:
+        case AudioStreamType::TTS:
             return AUDIO_STREAM_TTS;
-        case media::AudioStreamType::ACCESSIBILITY:
+        case AudioStreamType::ACCESSIBILITY:
             return AUDIO_STREAM_ACCESSIBILITY;
-        case media::AudioStreamType::ASSISTANT:
+        case AudioStreamType::ASSISTANT:
             return AUDIO_STREAM_ASSISTANT;
-        case media::AudioStreamType::REROUTING:
+        case AudioStreamType::SYS_RESERVED_REROUTING:
             return AUDIO_STREAM_REROUTING;
-        case media::AudioStreamType::PATCH:
+        case AudioStreamType::SYS_RESERVED_PATCH:
             return AUDIO_STREAM_PATCH;
-        case media::AudioStreamType::CALL_ASSISTANT:
+        case AudioStreamType::CALL_ASSISTANT:
             return AUDIO_STREAM_CALL_ASSISTANT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
+ConversionResult<AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
         audio_stream_type_t legacy) {
     switch (legacy) {
         case AUDIO_STREAM_DEFAULT:
-            return media::AudioStreamType::DEFAULT;
+            return AudioStreamType::SYS_RESERVED_DEFAULT;
         case AUDIO_STREAM_VOICE_CALL:
-            return media::AudioStreamType::VOICE_CALL;
+            return AudioStreamType::VOICE_CALL;
         case AUDIO_STREAM_SYSTEM:
-            return media::AudioStreamType::SYSTEM;
+            return AudioStreamType::SYSTEM;
         case AUDIO_STREAM_RING:
-            return media::AudioStreamType::RING;
+            return AudioStreamType::RING;
         case AUDIO_STREAM_MUSIC:
-            return media::AudioStreamType::MUSIC;
+            return AudioStreamType::MUSIC;
         case AUDIO_STREAM_ALARM:
-            return media::AudioStreamType::ALARM;
+            return AudioStreamType::ALARM;
         case AUDIO_STREAM_NOTIFICATION:
-            return media::AudioStreamType::NOTIFICATION;
+            return AudioStreamType::NOTIFICATION;
         case AUDIO_STREAM_BLUETOOTH_SCO:
-            return media::AudioStreamType::BLUETOOTH_SCO;
+            return AudioStreamType::BLUETOOTH_SCO;
         case AUDIO_STREAM_ENFORCED_AUDIBLE:
-            return media::AudioStreamType::ENFORCED_AUDIBLE;
+            return AudioStreamType::ENFORCED_AUDIBLE;
         case AUDIO_STREAM_DTMF:
-            return media::AudioStreamType::DTMF;
+            return AudioStreamType::DTMF;
         case AUDIO_STREAM_TTS:
-            return media::AudioStreamType::TTS;
+            return AudioStreamType::TTS;
         case AUDIO_STREAM_ACCESSIBILITY:
-            return media::AudioStreamType::ACCESSIBILITY;
+            return AudioStreamType::ACCESSIBILITY;
         case AUDIO_STREAM_ASSISTANT:
-            return media::AudioStreamType::ASSISTANT;
+            return AudioStreamType::ASSISTANT;
         case AUDIO_STREAM_REROUTING:
-            return media::AudioStreamType::REROUTING;
+            return AudioStreamType::SYS_RESERVED_REROUTING;
         case AUDIO_STREAM_PATCH:
-            return media::AudioStreamType::PATCH;
+            return AudioStreamType::SYS_RESERVED_PATCH;
         case AUDIO_STREAM_CALL_ASSISTANT:
-            return media::AudioStreamType::CALL_ASSISTANT;
+            return AudioStreamType::CALL_ASSISTANT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
-        media::AudioSourceType aidl) {
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+        AudioSource aidl) {
     switch (aidl) {
-        case media::AudioSourceType::INVALID:
-            // This value does not have an enum
+        case AudioSource::SYS_RESERVED_INVALID:
             return AUDIO_SOURCE_INVALID;
-        case media::AudioSourceType::DEFAULT:
+        case AudioSource::DEFAULT:
             return AUDIO_SOURCE_DEFAULT;
-        case media::AudioSourceType::MIC:
+        case AudioSource::MIC:
             return AUDIO_SOURCE_MIC;
-        case media::AudioSourceType::VOICE_UPLINK:
+        case AudioSource::VOICE_UPLINK:
             return AUDIO_SOURCE_VOICE_UPLINK;
-        case media::AudioSourceType::VOICE_DOWNLINK:
+        case AudioSource::VOICE_DOWNLINK:
             return AUDIO_SOURCE_VOICE_DOWNLINK;
-        case media::AudioSourceType::VOICE_CALL:
+        case AudioSource::VOICE_CALL:
             return AUDIO_SOURCE_VOICE_CALL;
-        case media::AudioSourceType::CAMCORDER:
+        case AudioSource::CAMCORDER:
             return AUDIO_SOURCE_CAMCORDER;
-        case media::AudioSourceType::VOICE_RECOGNITION:
+        case AudioSource::VOICE_RECOGNITION:
             return AUDIO_SOURCE_VOICE_RECOGNITION;
-        case media::AudioSourceType::VOICE_COMMUNICATION:
+        case AudioSource::VOICE_COMMUNICATION:
             return AUDIO_SOURCE_VOICE_COMMUNICATION;
-        case media::AudioSourceType::REMOTE_SUBMIX:
+        case AudioSource::REMOTE_SUBMIX:
             return AUDIO_SOURCE_REMOTE_SUBMIX;
-        case media::AudioSourceType::UNPROCESSED:
+        case AudioSource::UNPROCESSED:
             return AUDIO_SOURCE_UNPROCESSED;
-        case media::AudioSourceType::VOICE_PERFORMANCE:
+        case AudioSource::VOICE_PERFORMANCE:
             return AUDIO_SOURCE_VOICE_PERFORMANCE;
-        case media::AudioSourceType::ECHO_REFERENCE:
+        case AudioSource::ULTRASOUND:
+            return AUDIO_SOURCE_ULTRASOUND;
+        case AudioSource::ECHO_REFERENCE:
             return AUDIO_SOURCE_ECHO_REFERENCE;
-        case media::AudioSourceType::FM_TUNER:
+        case AudioSource::FM_TUNER:
             return AUDIO_SOURCE_FM_TUNER;
-        case media::AudioSourceType::HOTWORD:
+        case AudioSource::HOTWORD:
             return AUDIO_SOURCE_HOTWORD;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
+ConversionResult<AudioSource> legacy2aidl_audio_source_t_AudioSource(
         audio_source_t legacy) {
     switch (legacy) {
         case AUDIO_SOURCE_INVALID:
-            return media::AudioSourceType::INVALID;
+            return AudioSource::SYS_RESERVED_INVALID;
         case AUDIO_SOURCE_DEFAULT:
-            return media::AudioSourceType::DEFAULT;
+            return AudioSource::DEFAULT;
         case AUDIO_SOURCE_MIC:
-            return media::AudioSourceType::MIC;
+            return AudioSource::MIC;
         case AUDIO_SOURCE_VOICE_UPLINK:
-            return media::AudioSourceType::VOICE_UPLINK;
+            return AudioSource::VOICE_UPLINK;
         case AUDIO_SOURCE_VOICE_DOWNLINK:
-            return media::AudioSourceType::VOICE_DOWNLINK;
+            return AudioSource::VOICE_DOWNLINK;
         case AUDIO_SOURCE_VOICE_CALL:
-            return media::AudioSourceType::VOICE_CALL;
+            return AudioSource::VOICE_CALL;
         case AUDIO_SOURCE_CAMCORDER:
-            return media::AudioSourceType::CAMCORDER;
+            return AudioSource::CAMCORDER;
         case AUDIO_SOURCE_VOICE_RECOGNITION:
-            return media::AudioSourceType::VOICE_RECOGNITION;
+            return AudioSource::VOICE_RECOGNITION;
         case AUDIO_SOURCE_VOICE_COMMUNICATION:
-            return media::AudioSourceType::VOICE_COMMUNICATION;
+            return AudioSource::VOICE_COMMUNICATION;
         case AUDIO_SOURCE_REMOTE_SUBMIX:
-            return media::AudioSourceType::REMOTE_SUBMIX;
+            return AudioSource::REMOTE_SUBMIX;
         case AUDIO_SOURCE_UNPROCESSED:
-            return media::AudioSourceType::UNPROCESSED;
+            return AudioSource::UNPROCESSED;
         case AUDIO_SOURCE_VOICE_PERFORMANCE:
-            return media::AudioSourceType::VOICE_PERFORMANCE;
+            return AudioSource::VOICE_PERFORMANCE;
+        case AUDIO_SOURCE_ULTRASOUND:
+            return AudioSource::ULTRASOUND;
         case AUDIO_SOURCE_ECHO_REFERENCE:
-            return media::AudioSourceType::ECHO_REFERENCE;
+            return AudioSource::ECHO_REFERENCE;
         case AUDIO_SOURCE_FM_TUNER:
-            return media::AudioSourceType::FM_TUNER;
+            return AudioSource::FM_TUNER;
         case AUDIO_SOURCE_HOTWORD:
-            return media::AudioSourceType::HOTWORD;
+            return AudioSource::HOTWORD;
     }
     return unexpected(BAD_VALUE);
 }
@@ -902,8 +1814,8 @@
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_config_mix_ext_usecase = decltype(audio_port_config_mix_ext::usecase);
 
-ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortConfigMixExtUseCase(
-        const media::AudioPortConfigMixExtUseCase& aidl, media::AudioPortRole role) {
+ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortMixExtUseCase(
+        const AudioPortMixExtUseCase& aidl, media::AudioPortRole role) {
     audio_port_config_mix_ext_usecase legacy;
 
     switch (role) {
@@ -920,16 +1832,16 @@
 
         case media::AudioPortRole::SINK:
             // This is not a bug. A SINK role corresponds to the source field.
-            legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(
+            legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(
                     VALUE_OR_RETURN(UNION_GET(aidl, source))));
             return legacy;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<media::AudioPortConfigMixExtUseCase> legacy2aidl_AudioPortConfigMixExtUseCase(
+ConversionResult<AudioPortMixExtUseCase> legacy2aidl_AudioPortMixExtUseCase(
         const audio_port_config_mix_ext_usecase& legacy, audio_port_role_t role) {
-    media::AudioPortConfigMixExtUseCase aidl;
+    AudioPortMixExtUseCase aidl;
 
     switch (role) {
         case AUDIO_PORT_ROLE_NONE:
@@ -943,52 +1855,53 @@
         case AUDIO_PORT_ROLE_SINK:
             // This is not a bug. A SINK role corresponds to the source field.
             UNION_SET(aidl, source,
-                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source)));
             return aidl;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
-        const media::AudioPortConfigMixExt& aidl, media::AudioPortRole role) {
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
+        const AudioPortMixExt& aidl, media::AudioPortRole role,
+        const media::AudioPortMixExtSys& aidlMixExt) {
     audio_port_config_mix_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlMixExt.hwModule));
     legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
-    legacy.usecase = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigMixExtUseCase(aidl.usecase, role));
+    legacy.usecase = VALUE_OR_RETURN(aidl2legacy_AudioPortMixExtUseCase(aidl.usecase, role));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
-        const audio_port_config_mix_ext& legacy, audio_port_role_t role) {
-    media::AudioPortConfigMixExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
-    aidl.usecase = VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExtUseCase(legacy.usecase, role));
-    return aidl;
+status_t legacy2aidl_AudioPortMixExt(
+        const audio_port_config_mix_ext& legacy, audio_port_role_t role,
+        AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+    aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+    aidl->usecase = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_AudioPortMixExtUseCase(legacy.usecase, role));
+    return OK;
 }
 
 ConversionResult<audio_port_config_session_ext>
-aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
-        const media::AudioPortConfigSessionExt& aidl) {
+aidl2legacy_int32_t_audio_port_config_session_ext(int32_t aidl) {
     audio_port_config_session_ext legacy;
-    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigSessionExt>
-legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+ConversionResult<int32_t>
+legacy2aidl_audio_port_config_session_ext_int32_t(
         const audio_port_config_session_ext& legacy) {
-    media::AudioPortConfigSessionExt aidl;
-    aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    return aidl;
+    return legacy2aidl_audio_session_t_int32_t(legacy.session);
 }
 
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_config_ext = decltype(audio_port_config::ext);
 
-ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortConfigExt(
-        const media::AudioPortConfigExt& aidl, media::AudioPortType type,
-        media::AudioPortRole role) {
+ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortExt_audio_port_config_ext(
+        const AudioPortExt& aidl, media::AudioPortType type,
+        media::AudioPortRole role, const media::AudioPortExtSys& aidlSys) {
     audio_port_config_ext legacy;
     switch (type) {
         case media::AudioPortType::NONE:
@@ -997,16 +1910,19 @@
             return legacy;
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, device))));
+                    aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, device)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, device))));
             return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigMixExt(VALUE_OR_RETURN(UNION_GET(aidl, mix)), role));
+                    aidl2legacy_AudioPortMixExt(
+                            VALUE_OR_RETURN(UNION_GET(aidl, mix)), role,
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, mix))));
             return legacy;
         case media::AudioPortType::SESSION:
             legacy.session = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
+                    aidl2legacy_int32_t_audio_port_config_session_ext(
                             VALUE_OR_RETURN(UNION_GET(aidl, session))));
             return legacy;
 
@@ -1014,90 +1930,113 @@
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<media::AudioPortConfigExt> legacy2aidl_AudioPortConfigExt(
-        const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role) {
-    media::AudioPortConfigExt aidl;
-
+status_t legacy2aidl_AudioPortExt(
+        const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role,
+        AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
-            UNION_SET(aidl, unspecified, false);
-            return aidl;
-        case AUDIO_PORT_TYPE_DEVICE:
-            UNION_SET(aidl, device,
-                      VALUE_OR_RETURN(
-                        legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-                          legacy.device)));
-            return aidl;
-        case AUDIO_PORT_TYPE_MIX:
-            UNION_SET(aidl, mix,
-                      VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExt(legacy.mix, role)));
-            return aidl;
+            UNION_SET(*aidl, unspecified, false);
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
+        case AUDIO_PORT_TYPE_DEVICE: {
+            AudioPortDeviceExt device;
+            media::AudioPortDeviceExtSys deviceSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+                            legacy.device, &device, &deviceSys));
+            UNION_SET(*aidl, device, device);
+            UNION_SET(*aidlSys, device, deviceSys);
+            return OK;
+        }
+        case AUDIO_PORT_TYPE_MIX: {
+            AudioPortMixExt mix;
+            media::AudioPortMixExtSys mixSys;
+            RETURN_STATUS_IF_ERROR(legacy2aidl_AudioPortMixExt(legacy.mix, role, &mix, &mixSys));
+            UNION_SET(*aidl, mix, mix);
+            UNION_SET(*aidlSys, mix, mixSys);
+            return OK;
+        }
         case AUDIO_PORT_TYPE_SESSION:
-            UNION_SET(aidl, session,
-                      VALUE_OR_RETURN(
-                        legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
-                          legacy.session)));
-            return aidl;
+            UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+                            legacy2aidl_audio_port_config_session_ext_int32_t(legacy.session)));
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
         const media::AudioPortConfig& aidl) {
-    audio_port_config legacy;
-    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
-    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
-    legacy.config_mask = VALUE_OR_RETURN(aidl2legacy_int32_t_config_mask(aidl.configMask));
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::SAMPLE_RATE)) {
-        legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.sampleRate));
+    audio_port_config legacy{};
+    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
+    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
+    const bool isInput =
+            VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
+    if (aidl.hal.sampleRate.has_value()) {
+        legacy.sample_rate = VALUE_OR_RETURN(
+                convertIntegral<unsigned int>(aidl.hal.sampleRate.value().value));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::CHANNEL_MASK)) {
+    if (aidl.hal.channelMask.has_value()) {
         legacy.channel_mask =
-                VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+                VALUE_OR_RETURN(
+                        aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+                                aidl.hal.channelMask.value(), isInput));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FORMAT)) {
-        legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+    if (aidl.hal.format.has_value()) {
+        legacy.format = VALUE_OR_RETURN(
+                aidl2legacy_AudioFormatDescription_audio_format_t(aidl.hal.format.value()));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_FORMAT;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::GAIN)) {
-        legacy.gain = VALUE_OR_RETURN(
-                aidl2legacy_AudioGainConfig_audio_gain_config(aidl.gain, aidl.role, aidl.type));
+    if (aidl.hal.gain.has_value()) {
+        legacy.gain = VALUE_OR_RETURN(aidl2legacy_AudioGainConfig_audio_gain_config(
+                        aidl.hal.gain.value(), isInput));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_GAIN;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FLAGS)) {
+    if (aidl.hal.flags.has_value()) {
         legacy.flags = VALUE_OR_RETURN(
-                aidl2legacy_AudioIoFlags_audio_io_flags(aidl.flags, aidl.role, aidl.type));
+                aidl2legacy_AudioIoFlags_audio_io_flags(aidl.hal.flags.value(), isInput));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
     }
-    legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigExt(aidl.ext, aidl.type, aidl.role));
+    legacy.ext = VALUE_OR_RETURN(
+            aidl2legacy_AudioPortExt_audio_port_config_ext(
+                    aidl.hal.ext, aidl.sys.type, aidl.sys.role, aidl.sys.ext));
     return legacy;
 }
 
 ConversionResult<media::AudioPortConfig> legacy2aidl_audio_port_config_AudioPortConfig(
         const audio_port_config& legacy) {
     media::AudioPortConfig aidl;
-    aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
-    aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
-    aidl.configMask = VALUE_OR_RETURN(legacy2aidl_config_mask_int32_t(legacy.config_mask));
+    aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+    aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+    aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+    const bool isInput = VALUE_OR_RETURN(
+            direction(legacy.role, legacy.type)) == Direction::INPUT;
     if (legacy.config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+        Int aidl_sampleRate;
+        aidl_sampleRate.value = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+        aidl.hal.sampleRate = aidl_sampleRate;
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        aidl.channelMask =
-                VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+        aidl.hal.channelMask = VALUE_OR_RETURN(
+                legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+        aidl.hal.format = VALUE_OR_RETURN(
+                legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_GAIN) {
-        aidl.gain = VALUE_OR_RETURN(legacy2aidl_audio_gain_config_AudioGainConfig(
-                legacy.gain, legacy.role, legacy.type));
+        aidl.hal.gain = VALUE_OR_RETURN(
+                legacy2aidl_audio_gain_config_AudioGainConfig(legacy.gain, isInput));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_FLAGS) {
-        aidl.flags = VALUE_OR_RETURN(
-                legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, legacy.role, legacy.type));
+        aidl.hal.flags = VALUE_OR_RETURN(
+                legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, isInput));
     }
-    aidl.ext =
-            VALUE_OR_RETURN(legacy2aidl_AudioPortConfigExt(legacy.ext, legacy.type, legacy.role));
+    RETURN_IF_ERROR(legacy2aidl_AudioPortExt(legacy.ext, legacy.type, legacy.role,
+                    &aidl.hal.ext, &aidl.sys.ext));
     return aidl;
 }
 
@@ -1148,33 +2087,40 @@
 
 ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
         const media::AudioIoDescriptor& aidl) {
-    sp<AudioIoDescriptor> legacy(new AudioIoDescriptor());
-    legacy->mIoHandle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
-    legacy->mPatch = VALUE_OR_RETURN(aidl2legacy_AudioPatch_audio_patch(aidl.patch));
-    legacy->mSamplingRate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
-    legacy->mFormat = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
-    legacy->mChannelMask =
-            VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
-    legacy->mFrameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
-    legacy->mFrameCountHAL = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCountHAL));
-    legacy->mLatency = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.latency));
-    legacy->mPortId = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
-    return legacy;
+    const audio_io_handle_t io_handle = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
+    const struct audio_patch patch = VALUE_OR_RETURN(
+            aidl2legacy_AudioPatch_audio_patch(aidl.patch));
+    const bool isInput = aidl.isInput;
+    const uint32_t sampling_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
+    const audio_format_t format = VALUE_OR_RETURN(
+            aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+    const audio_channel_mask_t channel_mask = VALUE_OR_RETURN(
+            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+    const size_t frame_count = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
+    const size_t frame_count_hal = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCountHAL));
+    const uint32_t latency = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.latency));
+    const audio_port_handle_t port_id = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
+    return sp<AudioIoDescriptor>::make(io_handle, patch, isInput, sampling_rate, format,
+            channel_mask, frame_count, frame_count_hal, latency, port_id);
 }
 
 ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
         const sp<AudioIoDescriptor>& legacy) {
     media::AudioIoDescriptor aidl;
-    aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->mIoHandle));
-    aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->mPatch));
-    aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->mSamplingRate));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy->mFormat));
-    aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy->mChannelMask));
-    aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->mFrameCount));
-    aidl.frameCountHAL = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->mFrameCountHAL));
-    aidl.latency = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->mLatency));
-    aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy->mPortId));
+    aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->getIoHandle()));
+    aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->getPatch()));
+    aidl.isInput = legacy->getIsInput();
+    aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getSamplingRate()));
+    aidl.format = VALUE_OR_RETURN(
+            legacy2aidl_audio_format_t_AudioFormatDescription(legacy->getFormat()));
+    aidl.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+                    legacy->getChannelMask(), legacy->getIsInput()));
+    aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->getFrameCount()));
+    aidl.frameCountHAL = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->getFrameCountHAL()));
+    aidl.latency = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getLatency()));
+    aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy->getPortId()));
     return aidl;
 }
 
@@ -1195,137 +2141,143 @@
 }
 
 ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl) {
+aidl2legacy_AudioContentType_audio_content_type_t(AudioContentType aidl) {
     switch (aidl) {
-        case media::AudioContentType::UNKNOWN:
+        case AudioContentType::UNKNOWN:
             return AUDIO_CONTENT_TYPE_UNKNOWN;
-        case media::AudioContentType::SPEECH:
+        case AudioContentType::SPEECH:
             return AUDIO_CONTENT_TYPE_SPEECH;
-        case media::AudioContentType::MUSIC:
+        case AudioContentType::MUSIC:
             return AUDIO_CONTENT_TYPE_MUSIC;
-        case media::AudioContentType::MOVIE:
+        case AudioContentType::MOVIE:
             return AUDIO_CONTENT_TYPE_MOVIE;
-        case media::AudioContentType::SONIFICATION:
+        case AudioContentType::SONIFICATION:
             return AUDIO_CONTENT_TYPE_SONIFICATION;
+        case AudioContentType::ULTRASOUND:
+            return AUDIO_CONTENT_TYPE_ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioContentType>
+ConversionResult<AudioContentType>
 legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy) {
     switch (legacy) {
         case AUDIO_CONTENT_TYPE_UNKNOWN:
-            return media::AudioContentType::UNKNOWN;
+            return AudioContentType::UNKNOWN;
         case AUDIO_CONTENT_TYPE_SPEECH:
-            return media::AudioContentType::SPEECH;
+            return AudioContentType::SPEECH;
         case AUDIO_CONTENT_TYPE_MUSIC:
-            return media::AudioContentType::MUSIC;
+            return AudioContentType::MUSIC;
         case AUDIO_CONTENT_TYPE_MOVIE:
-            return media::AudioContentType::MOVIE;
+            return AudioContentType::MOVIE;
         case AUDIO_CONTENT_TYPE_SONIFICATION:
-            return media::AudioContentType::SONIFICATION;
+            return AudioContentType::SONIFICATION;
+        case AUDIO_CONTENT_TYPE_ULTRASOUND:
+            return AudioContentType::ULTRASOUND;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::AudioUsage aidl) {
+aidl2legacy_AudioUsage_audio_usage_t(AudioUsage aidl) {
     switch (aidl) {
-        case media::AudioUsage::UNKNOWN:
+        case AudioUsage::INVALID:
+            break;  // return error
+        case AudioUsage::UNKNOWN:
             return AUDIO_USAGE_UNKNOWN;
-        case media::AudioUsage::MEDIA:
+        case AudioUsage::MEDIA:
             return AUDIO_USAGE_MEDIA;
-        case media::AudioUsage::VOICE_COMMUNICATION:
+        case AudioUsage::VOICE_COMMUNICATION:
             return AUDIO_USAGE_VOICE_COMMUNICATION;
-        case media::AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
+        case AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
             return AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
-        case media::AudioUsage::ALARM:
+        case AudioUsage::ALARM:
             return AUDIO_USAGE_ALARM;
-        case media::AudioUsage::NOTIFICATION:
+        case AudioUsage::NOTIFICATION:
             return AUDIO_USAGE_NOTIFICATION;
-        case media::AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
+        case AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
             return AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_REQUEST:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_INSTANT:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_DELAYED:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED;
-        case media::AudioUsage::NOTIFICATION_EVENT:
+        case AudioUsage::NOTIFICATION_EVENT:
             return AUDIO_USAGE_NOTIFICATION_EVENT;
-        case media::AudioUsage::ASSISTANCE_ACCESSIBILITY:
+        case AudioUsage::ASSISTANCE_ACCESSIBILITY:
             return AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
-        case media::AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
+        case AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
             return AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
-        case media::AudioUsage::ASSISTANCE_SONIFICATION:
+        case AudioUsage::ASSISTANCE_SONIFICATION:
             return AUDIO_USAGE_ASSISTANCE_SONIFICATION;
-        case media::AudioUsage::GAME:
+        case AudioUsage::GAME:
             return AUDIO_USAGE_GAME;
-        case media::AudioUsage::VIRTUAL_SOURCE:
+        case AudioUsage::VIRTUAL_SOURCE:
             return AUDIO_USAGE_VIRTUAL_SOURCE;
-        case media::AudioUsage::ASSISTANT:
+        case AudioUsage::ASSISTANT:
             return AUDIO_USAGE_ASSISTANT;
-        case media::AudioUsage::CALL_ASSISTANT:
+        case AudioUsage::CALL_ASSISTANT:
             return AUDIO_USAGE_CALL_ASSISTANT;
-        case media::AudioUsage::EMERGENCY:
+        case AudioUsage::EMERGENCY:
             return AUDIO_USAGE_EMERGENCY;
-        case media::AudioUsage::SAFETY:
+        case AudioUsage::SAFETY:
             return AUDIO_USAGE_SAFETY;
-        case media::AudioUsage::VEHICLE_STATUS:
+        case AudioUsage::VEHICLE_STATUS:
             return AUDIO_USAGE_VEHICLE_STATUS;
-        case media::AudioUsage::ANNOUNCEMENT:
+        case AudioUsage::ANNOUNCEMENT:
             return AUDIO_USAGE_ANNOUNCEMENT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioUsage>
+ConversionResult<AudioUsage>
 legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy) {
     switch (legacy) {
         case AUDIO_USAGE_UNKNOWN:
-            return media::AudioUsage::UNKNOWN;
+            return AudioUsage::UNKNOWN;
         case AUDIO_USAGE_MEDIA:
-            return media::AudioUsage::MEDIA;
+            return AudioUsage::MEDIA;
         case AUDIO_USAGE_VOICE_COMMUNICATION:
-            return media::AudioUsage::VOICE_COMMUNICATION;
+            return AudioUsage::VOICE_COMMUNICATION;
         case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-            return media::AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
+            return AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
         case AUDIO_USAGE_ALARM:
-            return media::AudioUsage::ALARM;
+            return AudioUsage::ALARM;
         case AUDIO_USAGE_NOTIFICATION:
-            return media::AudioUsage::NOTIFICATION;
+            return AudioUsage::NOTIFICATION;
         case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-            return media::AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
+            return AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_REQUEST;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_INSTANT;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_DELAYED;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED;
         case AUDIO_USAGE_NOTIFICATION_EVENT:
-            return media::AudioUsage::NOTIFICATION_EVENT;
+            return AudioUsage::NOTIFICATION_EVENT;
         case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-            return media::AudioUsage::ASSISTANCE_ACCESSIBILITY;
+            return AudioUsage::ASSISTANCE_ACCESSIBILITY;
         case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-            return media::AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
+            return AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
         case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-            return media::AudioUsage::ASSISTANCE_SONIFICATION;
+            return AudioUsage::ASSISTANCE_SONIFICATION;
         case AUDIO_USAGE_GAME:
-            return media::AudioUsage::GAME;
+            return AudioUsage::GAME;
         case AUDIO_USAGE_VIRTUAL_SOURCE:
-            return media::AudioUsage::VIRTUAL_SOURCE;
+            return AudioUsage::VIRTUAL_SOURCE;
         case AUDIO_USAGE_ASSISTANT:
-            return media::AudioUsage::ASSISTANT;
+            return AudioUsage::ASSISTANT;
         case AUDIO_USAGE_CALL_ASSISTANT:
-            return media::AudioUsage::CALL_ASSISTANT;
+            return AudioUsage::CALL_ASSISTANT;
         case AUDIO_USAGE_EMERGENCY:
-            return media::AudioUsage::EMERGENCY;
+            return AudioUsage::EMERGENCY;
         case AUDIO_USAGE_SAFETY:
-            return media::AudioUsage::SAFETY;
+            return AudioUsage::SAFETY;
         case AUDIO_USAGE_VEHICLE_STATUS:
-            return media::AudioUsage::VEHICLE_STATUS;
+            return AudioUsage::VEHICLE_STATUS;
         case AUDIO_USAGE_ANNOUNCEMENT:
-            return media::AudioUsage::ANNOUNCEMENT;
+            return AudioUsage::ANNOUNCEMENT;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1365,6 +2317,8 @@
             return AUDIO_FLAG_CONTENT_SPATIALIZED;
         case media::AudioFlag::NEVER_SPATIALIZE:
             return AUDIO_FLAG_NEVER_SPATIALIZE;
+        case media::AudioFlag::CALL_REDIRECTION:
+            return AUDIO_FLAG_CALL_REDIRECTION;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1406,6 +2360,8 @@
             return media::AudioFlag::CONTENT_SPATIALIZED;
         case AUDIO_FLAG_NEVER_SPATIALIZE:
             return media::AudioFlag::NEVER_SPATIALIZE;
+        case AUDIO_FLAG_CALL_REDIRECTION:
+            return media::AudioFlag::CALL_REDIRECTION;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1431,7 +2387,7 @@
     legacy.content_type = VALUE_OR_RETURN(
             aidl2legacy_AudioContentType_audio_content_type_t(aidl.contentType));
     legacy.usage = VALUE_OR_RETURN(aidl2legacy_AudioUsage_audio_usage_t(aidl.usage));
-    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(aidl.source));
+    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
     legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_flags_mask_t_mask(aidl.flags));
     RETURN_IF_ERROR(aidl2legacy_string(aidl.tags, legacy.tags, sizeof(legacy.tags)));
     return legacy;
@@ -1443,51 +2399,51 @@
     aidl.contentType = VALUE_OR_RETURN(
             legacy2aidl_audio_content_type_t_AudioContentType(legacy.content_type));
     aidl.usage = VALUE_OR_RETURN(legacy2aidl_audio_usage_t_AudioUsage(legacy.usage));
-    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source));
+    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source));
     aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_flags_mask_t_int32_t_mask(legacy.flags));
     aidl.tags = VALUE_OR_RETURN(legacy2aidl_string(legacy.tags, sizeof(legacy.tags)));
     return aidl;
 }
 
 ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(media::AudioEncapsulationMode aidl) {
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(AudioEncapsulationMode aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationMode::NONE:
+        case AudioEncapsulationMode::INVALID:
+            break;  // return error
+        case AudioEncapsulationMode::NONE:
             return AUDIO_ENCAPSULATION_MODE_NONE;
-        case media::AudioEncapsulationMode::ELEMENTARY_STREAM:
+        case AudioEncapsulationMode::ELEMENTARY_STREAM:
             return AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM;
-        case media::AudioEncapsulationMode::HANDLE:
+        case AudioEncapsulationMode::HANDLE:
             return AUDIO_ENCAPSULATION_MODE_HANDLE;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationMode>
+ConversionResult<AudioEncapsulationMode>
 legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_MODE_NONE:
-            return media::AudioEncapsulationMode::NONE;
+            return AudioEncapsulationMode::NONE;
         case AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM:
-            return media::AudioEncapsulationMode::ELEMENTARY_STREAM;
+            return AudioEncapsulationMode::ELEMENTARY_STREAM;
         case AUDIO_ENCAPSULATION_MODE_HANDLE:
-            return media::AudioEncapsulationMode::HANDLE;
+            return AudioEncapsulationMode::HANDLE;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const media::AudioOffloadInfo& aidl) {
-    audio_offload_info_t legacy;
-    legacy.version = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.version));
-    legacy.size = sizeof(audio_offload_info_t);
-    audio_config_base_t config = VALUE_OR_RETURN(
-            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
-    legacy.sample_rate = config.sample_rate;
-    legacy.channel_mask = config.channel_mask;
-    legacy.format = config.format;
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const AudioOffloadInfo& aidl) {
+    audio_offload_info_t legacy = AUDIO_INFO_INITIALIZER;
+    audio_config_base_t base = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, false /*isInput*/));
+    legacy.sample_rate = base.sample_rate;
+    legacy.channel_mask = base.channel_mask;
+    legacy.format = base.format;
     legacy.stream_type = VALUE_OR_RETURN(
             aidl2legacy_AudioStreamType_audio_stream_type_t(aidl.streamType));
-    legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRate));
+    legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRatePerSecond));
     legacy.duration_us = VALUE_OR_RETURN(convertIntegral<int64_t>(aidl.durationUs));
     legacy.has_video = aidl.hasVideo;
     legacy.is_streaming = aidl.isStreaming;
@@ -1501,21 +2457,20 @@
     return legacy;
 }
 
-ConversionResult<media::AudioOffloadInfo>
+ConversionResult<AudioOffloadInfo>
 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy) {
-    media::AudioOffloadInfo aidl;
+    AudioOffloadInfo aidl;
     // Version 0.1 fields.
     if (legacy.size < offsetof(audio_offload_info_t, usage) + sizeof(audio_offload_info_t::usage)) {
         return unexpected(BAD_VALUE);
     }
-    aidl.version = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.version));
-    aidl.config.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
-    aidl.config.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
-    aidl.config.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+    const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+        .channel_mask = legacy.channel_mask, .format = legacy.format };
+    aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(
+                    base, false /*isInput*/));
     aidl.streamType = VALUE_OR_RETURN(
             legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream_type));
-    aidl.bitRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
+    aidl.bitRatePerSecond = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
     aidl.durationUs = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.duration_us));
     aidl.hasVideo = legacy.has_video;
     aidl.isStreaming = legacy.is_streaming;
@@ -1539,25 +2494,25 @@
 }
 
 ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl) {
-    audio_config_t legacy;
-    legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
-    legacy.channel_mask = VALUE_OR_RETURN(
-            aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
-    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+aidl2legacy_AudioConfig_audio_config_t(const AudioConfig& aidl, bool isInput) {
+    const audio_config_base_t legacyBase = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, isInput));
+    audio_config_t legacy = AUDIO_CONFIG_INITIALIZER;
+    legacy.sample_rate = legacyBase.sample_rate;
+    legacy.channel_mask = legacyBase.channel_mask;
+    legacy.format = legacyBase.format;
     legacy.offload_info = VALUE_OR_RETURN(
             aidl2legacy_AudioOffloadInfo_audio_offload_info_t(aidl.offloadInfo));
     legacy.frame_count = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.frameCount));
     return legacy;
 }
 
-ConversionResult<media::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy) {
-    media::AudioConfig aidl;
-    aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
-    aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ConversionResult<AudioConfig>
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput) {
+    const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+        .channel_mask = legacy.channel_mask, .format = legacy.format };
+    AudioConfig aidl;
+    aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(base, isInput));
     aidl.offloadInfo = VALUE_OR_RETURN(
             legacy2aidl_audio_offload_info_t_AudioOffloadInfo(legacy.offload_info));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.frame_count));
@@ -1565,22 +2520,22 @@
 }
 
 ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl) {
+aidl2legacy_AudioConfigBase_audio_config_base_t(const AudioConfigBase& aidl, bool isInput) {
     audio_config_base_t legacy;
     legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
     legacy.channel_mask = VALUE_OR_RETURN(
-            aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
-    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
     return legacy;
 }
 
-ConversionResult<media::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy) {
-    media::AudioConfigBase aidl;
+ConversionResult<AudioConfigBase>
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput) {
+    AudioConfigBase aidl;
     aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
     aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
     return aidl;
 }
 
@@ -1639,7 +2594,7 @@
 }
 
 ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl) {
+aidl2legacy_AudioUuid_audio_uuid_t(const AudioUuid& aidl) {
     audio_uuid_t legacy;
     legacy.timeLow = VALUE_OR_RETURN(convertReinterpret<uint32_t>(aidl.timeLow));
     legacy.timeMid = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.timeMid));
@@ -1652,9 +2607,9 @@
     return legacy;
 }
 
-ConversionResult<media::AudioUuid>
+ConversionResult<AudioUuid>
 legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy) {
-    media::AudioUuid aidl;
+    AudioUuid aidl;
     aidl.timeLow = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.timeLow));
     aidl.timeMid = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeMid));
     aidl.timeHiAndVersion = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeHiAndVersion));
@@ -1695,28 +2650,28 @@
 
 ConversionResult<audio_encapsulation_metadata_type_t>
 aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
-        media::AudioEncapsulationMetadataType aidl) {
+        AudioEncapsulationMetadataType aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationMetadataType::NONE:
+        case AudioEncapsulationMetadataType::NONE:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_NONE;
-        case media::AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
+        case AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER;
-        case media::AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
+        case AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationMetadataType>
+ConversionResult<AudioEncapsulationMetadataType>
 legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
         audio_encapsulation_metadata_type_t legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_METADATA_TYPE_NONE:
-            return media::AudioEncapsulationMetadataType::NONE;
+            return AudioEncapsulationMetadataType::NONE;
         case AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER:
-            return media::AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
+            return AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
         case AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR:
-            return media::AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
+            return AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1726,9 +2681,9 @@
     return convertBitmask<uint32_t,
             int32_t,
             audio_encapsulation_mode_t,
-            media::AudioEncapsulationMode>(
+            AudioEncapsulationMode>(
             aidl, aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t,
-            indexToEnum_index<media::AudioEncapsulationMode>,
+            indexToEnum_index<AudioEncapsulationMode>,
             enumToMask_index<uint32_t, audio_encapsulation_mode_t>);
 }
 
@@ -1736,11 +2691,11 @@
 legacy2aidl_AudioEncapsulationMode_mask(uint32_t legacy) {
     return convertBitmask<int32_t,
             uint32_t,
-            media::AudioEncapsulationMode,
+            AudioEncapsulationMode,
             audio_encapsulation_mode_t>(
             legacy, legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode,
             indexToEnum_index<audio_encapsulation_mode_t>,
-            enumToMask_index<int32_t, media::AudioEncapsulationMode>);
+            enumToMask_index<int32_t, AudioEncapsulationMode>);
 }
 
 ConversionResult<uint32_t>
@@ -1748,9 +2703,9 @@
     return convertBitmask<uint32_t,
             int32_t,
             audio_encapsulation_metadata_type_t,
-            media::AudioEncapsulationMetadataType>(
+            AudioEncapsulationMetadataType>(
             aidl, aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t,
-            indexToEnum_index<media::AudioEncapsulationMetadataType>,
+            indexToEnum_index<AudioEncapsulationMetadataType>,
             enumToMask_index<uint32_t, audio_encapsulation_metadata_type_t>);
 }
 
@@ -1758,104 +2713,79 @@
 legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy) {
     return convertBitmask<int32_t,
             uint32_t,
-            media::AudioEncapsulationMetadataType,
+            AudioEncapsulationMetadataType,
             audio_encapsulation_metadata_type_t>(
             legacy, legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType,
             indexToEnum_index<audio_encapsulation_metadata_type_t>,
-            enumToMask_index<int32_t, media::AudioEncapsulationMetadataType>);
-}
-
-ConversionResult<audio_mix_latency_class_t>
-aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
-        media::AudioMixLatencyClass aidl) {
-    switch (aidl) {
-        case media::AudioMixLatencyClass::LOW:
-            return AUDIO_LATENCY_LOW;
-        case media::AudioMixLatencyClass::NORMAL:
-            return AUDIO_LATENCY_NORMAL;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-ConversionResult<media::AudioMixLatencyClass>
-legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
-        audio_mix_latency_class_t legacy) {
-    switch (legacy) {
-        case AUDIO_LATENCY_LOW:
-            return media::AudioMixLatencyClass::LOW;
-        case AUDIO_LATENCY_NORMAL:
-            return media::AudioMixLatencyClass::NORMAL;
-    }
-    return unexpected(BAD_VALUE);
+            enumToMask_index<int32_t, AudioEncapsulationMetadataType>);
 }
 
 ConversionResult<audio_port_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl) {
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+        const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlSys) {
     audio_port_device_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
-    RETURN_IF_ERROR(
-            aidl2legacy_string(aidl.device.address, legacy.address, sizeof(legacy.address)));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+                    aidl.device, &legacy.type, legacy.address));
     legacy.encapsulation_modes = VALUE_OR_RETURN(
-            aidl2legacy_AudioEncapsulationMode_mask(aidl.encapsulationModes));
+            aidl2legacy_AudioEncapsulationMode_mask(aidlSys.encapsulationModes));
     legacy.encapsulation_metadata_types = VALUE_OR_RETURN(
-            aidl2legacy_AudioEncapsulationMetadataType_mask(aidl.encapsulationMetadataTypes));
+            aidl2legacy_AudioEncapsulationMetadataType_mask(
+                    aidlSys.encapsulationMetadataTypes));
     return legacy;
 }
 
-ConversionResult<media::AudioPortDeviceExt>
-legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy) {
-    media::AudioPortDeviceExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
-    aidl.device.address = VALUE_OR_RETURN(
-            legacy2aidl_string(legacy.address, sizeof(legacy.address)));
-    aidl.encapsulationModes = VALUE_OR_RETURN(
+status_t legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+        const audio_port_device_ext& legacy,
+        AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+    aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->device = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+    aidlDeviceExt->encapsulationModes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMode_mask(legacy.encapsulation_modes));
-    aidl.encapsulationMetadataTypes = VALUE_OR_RETURN(
+    aidlDeviceExt->encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMetadataType_mask(legacy.encapsulation_metadata_types));
-    return aidl;
+    return OK;
 }
 
 ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl) {
-    audio_port_mix_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+        const AudioPortMixExt& aidl, const media::AudioPortMixExtSys& aidlSys) {
+    audio_port_mix_ext legacy{};
+    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
     legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
-    legacy.latency_class = VALUE_OR_RETURN(
-            aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(aidl.latencyClass));
     return legacy;
 }
 
-ConversionResult<media::AudioPortMixExt>
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy) {
-    media::AudioPortMixExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
-    aidl.latencyClass = VALUE_OR_RETURN(
-            legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(legacy.latency_class));
-    return aidl;
+status_t
+legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy,
+        AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+    aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+    return OK;
 }
 
 ConversionResult<audio_port_session_ext>
-aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl) {
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl) {
     audio_port_session_ext legacy;
-    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
     return legacy;
 }
 
-ConversionResult<media::AudioPortSessionExt>
-legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy) {
-    media::AudioPortSessionExt aidl;
-    aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    return aidl;
+ConversionResult<int32_t>
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy) {
+    return legacy2aidl_audio_session_t_int32_t(legacy.session);
 }
 
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_v7_ext = decltype(audio_port_v7::ext);
 
-ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt(
-        const media::AudioPortExt& aidl, media::AudioPortType type) {
+ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt_audio_port_v7_ext(
+        const AudioPortExt& aidl, media::AudioPortType type,
+        const media::AudioPortExtSys& aidlSys) {
     audio_port_v7_ext legacy;
     switch (type) {
         case media::AudioPortType::NONE:
@@ -1865,66 +2795,83 @@
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, device))));
+                            VALUE_OR_RETURN(UNION_GET(aidl, device)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, device))));
             return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, mix))));
+                            VALUE_OR_RETURN(UNION_GET(aidl, mix)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, mix))));
             return legacy;
         case media::AudioPortType::SESSION:
-            legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortSessionExt_audio_port_session_ext(
-                    VALUE_OR_RETURN(UNION_GET(aidl, session))));
+            legacy.session = VALUE_OR_RETURN(
+                    aidl2legacy_int32_t_audio_port_session_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, session))));
             return legacy;
 
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<media::AudioPortExt> legacy2aidl_AudioPortExt(
-        const audio_port_v7_ext& legacy, audio_port_type_t type) {
-    media::AudioPortExt aidl;
+status_t legacy2aidl_AudioPortExt(
+        const audio_port_v7_ext& legacy, audio_port_type_t type,
+        AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
-            UNION_SET(aidl, unspecified, false);
-            return aidl;
-        case AUDIO_PORT_TYPE_DEVICE:
-            UNION_SET(aidl, device,
-                      VALUE_OR_RETURN(
-                              legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy.device)));
-            return aidl;
-        case AUDIO_PORT_TYPE_MIX:
-            UNION_SET(aidl, mix,
-                      VALUE_OR_RETURN(legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy.mix)));
-            return aidl;
+            UNION_SET(*aidl, unspecified, false);
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
+        case AUDIO_PORT_TYPE_DEVICE: {
+            AudioPortDeviceExt device;
+            media::AudioPortDeviceExtSys deviceSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+                            legacy.device, &device, &deviceSys));
+            UNION_SET(*aidl, device, device);
+            UNION_SET(*aidlSys, device, deviceSys);
+            return OK;
+        }
+        case AUDIO_PORT_TYPE_MIX: {
+            AudioPortMixExt mix;
+            media::AudioPortMixExtSys mixSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+                            legacy.mix, &mix, &mixSys));
+            UNION_SET(*aidl, mix, mix);
+            UNION_SET(*aidlSys, mix, mixSys);
+            return OK;
+        }
         case AUDIO_PORT_TYPE_SESSION:
-            UNION_SET(aidl, session,
-                      VALUE_OR_RETURN(legacy2aidl_audio_port_session_ext_AudioPortSessionExt(
-                              legacy.session)));
-            return aidl;
+            UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+                            legacy2aidl_audio_port_session_ext_int32_t(legacy.session)));
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl) {
+aidl2legacy_AudioProfile_audio_profile(const AudioProfile& aidl, bool isInput) {
     audio_profile legacy;
-    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
 
-    if (aidl.samplingRates.size() > std::size(legacy.sample_rates)) {
+    if (aidl.sampleRates.size() > std::size(legacy.sample_rates)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
-            convertRange(aidl.samplingRates.begin(), aidl.samplingRates.end(), legacy.sample_rates,
+            convertRange(aidl.sampleRates.begin(), aidl.sampleRates.end(), legacy.sample_rates,
                          convertIntegral<int32_t, unsigned int>));
-    legacy.num_sample_rates = aidl.samplingRates.size();
+    legacy.num_sample_rates = aidl.sampleRates.size();
 
     if (aidl.channelMasks.size() > std::size(legacy.channel_masks)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
             convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
-                         aidl2legacy_int32_t_audio_channel_mask_t));
+                    [isInput](const AudioChannelLayout& l) {
+                        return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+                    }));
     legacy.num_channel_masks = aidl.channelMasks.size();
 
     legacy.encapsulation_type = VALUE_OR_RETURN(
@@ -1932,17 +2879,17 @@
     return legacy;
 }
 
-ConversionResult<media::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy) {
-    media::AudioProfile aidl;
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ConversionResult<AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput) {
+    AudioProfile aidl;
+    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
 
     if (legacy.num_sample_rates > std::size(legacy.sample_rates)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
             convertRange(legacy.sample_rates, legacy.sample_rates + legacy.num_sample_rates,
-                         std::back_inserter(aidl.samplingRates),
+                         std::back_inserter(aidl.sampleRates),
                          convertIntegral<unsigned int, int32_t>));
 
     if (legacy.num_channel_masks > std::size(legacy.channel_masks)) {
@@ -1951,7 +2898,9 @@
     RETURN_IF_ERROR(
             convertRange(legacy.channel_masks, legacy.channel_masks + legacy.num_channel_masks,
                          std::back_inserter(aidl.channelMasks),
-                         legacy2aidl_audio_channel_mask_t_int32_t));
+                    [isInput](audio_channel_mask_t m) {
+                        return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+                    }));
 
     aidl.encapsulationType = VALUE_OR_RETURN(
             legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
@@ -1960,11 +2909,11 @@
 }
 
 ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl) {
+aidl2legacy_AudioGain_audio_gain(const AudioGain& aidl, bool isInput) {
     audio_gain legacy;
     legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
-    legacy.channel_mask = VALUE_OR_RETURN(
-            aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+    legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+                    aidl.channelMask, isInput));
     legacy.min_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.minValue));
     legacy.max_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.maxValue));
     legacy.default_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.defaultValue));
@@ -1974,12 +2923,12 @@
     return legacy;
 }
 
-ConversionResult<media::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy) {
-    media::AudioGain aidl;
+ConversionResult<AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput) {
+    AudioGain aidl;
     aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
     aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
     aidl.maxValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_value));
     aidl.defaultValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.default_value));
@@ -1992,63 +2941,76 @@
 ConversionResult<audio_port_v7>
 aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl) {
     audio_port_v7 legacy;
-    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
-    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
-    RETURN_IF_ERROR(aidl2legacy_string(aidl.name, legacy.name, sizeof(legacy.name)));
+    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
+    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
+    RETURN_IF_ERROR(aidl2legacy_string(aidl.hal.name, legacy.name, sizeof(legacy.name)));
 
-    if (aidl.profiles.size() > std::size(legacy.audio_profiles)) {
+    if (aidl.hal.profiles.size() > std::size(legacy.audio_profiles)) {
         return unexpected(BAD_VALUE);
     }
-    RETURN_IF_ERROR(convertRange(aidl.profiles.begin(), aidl.profiles.end(), legacy.audio_profiles,
-                                 aidl2legacy_AudioProfile_audio_profile));
-    legacy.num_audio_profiles = aidl.profiles.size();
+    const bool isInput =
+            VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
+    RETURN_IF_ERROR(convertRange(
+                    aidl.hal.profiles.begin(), aidl.hal.profiles.end(), legacy.audio_profiles,
+                    [isInput](const AudioProfile& p) {
+                        return aidl2legacy_AudioProfile_audio_profile(p, isInput);
+                    }));
+    legacy.num_audio_profiles = aidl.hal.profiles.size();
 
-    if (aidl.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
+    if (aidl.hal.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
-            convertRange(aidl.extraAudioDescriptors.begin(), aidl.extraAudioDescriptors.end(),
-                         legacy.extra_audio_descriptors,
-                         aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
-    legacy.num_extra_audio_descriptors = aidl.extraAudioDescriptors.size();
+            convertRange(
+                    aidl.hal.extraAudioDescriptors.begin(), aidl.hal.extraAudioDescriptors.end(),
+                    legacy.extra_audio_descriptors,
+                    aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
+    legacy.num_extra_audio_descriptors = aidl.hal.extraAudioDescriptors.size();
 
-    if (aidl.gains.size() > std::size(legacy.gains)) {
+    if (aidl.hal.gains.size() > std::size(legacy.gains)) {
         return unexpected(BAD_VALUE);
     }
-    RETURN_IF_ERROR(convertRange(aidl.gains.begin(), aidl.gains.end(), legacy.gains,
-                                 aidl2legacy_AudioGain_audio_gain));
-    legacy.num_gains = aidl.gains.size();
+    RETURN_IF_ERROR(convertRange(aidl.hal.gains.begin(), aidl.hal.gains.end(), legacy.gains,
+                                 [isInput](const AudioGain& g) {
+                                     return aidl2legacy_AudioGain_audio_gain(g, isInput);
+                                 }));
+    legacy.num_gains = aidl.hal.gains.size();
 
     legacy.active_config = VALUE_OR_RETURN(
-            aidl2legacy_AudioPortConfig_audio_port_config(aidl.activeConfig));
-    legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortExt(aidl.ext, aidl.type));
+            aidl2legacy_AudioPortConfig_audio_port_config(aidl.sys.activeConfig));
+    legacy.ext = VALUE_OR_RETURN(
+            aidl2legacy_AudioPortExt_audio_port_v7_ext(aidl.hal.ext, aidl.sys.type, aidl.sys.ext));
     return legacy;
 }
 
 ConversionResult<media::AudioPort>
 legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy) {
     media::AudioPort aidl;
-    aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
-    aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
-    aidl.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
+    aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+    aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+    aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+    aidl.hal.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
 
     if (legacy.num_audio_profiles > std::size(legacy.audio_profiles)) {
         return unexpected(BAD_VALUE);
     }
+    const bool isInput = VALUE_OR_RETURN(direction(legacy.role, legacy.type)) == Direction::INPUT;
     RETURN_IF_ERROR(
             convertRange(legacy.audio_profiles, legacy.audio_profiles + legacy.num_audio_profiles,
-                         std::back_inserter(aidl.profiles),
-                         legacy2aidl_audio_profile_AudioProfile));
+                         std::back_inserter(aidl.hal.profiles),
+                         [isInput](const audio_profile& p) {
+                             return legacy2aidl_audio_profile_AudioProfile(p, isInput);
+                         }));
 
     if (legacy.num_extra_audio_descriptors > std::size(legacy.extra_audio_descriptors)) {
         return unexpected(BAD_VALUE);
     }
+    aidl.sys.profiles.resize(legacy.num_audio_profiles);
     RETURN_IF_ERROR(
             convertRange(legacy.extra_audio_descriptors,
                     legacy.extra_audio_descriptors + legacy.num_extra_audio_descriptors,
-                    std::back_inserter(aidl.extraAudioDescriptors),
+                    std::back_inserter(aidl.hal.extraAudioDescriptors),
                     legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor));
 
     if (legacy.num_gains > std::size(legacy.gains)) {
@@ -2056,53 +3018,66 @@
     }
     RETURN_IF_ERROR(
             convertRange(legacy.gains, legacy.gains + legacy.num_gains,
-                         std::back_inserter(aidl.gains),
-                         legacy2aidl_audio_gain_AudioGain));
+                         std::back_inserter(aidl.hal.gains),
+                         [isInput](const audio_gain& g) {
+                             return legacy2aidl_audio_gain_AudioGain(g, isInput);
+                         }));
+    aidl.sys.gains.resize(legacy.num_gains);
 
-    aidl.activeConfig = VALUE_OR_RETURN(
+    aidl.sys.activeConfig = VALUE_OR_RETURN(
             legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
-    aidl.ext = VALUE_OR_RETURN(legacy2aidl_AudioPortExt(legacy.ext, legacy.type));
+    aidl.sys.activeConfig.hal.portId = aidl.hal.id;
+    RETURN_IF_ERROR(
+            legacy2aidl_AudioPortExt(legacy.ext, legacy.type, &aidl.hal.ext, &aidl.sys.ext));
     return aidl;
 }
 
 ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl) {
+aidl2legacy_AudioMode_audio_mode_t(AudioMode aidl) {
     switch (aidl) {
-        case media::AudioMode::INVALID:
+        case AudioMode::SYS_RESERVED_INVALID:
             return AUDIO_MODE_INVALID;
-        case media::AudioMode::CURRENT:
+        case AudioMode::SYS_RESERVED_CURRENT:
             return AUDIO_MODE_CURRENT;
-        case media::AudioMode::NORMAL:
+        case AudioMode::NORMAL:
             return AUDIO_MODE_NORMAL;
-        case media::AudioMode::RINGTONE:
+        case AudioMode::RINGTONE:
             return AUDIO_MODE_RINGTONE;
-        case media::AudioMode::IN_CALL:
+        case AudioMode::IN_CALL:
             return AUDIO_MODE_IN_CALL;
-        case media::AudioMode::IN_COMMUNICATION:
+        case AudioMode::IN_COMMUNICATION:
             return AUDIO_MODE_IN_COMMUNICATION;
-        case media::AudioMode::CALL_SCREEN:
+        case AudioMode::CALL_SCREEN:
             return AUDIO_MODE_CALL_SCREEN;
+        case AudioMode::SYS_RESERVED_CALL_REDIRECT:
+            return AUDIO_MODE_CALL_REDIRECT;
+        case AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT:
+            return AUDIO_MODE_COMMUNICATION_REDIRECT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioMode>
+ConversionResult<AudioMode>
 legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy) {
     switch (legacy) {
         case AUDIO_MODE_INVALID:
-            return media::AudioMode::INVALID;
+            return AudioMode::SYS_RESERVED_INVALID;
         case AUDIO_MODE_CURRENT:
-            return media::AudioMode::CURRENT;
+            return AudioMode::SYS_RESERVED_CURRENT;
         case AUDIO_MODE_NORMAL:
-            return media::AudioMode::NORMAL;
+            return AudioMode::NORMAL;
         case AUDIO_MODE_RINGTONE:
-            return media::AudioMode::RINGTONE;
+            return AudioMode::RINGTONE;
         case AUDIO_MODE_IN_CALL:
-            return media::AudioMode::IN_CALL;
+            return AudioMode::IN_CALL;
         case AUDIO_MODE_IN_COMMUNICATION:
-            return media::AudioMode::IN_COMMUNICATION;
+            return AudioMode::IN_COMMUNICATION;
         case AUDIO_MODE_CALL_SCREEN:
-            return media::AudioMode::CALL_SCREEN;
+            return AudioMode::CALL_SCREEN;
+        case AUDIO_MODE_CALL_REDIRECT:
+            return AudioMode::SYS_RESERVED_CALL_REDIRECT;
+        case AUDIO_MODE_COMMUNICATION_REDIRECT:
+            return AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT;
         case AUDIO_MODE_CNT:
             break;
     }
@@ -2252,30 +3227,30 @@
 }
 
 ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::AudioStandard aidl) {
+aidl2legacy_AudioStandard_audio_standard_t(AudioStandard aidl) {
     switch (aidl) {
-        case media::AudioStandard::NONE:
+        case AudioStandard::NONE:
             return AUDIO_STANDARD_NONE;
-        case media::AudioStandard::EDID:
+        case AudioStandard::EDID:
             return AUDIO_STANDARD_EDID;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioStandard>
+ConversionResult<AudioStandard>
 legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy) {
     switch (legacy) {
         case AUDIO_STANDARD_NONE:
-            return media::AudioStandard::NONE;
+            return AudioStandard::NONE;
         case AUDIO_STANDARD_EDID:
-            return media::AudioStandard::EDID;
+            return AudioStandard::EDID;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_extra_audio_descriptor>
 aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
-        const media::ExtraAudioDescriptor& aidl) {
+        const ExtraAudioDescriptor& aidl) {
     audio_extra_audio_descriptor legacy;
     legacy.standard = VALUE_OR_RETURN(aidl2legacy_AudioStandard_audio_standard_t(aidl.standard));
     if (aidl.audioDescriptor.size() > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
@@ -2290,10 +3265,10 @@
     return legacy;
 }
 
-ConversionResult<media::ExtraAudioDescriptor>
+ConversionResult<ExtraAudioDescriptor>
 legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
         const audio_extra_audio_descriptor& legacy) {
-    media::ExtraAudioDescriptor aidl;
+    ExtraAudioDescriptor aidl;
     aidl.standard = VALUE_OR_RETURN(legacy2aidl_audio_standard_t_AudioStandard(legacy.standard));
     if (legacy.descriptor_length > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
         return unexpected(BAD_VALUE);
@@ -2309,24 +3284,24 @@
 
 ConversionResult<audio_encapsulation_type_t>
 aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
-        const media::AudioEncapsulationType& aidl) {
+        const AudioEncapsulationType& aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationType::NONE:
+        case AudioEncapsulationType::NONE:
             return AUDIO_ENCAPSULATION_TYPE_NONE;
-        case media::AudioEncapsulationType::IEC61937:
+        case AudioEncapsulationType::IEC61937:
             return AUDIO_ENCAPSULATION_TYPE_IEC61937;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationType>
+ConversionResult<AudioEncapsulationType>
 legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
         const audio_encapsulation_type_t & legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_TYPE_NONE:
-            return media::AudioEncapsulationType::NONE;
+            return AudioEncapsulationType::NONE;
         case AUDIO_ENCAPSULATION_TYPE_IEC61937:
-            return media::AudioEncapsulationType::IEC61937;
+            return AudioEncapsulationType::IEC61937;
     }
     return unexpected(BAD_VALUE);
 }
@@ -2355,4 +3330,53 @@
     return trackSecondaryOutputInfo;
 }
 
+ConversionResult<audio_direct_mode_t>
+aidl2legacy_AudioDirectMode_audio_direct_mode_t(media::AudioDirectMode aidl) {
+    switch (aidl) {
+        case media::AudioDirectMode::NONE:
+            return AUDIO_DIRECT_NOT_SUPPORTED;
+        case media::AudioDirectMode::OFFLOAD:
+            return AUDIO_DIRECT_OFFLOAD_SUPPORTED;
+        case media::AudioDirectMode::OFFLOAD_GAPLESS:
+            return AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED;
+        case media::AudioDirectMode::BITSTREAM:
+            return AUDIO_DIRECT_BITSTREAM_SUPPORTED;
+    }
+    return unexpected(BAD_VALUE);
+}
+ConversionResult<media::AudioDirectMode>
+legacy2aidl_audio_direct_mode_t_AudioDirectMode(audio_direct_mode_t legacy) {
+    switch (legacy) {
+        case AUDIO_DIRECT_NOT_SUPPORTED:
+            return media::AudioDirectMode::NONE;
+        case AUDIO_DIRECT_OFFLOAD_SUPPORTED:
+            return media::AudioDirectMode::OFFLOAD;
+        case AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED:
+            return media::AudioDirectMode::OFFLOAD_GAPLESS;
+        case AUDIO_DIRECT_BITSTREAM_SUPPORTED:
+            return media::AudioDirectMode::BITSTREAM;
+    }
+    return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_direct_mode_t> aidl2legacy_int32_t_audio_direct_mode_t_mask(int32_t aidl) {
+    using LegacyMask = std::underlying_type_t<audio_direct_mode_t>;
+
+    LegacyMask converted = VALUE_OR_RETURN(
+            (convertBitmask<LegacyMask, int32_t, audio_direct_mode_t, media::AudioDirectMode>(
+                    aidl, aidl2legacy_AudioDirectMode_audio_direct_mode_t,
+                    indexToEnum_index<media::AudioDirectMode>,
+                    enumToMask_bitmask<LegacyMask, audio_direct_mode_t>)));
+    return static_cast<audio_direct_mode_t>(converted);
+}
+ConversionResult<int32_t> legacy2aidl_audio_direct_mode_t_int32_t_mask(audio_direct_mode_t legacy) {
+    using LegacyMask = std::underlying_type_t<audio_direct_mode_t>;
+
+    LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
+    return convertBitmask<int32_t, LegacyMask, media::AudioDirectMode, audio_direct_mode_t>(
+            legacyMask, legacy2aidl_audio_direct_mode_t_AudioDirectMode,
+            indexToEnum_bitmask<audio_direct_mode_t>,
+            enumToMask_index<int32_t, media::AudioDirectMode>);
+}
+
 }  // namespace android
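
Note on the AidlConversion.cpp hunks above: every aidl2legacy_* / legacy2aidl_* helper follows the same convention, returning a ConversionResult, propagating nested failures with VALUE_OR_RETURN or RETURN_IF_ERROR, and falling through to unexpected(BAD_VALUE) for any unmapped enum value. The updated signatures additionally thread an isInput flag because AudioChannelLayout, unlike the old int32_t mask, is direction dependent. The stand-alone sketch below illustrates only that switch/round-trip pattern; the Result alias, the two toy enums, and the stand-in error handling are assumptions made so the snippet compiles on its own and are not the real ConversionResult, AIDL, or status types.

    // Sketch only: mirrors the aidl2legacy/legacy2aidl switch pattern used above,
    // with stand-in types. The real code uses ConversionResult<T>, VALUE_OR_RETURN
    // and unexpected(BAD_VALUE) from the AIDL conversion utilities.
    #include <cstdio>
    #include <initializer_list>
    #include <optional>

    enum class AidlMode { NORMAL, RINGTONE };           // stand-in for an AIDL enum
    enum legacy_mode_t { MODE_NORMAL, MODE_RINGTONE };  // stand-in for a legacy enum

    template <typename T>
    using Result = std::optional<T>;  // stand-in for ConversionResult<T>

    Result<legacy_mode_t> aidl2legacy_mode(AidlMode aidl) {
        switch (aidl) {
            case AidlMode::NORMAL:   return MODE_NORMAL;
            case AidlMode::RINGTONE: return MODE_RINGTONE;
        }
        return std::nullopt;  // plays the role of unexpected(BAD_VALUE)
    }

    Result<AidlMode> legacy2aidl_mode(legacy_mode_t legacy) {
        switch (legacy) {
            case MODE_NORMAL:   return AidlMode::NORMAL;
            case MODE_RINGTONE: return AidlMode::RINGTONE;
        }
        return std::nullopt;
    }

    int main() {
        // Round-trip check: every value must survive aidl -> legacy -> aidl.
        for (AidlMode m : {AidlMode::NORMAL, AidlMode::RINGTONE}) {
            auto legacy = aidl2legacy_mode(m);
            const bool ok = legacy.has_value() && legacy2aidl_mode(*legacy) == m;
            std::printf("round trip %s\n", ok ? "ok" : "FAILED");
        }
        return 0;
    }

Returning an error value instead of aborting keeps each conversion total: callers can surface BAD_VALUE across binder rather than crash on an out-of-range enum, while the exhaustive switch with no default still lets -Werror,-Wswitch catch newly added enumerators at compile time.
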
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index a00cb79..be39527 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -51,6 +51,7 @@
         "PolicyAidlConversion.cpp"
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
@@ -71,6 +72,7 @@
     include_dirs: ["system/media/audio_utils/include"],
     export_include_dirs: ["include"],
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
@@ -111,6 +113,7 @@
         "TrackPlayerBase.cpp",
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
@@ -134,7 +137,6 @@
         "libprocessgroup",
         "libshmemcompat",
         "libutils",
-        "libvibrator",
         "framework-permission-aidl-cpp",
         "packagemanager_aidl-cpp",
     ],
@@ -144,6 +146,7 @@
         "spatializer-aidl-cpp",
         "framework-permission-aidl-cpp",
         "libbinder",
+        "libmediametrics",
     ],
 
     include_dirs: [
@@ -229,16 +232,19 @@
         "libaudioclient_aidl_conversion_util",
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libbase",
         "libbinder",
         "liblog",
         "libshmemcompat",
+        "libstagefright_foundation",
         "libutils",
         "shared-file-region-aidl-cpp",
         "framework-permission-aidl-cpp",
     ],
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libbase",
         "shared-file-region-aidl-cpp",
@@ -308,57 +314,32 @@
     srcs: [
         "aidl/android/media/AudioAttributesInternal.aidl",
         "aidl/android/media/AudioClient.aidl",
-        "aidl/android/media/AudioConfig.aidl",
-        "aidl/android/media/AudioConfigBase.aidl",
-        "aidl/android/media/AudioContentType.aidl",
-        "aidl/android/media/AudioDevice.aidl",
+        "aidl/android/media/AudioDirectMode.aidl",
         "aidl/android/media/AudioDualMonoMode.aidl",
-        "aidl/android/media/AudioEncapsulationMode.aidl",
-        "aidl/android/media/AudioEncapsulationMetadataType.aidl",
-        "aidl/android/media/AudioEncapsulationType.aidl",
         "aidl/android/media/AudioFlag.aidl",
-        "aidl/android/media/AudioGain.aidl",
-        "aidl/android/media/AudioGainConfig.aidl",
-        "aidl/android/media/AudioGainMode.aidl",
-        "aidl/android/media/AudioInputFlags.aidl",
+        "aidl/android/media/AudioGainSys.aidl",
         "aidl/android/media/AudioIoConfigEvent.aidl",
         "aidl/android/media/AudioIoDescriptor.aidl",
-        "aidl/android/media/AudioIoFlags.aidl",
-        "aidl/android/media/AudioMixLatencyClass.aidl",
-        "aidl/android/media/AudioMode.aidl",
-        "aidl/android/media/AudioOffloadInfo.aidl",
-        "aidl/android/media/AudioOutputFlags.aidl",
         "aidl/android/media/AudioPatch.aidl",
         "aidl/android/media/AudioPlaybackRate.aidl",
         "aidl/android/media/AudioPort.aidl",
+        "aidl/android/media/AudioPortSys.aidl",
         "aidl/android/media/AudioPortConfig.aidl",
-        "aidl/android/media/AudioPortConfigType.aidl",
-        "aidl/android/media/AudioPortConfigDeviceExt.aidl",
-        "aidl/android/media/AudioPortConfigExt.aidl",
-        "aidl/android/media/AudioPortConfigMixExt.aidl",
-        "aidl/android/media/AudioPortConfigMixExtUseCase.aidl",
-        "aidl/android/media/AudioPortConfigSessionExt.aidl",
-        "aidl/android/media/AudioPortDeviceExt.aidl",
-        "aidl/android/media/AudioPortExt.aidl",
-        "aidl/android/media/AudioPortMixExt.aidl",
+        "aidl/android/media/AudioPortConfigSys.aidl",
+        "aidl/android/media/AudioPortDeviceExtSys.aidl",
+        "aidl/android/media/AudioPortExtSys.aidl",
+        "aidl/android/media/AudioPortMixExtSys.aidl",
         "aidl/android/media/AudioPortRole.aidl",
-        "aidl/android/media/AudioPortSessionExt.aidl",
         "aidl/android/media/AudioPortType.aidl",
-        "aidl/android/media/AudioProfile.aidl",
-        "aidl/android/media/AudioSourceType.aidl",
-        "aidl/android/media/AudioStandard.aidl",
-        "aidl/android/media/AudioStreamType.aidl",
+        "aidl/android/media/AudioProfileSys.aidl",
         "aidl/android/media/AudioTimestampInternal.aidl",
         "aidl/android/media/AudioUniqueIdUse.aidl",
-        "aidl/android/media/AudioUsage.aidl",
-        "aidl/android/media/AudioUuid.aidl",
         "aidl/android/media/AudioVibratorInfo.aidl",
         "aidl/android/media/EffectDescriptor.aidl",
-        "aidl/android/media/ExtraAudioDescriptor.aidl",
         "aidl/android/media/TrackSecondaryOutputInfo.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
         "framework-permission-aidl",
     ],
     backend: {
@@ -369,6 +350,9 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
 aidl_interface {
@@ -399,7 +383,7 @@
         "aidl/android/media/SpatializerHeadTrackingMode.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
     ],
     backend: {
@@ -410,6 +394,9 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
 
@@ -439,7 +426,7 @@
         "aidl/android/media/IAudioTrackCallback.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
         "av-types-aidl",
         "effect-aidl",
@@ -455,6 +442,9 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
 
@@ -468,13 +458,12 @@
         "aidl/android/media/GetInputForAttrResponse.aidl",
         "aidl/android/media/GetOutputForAttrResponse.aidl",
         "aidl/android/media/GetSpatializerResponse.aidl",
-        "aidl/android/media/Int.aidl",
         "aidl/android/media/RecordClientInfo.aidl",
         "aidl/android/media/IAudioPolicyService.aidl",
         "aidl/android/media/IAudioPolicyServiceClient.aidl",
     ],
     imports: [
-        "audio_common-aidl",
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
         "audiopolicy-types-aidl",
         "capture_state_listener-aidl",
@@ -491,6 +480,9 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
 
@@ -518,5 +510,8 @@
                 "com.android.media",
             ],
         },
+        java: {
+            sdk_version: "module_current",
+        },
     },
 }
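
Note on the Android.bp changes above: the AIDL interfaces switch from the local audio_common-aidl / audioclient-types-aidl definitions to the shared android.media.audio.common.types import, the conversion libraries now link against and re-export android.media.audio.common.types-V1-cpp, and a Java backend pinned to sdk_version: "module_current" is enabled. For C++ callers the visible consequence, shown later in the AudioEffect.cpp hunks, is that types such as AudioUsage, AudioUuid, AudioSource and Int move from the media:: namespace to media::audio::common::. The self-contained sketch below demonstrates only that namespace move; the enum body is a stand-in, not the generated AIDL type.

    // Sketch of the namespace move implied by these hunks. The enum defined here
    // is a stand-in so the snippet compiles on its own; only the namespace change
    // itself is taken from the diff.
    namespace android::media::audio::common {
    enum class AudioUsage { UNKNOWN, MEDIA, GAME };  // stand-in definition
    }  // namespace android::media::audio::common

    namespace example {
    // Old code:  using ::android::media::AudioUsage;
    using ::android::media::audio::common::AudioUsage;

    bool isGameUsage(AudioUsage usage) {
        return usage == AudioUsage::GAME;
    }
    }  // namespace example

    int main() {
        return example::isGameUsage(example::AudioUsage::GAME) ? 0 : 1;
    }
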
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/AudioAttributes.cpp
index 83bf5a7..260c06c 100644
--- a/media/libaudioclient/AudioAttributes.cpp
+++ b/media/libaudioclient/AudioAttributes.cpp
@@ -24,9 +24,6 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
 status_t AudioAttributes::readFromParcel(const Parcel* parcel) {
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 9091599..62f863d 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -32,16 +32,12 @@
 #include <private/media/AudioEffectShared.h>
 #include <utils/Log.h>
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-        auto _tmp = (x);             \
-        if (_tmp != OK) return _tmp; \
-    }
-
 namespace android {
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
 using media::IAudioPolicyService;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioUuid;
 
 namespace {
 
@@ -571,7 +567,7 @@
 
     int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_session_t_int32_t(audioSession));
-    media::Int countAidl;
+    media::audio::common::Int countAidl;
     countAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*count));
     std::vector<media::EffectDescriptor> retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -619,12 +615,12 @@
         uuid = *EFFECT_UUID_NULL;
     }
 
-    media::AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
-    media::AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
+    AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
+    AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
     std::string opPackageNameAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_String16_string(opPackageName));
-    media::AudioSourceType sourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(source));
+    AudioSource sourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(source));
     int32_t retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->addSourceDefaultEffect(typeAidl, opPackageNameAidl, uuidAidl, priority, sourceAidl,
@@ -662,11 +658,11 @@
         uuid = *EFFECT_UUID_NULL;
     }
 
-    media::AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
-    media::AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
+    AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
+    AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
     std::string opPackageNameAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_String16_string(opPackageName));
-    media::AudioUsage usageAidl = VALUE_OR_RETURN_STATUS(
+    media::audio::common::AudioUsage usageAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_usage_t_AudioUsage(usage));
     int32_t retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
index f98027a..ecd423a 100644
--- a/media/libaudioclient/AudioProductStrategy.cpp
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -21,9 +21,6 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
 status_t AudioProductStrategy::readFromParcel(const Parcel* parcel) {
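
Note on the client-side hunks above: AudioAttributes.cpp, AudioEffect.cpp and AudioProductStrategy.cpp each drop their per-file copy of the RETURN_STATUS_IF_ERROR macro. The definition is presumably now supplied by a shared conversion-utility header (where exactly it lives is an assumption, it is not shown in these hunks), so the local duplicates would be redundant. The macro body in the sketch below is copied verbatim from the removed lines; status_t, OK and BAD_VALUE are stand-ins so the snippet compiles on its own.

    // Sketch of the early-return macro removed from the individual files above.
    // The macro body matches the removed definition; the types and constants are
    // stand-ins, not the Android originals.
    #include <cstdio>

    using status_t = int;
    constexpr status_t OK = 0;
    constexpr status_t BAD_VALUE = -22;  // stand-in error code

    #define RETURN_STATUS_IF_ERROR(x) \
        { auto _tmp = (x); if (_tmp != OK) return _tmp; }

    static status_t stepThatFails() { return BAD_VALUE; }

    static status_t readFromParcelLikeFunction() {
        RETURN_STATUS_IF_ERROR(OK);               // passes, execution continues
        RETURN_STATUS_IF_ERROR(stepThatFails());  // fails, returns BAD_VALUE here
        return OK;                                // never reached in this sketch
    }

    int main() {
        std::printf("result = %d\n", readFromParcelLikeFunction());
        return 0;
    }

The macro wraps its body in a bare block so it can be used as a single statement inside parcel read/write functions; the related VALUE_OR_RETURN_STATUS helper seen elsewhere in these hunks plays the same role for expressions that also produce a value.
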
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 22f0295..ebd488a 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -20,8 +20,10 @@
 
 #include <inttypes.h>
 #include <android-base/macros.h>
+#include <android-base/stringprintf.h>
 #include <sys/resource.h>
 
+#include <audio_utils/format.h>
 #include <audiomanager/AudioManager.h>
 #include <audiomanager/IAudioManager.h>
 #include <binder/Binder.h>
@@ -39,6 +41,7 @@
 
 namespace android {
 
+using ::android::base::StringPrintf;
 using android::content::AttributionSourceState;
 using aidl_utils::statusTFromBinderStatus;
 
@@ -142,7 +145,7 @@
         audio_channel_mask_t channelMask,
         const AttributionSourceState& client,
         size_t frameCount,
-        callback_t cbf,
+        legacy_callback_t callback,
         void* user,
         uint32_t notificationFrames,
         audio_session_t sessionId,
@@ -162,7 +165,39 @@
 {
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
     pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
-    (void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+    (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback, user,
+            notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
+            uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
+            microphoneFieldDimension);
+}
+
+AudioRecord::AudioRecord(
+        audio_source_t inputSource,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        const AttributionSourceState& client,
+        size_t frameCount,
+        const wp<IAudioRecordCallback>& callback,
+        uint32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        audio_input_flags_t flags,
+        const audio_attributes_t* pAttributes,
+        audio_port_handle_t selectedDeviceId,
+        audio_microphone_direction_t selectedMicDirection,
+        float microphoneFieldDimension)
+    : mActive(false),
+      mStatus(NO_INIT),
+      mClientAttributionSource(client),
+      mSessionId(AUDIO_SESSION_ALLOCATE),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mProxy(nullptr)
+{
+    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
+    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
+    (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback,
             notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
             uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
             microphoneFieldDimension);
@@ -219,14 +254,44 @@
         mDeviceCallback.clear();
     }
 }
+namespace {
+class LegacyCallbackWrapper : public AudioRecord::IAudioRecordCallback {
+    const AudioRecord::legacy_callback_t mCallback;
+    void* const mData;
+
+  public:
+    LegacyCallbackWrapper(AudioRecord::legacy_callback_t callback, void* user)
+        : mCallback(callback), mData(user) {}
+
+    size_t onMoreData(const AudioRecord::Buffer& buffer) override {
+        AudioRecord::Buffer copy = buffer;
+        mCallback(AudioRecord::EVENT_MORE_DATA, mData, &copy);
+        return copy.size;
+    }
+
+    void onOverrun() override { mCallback(AudioRecord::EVENT_OVERRUN, mData, nullptr); }
+
+    void onMarker(uint32_t markerPosition) override {
+        mCallback(AudioRecord::EVENT_MARKER, mData, &markerPosition);
+    }
+
+    void onNewPos(uint32_t newPos) override {
+        mCallback(AudioRecord::EVENT_NEW_POS, mData, &newPos);
+    }
+
+    void onNewIAudioRecord() override {
+        mCallback(AudioRecord::EVENT_NEW_IAUDIORECORD, mData, nullptr);
+    }
+};
+}  // namespace
+
 status_t AudioRecord::set(
         audio_source_t inputSource,
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
         size_t frameCount,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioRecordCallback>& callback,
         uint32_t notificationFrames,
         bool threadCanCallJava,
         audio_session_t sessionId,
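
These hunks replace the C-style callback_t/void* pair with a wp<IAudioRecordCallback> listener, with LegacyCallbackWrapper (above) adapting old-style callers onto the new interface. The sketch below shows, under assumptions, how a client could implement the new interface directly: the header paths, the default values for the trailing constructor arguments, and the 0-means-default frame count are assumptions not confirmed by this diff, and only the five methods visible here are overridden.

#include <cstddef>
#include <cstdint>

#include <android/content/AttributionSourceState.h>
#include <media/AudioRecord.h>
#include <utils/StrongPointer.h>

namespace {

class MyRecordCallback : public android::AudioRecord::IAudioRecordCallback {
  public:
    size_t onMoreData(const android::AudioRecord::Buffer& buffer) override {
        // Consume buffer.size bytes of captured audio here.
        return buffer.size;  // tell AudioRecord how much was consumed
    }
    void onOverrun() override { /* some captured data was lost */ }
    void onMarker(uint32_t markerPosition) override { (void)markerPosition; }
    void onNewPos(uint32_t newPos) override { (void)newPos; }
    void onNewIAudioRecord() override { /* the server-side track was re-created */ }
};

}  // namespace

// The caller keeps the callback alive via sp<>; AudioRecord itself only holds
// a wp<> (mCallback), so dropping the sp<> naturally ends the callbacks.
android::sp<android::AudioRecord> createRecord(
        const android::content::AttributionSourceState& client,
        const android::sp<MyRecordCallback>& cb) {
    auto record = android::sp<android::AudioRecord>::make(
            AUDIO_SOURCE_MIC, 48000 /*sampleRate*/, AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_IN_MONO, client, 0 /*frameCount, 0 = framework default*/, cb);
    // Callers would check record->initCheck() before record->start().
    return record;
}
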
@@ -241,8 +306,7 @@
         int32_t maxSharedAudioHistoryMs)
 {
     status_t status = NO_ERROR;
-    uint32_t channelCount;
-
+    const sp<IAudioRecordCallback> callbackHandle = callback.promote();
     // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "notificationFrames %u, sessionId %d, transferType %d, flags %#x, attributionSource %s"
@@ -273,39 +337,9 @@
     mSelectedMicFieldDimension = microphoneFieldDimension;
     mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;
 
-    switch (transferType) {
-    case TRANSFER_DEFAULT:
-        if (cbf == NULL || threadCanCallJava) {
-            transferType = TRANSFER_SYNC;
-        } else {
-            transferType = TRANSFER_CALLBACK;
-        }
-        break;
-    case TRANSFER_CALLBACK:
-        if (cbf == NULL) {
-            ALOGE("%s(): Transfer type TRANSFER_CALLBACK but cbf == NULL", __func__);
-            status = BAD_VALUE;
-            goto exit;
-        }
-        break;
-    case TRANSFER_OBTAIN:
-    case TRANSFER_SYNC:
-        break;
-    default:
-        ALOGE("%s(): Invalid transfer type %d", __func__, transferType);
-        status = BAD_VALUE;
-        goto exit;
-    }
-    mTransfer = transferType;
-
-    // invariant that mAudioRecord != 0 is true only after set() returns successfully
-    if (mAudioRecord != 0) {
-        ALOGE("%s(): Track already in use", __func__);
-        status = INVALID_OPERATION;
-        goto exit;
-    }
-
-    if (pAttributes == NULL) {
+    std::string errorMessage;
+    // Copy the state variables early so they are available for error reporting.
+    if (pAttributes == nullptr) {
         mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
         mAttributes.source = inputSource;
         if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION
@@ -316,37 +350,69 @@
     } else {
         // stream type shouldn't be looked at, this track has audio attributes
         memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
-        ALOGV("%s(): Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
+        ALOGV("%s: Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
                 __func__, mAttributes.source, mAttributes.flags, mAttributes.tags);
     }
-
     mSampleRate = sampleRate;
-
-    // these below should probably come from the audioFlinger too...
     if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
     }
-
-    // validate parameters
-    // AudioFlinger capture only supports linear PCM
-    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
-        ALOGE("%s(): Format %#x is not linear pcm", __func__, format);
-        status = BAD_VALUE;
-        goto exit;
-    }
     mFormat = format;
-
-    if (!audio_is_input_channel(channelMask)) {
-        ALOGE("%s(): Invalid channel mask %#x", __func__, channelMask);
-        status = BAD_VALUE;
-        goto exit;
-    }
     mChannelMask = channelMask;
-    channelCount = audio_channel_count_from_in_mask(channelMask);
-    mChannelCount = channelCount;
+    mSessionId = sessionId;
+    ALOGV("%s: mSessionId %d", __func__, mSessionId);
+    mOrigFlags = mFlags = flags;
 
-    if (audio_is_linear_pcm(format)) {
-        mFrameSize = channelCount * audio_bytes_per_sample(format);
+    mTransfer = transferType;
+    switch (mTransfer) {
+    case TRANSFER_DEFAULT:
+        if (callbackHandle == nullptr || threadCanCallJava) {
+            mTransfer = TRANSFER_SYNC;
+        } else {
+            mTransfer = TRANSFER_CALLBACK;
+        }
+        break;
+    case TRANSFER_CALLBACK:
+        if (callbackHandle == nullptr) {
+            errorMessage = StringPrintf(
+                    "%s: Transfer type TRANSFER_CALLBACK but callback == nullptr", __func__);
+            status = BAD_VALUE;
+            goto error;
+        }
+        break;
+    case TRANSFER_OBTAIN:
+    case TRANSFER_SYNC:
+        break;
+    default:
+        errorMessage = StringPrintf("%s: Invalid transfer type %d", __func__, mTransfer);
+        status = BAD_VALUE;
+        goto error;
+    }
+
+    // invariant that mAudioRecord != 0 is true only after set() returns successfully
+    if (mAudioRecord != 0) {
+        errorMessage = StringPrintf("%s: Track already in use", __func__);
+        status = INVALID_OPERATION;
+        goto error;
+    }
+
+    // AudioFlinger capture only supports linear PCM
+    if (!audio_is_valid_format(mFormat) || !audio_is_linear_pcm(mFormat)) {
+        errorMessage = StringPrintf("%s: Format %#x is not linear pcm", __func__, mFormat);
+        status = BAD_VALUE;
+        goto error;
+    }
+
+    if (!audio_is_input_channel(mChannelMask)) {
+        errorMessage = StringPrintf("%s: Invalid channel mask %#x", __func__, mChannelMask);
+        status = BAD_VALUE;
+        goto error;
+    }
+
+    mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+
+    if (audio_is_linear_pcm(mFormat)) {
+        mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
     } else {
         mFrameSize = sizeof(uint8_t);
     }
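
The hunk above moves the transfer-type selection after the basic state copies and keys it off whether the weak callback promotes to a non-null handle. The decision table is small enough to restate as a self-contained sketch; all names below are local to the sketch.

#include <stdexcept>

enum class Transfer { DEFAULT, CALLBACK, OBTAIN, SYNC };

Transfer resolveTransfer(Transfer requested, bool haveCallback, bool threadCanCallJava) {
    switch (requested) {
        case Transfer::DEFAULT:
            // A callback thread cannot be used if it might call into Java.
            return (!haveCallback || threadCanCallJava) ? Transfer::SYNC : Transfer::CALLBACK;
        case Transfer::CALLBACK:
            if (!haveCallback) throw std::invalid_argument("TRANSFER_CALLBACK needs a callback");
            return requested;
        case Transfer::OBTAIN:
        case Transfer::SYNC:
            return requested;
    }
    throw std::invalid_argument("invalid transfer type");
}
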
@@ -357,13 +423,8 @@
     mNotificationFramesReq = notificationFrames;
     // mNotificationFramesAct is initialized in createRecord_l
 
-    mSessionId = sessionId;
-    ALOGV("%s(): mSessionId %d", __func__, mSessionId);
-
-    mOrigFlags = mFlags = flags;
-    mCbf = cbf;
-
-    if (cbf != NULL) {
+    mCallback = callbackHandle;
+    if (mCallback != nullptr) {
         mAudioRecordThread = new AudioRecordThread(*this);
         mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
         // thread begins in paused state, and will not reference us until start()
@@ -383,10 +444,10 @@
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
         }
+        // Jump to exit, not error: createRecord_l already logged and reported this failure.
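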
         goto exit;
     }
 
-    mUserData = user;
     // TODO: add audio hardware input latency here
     mLatency = (1000LL * mFrameCount) / mSampleRate;
     mMarkerPosition = 0;
@@ -400,14 +461,48 @@
     mFramesRead = 0;
     mFramesReadServerOffset = 0;
 
-exit:
-    mStatus = status;
+error:
     if (status != NO_ERROR) {
         mMediaMetrics.markError(status, __FUNCTION__);
+        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
     }
+exit:
+    mStatus = status;
     return status;
 }
 
+status_t AudioRecord::set(
+        audio_source_t inputSource,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        legacy_callback_t callback,
+        void* user,
+        uint32_t notificationFrames,
+        bool threadCanCallJava,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        audio_input_flags_t flags,
+        uid_t uid,
+        pid_t pid,
+        const audio_attributes_t* pAttributes,
+        audio_port_handle_t selectedDeviceId,
+        audio_microphone_direction_t selectedMicDirection,
+        float microphoneFieldDimension,
+        int32_t maxSharedAudioHistoryMs)
+{
+    if (callback != nullptr) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(inputSource, sampleRate, format, channelMask, frameCount, mLegacyCallbackWrapper,
+        notificationFrames, threadCanCallJava, sessionId, transferType, flags, uid, pid,
+        pAttributes, selectedDeviceId, selectedMicDirection, microphoneFieldDimension,
+        maxSharedAudioHistoryMs);
+}
 // -------------------------------------------------------------------------
 
 status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
@@ -537,12 +632,12 @@
 
 status_t AudioRecord::setMarkerPosition(uint32_t marker)
 {
+    AutoMutex lock(mLock);
     // The only purpose of setting marker position is to get a callback
-    if (mCbf == NULL) {
+    if (mCallback.promote() == nullptr) {
         return INVALID_OPERATION;
     }
 
-    AutoMutex lock(mLock);
     mMarkerPosition = marker;
     mMarkerReached = false;
 
@@ -567,12 +662,12 @@
 
 status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
+    AutoMutex lock(mLock);
     // The only purpose of setting position update period is to get a callback
-    if (mCbf == NULL) {
+    if (mCallback.promote() == nullptr) {
         return INVALID_OPERATION;
     }
 
-    AutoMutex lock(mLock);
     mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
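
setMarkerPosition() and setPositionUpdatePeriod() now take the lock first and check mCallback.promote() instead of comparing a raw function pointer to NULL: the record only holds a weak reference, so a caller that has dropped its callback silently stops receiving marker and position events. A minimal sketch of that promote-then-check idiom follows; Listener and Holder are names local to the sketch.

#include <utils/RefBase.h>
#include <utils/StrongPointer.h>

struct Listener : public android::RefBase {
    virtual void onEvent() {}
};

struct Holder {
    android::wp<Listener> mCallback;  // weak: does not keep the listener alive

    bool notify() {
        android::sp<Listener> cb = mCallback.promote();  // nullptr if expired
        if (cb == nullptr) return false;                 // listener gone: nothing to do
        cb->onEvent();
        return true;
    }
};

// Usage: android::sp<Listener> listener = android::sp<Listener>::make();
//        Holder holder{listener};  holder.notify();
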
 
@@ -670,6 +765,8 @@
 // ---- Explicit Routing ---------------------------------------------------
 status_t AudioRecord::setInputDevice(audio_port_handle_t deviceId) {
     AutoMutex lock(mLock);
+    ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
+            __func__, mPortId, deviceId, mSelectedDeviceId);
     if (mSelectedDeviceId != deviceId) {
         mSelectedDeviceId = deviceId;
         if (mStatus == NO_ERROR) {
@@ -756,15 +853,16 @@
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     IAudioFlinger::CreateRecordInput input;
     IAudioFlinger::CreateRecordOutput output;
-    audio_session_t originalSessionId;
+    [[maybe_unused]] audio_session_t originalSessionId;
     void *iMemPointer;
     audio_track_cblk_t* cblk;
     status_t status;
     static const int32_t kMaxCreateAttempts = 3;
     int32_t remainingAttempts = kMaxCreateAttempts;
+    std::string errorMessage;
 
     if (audioFlinger == 0) {
-        ALOGE("%s(%d): Could not get audioflinger", __func__, mPortId);
+        errorMessage = StringPrintf("%s(%d): Could not get audioflinger", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -830,8 +928,9 @@
             break;
         }
         if (status != FAILED_TRANSACTION || --remainingAttempts <= 0) {
-            ALOGE("%s(%d): AudioFlinger could not create record track, status: %d",
-                  __func__, mPortId, status);
+            errorMessage = StringPrintf(
+                    "%s(%d): AudioFlinger could not create record track, status: %d",
+                    __func__, mPortId, status);
             goto exit;
         }
         // FAILED_TRANSACTION happens under very specific conditions causing a state mismatch
@@ -858,9 +957,13 @@
     mRoutedDeviceId = output.selectedDeviceId;
     mSessionId = output.sessionId;
     mSampleRate = output.sampleRate;
+    mServerConfig = output.serverConfig;
+    mServerFrameSize = audio_bytes_per_frame(
+            audio_channel_count_from_in_mask(mServerConfig.channel_mask), mServerConfig.format);
+    mServerSampleSize = audio_bytes_per_sample(mServerConfig.format);
 
     if (output.cblk == 0) {
-        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
+        errorMessage = StringPrintf("%s(%d): Could not get control block", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -870,7 +973,8 @@
     //       issue (e.g. by copying).
     iMemPointer = output.cblk ->unsecurePointer();
     if (iMemPointer == NULL) {
-        ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
+        errorMessage = StringPrintf(
+                "%s(%d): Could not get control block pointer", __func__, mPortId);
         status = NO_INIT;
         goto exit;
     }
@@ -889,7 +993,8 @@
         //       issue (e.g. by copying).
         buffers = output.buffers->unsecurePointer();
         if (buffers == NULL) {
-            ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
+            errorMessage = StringPrintf(
+                    "%s(%d): Could not get buffer pointer", __func__, mPortId);
             status = NO_INIT;
             goto exit;
         }
@@ -921,6 +1026,10 @@
                 mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
     }
     mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
+    if (mServerConfig.format != mFormat && mCallback.promote() != nullptr) {
+        mFormatConversionBufRaw = std::make_unique<uint8_t[]>(mNotificationFramesAct * mFrameSize);
+        mFormatConversionBuffer.raw = mFormatConversionBufRaw.get();
+    }
 
     //mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
     if (mDeviceCallback != 0) {
@@ -947,7 +1056,7 @@
     }
 
     // update proxy
-    mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
+    mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mServerFrameSize);
     mProxy->setEpoch(epoch);
     mProxy->setMinimum(mNotificationFramesAct);
 
@@ -980,11 +1089,38 @@
         .record();
 
 exit:
+    if (status != NO_ERROR) {
+        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
+    }
+
     mStatus = status;
     // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
     return status;
 }
 
+// Report error associated with the event and some configuration details.
+void AudioRecord::reportError(status_t status, const char *event, const char *message) const
+{
+    if (status == NO_ERROR) return;
+    // We report the error on the native side because some callers do not
+    // come from Java.
+    // Ensure these variables are initialized in set().
+    mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_RECORD_ERROR)
+        .set(AMEDIAMETRICS_PROP_EVENT, event)
+        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
+        .set(AMEDIAMETRICS_PROP_STATUSMESSAGE, message)
+        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
+        .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
+        .set(AMEDIAMETRICS_PROP_SOURCE, toString(mAttributes.source).c_str())
+        .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
+        .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
+        .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
+        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
+        .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
+        .record();
+}
+
 status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
 {
     if (audioBuffer == NULL) {
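
createRecord_l() and the reworked set() now accumulate a human-readable errorMessage, jump to a single label, and hand the message to reportError(), which emits one structured mediametrics item alongside the log line. Below is a generic, self-contained sketch of that report-once shape; recordErrorMetric is a stand-in for the mediametrics sink.

#include <cstdio>
#include <string>

using status_t = int;             // stand-ins so the sketch compiles on its own
constexpr status_t OK = 0;
constexpr status_t NO_INIT = -19;

// Hypothetical stand-in for the structured metrics sink.
void recordErrorMetric(status_t status, const std::string& message) {
    std::fprintf(stderr, "error status=%d message=%s\n", status, message.c_str());
}

status_t createResource(bool serviceAvailable, bool blockAvailable) {
    status_t status = OK;
    std::string errorMessage;

    if (!serviceAvailable) {
        errorMessage = "could not reach the audio service";
        status = NO_INIT;
        goto exit;
    }
    if (!blockAvailable) {
        errorMessage = "could not get control block";
        status = NO_INIT;
        goto exit;
    }
    // ... normal setup would happen here ...

exit:
    if (status != OK && !errorMessage.empty()) {
        recordErrorMetric(status, errorMessage);  // logged and recorded exactly once
    }
    return status;
}
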
@@ -1078,7 +1214,7 @@
     } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
 
     audioBuffer->frameCount = buffer.mFrameCount;
-    audioBuffer->size = buffer.mFrameCount * mFrameSize;
+    audioBuffer->size = buffer.mFrameCount * mServerFrameSize;
     audioBuffer->raw = buffer.mRaw;
     audioBuffer->sequence = oldSequence;
     if (nonContig != NULL) {
@@ -1091,7 +1227,7 @@
 {
     // FIXME add error checking on mode, by adding an internal version
 
-    size_t stepCount = audioBuffer->size / mFrameSize;
+    size_t stepCount = audioBuffer->frameCount;
     if (stepCount == 0) {
         return;
     }
@@ -1153,8 +1289,9 @@
             return ssize_t(err);
         }
 
-        size_t bytesRead = audioBuffer.size;
-        memcpy(buffer, audioBuffer.i8, bytesRead);
+        size_t bytesRead = audioBuffer.frameCount * mFrameSize;
+        memcpy_by_audio_format(buffer, mFormat, audioBuffer.raw, mServerConfig.format,
+                               audioBuffer.size / mServerSampleSize);
         buffer = ((char *) buffer) + bytesRead;
         userSize -= bytesRead;
         read += bytesRead;
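
read() now advances by the client frame size but copies through memcpy_by_audio_format(), because the server may capture in a different PCM format (mServerConfig.format) than the client requested. The sketch below is a simplified stand-in for that per-sample conversion, fixed to int16 to float purely for illustration.

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<float> convertPcm16ToFloat(const int16_t* src, size_t sampleCount) {
    std::vector<float> dst(sampleCount);
    for (size_t i = 0; i < sampleCount; ++i) {
        dst[i] = static_cast<float>(src[i]) / 32768.0f;  // scale to [-1.0, 1.0)
    }
    return dst;
}
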
@@ -1173,6 +1310,11 @@
 nsecs_t AudioRecord::processAudioBuffer()
 {
     mLock.lock();
+    const sp<IAudioRecordCallback> callback = mCallback.promote();
+    if (!callback) {
+        mCallback = nullptr;
+        return NS_NEVER;
+    }
     if (mAwaitBoost) {
         mAwaitBoost = false;
         mLock.unlock();
@@ -1248,26 +1390,26 @@
     uint32_t sequence = mSequence;
 
     // These fields don't need to be cached, because they are assigned only by set():
-    //      mTransfer, mCbf, mUserData, mSampleRate, mFrameSize
+    //      mTransfer, mCallback, mSampleRate, mFrameSize
 
     mLock.unlock();
 
     // perform callbacks while unlocked
     if (newOverrun) {
-        mCbf(EVENT_OVERRUN, mUserData, NULL);
+        callback->onOverrun();
+
     }
     if (markerReached) {
-        mCbf(EVENT_MARKER, mUserData, &markerPosition);
+        callback->onMarker(markerPosition.value());
     }
     while (newPosCount > 0) {
-        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
-        mCbf(EVENT_NEW_POS, mUserData, &temp);
+        callback->onNewPos(newPosition.value());
         newPosition += updatePeriod;
         newPosCount--;
     }
     if (mObservedSequence != sequence) {
         mObservedSequence = sequence;
-        mCbf(EVENT_NEW_IAUDIORECORD, mUserData, NULL);
+        callback->onNewIAudioRecord();
     }
 
     // if inactive, then don't run me again until re-started
@@ -1351,9 +1493,19 @@
             }
         }
 
-        size_t reqSize = audioBuffer.size;
-        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
-        size_t readSize = audioBuffer.size;
+        Buffer* buffer = &audioBuffer;
+        if (mServerConfig.format != mFormat) {
+            buffer = &mFormatConversionBuffer;
+            buffer->frameCount = audioBuffer.frameCount;
+            buffer->size = buffer->frameCount * mFrameSize;
+            buffer->sequence = audioBuffer.sequence;
+            memcpy_by_audio_format(buffer->raw, mFormat, audioBuffer.raw,
+                                   mServerConfig.format, audioBuffer.size / mServerSampleSize);
+        }
+
+        const size_t reqSize = buffer->size;
+        const size_t readSize = callback->onMoreData(*buffer);
+        buffer->size = readSize;
 
         // Validate on returned size
         if (ssize_t(readSize) < 0 || readSize > reqSize) {
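
processAudioBuffer() likewise converts into mFormatConversionBuffer before invoking onMoreData(), then validates the size the callback reports against what it was handed (the check continues past the end of this hunk). A tiny sketch of that validation step, with names local to the sketch:

#include <cstddef>
#include <optional>

// Returns the byte count to release back to the server, or nothing if the
// callback claimed more than the buffer it was given.
std::optional<size_t> validateReturnedSize(size_t returned, size_t requested) {
    if (returned > requested) {
        return std::nullopt;
    }
    return returned;
}
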
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 139d931..b3c82787 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -40,19 +40,25 @@
        if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-        auto _tmp = (x);             \
-        if (_tmp != OK) return _tmp; \
-    }
-
 // ----------------------------------------------------------------------------
 
 namespace android {
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
+using content::AttributionSourceState;
 using media::IAudioPolicyService;
-using android::content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::Int;
 
 // client singleton for AudioFlinger binder interface
 Mutex AudioSystem::gLock;
@@ -336,7 +342,7 @@
     if (desc == 0) {
         *samplingRate = af->sampleRate(ioHandle);
     } else {
-        *samplingRate = desc->mSamplingRate;
+        *samplingRate = desc->getSamplingRate();
     }
     if (*samplingRate == 0) {
         ALOGE("AudioSystem::getSamplingRate failed for ioHandle %d", ioHandle);
@@ -371,7 +377,7 @@
     if (desc == 0) {
         *frameCount = af->frameCount(ioHandle);
     } else {
-        *frameCount = desc->mFrameCount;
+        *frameCount = desc->getFrameCount();
     }
     if (*frameCount == 0) {
         ALOGE("AudioSystem::getFrameCount failed for ioHandle %d", ioHandle);
@@ -406,7 +412,7 @@
     if (outputDesc == 0) {
         *latency = af->latency(output);
     } else {
-        *latency = outputDesc->mLatency;
+        *latency = outputDesc->getLatency();
     }
 
     ALOGV("getLatency() output %d, latency %d", output, *latency);
@@ -494,7 +500,7 @@
     if (desc == 0) {
         *frameCount = af->frameCountHAL(ioHandle);
     } else {
-        *frameCount = desc->mFrameCountHAL;
+        *frameCount = desc->getFrameCountHAL();
     }
     if (*frameCount == 0) {
         ALOGE("AudioSystem::getFrameCountHAL failed for ioHandle %d", ioHandle);
@@ -535,15 +541,15 @@
 Status AudioSystem::AudioFlingerClient::ioConfigChanged(
         media::AudioIoConfigEvent _event,
         const media::AudioIoDescriptor& _ioDesc) {
-    audio_io_config_event event = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioIoConfigEvent_audio_io_config_event(_event));
+    audio_io_config_event_t event = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(_event));
     sp<AudioIoDescriptor> ioDesc(
             VALUE_OR_RETURN_BINDER_STATUS(
                     aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(_ioDesc)));
 
     ALOGV("ioConfigChanged() event %d", event);
 
-    if (ioDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) return Status::ok();
+    if (ioDesc->getIoHandle() == AUDIO_IO_HANDLE_NONE) return Status::ok();
 
     audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
     std::vector<sp<AudioDeviceCallback>> callbacksToCall;
@@ -556,93 +562,88 @@
             case AUDIO_OUTPUT_REGISTERED:
             case AUDIO_INPUT_OPENED:
             case AUDIO_INPUT_REGISTERED: {
-                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
                 if (oldDesc == 0) {
-                    mIoDescriptors.add(ioDesc->mIoHandle, ioDesc);
+                    mIoDescriptors.add(ioDesc->getIoHandle(), ioDesc);
                 } else {
                     deviceId = oldDesc->getDeviceId();
-                    mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+                    mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
                 }
 
                 if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
                     deviceId = ioDesc->getDeviceId();
                     if (event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED) {
-                        auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+                        auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
                         if (it != mAudioDeviceCallbacks.end()) {
                             callbacks = it->second;
                         }
                     }
                 }
-                ALOGV("ioConfigChanged() new %s %s %d samplingRate %u, format %#x channel mask %#x "
-                      "frameCount %zu deviceId %d",
+                ALOGV("ioConfigChanged() new %s %s %s",
                       event == AUDIO_OUTPUT_OPENED || event == AUDIO_OUTPUT_REGISTERED ?
                       "output" : "input",
                       event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED ?
                       "opened" : "registered",
-                      ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
-                      ioDesc->mChannelMask,
-                      ioDesc->mFrameCount, ioDesc->getDeviceId());
+                      ioDesc->toDebugString().c_str());
             }
                 break;
             case AUDIO_OUTPUT_CLOSED:
             case AUDIO_INPUT_CLOSED: {
-                if (getIoDescriptor_l(ioDesc->mIoHandle) == 0) {
+                if (getIoDescriptor_l(ioDesc->getIoHandle()) == 0) {
                     ALOGW("ioConfigChanged() closing unknown %s %d",
-                          event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+                          event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
                     break;
                 }
                 ALOGV("ioConfigChanged() %s %d closed",
-                      event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+                      event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
 
-                mIoDescriptors.removeItem(ioDesc->mIoHandle);
-                mAudioDeviceCallbacks.erase(ioDesc->mIoHandle);
+                mIoDescriptors.removeItem(ioDesc->getIoHandle());
+                mAudioDeviceCallbacks.erase(ioDesc->getIoHandle());
             }
                 break;
 
             case AUDIO_OUTPUT_CONFIG_CHANGED:
             case AUDIO_INPUT_CONFIG_CHANGED: {
-                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
                 if (oldDesc == 0) {
                     ALOGW("ioConfigChanged() modifying unknown %s! %d",
                           event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
-                          ioDesc->mIoHandle);
+                          ioDesc->getIoHandle());
                     break;
                 }
 
                 deviceId = oldDesc->getDeviceId();
-                mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+                mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
 
                 if (deviceId != ioDesc->getDeviceId()) {
                     deviceId = ioDesc->getDeviceId();
-                    auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+                    auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
                     if (it != mAudioDeviceCallbacks.end()) {
                         callbacks = it->second;
                     }
                 }
-                ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
-                      "channel mask %#x frameCount %zu frameCountHAL %zu deviceId %d",
+                ALOGV("ioConfigChanged() new config for %s %s",
                       event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
-                      ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
-                      ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->mFrameCountHAL,
-                      ioDesc->getDeviceId());
+                      ioDesc->toDebugString().c_str());
 
             }
                 break;
             case AUDIO_CLIENT_STARTED: {
-                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
                 if (oldDesc == 0) {
-                    ALOGW("ioConfigChanged() start client on unknown io! %d", ioDesc->mIoHandle);
+                    ALOGW("ioConfigChanged() start client on unknown io! %d",
+                            ioDesc->getIoHandle());
                     break;
                 }
                 ALOGV("ioConfigChanged() AUDIO_CLIENT_STARTED  io %d port %d num callbacks %zu",
-                      ioDesc->mIoHandle, ioDesc->mPortId, mAudioDeviceCallbacks.size());
-                oldDesc->mPatch = ioDesc->mPatch;
-                auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+                      ioDesc->getIoHandle(), ioDesc->getPortId(), mAudioDeviceCallbacks.size());
+                oldDesc->setPatch(ioDesc->getPatch());
+                auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
                 if (it != mAudioDeviceCallbacks.end()) {
                     auto cbks = it->second;
-                    auto it2 = cbks.find(ioDesc->mPortId);
+                    auto it2 = cbks.find(ioDesc->getPortId());
                     if (it2 != cbks.end()) {
-                        callbacks.emplace(ioDesc->mPortId, it2->second);
+                        callbacks.emplace(ioDesc->getPortId(), it2->second);
                         deviceId = oldDesc->getDeviceId();
                     }
                 }
@@ -661,8 +662,8 @@
     // Callbacks must be called without mLock held. May lead to dead lock if calling for
     // example getRoutedDevice that updates the device and tries to acquire mLock.
     for (auto cb  : callbacksToCall) {
-        // If callbacksToCall is not empty, it implies ioDesc->mIoHandle and deviceId are valid
-        cb->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
+        // If callbacksToCall is not empty, it implies ioDesc->getIoHandle() and deviceId are valid
+        cb->onAudioDeviceUpdate(ioDesc->getIoHandle(), deviceId);
     }
 
     return Status::ok();
@@ -851,9 +852,8 @@
         name = device_name;
     }
 
-    media::AudioDevice deviceAidl;
-    deviceAidl.type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
-    deviceAidl.address = address;
+    AudioDevice deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(device, address));
 
     return statusTFromBinderStatus(
             aps->setDeviceConnectionState(
@@ -861,7 +861,8 @@
                     VALUE_OR_RETURN_STATUS(
                             legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(state)),
                     name,
-                    VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
+                    VALUE_OR_RETURN_STATUS(
+                            legacy2aidl_audio_format_t_AudioFormatDescription(encodedFormat))));
 }
 
 audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
@@ -870,9 +871,8 @@
     if (aps == 0) return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
 
     auto result = [&]() -> ConversionResult<audio_policy_dev_state_t> {
-        media::AudioDevice deviceAidl;
-        deviceAidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(device));
-        deviceAidl.address = device_address;
+        AudioDevice deviceAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_device_AudioDevice(device, device_address));
 
         media::AudioPolicyDeviceState result;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -900,13 +900,12 @@
         name = device_name;
     }
 
-    media::AudioDevice deviceAidl;
-    deviceAidl.type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
-    deviceAidl.address = address;
+    AudioDevice deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(device, address));
 
     return statusTFromBinderStatus(
             aps->handleDeviceConfigChange(deviceAidl, name, VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
+                    legacy2aidl_audio_format_t_AudioFormatDescription(encodedFormat))));
 }
 
 status_t AudioSystem::setPhoneState(audio_mode_t state, uid_t uid) {
@@ -955,7 +954,7 @@
     if (aps == 0) return AUDIO_IO_HANDLE_NONE;
 
     auto result = [&]() -> ConversionResult<audio_io_handle_t> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t outputAidl;
         RETURN_IF_ERROR(
@@ -1003,8 +1002,8 @@
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
     int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
-    media::AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_config_t_AudioConfig(*config));
+    AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
     int32_t flagsAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
     int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
@@ -1097,8 +1096,8 @@
     int32_t inputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
     int32_t riidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_unique_id_t_int32_t(riid));
     int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
-    media::AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_config_base_t_AudioConfigBase(*config));
+    AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(*config, true /*isInput*/));
     int32_t flagsAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
     int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_port_handle_t_int32_t(*selectedDeviceId));
@@ -1154,7 +1153,7 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t indexMinAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(indexMin));
     int32_t indexMaxAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(indexMax));
@@ -1168,10 +1167,11 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     return statusTFromBinderStatus(
             aps->setStreamVolumeIndex(streamAidl, deviceAidl, indexAidl));
 }
@@ -1182,9 +1182,10 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     int32_t indexAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getStreamVolumeIndex(streamAidl, deviceAidl, &indexAidl)));
@@ -1203,7 +1204,8 @@
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
     int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     return statusTFromBinderStatus(
             aps->setVolumeIndexForAttributes(attrAidl, deviceAidl, indexAidl));
 }
@@ -1216,7 +1218,8 @@
 
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     int32_t indexAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getVolumeIndexForAttributes(attrAidl, deviceAidl, &indexAidl)));
@@ -1255,7 +1258,7 @@
     if (aps == 0) return PRODUCT_STRATEGY_NONE;
 
     auto result = [&]() -> ConversionResult<product_strategy_t> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t resultAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -1265,19 +1268,20 @@
     return result.value_or(PRODUCT_STRATEGY_NONE);
 }
 
-audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
+DeviceTypeSet AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return AUDIO_DEVICE_NONE;
+    if (aps == 0) return DeviceTypeSet{};
 
-    auto result = [&]() -> ConversionResult<audio_devices_t> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+    auto result = [&]() -> ConversionResult<DeviceTypeSet> {
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
-        int32_t resultAidl;
+        std::vector<AudioDeviceDescription> resultAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
                 aps->getDevicesForStream(streamAidl, &resultAidl)));
-        return aidl2legacy_int32_t_audio_devices_t(resultAidl);
+        return convertContainer<DeviceTypeSet>(resultAidl,
+                aidl2legacy_AudioDeviceDescription_audio_devices_t);
     }();
-    return result.value_or(AUDIO_DEVICE_NONE);
+    return result.value_or(DeviceTypeSet{});
 }
 
 status_t AudioSystem::getDevicesForAttributes(const AudioAttributes& aa,
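
getDevicesForStream() now returns a DeviceTypeSet built from a vector of AudioDeviceDescription results, via convertContainer, instead of a single audio_devices_t bitmask. The sketch below shows the shape of that container conversion; DeviceType stands in for audio_devices_t, and the per-element conversion and error handling of the real helper are omitted.

#include <set>
#include <vector>

using DeviceType = unsigned;                 // stand-in for audio_devices_t
using DeviceTypeSet = std::set<DeviceType>;  // matches the set-of-types return shape

DeviceTypeSet toDeviceTypeSet(const std::vector<DeviceType>& devices) {
    return DeviceTypeSet(devices.begin(), devices.end());  // deduplicates device types
}
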
@@ -1290,7 +1294,7 @@
 
     media::AudioAttributesEx aaAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioAttributes_AudioAttributesEx(aa));
-    std::vector<media::AudioDevice> retAidl;
+    std::vector<AudioDevice> retAidl;
     RETURN_STATUS_IF_ERROR(
             statusTFromBinderStatus(aps->getDevicesForAttributes(aaAidl, &retAidl)));
     *devices = VALUE_OR_RETURN_STATUS(
@@ -1368,7 +1372,7 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t inPastMsAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(inPastMs));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1382,7 +1386,7 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t inPastMsAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(inPastMs));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1395,8 +1399,8 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioSourceType streamAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(stream));
+    AudioSource streamAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(stream));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->isSourceActive(streamAidl, state)));
     return OK;
@@ -1440,9 +1444,9 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == nullptr) return PERMISSION_DENIED;
 
-    std::vector<media::AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioUsage>>(systemUsages,
-                                                             legacy2aidl_audio_usage_t_AudioUsage));
+    std::vector<AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioUsage>>(systemUsages,
+                                                      legacy2aidl_audio_usage_t_AudioUsage));
     return statusTFromBinderStatus(aps->setSupportedSystemUsages(systemUsagesAidl));
 }
 
@@ -1462,7 +1466,7 @@
     if (aps == 0) return AUDIO_OFFLOAD_NOT_SUPPORTED;
 
     auto result = [&]() -> ConversionResult<audio_offload_mode_t> {
-        media::AudioOffloadInfo infoAidl = VALUE_OR_RETURN(
+        AudioOffloadInfo infoAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(info));
         media::AudioOffloadMode retAidl;
         RETURN_IF_ERROR(
@@ -1490,7 +1494,7 @@
             legacy2aidl_audio_port_role_t_AudioPortRole(role));
     media::AudioPortType typeAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_port_type_t_AudioPortType(type));
-    media::Int numPortsAidl;
+    Int numPortsAidl;
     numPortsAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_ports));
     std::vector<media::AudioPort> portsAidl;
     int32_t generationAidl;
@@ -1557,7 +1561,7 @@
     if (aps == 0) return PERMISSION_DENIED;
 
 
-    media::Int numPatchesAidl;
+    Int numPatchesAidl;
     numPatchesAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
     std::vector<media::AudioPatch> patchesAidl;
     int32_t generationAidl;
@@ -1696,7 +1700,8 @@
             statusTFromBinderStatus(aps->acquireSoundTriggerSession(&retAidl)));
     *session = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_session_t(retAidl.session));
     *ioHandle = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(retAidl.ioHandle));
-    *device = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_devices_t(retAidl.device));
+    *device = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(retAidl.device));
     return OK;
 }
 
@@ -1713,7 +1718,7 @@
     if (aps == 0) return AUDIO_MODE_INVALID;
 
     auto result = [&]() -> ConversionResult<audio_mode_t> {
-        media::AudioMode retAidl;
+        media::audio::common::AudioMode retAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(aps->getPhoneState(&retAidl)));
         return aidl2legacy_AudioMode_audio_mode_t(retAidl);
     }();
@@ -1738,8 +1743,8 @@
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
                                                               legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(aps->setUidDeviceAffinities(uidAidl, devicesAidl));
 }
@@ -1758,9 +1763,9 @@
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t userIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(userId));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setUserIdDeviceAffinities(userIdAidl, devicesAidl));
 }
@@ -1833,10 +1838,11 @@
     if (aps == 0) return NAN;
 
     auto result = [&]() -> ConversionResult<float> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t indexAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(index));
-        int32_t deviceAidl = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(device));
+        AudioDeviceDescription deviceAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
         float retAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
                 aps->getStreamVolumeDB(streamAidl, indexAidl, deviceAidl, &retAidl)));
@@ -1868,10 +1874,10 @@
 
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    media::Int numSurroundFormatsAidl;
+    Int numSurroundFormatsAidl;
     numSurroundFormatsAidl.value =
             VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
-    std::vector<media::audio::common::AudioFormat> surroundFormatsAidl;
+    std::vector<AudioFormatDescription> surroundFormatsAidl;
     std::vector<bool> surroundFormatsEnabledAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl,
@@ -1881,7 +1887,7 @@
             convertIntegral<unsigned int>(numSurroundFormatsAidl.value));
     RETURN_STATUS_IF_ERROR(
             convertRange(surroundFormatsAidl.begin(), surroundFormatsAidl.end(), surroundFormats,
-                         aidl2legacy_AudioFormat_audio_format_t));
+                         aidl2legacy_AudioFormatDescription_audio_format_t));
     std::copy(surroundFormatsEnabledAidl.begin(), surroundFormatsEnabledAidl.end(),
             surroundFormatsEnabled);
     return OK;
@@ -1895,10 +1901,10 @@
 
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    media::Int numSurroundFormatsAidl;
+    Int numSurroundFormatsAidl;
     numSurroundFormatsAidl.value =
             VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
-    std::vector<media::audio::common::AudioFormat> surroundFormatsAidl;
+    std::vector<AudioFormatDescription> surroundFormatsAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getReportedSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl)));
 
@@ -1906,7 +1912,7 @@
             convertIntegral<unsigned int>(numSurroundFormatsAidl.value));
     RETURN_STATUS_IF_ERROR(
             convertRange(surroundFormatsAidl.begin(), surroundFormatsAidl.end(), surroundFormats,
-                         aidl2legacy_AudioFormat_audio_format_t));
+                         aidl2legacy_AudioFormatDescription_audio_format_t));
     return OK;
 }
 
@@ -1914,8 +1920,8 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::audio::common::AudioFormat audioFormatAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_format_t_AudioFormat(audioFormat));
+    AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_format_t_AudioFormatDescription(audioFormat));
     return statusTFromBinderStatus(
             aps->setSurroundFormatEnabled(audioFormatAidl, enabled));
 }
@@ -1976,14 +1982,15 @@
             & aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    std::vector<media::audio::common::AudioFormat> formatsAidl;
-    int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
-
+    std::vector<AudioFormatDescription> formatsAidl;
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getHwOffloadFormatsSupportedForBluetoothMedia(deviceAidl, &formatsAidl)));
     *formats = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<audio_format_t>>(formatsAidl,
-                                                          aidl2legacy_AudioFormat_audio_format_t));
+            convertContainer<std::vector<audio_format_t>>(
+                    formatsAidl,
+                    aidl2legacy_AudioFormatDescription_audio_format_t));
     return OK;
 }
 
@@ -2122,9 +2129,9 @@
 
     int32_t strategyAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_product_strategy_t_int32_t(strategy));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setDevicesRoleForStrategy(strategyAidl, roleAidl, devicesAidl));
 }
@@ -2150,7 +2157,7 @@
     }
     int32_t strategyAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_product_strategy_t_int32_t(strategy));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl;
+    std::vector<AudioDevice> devicesAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getDevicesForRoleAndStrategy(strategyAidl, roleAidl, &devicesAidl)));
     devices = VALUE_OR_RETURN_STATUS(
@@ -2167,12 +2174,12 @@
         return PERMISSION_DENIED;
     }
 
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2184,12 +2191,12 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->addDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2200,12 +2207,12 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->removeDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2216,8 +2223,8 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
     return statusTFromBinderStatus(
             aps->clearDevicesRoleForCapturePreset(audioSourceAidl, roleAidl));
@@ -2230,10 +2237,10 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl;
+    std::vector<AudioDevice> devicesAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getDevicesForRoleAndCapturePreset(audioSourceAidl, roleAidl, &devicesAidl)));
     devices = VALUE_OR_RETURN_STATUS(
@@ -2272,16 +2279,41 @@
 
     std::optional<media::AudioAttributesInternal> attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
-    std::optional<media::AudioConfig> configAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_config_t_AudioConfig(configuration));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                   legacy2aidl_AudioDeviceTypeAddress));
+    std::optional<AudioConfig> configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(configuration, false /*isInput*/));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->canBeSpatialized(attrAidl, configAidl, devicesAidl, canBeSpatialized)));
     return OK;
 }
 
+status_t AudioSystem::getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                               const audio_config_t *config,
+                                               audio_direct_mode_t* directMode) {
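+    // Queries the audio policy service for the direct playback modes supported for the
+    // given attributes/config; the AIDL result is converted back into an
+    // audio_direct_mode_t bitmask.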
+    if (attr == nullptr || config == nullptr || directMode == nullptr) {
+        return BAD_VALUE;
+    }
+
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
+    AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
+
+    media::AudioDirectMode retAidl;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            aps->getDirectPlaybackSupport(attrAidl, configAidl, &retAidl)));
+    *directMode = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_direct_mode_t_mask(
+            static_cast<int32_t>(retAidl)));
+    return NO_ERROR;
+}
+
 
 class CaptureStateListenerImpl : public media::BnCaptureStateListener,
                                  public IBinder::DeathRecipient {
@@ -2347,6 +2379,31 @@
     return af->setVibratorInfos(vibratorInfos);
 }
 
+status_t AudioSystem::getMmapPolicyInfo(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
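+    // Forwards to AudioFlinger; as with the AAudio queries below, a negative status
+    // (PERMISSION_DENIED) is returned when the audioflinger service is unavailable.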
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getMmapPolicyInfos(policyType, policyInfos);
+}
+
+int32_t AudioSystem::getAAudioMixerBurstCount() {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getAAudioMixerBurstCount();
+}
+
+int32_t AudioSystem::getAAudioHardwareBurstMinUsec() {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getAAudioHardwareBurstMinUsec();
+}
+
 // ---------------------------------------------------------------------------
 
 int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
@@ -2458,12 +2515,12 @@
 Status AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate(
         int32_t event,
         const media::RecordClientInfo& clientInfo,
-        const media::AudioConfigBase& clientConfig,
+        const AudioConfigBase& clientConfig,
         const std::vector<media::EffectDescriptor>& clientEffects,
-        const media::AudioConfigBase& deviceConfig,
+        const AudioConfigBase& deviceConfig,
         const std::vector<media::EffectDescriptor>& effects,
         int32_t patchHandle,
-        media::AudioSourceType source) {
+        AudioSource source) {
     record_config_callback cb = NULL;
     {
         Mutex::Autolock _l(AudioSystem::gLock);
@@ -2475,13 +2532,13 @@
         record_client_info_t clientInfoLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 aidl2legacy_RecordClientInfo_record_client_info_t(clientInfo));
         audio_config_base_t clientConfigLegacy = VALUE_OR_RETURN_BINDER_STATUS(
-                aidl2legacy_AudioConfigBase_audio_config_base_t(clientConfig));
+                aidl2legacy_AudioConfigBase_audio_config_base_t(clientConfig, true /*isInput*/));
         std::vector<effect_descriptor_t> clientEffectsLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 convertContainer<std::vector<effect_descriptor_t>>(
                         clientEffects,
                         aidl2legacy_EffectDescriptor_effect_descriptor_t));
         audio_config_base_t deviceConfigLegacy = VALUE_OR_RETURN_BINDER_STATUS(
-                aidl2legacy_AudioConfigBase_audio_config_base_t(deviceConfig));
+                aidl2legacy_AudioConfigBase_audio_config_base_t(deviceConfig, true /*isInput*/));
         std::vector<effect_descriptor_t> effectsLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 convertContainer<std::vector<effect_descriptor_t>>(
                         effects,
@@ -2489,7 +2546,7 @@
         audio_patch_handle_t patchHandleLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 aidl2legacy_int32_t_audio_patch_handle_t(patchHandle));
         audio_source_t sourceLegacy = VALUE_OR_RETURN_BINDER_STATUS(
-                aidl2legacy_AudioSourceType_audio_source_t(source));
+                aidl2legacy_AudioSource_audio_source_t(source));
         cb(eventLegacy, &clientInfoLegacy, &clientConfigLegacy, clientEffectsLegacy,
            &deviceConfigLegacy, effectsLegacy, patchHandleLegacy, sourceLegacy);
     }
@@ -2533,7 +2590,7 @@
     legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
     legacy.uid = VALUE_OR_RETURN(aidl2legacy_int32_t_uid_t(aidl.uid));
     legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
-    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(aidl.source));
+    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
     legacy.port_id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
     legacy.silenced = aidl.silenced;
     return legacy;
@@ -2545,7 +2602,7 @@
     aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(legacy.riid));
     aidl.uid = VALUE_OR_RETURN(legacy2aidl_uid_t_int32_t(legacy.uid));
     aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source));
+    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source));
     aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.port_id));
     aidl.silenced = legacy.silenced;
     return aidl;
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index ad00bdb..bdf3147 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -173,8 +173,8 @@
     if (aps == 0) return false;
 
     auto result = [&]() -> ConversionResult<bool> {
-        media::AudioConfigBase configAidl = VALUE_OR_RETURN(
-                legacy2aidl_audio_config_base_t_AudioConfigBase(config));
+        media::audio::common::AudioConfigBase configAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_config_base_t_AudioConfigBase(config, false /*isInput*/));
         media::AudioAttributesInternal attributesAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
         bool retAidl;
@@ -258,8 +258,7 @@
         audio_channel_mask_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioTrackCallback> & callback,
         int32_t notificationFrames,
         audio_session_t sessionId,
         transfer_type transferType,
@@ -279,7 +278,85 @@
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
 
     (void)set(streamType, sampleRate, format, channelMask,
-            frameCount, flags, cbf, user, notificationFrames,
+            frameCount, flags, callback, notificationFrames,
+            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
+            attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+
+namespace {
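+    // Adapts the legacy C-style callback + cookie ('user') pair to the new
+    // IAudioTrackCallback interface so the pre-existing constructor and set()
+    // overloads keep delivering the same events.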
+    class LegacyCallbackWrapper : public AudioTrack::IAudioTrackCallback {
+        const AudioTrack::legacy_callback_t mCallback;
+        void* const mData;
+      public:
+        LegacyCallbackWrapper(AudioTrack::legacy_callback_t callback, void* user)
+            : mCallback(callback), mData(user) {}
+        size_t onMoreData(const AudioTrack::Buffer& buffer) override {
+            AudioTrack::Buffer copy = buffer;
+            mCallback(AudioTrack::EVENT_MORE_DATA, mData, static_cast<void*>(&copy));
+            return copy.size;
+        }
+        void onUnderrun() override {
+            mCallback(AudioTrack::EVENT_UNDERRUN, mData, nullptr);
+        }
+        void onLoopEnd(int32_t loopsRemaining) override {
+            mCallback(AudioTrack::EVENT_LOOP_END, mData, &loopsRemaining);
+        }
+        void onMarker(uint32_t markerPosition) override {
+            mCallback(AudioTrack::EVENT_MARKER, mData, &markerPosition);
+        }
+        void onNewPos(uint32_t newPos) override {
+            mCallback(AudioTrack::EVENT_NEW_POS, mData, &newPos);
+        }
+        void onBufferEnd() override {
+            mCallback(AudioTrack::EVENT_BUFFER_END, mData, nullptr);
+        }
+        void onNewIAudioTrack() override {
+            mCallback(AudioTrack::EVENT_NEW_IAUDIOTRACK, mData, nullptr);
+        }
+        void onStreamEnd() override {
+            mCallback(AudioTrack::EVENT_STREAM_END, mData, nullptr);
+        }
+        size_t onCanWriteMoreData(const AudioTrack::Buffer& buffer) override {
+            AudioTrack::Buffer copy = buffer;
+            mCallback(AudioTrack::EVENT_CAN_WRITE_MORE_DATA, mData, static_cast<void*>(&copy));
+            return copy.size;
+        }
+    };
+}
+
+AudioTrack::AudioTrack(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        audio_output_flags_t flags,
+        legacy_callback_t callback,
+        void* user,
+        int32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed,
+        audio_port_handle_t selectedDeviceId)
+    : mStatus(NO_INIT),
+      mState(STATE_STOPPED),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0),
+      mAudioTrackCallback(new AudioTrackCallback())
+{
+    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    if (callback != nullptr) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    (void)set(streamType, sampleRate, format, channelMask,
+            frameCount, flags, mLegacyCallbackWrapper, notificationFrames,
             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
             attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
 }
@@ -291,8 +368,7 @@
         audio_channel_mask_t channelMask,
         const sp<IMemory>& sharedBuffer,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioTrackCallback>& callback,
         int32_t notificationFrames,
         audio_session_t sessionId,
         transfer_type transferType,
@@ -312,11 +388,49 @@
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
 
     (void)set(streamType, sampleRate, format, channelMask,
-            0 /*frameCount*/, flags, cbf, user, notificationFrames,
+            0 /*frameCount*/, flags, callback, notificationFrames,
             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
             attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed);
 }
 
+AudioTrack::AudioTrack(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        const sp<IMemory>& sharedBuffer,
+        audio_output_flags_t flags,
+        legacy_callback_t callback,
+        void* user,
+        int32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed)
+    : mStatus(NO_INIT),
+      mState(STATE_STOPPED),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mAudioTrackCallback(new AudioTrackCallback())
+{
+    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+
+    (void)set(streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
+              mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
+              false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, attributionSource,
+              pAttributes, doNotReconnect, maxRequiredSpeed);
+}
+
 AudioTrack::~AudioTrack()
 {
     // pull together the numbers, before we clean up our structures
@@ -379,8 +493,38 @@
         audio_channel_mask_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        legacy_callback_t callback,
+        void * user,
+        int32_t notificationFrames,
+        const sp<IMemory>& sharedBuffer,
+        bool threadCanCallJava,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed,
+        audio_port_handle_t selectedDeviceId)
+{
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(streamType, sampleRate, format, channelMask, frameCount, flags,
+               mLegacyCallbackWrapper, notificationFrames, sharedBuffer, threadCanCallJava,
+               sessionId, transferType, offloadInfo, attributionSource, pAttributes,
+               doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+
+status_t AudioTrack::set(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        audio_output_flags_t flags,
+        const wp<IAudioTrackCallback>& callback,
         int32_t notificationFrames,
         const sp<IMemory>& sharedBuffer,
         bool threadCanCallJava,
@@ -399,8 +543,8 @@
     pid_t myPid;
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
     pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
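+    // Promote the weak callback once up front; a null result means the client did not
+    // supply a callback, which drives the default transfer type and whether the
+    // callback thread is created below.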
+    sp<IAudioTrackCallback> _callback = callback.promote();
     std::string errorMessage;
-
     // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
@@ -462,7 +606,7 @@
     case TRANSFER_DEFAULT:
         if (sharedBuffer != 0) {
             transferType = TRANSFER_SHARED;
-        } else if (cbf == NULL || threadCanCallJava) {
+        } else if (_callback == nullptr || threadCanCallJava) {
             transferType = TRANSFER_SYNC;
         } else {
             transferType = TRANSFER_CALLBACK;
@@ -470,9 +614,9 @@
         break;
     case TRANSFER_CALLBACK:
     case TRANSFER_SYNC_NOTIF_CALLBACK:
-        if (cbf == NULL || sharedBuffer != 0) {
+        if (_callback == nullptr || sharedBuffer != 0) {
             errorMessage = StringPrintf(
-                    "%s: Transfer type %s but cbf == NULL || sharedBuffer != 0",
+                    "%s: Transfer type %s but callback == nullptr || sharedBuffer != 0",
                     __func__, convertTransferToText(transferType));
             status = BAD_VALUE;
             goto error;
@@ -623,10 +767,10 @@
         mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
     }
     mAuxEffectId = 0;
-    mCbf = cbf;
+    mCallback = callback;
 
-    if (cbf != NULL) {
-        mAudioTrackThread = new AudioTrackThread(*this);
+    if (_callback != nullptr) {
+        mAudioTrackThread = sp<AudioTrackThread>::make(*this);
         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
         // thread begins in paused state, and will not reference us until start()
     }
@@ -646,7 +790,6 @@
         goto exit;
     }
 
-    mUserData = user;
     mLoopCount = 0;
     mLoopStart = 0;
     mLoopEnd = 0;
@@ -696,7 +839,7 @@
         uint32_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
+        legacy_callback_t callback,
         void* user,
         int32_t notificationFrames,
         const sp<IMemory>& sharedBuffer,
@@ -715,11 +858,15 @@
     attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(uid));
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(pid));
     attributionSource.token = sp<BBinder>::make();
-    return set(streamType, sampleRate, format,
-            static_cast<audio_channel_mask_t>(channelMask),
-            frameCount, flags, cbf, user, notificationFrames, sharedBuffer,
-            threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
-            pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(streamType, sampleRate, format, static_cast<audio_channel_mask_t>(channelMask),
+               frameCount, flags, mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
+               threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
+               pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
 }
 
 // -------------------------------------------------------------------------
@@ -1335,10 +1482,6 @@
     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
         return NO_INIT;
     }
-    // Reject if timed track or compressed audio.
-    if (!audio_is_linear_pcm(mFormat)) {
-        return INVALID_OPERATION;
-    }
 
     ssize_t originalBufferSize = mProxy->getBufferSizeInFrames();
     ssize_t finalBufferSize  = mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
@@ -1432,7 +1575,7 @@
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
     // The only purpose of setting marker position is to get a callback
-    if (mCbf == NULL || isOffloadedOrDirect()) {
+    if (!mCallback.promote() || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -1465,7 +1608,7 @@
 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
     // The only purpose of setting position update period is to get a callback
-    if (mCbf == NULL || isOffloadedOrDirect()) {
+    if (!mCallback.promote() || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -1615,6 +1758,8 @@
 
 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
     AutoMutex lock(mLock);
+    ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
+            __func__, mPortId, deviceId, mSelectedDeviceId);
     if (mSelectedDeviceId != deviceId) {
         mSelectedDeviceId = deviceId;
         if (mStatus == NO_ERROR) {
@@ -1862,7 +2007,7 @@
                 mAwaitBoost = true;
             }
         } else {
-            ALOGD("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
+            ALOGV("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
                   __func__, mPortId, mReqFrameCount, mFrameCount);
         }
     }
@@ -2268,10 +2413,14 @@
 {
     // Currently the AudioTrack thread is not created if there are no callbacks.
     // Would it ever make sense to run the thread, even without callbacks?
-    // If so, then replace this by checks at each use for mCbf != NULL.
+    // If so, then replace this with checks at each use that mCallback promotes to non-null.
     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
-
     mLock.lock();
+    sp<IAudioTrackCallback> callback = mCallback.promote();
+    if (!callback) {
+        mCallback = nullptr;
+        mLock.unlock();
+        return NS_NEVER;
+    }
     if (mAwaitBoost) {
         mAwaitBoost = false;
         mLock.unlock();
@@ -2369,7 +2518,7 @@
     sp<AudioTrackClientProxy> proxy = mProxy;
 
     // Determine the number of new loop callback(s) that will be needed, while locked.
-    int loopCountNotifications = 0;
+    uint32_t loopCountNotifications = 0;
     uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
 
     if (mLoopCount > 0) {
@@ -2391,7 +2540,7 @@
     }
 
     // These fields don't need to be cached, because they are assigned only by set():
-    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
+    //     mTransfer, mCallback, mFormat, mFrameSize, mFlags
     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
 
     mLock.unlock();
@@ -2416,7 +2565,7 @@
             if (status != DEAD_OBJECT) {
                 // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
                 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
-                mCbf(EVENT_STREAM_END, mUserData, NULL);
+                callback->onStreamEnd();
             }
             {
                 AutoMutex lock(mLock);
@@ -2439,28 +2588,27 @@
 
     // perform callbacks while unlocked
     if (newUnderrun) {
-        mCbf(EVENT_UNDERRUN, mUserData, NULL);
+        callback->onUnderrun();
     }
     while (loopCountNotifications > 0) {
-        mCbf(EVENT_LOOP_END, mUserData, NULL);
         --loopCountNotifications;
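+        // A negative value reports that the remaining loop count is not finite
+        // (i.e. mLoopCount <= 0 here).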
+        callback->onLoopEnd(mLoopCount > 0 ? loopCountNotifications + mLoopCountNotified : -1);
     }
     if (flags & CBLK_BUFFER_END) {
-        mCbf(EVENT_BUFFER_END, mUserData, NULL);
+        callback->onBufferEnd();
     }
     if (markerReached) {
-        mCbf(EVENT_MARKER, mUserData, &markerPosition);
+        callback->onMarker(markerPosition.value());
     }
     while (newPosCount > 0) {
-        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
-        mCbf(EVENT_NEW_POS, mUserData, &temp);
+        callback->onNewPos(newPosition.value());
         newPosition += updatePeriod;
         newPosCount--;
     }
 
     if (mObservedSequence != sequence) {
         mObservedSequence = sequence;
-        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
+        callback->onNewIAudioTrack();
         // for offloaded tracks, just wait for the upper layers to recreate the track
         if (isOffloadedOrDirect()) {
             return NS_INACTIVE;
@@ -2598,10 +2746,9 @@
             // written in the next write() call, since it's not passed through the callback
             audioBuffer.size += nonContig;
         }
-        mCbf(mTransfer == TRANSFER_CALLBACK ? EVENT_MORE_DATA : EVENT_CAN_WRITE_MORE_DATA,
-                mUserData, &audioBuffer);
-        size_t writtenSize = audioBuffer.size;
-
+        const size_t writtenSize = (mTransfer == TRANSFER_CALLBACK)
+                                      ? callback->onMoreData(audioBuffer)
+                                      : callback->onCanWriteMoreData(audioBuffer);
         // Validate on returned size
         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
             ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
@@ -2661,6 +2808,9 @@
             return ns;
         }
 
+        // releaseBuffer reads from audioBuffer.size
+        audioBuffer.size = writtenSize;
+
         size_t releasedFrames = writtenSize / mFrameSize;
         audioBuffer.frameCount = releasedFrames;
         mRemainingFrames -= releasedFrames;
diff --git a/media/libaudioclient/AudioVolumeGroup.cpp b/media/libaudioclient/AudioVolumeGroup.cpp
index 361f7b8..ab95246 100644
--- a/media/libaudioclient/AudioVolumeGroup.cpp
+++ b/media/libaudioclient/AudioVolumeGroup.cpp
@@ -26,11 +26,10 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
+using media::audio::common::AudioStreamType;
+
 status_t AudioVolumeGroup::readFromParcel(const Parcel *parcel)
 {
     media::AudioVolumeGroup aidl;
@@ -55,7 +54,7 @@
                     legacy.getAudioAttributes(),
                     legacy2aidl_audio_attributes_t_AudioAttributesInternal));
     aidl.streams = VALUE_OR_RETURN(
-            convertContainer<std::vector<media::AudioStreamType>>(legacy.getStreamTypes(),
+            convertContainer<std::vector<AudioStreamType>>(legacy.getStreamTypes(),
             legacy2aidl_audio_stream_type_t_AudioStreamType));
     return aidl;
 }
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 2af1c50..88e7396 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -17,6 +17,7 @@
 
 #define LOG_TAG "IAudioFlinger"
 //#define LOG_NDEBUG 0
+
 #include <utils/Log.h>
 
 #include <stdint.h>
@@ -30,6 +31,13 @@
 
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUuid;
 
 #define MAX_ITEMS_PER_LIST 1024
 
@@ -40,12 +48,6 @@
        std::move(_tmp.value()); \
      })
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-       auto _tmp = (x);              \
-       if (_tmp != OK) return _tmp;  \
-    }
-
 #define RETURN_BINDER_IF_ERROR(x)                         \
     {                                                     \
        auto _tmp = (x);                                   \
@@ -55,7 +57,9 @@
 ConversionResult<media::CreateTrackRequest> IAudioFlinger::CreateTrackInput::toAidl() const {
     media::CreateTrackRequest aidl;
     aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
-    aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(config));
+    // Do not be misled by 'Input'--this is an input to 'createTrack', which creates output tracks.
+    aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(
+                    config, false /*isInput*/));
     aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
     aidl.sharedBuffer = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(sharedBuffer));
     aidl.notificationsPerBuffer = VALUE_OR_RETURN(convertIntegral<int32_t>(notificationsPerBuffer));
@@ -74,7 +78,9 @@
 IAudioFlinger::CreateTrackInput::fromAidl(const media::CreateTrackRequest& aidl) {
     IAudioFlinger::CreateTrackInput legacy;
     legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
-    legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.config));
+    // Do not be misled by 'Input'--this is an input to 'createTrack', which creates output tracks.
+    legacy.config = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfig_audio_config_t(aidl.config, false /*isInput*/));
     legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
     legacy.sharedBuffer = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.sharedBuffer));
     legacy.notificationsPerBuffer = VALUE_OR_RETURN(
@@ -139,7 +145,8 @@
 IAudioFlinger::CreateRecordInput::toAidl() const {
     media::CreateRecordRequest aidl;
     aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
-    aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(config));
+    aidl.config = VALUE_OR_RETURN(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(config, true /*isInput*/));
     aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
     aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(riid));
     aidl.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
@@ -159,7 +166,8 @@
     IAudioFlinger::CreateRecordInput legacy;
     legacy.attr = VALUE_OR_RETURN(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
-    legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
+    legacy.config = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config, true /*isInput*/));
     legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
     legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
     legacy.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
@@ -189,6 +197,8 @@
     aidl.buffers = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(buffers));
     aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(portId));
     aidl.audioRecord = audioRecord;
+    aidl.serverConfig = VALUE_OR_RETURN(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(serverConfig, true /*isInput*/));
     return aidl;
 }
 
@@ -209,6 +219,8 @@
     legacy.buffers = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.buffers));
     legacy.portId = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
     legacy.audioRecord = aidl.audioRecord;
+    legacy.serverConfig = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.serverConfig, true /*isInput*/));
     return legacy;
 }
 
@@ -242,9 +254,9 @@
 audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
     auto result = [&]() -> ConversionResult<audio_format_t> {
         int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
-        media::audio::common::AudioFormat aidlRet;
+        AudioFormatDescription aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
-        return aidl2legacy_AudioFormat_audio_format_t(aidlRet);
+        return aidl2legacy_AudioFormatDescription_audio_format_t(aidlRet);
     }();
     return result.value_or(AUDIO_FORMAT_INVALID);
 }
@@ -309,14 +321,14 @@
 
 status_t AudioFlingerClientAdapter::setStreamVolume(audio_stream_type_t stream, float value,
                                                     audio_io_handle_t output) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
     return statusTFromBinderStatus(mDelegate->setStreamVolume(streamAidl, value, outputAidl));
 }
 
 status_t AudioFlingerClientAdapter::setStreamMute(audio_stream_type_t stream, bool muted) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     return statusTFromBinderStatus(mDelegate->setStreamMute(streamAidl, muted));
 }
@@ -324,7 +336,7 @@
 float AudioFlingerClientAdapter::streamVolume(audio_stream_type_t stream,
                                               audio_io_handle_t output) const {
     auto result = [&]() -> ConversionResult<float> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+        AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
         float aidlRet;
@@ -338,7 +350,7 @@
 
 bool AudioFlingerClientAdapter::streamMute(audio_stream_type_t stream) const {
     auto result = [&]() -> ConversionResult<bool> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+        AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         bool aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -350,7 +362,7 @@
 }
 
 status_t AudioFlingerClientAdapter::setMode(audio_mode_t mode) {
-    media::AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
+    AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
     return statusTFromBinderStatus(mDelegate->setMode(modeAidl));
 }
 
@@ -410,10 +422,10 @@
                                                      audio_channel_mask_t channelMask) const {
     auto result = [&]() -> ConversionResult<size_t> {
         int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
-        media::audio::common::AudioFormat formatAidl = VALUE_OR_RETURN(
-                legacy2aidl_audio_format_t_AudioFormat(format));
-        int32_t channelMaskAidl = VALUE_OR_RETURN(
-                legacy2aidl_audio_channel_mask_t_int32_t(channelMask));
+        AudioFormatDescription formatAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_format_t_AudioFormatDescription(format));
+        AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_channel_mask_t_AudioChannelLayout(channelMask, true /*isInput*/));
         int64_t aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(
                 mDelegate->getInputBufferSize(sampleRateAidl, formatAidl, channelMaskAidl,
@@ -469,7 +481,7 @@
 }
 
 status_t AudioFlingerClientAdapter::invalidateStream(audio_stream_type_t stream) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     return statusTFromBinderStatus(mDelegate->invalidateStream(streamAidl));
 }
@@ -568,9 +580,9 @@
                                                         const effect_uuid_t* pTypeUUID,
                                                         uint32_t preferredTypeFlag,
                                                         effect_descriptor_t* pDescriptor) const {
-    media::AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
+    AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_uuid_t_AudioUuid(*pEffectUUID));
-    media::AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
+    AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_uuid_t_AudioUuid(*pTypeUUID));
     int32_t preferredTypeFlagAidl = VALUE_OR_RETURN_STATUS(
             convertReinterpret<int32_t>(preferredTypeFlag));
@@ -765,6 +777,32 @@
     return statusTFromBinderStatus(mDelegate->updateSecondaryOutputs(trackSecondaryOutputInfos));
 }
 
+status_t AudioFlingerClientAdapter::getMmapPolicyInfos(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    return statusTFromBinderStatus(mDelegate->getMmapPolicyInfos(policyType, policyInfos));
+}
+
+int32_t AudioFlingerClientAdapter::getAAudioMixerBurstCount() {
+    auto result = [&]() -> ConversionResult<int32_t> {
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->getAAudioMixerBurstCount(&aidlRet)));
+        return convertIntegral<int32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+int32_t AudioFlingerClientAdapter::getAAudioHardwareBurstMinUsec() {
+    auto result = [&]() -> ConversionResult<int32_t> {
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getAAudioHardwareBurstMinUsec(&aidlRet)));
+        return convertIntegral<int32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // AudioFlingerServerAdapter
@@ -810,11 +848,11 @@
 }
 
 Status AudioFlingerServerAdapter::format(int32_t output,
-                                         media::audio::common::AudioFormat* _aidl_return) {
+                                         AudioFormatDescription* _aidl_return) {
     audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_int32_t_audio_io_handle_t(output));
     *_aidl_return = VALUE_OR_RETURN_BINDER(
-            legacy2aidl_audio_format_t_AudioFormat(mDelegate->format(outputLegacy)));
+            legacy2aidl_audio_format_t_AudioFormatDescription(mDelegate->format(outputLegacy)));
     return Status::ok();
 }
 
@@ -860,7 +898,7 @@
     return Status::fromStatusT(mDelegate->getMasterBalance(_aidl_return));
 }
 
-Status AudioFlingerServerAdapter::setStreamVolume(media::AudioStreamType stream, float value,
+Status AudioFlingerServerAdapter::setStreamVolume(AudioStreamType stream, float value,
                                                   int32_t output) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
@@ -869,13 +907,13 @@
     return Status::fromStatusT(mDelegate->setStreamVolume(streamLegacy, value, outputLegacy));
 }
 
-Status AudioFlingerServerAdapter::setStreamMute(media::AudioStreamType stream, bool muted) {
+Status AudioFlingerServerAdapter::setStreamMute(AudioStreamType stream, bool muted) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     return Status::fromStatusT(mDelegate->setStreamMute(streamLegacy, muted));
 }
 
-Status AudioFlingerServerAdapter::streamVolume(media::AudioStreamType stream, int32_t output,
+Status AudioFlingerServerAdapter::streamVolume(AudioStreamType stream, int32_t output,
                                                float* _aidl_return) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
@@ -885,14 +923,14 @@
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::streamMute(media::AudioStreamType stream, bool* _aidl_return) {
+Status AudioFlingerServerAdapter::streamMute(AudioStreamType stream, bool* _aidl_return) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     *_aidl_return = mDelegate->streamMute(streamLegacy);
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::setMode(media::AudioMode mode) {
+Status AudioFlingerServerAdapter::setMode(AudioMode mode) {
     audio_mode_t modeLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioMode_audio_mode_t(mode));
     return Status::fromStatusT(mDelegate->setMode(modeLegacy));
 }
@@ -938,13 +976,14 @@
 }
 
 Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
-                                                     media::audio::common::AudioFormat format,
-                                                     int32_t channelMask, int64_t* _aidl_return) {
+                                                     const AudioFormatDescription& format,
+                                                     const AudioChannelLayout& channelMask,
+                                                     int64_t* _aidl_return) {
     uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
     audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
-            aidl2legacy_AudioFormat_audio_format_t(format));
+            aidl2legacy_AudioFormatDescription_audio_format_t(format));
     audio_channel_mask_t channelMaskLegacy = VALUE_OR_RETURN_BINDER(
-            aidl2legacy_int32_t_audio_channel_mask_t(channelMask));
+            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(channelMask, true /*isInput*/));
     size_t size = mDelegate->getInputBufferSize(sampleRateLegacy, formatLegacy, channelMaskLegacy);
     *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int64_t>(size));
     return Status::ok();
@@ -995,7 +1034,7 @@
     return Status::fromStatusT(mDelegate->closeInput(inputLegacy));
 }
 
-Status AudioFlingerServerAdapter::invalidateStream(media::AudioStreamType stream) {
+Status AudioFlingerServerAdapter::invalidateStream(AudioStreamType stream) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     return Status::fromStatusT(mDelegate->invalidateStream(streamLegacy));
@@ -1070,8 +1109,8 @@
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::getEffectDescriptor(const media::AudioUuid& effectUUID,
-                                                      const media::AudioUuid& typeUUID,
+Status AudioFlingerServerAdapter::getEffectDescriptor(const AudioUuid& effectUUID,
+                                                      const AudioUuid& typeUUID,
                                                       int32_t preferredTypeFlag,
                                                       media::EffectDescriptor* _aidl_return) {
     effect_uuid_t effectUuidLegacy = VALUE_OR_RETURN_BINDER(
@@ -1236,4 +1275,21 @@
     return Status::fromStatusT(mDelegate->updateSecondaryOutputs(trackSecondaryOutputs));
 }
 
+Status AudioFlingerServerAdapter::getMmapPolicyInfos(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *_aidl_return) {
+    return Status::fromStatusT(mDelegate->getMmapPolicyInfos(policyType, _aidl_return));
+}
+
+Status AudioFlingerServerAdapter::getAAudioMixerBurstCount(int32_t* _aidl_return) {
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->getAAudioMixerBurstCount()));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) {
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->getAAudioHardwareBurstMinUsec()));
+    return Status::ok();
+}
+
 } // namespace android
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index 25fdb49..fd94568 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -25,6 +25,7 @@
 namespace android {
 
 using base::unexpected;
+using media::audio::common::AudioDeviceAddress;
 
 ConversionResult<volume_group_t>
 aidl2legacy_int32_t_volume_group_t(int32_t aidl) {
@@ -152,7 +153,7 @@
 
         case media::AudioMixMatchCriterionValue::source:
             legacy.mSource = VALUE_OR_RETURN(
-                    aidl2legacy_AudioSourceType_audio_source_t(UNION_GET(aidl, source).value()));
+                    aidl2legacy_AudioSource_audio_source_t(UNION_GET(aidl, source).value()));
             *rule |= RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET;
             return legacy;
 
@@ -184,7 +185,7 @@
 
         case RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET:
             UNION_SET(aidl, source,
-                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.mSource)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.mSource)));
             break;
 
         case RULE_MATCH_UID:
@@ -232,11 +233,14 @@
                                  std::back_inserter(legacy.mCriteria),
                                  aidl2legacy_AudioMixMatchCriterion));
     legacy.mMixType = VALUE_OR_RETURN(aidl2legacy_AudioMixType_uint32_t(aidl.mixType));
-    legacy.mFormat = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.format));
+    // See 'convertAudioMixToNative' in 'android_media_AudioSystem.cpp' -- only
+    // an output mask is expected here.
+    legacy.mFormat = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(
+                    aidl.format, false /*isInput*/));
     legacy.mRouteFlags = VALUE_OR_RETURN(
             aidl2legacy_AudioMixRouteFlag_uint32_t_mask(aidl.routeFlags));
-    legacy.mDeviceType = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
-    legacy.mDeviceAddress = VALUE_OR_RETURN(aidl2legacy_string_view_String8(aidl.device.address));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+                    aidl.device, &legacy.mDeviceType, &legacy.mDeviceAddress));
     legacy.mCbFlags = VALUE_OR_RETURN(aidl2legacy_AudioMixCallbackFlag_uint32_t_mask(aidl.cbFlags));
     legacy.mAllowPrivilegedMediaPlaybackCapture = aidl.allowPrivilegedMediaPlaybackCapture;
     legacy.mVoiceCommunicationCaptureAllowed = aidl.voiceCommunicationCaptureAllowed;
@@ -251,11 +255,15 @@
                     legacy.mCriteria,
                     legacy2aidl_AudioMixMatchCriterion));
     aidl.mixType = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixType(legacy.mMixType));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(legacy.mFormat));
+    // See 'convertAudioMixToNative' in 'android_media_AudioSystem.cpp' -- only
+    // an output mask is expected here.
+    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(
+                    legacy.mFormat, false /*isInput*/));
     aidl.routeFlags = VALUE_OR_RETURN(
             legacy2aidl_uint32_t_AudioMixRouteFlag_mask(legacy.mRouteFlags));
-    aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.mDeviceType));
-    aidl.device.address = VALUE_OR_RETURN(legacy2aidl_String8_string(legacy.mDeviceAddress));
+    aidl.device = VALUE_OR_RETURN(
+            legacy2aidl_audio_device_AudioDevice(
+                    legacy.mDeviceType, legacy.mDeviceAddress));
     aidl.cbFlags = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixCallbackFlag_mask(legacy.mCbFlags));
     aidl.allowPrivilegedMediaPlaybackCapture = legacy.mAllowPrivilegedMediaPlaybackCapture;
     aidl.voiceCommunicationCaptureAllowed = legacy.mVoiceCommunicationCaptureAllowed;
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
new file mode 100644
index 0000000..d8c18c0
--- /dev/null
+++ b/media/libaudioclient/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+  "presubmit": [
+    {
+       "name": "audio_aidl_conversion_tests"
+    }
+  ]
+}
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index e5e8496..cd3eacb 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -977,7 +977,7 @@
 //    Method:        ToneGenerator::ToneGenerator()
 //
 //    Description:    Constructor. Initializes the tone sequencer, instantiates required sine wave
-//        generators, instantiates output audio track.
+//        generators; the output audio track is initialized later in onFirstRef().
 //
 //    Input:
 //        streamType:        Type of stream used for tone playback
@@ -1041,6 +1041,23 @@
         mRegion = CEPT;
     }
 
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+//    Method:        ToneGenerator::onFirstRef()
+//
+//    Description:  Called upon the first RefBase reference. Initializes the audio track
+//                  with a weak pointer to self as the registered callback.
+//    Input:
+//        none
+//
+//    Output:
+//        none
+//
+////////////////////////////////////////////////////////////////////////////////
+
+void ToneGenerator::onFirstRef() {
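+    // Deferred from the constructor: initAudioTrack() registers a weak reference to
+    // 'this' as the track callback (wp<>::fromExisting below), which is only safe once
+    // the object is already managed by RefBase.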
     if (initAudioTrack()) {
         ALOGV("ToneGenerator INIT OK, time: %d", (unsigned int)(systemTime()/1000000));
     } else {
@@ -1048,9 +1065,6 @@
     }
 }
 
-
-
-
 ////////////////////////////////////////////////////////////////////////////////
 //
 //    Method:        ToneGenerator::~ToneGenerator()
@@ -1282,8 +1296,7 @@
             AUDIO_CHANNEL_OUT_MONO,
             frameCount,
             AUDIO_OUTPUT_FLAG_FAST,
-            audioCallback,
-            this, // user
+            wp<AudioTrack::IAudioTrackCallback>::fromExisting(this),
             0,    // notificationFrames
             0,    // sharedBuffer
             mThreadCanCallJava,
@@ -1308,50 +1321,47 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 //
-//    Method:        ToneGenerator::audioCallback()
+//    Method:        ToneGenerator::onMoreData()
 //
 //    Description:    AudioTrack callback implementation. Generates a block of
 //        PCM samples
 //        and manages tone generator sequencer: tones pulses, tone duration...
 //
 //    Input:
-//        user    reference (pointer to our ToneGenerator)
-//        info    audio buffer descriptor
+//        buffer  A buffer object containing a pointer that we will fill with
+//                buffer.size bytes.
 //
 //    Output:
-//        returned value: always true.
+//        The number of bytes we successfully wrote.
 //
 ////////////////////////////////////////////////////////////////////////////////
-void ToneGenerator::audioCallback(int event, void* user, void *info) {
+size_t ToneGenerator::onMoreData(const AudioTrack::Buffer& buffer) {
 
-    if (event != AudioTrack::EVENT_MORE_DATA) return;
-
-    AudioTrack::Buffer *buffer = static_cast<AudioTrack::Buffer *>(info);
-    ToneGenerator *lpToneGen = static_cast<ToneGenerator *>(user);
-    int16_t *lpOut = buffer->i16;
-    unsigned int lNumSmp = buffer->size/sizeof(int16_t);
-    const ToneDescriptor *lpToneDesc = lpToneGen->mpToneDesc;
-
-    if (buffer->size == 0) return;
-
+    int16_t *lpOut = buffer.i16;
+    uint32_t lNumSmp = (buffer.size / sizeof(int16_t) < UINT32_MAX) ?
+            buffer.size / sizeof(int16_t) : UINT32_MAX;
+    if (buffer.size == 0) return 0;
+    // We will write to the entire buffer unless we are stopped, in which case we
+    // return 0 at the end of the loop.
+    size_t bytesWritten = lNumSmp * sizeof(int16_t);
 
     // Clear output buffer: WaveGenerator accumulates into lpOut buffer
-    memset(lpOut, 0, buffer->size);
+    memset(lpOut, 0, buffer.size);
 
     while (lNumSmp) {
-        unsigned int lReqSmp = lNumSmp < lpToneGen->mProcessSize*2 ? lNumSmp : lpToneGen->mProcessSize;
+        unsigned int lReqSmp = lNumSmp < mProcessSize*2 ? lNumSmp : mProcessSize;
         unsigned int lGenSmp;
         unsigned int lWaveCmd = WaveGenerator::WAVEGEN_CONT;
         bool lSignal = false;
 
-        lpToneGen->mLock.lock();
+        mLock.lock();
 
 
         // Update pcm frame count and end time (current time at the end of this process)
-        lpToneGen->mTotalSmp += lReqSmp;
+        mTotalSmp += lReqSmp;
 
         // Update tone gen state machine and select wave gen command
-        switch (lpToneGen->mState) {
+        switch (mState) {
         case TONE_PLAYING:
             lWaveCmd = WaveGenerator::WAVEGEN_CONT;
             break;
@@ -1365,7 +1375,7 @@
             ALOGV("Stop/restart Cbk");
 
             lWaveCmd = WaveGenerator::WAVEGEN_STOP;
-            lpToneGen->mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
+            mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
             break;
         case TONE_STOPPED:
             ALOGV("Stopped Cbk");
@@ -1376,20 +1386,20 @@
         }
 
         // Exit if tone sequence is over
-        if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0 ||
-            lpToneGen->mTotalSmp > lpToneGen->mMaxSmp) {
-            if (lpToneGen->mState == TONE_PLAYING) {
-                lpToneGen->mState = TONE_STOPPING;
+        if (mpToneDesc->segments[mCurSegment].duration == 0 ||
+            mTotalSmp > mMaxSmp) {
+            if (mState == TONE_PLAYING) {
+                mState = TONE_STOPPING;
             }
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0) {
+            if (mpToneDesc->segments[mCurSegment].duration == 0) {
                 goto audioCallback_EndLoop;
             }
             // fade out before stopping if maximum duration reached
             lWaveCmd = WaveGenerator::WAVEGEN_STOP;
-            lpToneGen->mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
+            mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
         }
 
-        if (lpToneGen->mTotalSmp > lpToneGen->mNextSegSmp) {
+        if (mTotalSmp > mNextSegSmp) {
             // Time to go to next sequence segment
 
             ALOGV("End Segment, time: %d", (unsigned int)(systemTime()/1000000));
@@ -1397,61 +1407,61 @@
             lGenSmp = lReqSmp;
 
             // If segment,  ON -> OFF transition : ramp volume down
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
+            if (mpToneDesc->segments[mCurSegment].waveFreq[0] != 0) {
                 lWaveCmd = WaveGenerator::WAVEGEN_STOP;
                 unsigned int lFreqIdx = 0;
-                uint16_t lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
+                uint16_t lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[lFreqIdx];
 
                 while (lFrequency != 0) {
-                    WaveGenerator *lpWaveGen = lpToneGen->mWaveGens.valueFor(lFrequency);
+                    WaveGenerator *lpWaveGen = mWaveGens.valueFor(lFrequency);
                     lpWaveGen->getSamples(lpOut, lGenSmp, lWaveCmd);
-                    lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[++lFreqIdx];
+                    lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[++lFreqIdx];
                 }
                 ALOGV("ON->OFF, lGenSmp: %d, lReqSmp: %d", lGenSmp, lReqSmp);
             }
 
             // check if we need to loop and loop for the reqd times
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) {
-                if (lpToneGen->mLoopCounter < lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) {
+            if (mpToneDesc->segments[mCurSegment].loopCnt) {
+                if (mLoopCounter < mpToneDesc->segments[mCurSegment].loopCnt) {
                     ALOGV ("in if loop loopCnt(%d) loopctr(%d), CurSeg(%d)",
-                          lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
-                          lpToneGen->mLoopCounter,
-                          lpToneGen->mCurSegment);
-                    lpToneGen->mCurSegment = lpToneDesc->segments[lpToneGen->mCurSegment].loopIndx;
-                    ++lpToneGen->mLoopCounter;
+                          mpToneDesc->segments[mCurSegment].loopCnt,
+                          mLoopCounter,
+                          mCurSegment);
+                    mCurSegment = mpToneDesc->segments[mCurSegment].loopIndx;
+                    ++mLoopCounter;
                 } else {
                     // completed loop. go to next segment
-                    lpToneGen->mLoopCounter = 0;
-                    lpToneGen->mCurSegment++;
+                    mLoopCounter = 0;
+                    mCurSegment++;
                     ALOGV ("in else loop loopCnt(%d) loopctr(%d), CurSeg(%d)",
-                          lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
-                          lpToneGen->mLoopCounter,
-                          lpToneGen->mCurSegment);
+                          mpToneDesc->segments[mCurSegment].loopCnt,
+                          mLoopCounter,
+                          mCurSegment);
                 }
             } else {
-                lpToneGen->mCurSegment++;
+                mCurSegment++;
                 ALOGV ("Goto next seg loopCnt(%d) loopctr(%d), CurSeg(%d)",
-                      lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
-                      lpToneGen->mLoopCounter,
-                      lpToneGen->mCurSegment);
+                      mpToneDesc->segments[mCurSegment].loopCnt,
+                      mLoopCounter,
+                      mCurSegment);
 
             }
 
             // Handle loop if last segment reached
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0) {
-                ALOGV("Last Seg: %d", lpToneGen->mCurSegment);
+            if (mpToneDesc->segments[mCurSegment].duration == 0) {
+                ALOGV("Last Seg: %d", mCurSegment);
 
                 // Pre-increment loop count and restart if total count not reached; stop sequence otherwise
-                if (++lpToneGen->mCurCount <= lpToneDesc->repeatCnt) {
-                    ALOGV("Repeating Count: %d", lpToneGen->mCurCount);
+                if (++mCurCount <= mpToneDesc->repeatCnt) {
+                    ALOGV("Repeating Count: %d", mCurCount);
 
-                    lpToneGen->mCurSegment = lpToneDesc->repeatSegment;
-                    if (lpToneDesc->segments[lpToneDesc->repeatSegment].waveFreq[0] != 0) {
+                    mCurSegment = mpToneDesc->repeatSegment;
+                    if (mpToneDesc->segments[mpToneDesc->repeatSegment].waveFreq[0] != 0) {
                         lWaveCmd = WaveGenerator::WAVEGEN_START;
                     }
 
-                    ALOGV("New segment %d, Next Time: %lld", lpToneGen->mCurSegment,
-                            ((long long)(lpToneGen->mNextSegSmp)*1000)/lpToneGen->mSamplingRate);
+                    ALOGV("New segment %d, Next Time: %lld", mCurSegment,
+                            ((long long)(mNextSegSmp)*1000)/mSamplingRate);
 
 
                 } else {
@@ -1459,10 +1469,10 @@
                     ALOGV("End repeat, time: %d", (unsigned int)(systemTime()/1000000));
                 }
             } else {
-                ALOGV("New segment %d, Next Time: %lld", lpToneGen->mCurSegment,
-                        ((long long)(lpToneGen->mNextSegSmp)*1000)/lpToneGen->mSamplingRate);
+                ALOGV("New segment %d, Next Time: %lld", mCurSegment,
+                        ((long long)(mNextSegSmp)*1000)/mSamplingRate);
 
-                if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
+                if (mpToneDesc->segments[mCurSegment].waveFreq[0] != 0) {
                     // If next segment is not silent, OFF -> ON transition: reset wave generator
                     lWaveCmd = WaveGenerator::WAVEGEN_START;
 
@@ -1472,13 +1482,13 @@
                 }
             }
 
-            // Update next segment transition position. No harm to do it also for last segment as lpToneGen->mNextSegSmp won't be used any more
-            lpToneGen->mNextSegSmp
-                    += (lpToneDesc->segments[lpToneGen->mCurSegment].duration * lpToneGen->mSamplingRate) / 1000;
+            // Update next segment transition position. No harm in doing this for the last
+            // segment as well, since mNextSegSmp won't be used any more
+            mNextSegSmp += (mpToneDesc->segments[mCurSegment].duration * mSamplingRate) / 1000;
 
         } else {
             // Inside a segment keep tone ON or OFF
-            if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] == 0) {
+            if (mpToneDesc->segments[mCurSegment].waveFreq[0] == 0) {
                 lGenSmp = 0;  // If odd segment, tone is currently OFF
             } else {
                 lGenSmp = lReqSmp;  // If even segment, tone is currently ON
@@ -1488,12 +1498,12 @@
         if (lGenSmp) {
             // If samples must be generated, call all active wave generators and accumulate waves in lpOut
             unsigned int lFreqIdx = 0;
-            uint16_t lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
+            uint16_t lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[lFreqIdx];
 
             while (lFrequency != 0) {
-                WaveGenerator *lpWaveGen = lpToneGen->mWaveGens.valueFor(lFrequency);
+                WaveGenerator *lpWaveGen = mWaveGens.valueFor(lFrequency);
                 lpWaveGen->getSamples(lpOut, lGenSmp, lWaveCmd);
-                lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[++lFreqIdx];
+                lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[++lFreqIdx];
             }
         }
 
@@ -1501,21 +1511,19 @@
         lpOut += lReqSmp;
 
 audioCallback_EndLoop:
-
-        switch (lpToneGen->mState) {
+        switch (mState) {
         case TONE_RESTARTING:
             ALOGV("Cbk restarting track");
-            if (lpToneGen->prepareWave()) {
-                lpToneGen->mState = TONE_STARTING;
-                if (clock_gettime(CLOCK_MONOTONIC, &lpToneGen->mStartTime) != 0) {
-                    lpToneGen->mStartTime.tv_sec = 0;
+            if (prepareWave()) {
+                mState = TONE_STARTING;
+                if (clock_gettime(CLOCK_MONOTONIC, &mStartTime) != 0) {
+                    mStartTime.tv_sec = 0;
                 }
-                // must reload lpToneDesc as prepareWave() may change mpToneDesc
-                lpToneDesc = lpToneGen->mpToneDesc;
+                // prepareWave() may change mpToneDesc; it is read directly now, so no reload is needed
             } else {
                 ALOGW("Cbk restarting prepareWave() failed");
-                lpToneGen->mState = TONE_IDLE;
-                lpToneGen->mpAudioTrack->stop();
+                mState = TONE_IDLE;
+                mpAudioTrack->stop();
                 // Force loop exit
                 lNumSmp = 0;
             }
@@ -1523,22 +1531,22 @@
             break;
         case TONE_STOPPING:
             ALOGV("Cbk Stopping");
-            lpToneGen->mState = TONE_STOPPED;
+            mState = TONE_STOPPED;
             // Force loop exit
             lNumSmp = 0;
             break;
         case TONE_STOPPED:
-            lpToneGen->mState = TONE_INIT;
+            mState = TONE_INIT;
             ALOGV("Cbk Stopped track");
-            lpToneGen->mpAudioTrack->stop();
+            mpAudioTrack->stop();
             // Force loop exit
             lNumSmp = 0;
-            buffer->size = 0;
+            bytesWritten = 0;
             lSignal = true;
             break;
         case TONE_STARTING:
             ALOGV("Cbk starting track");
-            lpToneGen->mState = TONE_PLAYING;
+            mState = TONE_PLAYING;
             lSignal = true;
             break;
         case TONE_PLAYING:
@@ -1546,14 +1554,15 @@
         default:
             // Force loop exit
             lNumSmp = 0;
-            buffer->size = 0;
+            bytesWritten = 0;
             break;
         }
 
         if (lSignal)
-            lpToneGen->mWaitCbkCond.broadcast();
-        lpToneGen->mLock.unlock();
+            mWaitCbkCond.broadcast();
+        mLock.unlock();
     }
+    return bytesWritten;
 }
 
 
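Note: the ToneGenerator hunks above turn the callback from a static function that dereferences an lpToneGen pointer into code that reads the members (mState, mpToneDesc, mWaveGens, ...) directly and reports its output through a returned bytesWritten value instead of writing buffer->size. A minimal sketch of that callback shape follows; the class, method, and field names are invented for illustration and are not the real AudioTrack callback interface.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <mutex>

// Illustrative stand-in: per-object state is reached through members instead of
// an lpToneGen-style pointer, and the amount of data produced is reported via
// the return value rather than by writing into a buffer descriptor.
class ToneSource {
public:
    // Returns the number of bytes written into 'out'; returning 0 tells the
    // caller to stop pulling data, mirroring how bytesWritten = 0 is used above.
    size_t onMoreData(int16_t* out, size_t maxBytes) {
        std::lock_guard<std::mutex> lock(mLock);
        if (mState == State::STOPPED || mRemainingBytes == 0) {
            mState = State::STOPPED;
            return 0;
        }
        const size_t bytes = std::min(maxBytes, mRemainingBytes);
        // Placeholder synthesis; the real generator mixes WaveGenerator outputs.
        std::fill(out, out + bytes / sizeof(int16_t), int16_t{0});
        mRemainingBytes -= bytes;
        return bytes;
    }

private:
    enum class State { PLAYING, STOPPED };

    std::mutex mLock;
    State mState = State::PLAYING;
    size_t mRemainingBytes = 48000 * sizeof(int16_t);  // one second at 48 kHz mono
};

Returning the byte count keeps the "no more data" signal (a zero return) and the produced size in one value, which is the role bytesWritten plays in the hunks above.
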
diff --git a/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl b/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
index 04a02c7..335866f 100644
--- a/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
@@ -17,7 +17,7 @@
 package android.media;
 
 import android.media.AudioAttributesInternal;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * This is the equivalent of the android::AudioAttributes C++ type.
diff --git a/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl b/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
index 699df0a..2e74206 100644
--- a/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioContentType;
-import android.media.AudioSourceType;
-import android.media.AudioUsage;
+import android.media.audio.common.AudioContentType;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioUsage;
 
 /**
  * The "Internal" suffix of this type name is to disambiguate it from the
@@ -28,7 +28,7 @@
 parcelable AudioAttributesInternal {
     AudioContentType contentType;
     AudioUsage usage;
-    AudioSourceType source;
+    AudioSource source;
     // Bitmask, indexed by AudioFlag.
     int flags;
     @utf8InCpp String tags; /* UTF8 */
diff --git a/media/libaudioclient/aidl/android/media/AudioConfig.aidl b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
deleted file mode 100644
index 8dc97d3..0000000
--- a/media/libaudioclient/aidl/android/media/AudioConfig.aidl
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioOffloadInfo;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioConfig {
-    int sampleRate;
-    /**
-     * Interpreted as audio_channel_mask_t.
-     * TODO(ytai): Create a designated type.
-     */
-    int channelMask;
-    AudioFormat format;
-    AudioOffloadInfo offloadInfo;
-    long frameCount;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
deleted file mode 100644
index 8353c0d..0000000
--- a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioConfigBase {
-    int sampleRate;
-    /** Interpreted as audio_channel_mask_t. */
-    int channelMask;
-    AudioFormat format;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioContentType.aidl b/media/libaudioclient/aidl/android/media/AudioContentType.aidl
deleted file mode 100644
index f734fba..0000000
--- a/media/libaudioclient/aidl/android/media/AudioContentType.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-@Backing(type="int")
-enum AudioContentType {
-    UNKNOWN = 0,
-    SPEECH = 1,
-    MUSIC = 2,
-    MOVIE = 3,
-    SONIFICATION = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioDevice.aidl b/media/libaudioclient/aidl/android/media/AudioDevice.aidl
deleted file mode 100644
index b200697..0000000
--- a/media/libaudioclient/aidl/android/media/AudioDevice.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioDevice {
-    /** Interpreted as audio_devices_t. */
-    int type;
-    @utf8InCpp String address;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStandard.aidl b/media/libaudioclient/aidl/android/media/AudioDirectMode.aidl
similarity index 83%
rename from media/libaudioclient/aidl/android/media/AudioStandard.aidl
rename to media/libaudioclient/aidl/android/media/AudioDirectMode.aidl
index e131d0d..0da4721 100644
--- a/media/libaudioclient/aidl/android/media/AudioStandard.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioDirectMode.aidl
@@ -15,13 +15,10 @@
  */
 package android.media;
 
-/**
- * The audio standard that describe audio playback/capture capabilites.
- *
- * {@hide}
- */
 @Backing(type="int")
-enum AudioStandard {
+enum AudioDirectMode {
     NONE = 0,
-    EDID = 1,
+    OFFLOAD = 1,
+    OFFLOAD_GAPLESS = 2,
+    BITSTREAM = 4,
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl b/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl
deleted file mode 100644
index b03adfe..0000000
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationMetadataType {
-    NONE = 0,
-    FRAMEWORK_TUNER = 1,
-    DVB_AD_DESCRIPTOR = 2,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl b/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
deleted file mode 100644
index 9e04e82..0000000
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationMode {
-     NONE = 0,
-     ELEMENTARY_STREAM = 1,
-     HANDLE = 2,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioFlag.aidl b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
index 91361fb..acf4e6d 100644
--- a/media/libaudioclient/aidl/android/media/AudioFlag.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
@@ -36,4 +36,5 @@
     CAPTURE_PRIVATE = 13,
     CONTENT_SPATIALIZED = 14,
     NEVER_SPATIALIZE = 15,
+    CALL_REDIRECTION = 16,
 }
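
Note: AudioAttributesInternal.flags is described above as "Bitmask, indexed by AudioFlag", so the new CALL_REDIRECTION = 16 entry occupies bit 16 of that field. A quick sketch of the indexing convention; the enum values are copied from this change, while the helper functions are hypothetical:

#include <cstdint>

// Subset of AudioFlag values copied from the diff above.
enum class AudioFlag : int32_t {
    CAPTURE_PRIVATE = 13,
    CONTENT_SPATIALIZED = 14,
    NEVER_SPATIALIZE = 15,
    CALL_REDIRECTION = 16,
};

// Hypothetical helpers illustrating the "bitmask indexed by AudioFlag" convention.
constexpr int32_t maskOf(AudioFlag f) { return 1 << static_cast<int32_t>(f); }
constexpr bool hasFlag(int32_t flags, AudioFlag f) { return (flags & maskOf(f)) != 0; }

static_assert(maskOf(AudioFlag::CALL_REDIRECTION) == 0x10000,
              "CALL_REDIRECTION occupies bit 16 of the flags field");
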
diff --git a/media/libaudioclient/aidl/android/media/AudioGain.aidl b/media/libaudioclient/aidl/android/media/AudioGain.aidl
deleted file mode 100644
index 048b295..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGain.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioGain {
-    int index;
-    boolean useInChannelMask;
-    boolean useForVolume;
-    /** Bitmask, indexed by AudioGainMode. */
-    int mode;
-    /** Interpreted as audio_channel_mask_t. */
-    int channelMask;
-    int minValue;
-    int maxValue;
-    int defaultValue;
-    int stepValue;
-    int minRampMs;
-    int maxRampMs;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
deleted file mode 100644
index b93c2dc..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioGainConfig {
-    /** Index of the corresponding audio_gain in the audio_port gains[] table. */
-    int index;
-
-    /** Mode requested for this command. Bitfield indexed by AudioGainMode. */
-    int mode;
-
-    /**
-     * Channels which gain value follows. N/A in joint mode.
-     * Interpreted as audio_channel_mask_t.
-     */
-    int channelMask;
-
-    /**
-     * Gain values in millibels.
-     * For each channel ordered from LSb to MSb in channel mask. The number of values is 1 in joint
-     * mode, otherwise equals the number of bits implied by channelMask.
-     */
-    int[]  values;
-
-    /** Ramp duration in ms. */
-    int rampDurationMs;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl b/media/libaudioclient/aidl/android/media/AudioGainSys.aidl
similarity index 81%
rename from media/libaudioclient/aidl/android/media/AudioGainMode.aidl
rename to media/libaudioclient/aidl/android/media/AudioGainSys.aidl
index e1b9f0b..426f4ed 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGainSys.aidl
@@ -13,14 +13,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
+ * Provides additional runtime information for AudioGain, used by the framework.
+ *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioGainMode {
-    JOINT    = 0,
-    CHANNELS = 1,
-    RAMP     = 2,
+parcelable AudioGainSys {
+    int index;
+    boolean isInput;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
deleted file mode 100644
index bfc0eb0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioInputFlags {
-    FAST       = 0,
-    HW_HOTWORD = 1,
-    RAW        = 2,
-    SYNC       = 3,
-    MMAP_NOIRQ = 4,
-    VOIP_TX    = 5,
-    HW_AV_SYNC = 6,
-    DIRECT     = 7,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
index 876ef9b..b01f902 100644
--- a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
@@ -17,7 +17,8 @@
 package android.media;
 
 import android.media.AudioPatch;
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
 
 /**
  * {@hide}
@@ -26,10 +27,10 @@
     /** Interpreted as audio_io_handle_t. */
     int ioHandle;
     AudioPatch patch;
+    boolean isInput;
     int samplingRate;
-    AudioFormat format;
-    /** Interpreted as audio_channel_mask_t. */
-    int channelMask;
+    AudioFormatDescription format;
+    AudioChannelLayout channelMask;
     long frameCount;
     long frameCountHAL;
     /** Only valid for output. */
diff --git a/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl b/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl
deleted file mode 100644
index f9b25bf..0000000
--- a/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-union AudioIoFlags {
-    /** Bitmask indexed by AudioInputFlags. */
-    int input;
-    /** Bitmask indexed by AudioOutputFlags. */
-    int output;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioMix.aidl b/media/libaudioclient/aidl/android/media/AudioMix.aidl
index 7473372..88b0450 100644
--- a/media/libaudioclient/aidl/android/media/AudioMix.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMix.aidl
@@ -16,12 +16,12 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
 import android.media.AudioMixCallbackFlag;
 import android.media.AudioMixMatchCriterion;
 import android.media.AudioMixRouteFlag;
 import android.media.AudioMixType;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl b/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
deleted file mode 100644
index d70b364..0000000
--- a/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioMixLatencyClass {
-    LOW = 0,
-    NORMAL = 1,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
index e26a9e1..921a93a 100644
--- a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
@@ -16,15 +16,15 @@
 
 package android.media;
 
-import android.media.AudioSourceType;
-import android.media.AudioUsage;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioUsage;
 
 /**
  * {@hide}
  */
 union AudioMixMatchCriterionValue {
     AudioUsage usage = AudioUsage.UNKNOWN;
-    AudioSourceType source;
+    AudioSource source;
     /** Interpreted as uid_t. */
     int uid;
     int userId;
diff --git a/media/libaudioclient/aidl/android/media/AudioMode.aidl b/media/libaudioclient/aidl/android/media/AudioMode.aidl
deleted file mode 100644
index 7067dd3..0000000
--- a/media/libaudioclient/aidl/android/media/AudioMode.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioMode {
-    INVALID = -2,
-    CURRENT = -1,
-    NORMAL = 0,
-    RINGTONE = 1,
-    IN_CALL = 2,
-    IN_COMMUNICATION = 3,
-    CALL_SCREEN = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl b/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
deleted file mode 100644
index c86b3f0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioConfigBase;
-import android.media.AudioEncapsulationMode;
-import android.media.AudioStreamType;
-import android.media.AudioUsage;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioOffloadInfo {
-    /** Version of the info structure. Interpreted as a uint16_t version constant. */
-    int version;
-    /** Audio configuration. */
-    AudioConfigBase config;
-    /** Stream type. */
-    AudioStreamType streamType;
-    /** Bit rate in bits per second. */
-    int bitRate;
-    /** Duration in microseconds, -1 if unknown. */
-    long durationUs;
-    /** true if stream is tied to a video stream. */
-    boolean hasVideo;
-    /** true if streaming, false if local playback. */
-    boolean isStreaming;
-    int bitWidth;
-    /** Offload fragment size. */
-    int offloadBufferSize;
-    AudioUsage usage;
-    AudioEncapsulationMode encapsulationMode;
-    /** Content id from tuner HAL (0 if none). */
-    int contentId;
-    /** Sync id from tuner HAL (0 if none). */
-    int syncId;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
deleted file mode 100644
index cebd8f0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioOutputFlags {
-    DIRECT           = 0,
-    PRIMARY          = 1,
-    FAST             = 2,
-    DEEP_BUFFER      = 3,
-    COMPRESS_OFFLOAD = 4,
-    NON_BLOCKING     = 5,
-    HW_AV_SYNC       = 6,
-    TTS              = 7,
-    RAW              = 8,
-    SYNC             = 9,
-    IEC958_NONAUDIO  = 10,
-    DIRECT_PCM       = 11,
-    MMAP_NOIRQ       = 12,
-    VOIP_RX          = 13,
-    INCALL_MUSIC     = 14,
-    GAPLESS_OFFLOAD  = 15,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPort.aidl b/media/libaudioclient/aidl/android/media/AudioPort.aidl
index bf0e5b7..ff177c0 100644
--- a/media/libaudioclient/aidl/android/media/AudioPort.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPort.aidl
@@ -16,35 +16,13 @@
 
 package android.media;
 
-import android.media.AudioGain;
-import android.media.AudioPortConfig;
-import android.media.AudioPortExt;
-import android.media.AudioPortRole;
-import android.media.AudioPortType;
-import android.media.AudioProfile;
-import android.media.ExtraAudioDescriptor;
+import android.media.AudioPortSys;
+import android.media.audio.common.AudioPort;
 
 /**
  * {@hide}
  */
 parcelable AudioPort {
-    /** Port unique ID. Interpreted as audio_port_handle_t. */
-    int id;
-    /** Sink or source. */
-    AudioPortRole role;
-    /** Device, mix ... */
-    AudioPortType type;
-    @utf8InCpp String name;
-    /** AudioProfiles supported by this port (format, Rates, Channels). */
-    AudioProfile[] profiles;
-    /**
-     * ExtraAudioDescriptors supported by this port. The format is not unrecognized to the
-     * platform. The audio capability is described by a hardware descriptor.
-     */
-    ExtraAudioDescriptor[] extraAudioDescriptors;
-    /** Gain controllers. */
-    AudioGain[] gains;
-    /** Current audio port configuration. */
-    AudioPortConfig activeConfig;
-    AudioPortExt ext;
+    AudioPort hal;
+    AudioPortSys sys;
 }
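
Note: the rewritten android.media.AudioPort is now just the pair of the stable HAL-facing android.media.audio.common.AudioPort and the framework-only AudioPortSys introduced later in this change. A rough C++ analogy of that hal/sys composition; every type and field name here is invented for illustration:

#include <cstdint>
#include <string>

// One struct mirrors what crosses the HAL boundary, the other keeps
// framework-only bookkeeping; the framework parcelable is the pair of them.
struct HalPortInfo {            // stands in for android.media.audio.common.AudioPort
    int32_t id = 0;
    std::string name;
};

struct SysPortInfo {            // stands in for android.media.AudioPortSys
    bool isSink = false;        // role-like, framework-only detail
    int32_t activeConfigId = 0;
};

struct FrameworkPort {          // stands in for the rewritten android.media.AudioPort
    HalPortInfo hal;
    SysPortInfo sys;
};

// Only the 'hal' half would be handed across the HAL boundary; 'sys' stays in
// the framework.
inline HalPortInfo toHal(const FrameworkPort& port) { return port.hal; }
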
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
index 2dd30a4..3a4ca31 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
@@ -16,44 +16,13 @@
 
 package android.media;
 
-import android.media.AudioGainConfig;
-import android.media.AudioIoFlags;
-import android.media.AudioPortConfigExt;
-import android.media.AudioPortConfigType;
-import android.media.AudioPortRole;
-import android.media.AudioPortType;
-import android.media.audio.common.AudioFormat;
+import android.media.AudioPortConfigSys;
+import android.media.audio.common.AudioPortConfig;
 
 /**
  * {@hide}
  */
 parcelable AudioPortConfig {
-    /**
-     * Port unique ID.
-     * Interpreted as audio_port_handle_t.
-     */
-    int id;
-    /** Sink or source. */
-    AudioPortRole role;
-    /** Device, mix ... */
-    AudioPortType type;
-    /** Bitmask, indexed by AudioPortConfigType. */
-    int configMask;
-    /** Sampling rate in Hz. */
-    int sampleRate;
-    /**
-     * Channel mask, if applicable.
-     * Interpreted as audio_channel_mask_t.
-     * TODO: bitmask?
-     */
-    int channelMask;
-    /**
-     * Format, if applicable.
-     */
-    AudioFormat format;
-    /** Gain to apply, if applicable. */
-    AudioGainConfig gain;
-    /** Framework only: HW_AV_SYNC, DIRECT, ... */
-    AudioIoFlags flags;
-    AudioPortConfigExt ext;
+    AudioPortConfig hal;
+    AudioPortConfigSys sys;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
deleted file mode 100644
index a99aa9b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigDeviceExt {
-    /**
-     * Module the device is attached to.
-     * Interpreted as audio_module_handle_t.
-     */
-    int hwModule;
-    /**
-     * Device type (e.g AUDIO_DEVICE_OUT_SPEAKER).
-     * Interpreted as audio_devices_t.
-     * TODO: Convert to a standalone AIDL representation.
-     */
-    int type;
-    /** Device address. "" if N/A. */
-    @utf8InCpp String address;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
deleted file mode 100644
index 5d635b6..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortConfigDeviceExt;
-import android.media.AudioPortConfigMixExt;
-import android.media.AudioPortConfigSessionExt;
-
-/**
- * {@hide}
- */
-union AudioPortConfigExt {
-    /**
-     * This represents an empty union. Value is ignored.
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** Device specific info. */
-    AudioPortConfigDeviceExt device;
-    /** Mix specific info. */
-    AudioPortConfigMixExt mix;
-    /** Session specific info. */
-    AudioPortConfigSessionExt session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl
deleted file mode 100644
index d3226f2..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortConfigMixExtUseCase;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigMixExt {
-    /**
-     * Module the stream is attached to.
-     * Interpreted as audio_module_handle_t.
-     */
-    int hwModule;
-    /**
-     * I/O handle of the input/output stream.
-     * Interpreted as audio_io_handle_t.
-     */
-    int handle;
-    AudioPortConfigMixExtUseCase usecase;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
deleted file mode 100644
index c61f044..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioSourceType;
-import android.media.AudioStreamType;
-
-/**
- * {@hide}
- */
-union AudioPortConfigMixExtUseCase {
-    /**
-     * This to be set if the containing config has the AudioPortRole::NONE role.
-     * This represents an empty value (value is ignored).
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** This to be set if the containing config has the AudioPortRole::SOURCE role. */
-    AudioStreamType stream;
-    /** This to be set if the containing config has the AudioPortRole::SINK role. */
-    AudioSourceType source;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl
deleted file mode 100644
index a2cbf62..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigSessionExt {
-    int session;
-}
diff --git a/media/libaudioclient/aidl/android/media/Int.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
similarity index 71%
rename from media/libaudioclient/aidl/android/media/Int.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
index 24f4d62..8692848 100644
--- a/media/libaudioclient/aidl/android/media/Int.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
@@ -16,12 +16,17 @@
 
 package android.media;
 
+import android.media.AudioPortExtSys;
+import android.media.AudioPortRole;
+import android.media.AudioPortType;
+
 /**
- * This is a simple wrapper around an 'int', putting it in a parcelable, so it can be used as an
- * inout parameter, be made @nullable, etc.
- *
  * {@hide}
  */
-parcelable Int {
-    int value;
+parcelable AudioPortConfigSys {
+    /** Sink or source. */
+    AudioPortRole role;
+    /** Device, mix ... */
+    AudioPortType type;
+    AudioPortExtSys ext;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl
deleted file mode 100644
index 6e22b8d..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioPortConfigType {
-    SAMPLE_RATE  = 0,
-    CHANNEL_MASK = 1,
-    FORMAT       = 2,
-    GAIN         = 3,
-    FLAGS        = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
similarity index 85%
rename from media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
index b758f23..0f5a9b6 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,15 +16,12 @@
 
 package android.media;
 
-import android.media.AudioDevice;
-
 /**
  * {@hide}
  */
-parcelable AudioPortDeviceExt {
+parcelable AudioPortDeviceExtSys {
     /** Module the device is attached to. Interpreted as audio_module_handle_t. */
     int hwModule;
-    AudioDevice device;
     /** Bitmask, indexed by AudioEncapsulationMode. */
     int encapsulationModes;
     /** Bitmask, indexed by AudioEncapsulationMetadataType. */
diff --git a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
deleted file mode 100644
index 453784b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortDeviceExt;
-import android.media.AudioPortMixExt;
-import android.media.AudioPortSessionExt;
-
-/**
- * {@hide}
- */
-union AudioPortExt {
-    /**
-     * This represents an empty union. Value is ignored.
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** Device specific info. */
-    AudioPortDeviceExt device;
-    /** Mix specific info. */
-    AudioPortMixExt mix;
-    /** Session specific info. */
-    AudioPortSessionExt session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
similarity index 62%
rename from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
index b08a604..2cdf4f6 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
@@ -16,14 +16,19 @@
 
 package android.media;
 
+import android.media.AudioPortDeviceExtSys;
+import android.media.AudioPortMixExtSys;
+
 /**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioEncapsulationType {
-    NONE     = 0,
-    IEC61937 = 1,
-}
\ No newline at end of file
+union AudioPortExtSys {
+    /**
+     * This represents an empty union. Value is ignored.
+     */
+    boolean unspecified;
+    /** System-only parameters when the port is an audio device. */
+    AudioPortDeviceExtSys device;
+    /** System-only parameters when the port is an audio mix. */
+    AudioPortMixExtSys mix;
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
deleted file mode 100644
index 62cdb8e..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioMixLatencyClass;
-
-/**
- * {@hide}
- */
-parcelable AudioPortMixExt {
-    /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
-    int hwModule;
-    /** I/O handle of the input/output stream. Interpreted as audio_io_handle_t. */
-    int handle;
-    /** Latency class */
-    AudioMixLatencyClass latencyClass;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStandard.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
similarity index 81%
copy from media/libaudioclient/aidl/android/media/AudioStandard.aidl
copy to media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
index e131d0d..5999885 100644
--- a/media/libaudioclient/aidl/android/media/AudioStandard.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
@@ -13,15 +13,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
- * The audio standard that describe audio playback/capture capabilites.
- *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioStandard {
-    NONE = 0,
-    EDID = 1,
+parcelable AudioPortMixExtSys {
+    /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
+    int hwModule;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
deleted file mode 100644
index dbca168..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortSessionExt {
-    /** Audio session. Interpreted as audio_session_t. */
-    int session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
new file mode 100644
index 0000000..f3b5c19
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioGainSys;
+import android.media.AudioPortConfig;
+import android.media.AudioPortExtSys;
+import android.media.AudioPortRole;
+import android.media.AudioPortType;
+import android.media.AudioProfileSys;
+
+/**
+ * {@hide}
+ */
+parcelable AudioPortSys {
+    /** Sink or source. */
+    AudioPortRole role;
+    /** Device, mix ... */
+    AudioPortType type;
+    /** System-only parameters for each AudioProfile from 'port.profiles'. */
+    AudioProfileSys[] profiles;
+    /** System-only parameters for each AudioGain from 'port.gains'. */
+    AudioGainSys[] gains;
+    /** Current audio port configuration. */
+    AudioPortConfig activeConfig;
+    /** System-only extra parameters for 'port.ext'. */
+    AudioPortExtSys ext;
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioProfile.aidl b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
deleted file mode 100644
index afb288f..0000000
--- a/media/libaudioclient/aidl/android/media/AudioProfile.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioEncapsulationType;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioProfile {
-    @utf8InCpp String name;
-    /** The format for an audio profile should only be set when initialized. */
-    AudioFormat format;
-    /** Interpreted as audio_channel_mask_t. */
-    int[] channelMasks;
-    int[] samplingRates;
-    boolean isDynamicFormat;
-    boolean isDynamicChannels;
-    boolean isDynamicRate;
-    AudioEncapsulationType encapsulationType;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl b/media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
similarity index 68%
copy from media/libaudioclient/aidl/android/media/AudioGainMode.aidl
copy to media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
index e1b9f0b..329c9d5 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
@@ -13,14 +13,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
+ * Indicates whether the parameters of the AudioProfiles in the
+ * AudioPort are dynamic. Each instance of AudioProfileSys corresponds
+ * to an instance of AudioProfile.
+ *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioGainMode {
-    JOINT    = 0,
-    CHANNELS = 1,
-    RAMP     = 2,
+parcelable AudioProfileSys {
+    boolean isDynamicFormat;
+    boolean isDynamicChannels;
+    boolean isDynamicRate;
 }
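
Note: as the comment above states, each AudioProfileSys corresponds positionally to an AudioProfile in the HAL half of the port, so consumers have to keep the two arrays index-aligned. A small illustrative check of that pairing, using invented stand-in types:

#include <cstddef>
#include <cstdint>
#include <vector>

// Invented stand-ins for the generated AIDL types; only the fields needed for
// the example are shown.
struct HalProfile { int32_t format = 0; };            // analogous to audio.common AudioProfile
struct SysProfile { bool isDynamicFormat = false; };  // analogous to AudioProfileSys

// The two vectors are kept index-aligned: sys[i] annotates hal[i]. A size
// mismatch indicates a malformed AudioPort/AudioPortSys pair.
inline int countDynamicFormats(const std::vector<HalProfile>& hal,
                               const std::vector<SysProfile>& sys) {
    if (hal.size() != sys.size()) return -1;
    int dynamic = 0;
    for (size_t i = 0; i < hal.size(); ++i) {
        if (sys[i].isDynamicFormat) ++dynamic;
    }
    return dynamic;
}
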
diff --git a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl b/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
deleted file mode 100644
index 8673b92..0000000
--- a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioSourceType {
-    INVALID = -1,
-    DEFAULT = 0,
-    MIC = 1,
-    VOICE_UPLINK = 2,
-    VOICE_DOWNLINK = 3,
-    VOICE_CALL = 4,
-    CAMCORDER = 5,
-    VOICE_RECOGNITION = 6,
-    VOICE_COMMUNICATION = 7,
-    REMOTE_SUBMIX = 8,
-    UNPROCESSED = 9,
-    VOICE_PERFORMANCE = 10,
-    ECHO_REFERENCE = 1997,
-    FM_TUNER = 1998,
-    /**
-     * A low-priority, preemptible audio source for for background software
-     * hotword detection. Same tuning as VOICE_RECOGNITION.
-     * Used only internally by the framework.
-     */
-    HOTWORD = 1999,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStreamType.aidl b/media/libaudioclient/aidl/android/media/AudioStreamType.aidl
deleted file mode 100644
index d777882..0000000
--- a/media/libaudioclient/aidl/android/media/AudioStreamType.aidl
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioStreamType {
-    DEFAULT = -1,
-    VOICE_CALL = 0,
-    SYSTEM = 1,
-    RING = 2,
-    MUSIC = 3,
-    ALARM = 4,
-    NOTIFICATION = 5,
-    BLUETOOTH_SCO = 6,
-    ENFORCED_AUDIBLE = 7,
-    DTMF = 8,
-    TTS = 9,
-    ACCESSIBILITY = 10,
-    ASSISTANT = 11,
-    /** For dynamic policy output mixes. Only used by the audio policy */
-    REROUTING = 12,
-    /** For audio flinger tracks volume. Only used by the audioflinger */
-    PATCH = 13,
-    /** stream for corresponding to AUDIO_USAGE_CALL_ASSISTANT */
-    CALL_ASSISTANT = 14,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioUsage.aidl b/media/libaudioclient/aidl/android/media/AudioUsage.aidl
deleted file mode 100644
index 66c5c30..0000000
--- a/media/libaudioclient/aidl/android/media/AudioUsage.aidl
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioUsage {
-    UNKNOWN = 0,
-    MEDIA = 1,
-    VOICE_COMMUNICATION = 2,
-    VOICE_COMMUNICATION_SIGNALLING = 3,
-    ALARM = 4,
-    NOTIFICATION = 5,
-    NOTIFICATION_TELEPHONY_RINGTONE = 6,
-    NOTIFICATION_COMMUNICATION_REQUEST = 7,
-    NOTIFICATION_COMMUNICATION_INSTANT = 8,
-    NOTIFICATION_COMMUNICATION_DELAYED = 9,
-    NOTIFICATION_EVENT = 10,
-    ASSISTANCE_ACCESSIBILITY = 11,
-    ASSISTANCE_NAVIGATION_GUIDANCE = 12,
-    ASSISTANCE_SONIFICATION = 13,
-    GAME = 14,
-    VIRTUAL_SOURCE = 15,
-    ASSISTANT = 16,
-    CALL_ASSISTANT = 17,
-    EMERGENCY = 1000,
-    SAFETY = 1001,
-    VEHICLE_STATUS = 1002,
-    ANNOUNCEMENT = 1003,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioUuid.aidl b/media/libaudioclient/aidl/android/media/AudioUuid.aidl
deleted file mode 100644
index bba9039..0000000
--- a/media/libaudioclient/aidl/android/media/AudioUuid.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioUuid {
-    int timeLow;
-    int timeMid;
-    int timeHiAndVersion;
-    int clockSeq;
-    byte[] node;  // Length = 6
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl b/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
index 3a29a08..b95a1d3 100644
--- a/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
@@ -17,7 +17,7 @@
 package android.media;
 
 import android.media.AudioAttributesInternal;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
index 35a56eb..bcca04a 100644
--- a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
@@ -16,10 +16,10 @@
 
 package android.media;
 
-import android.media.AudioDevice;
+import android.content.AttributionSourceState;
 import android.media.EffectDescriptor;
 import android.media.IEffectClient;
-import android.content.AttributionSourceState;
+import android.media.audio.common.AudioDevice;
 
 /**
  * Input arguments of the createEffect() method.
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
index 7e3c240..b938a3e 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
@@ -18,7 +18,7 @@
 
 import android.media.AudioAttributesInternal;
 import android.media.AudioClient;
-import android.media.AudioConfigBase;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * CreateRecordRequest contains all input arguments sent by AudioRecord to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl b/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
index d78b3fc..7d159d0 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
@@ -18,6 +18,7 @@
 
 import android.media.IAudioRecord;
 import android.media.SharedFileRegion;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * CreateRecordResponse contains all output arguments returned by AudioFlinger to AudioRecord
@@ -43,4 +44,5 @@
     int portId;
     /** The newly created record. */
     @nullable IAudioRecord audioRecord;
+    AudioConfigBase serverConfig;
 }
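
With the serverConfig field added above, AudioFlinger reports back the stream configuration it actually selected for the record. A minimal sketch of a client reading it (hypothetical helper; it assumes the generated CreateRecordResponse header and that the common AudioConfigBase exposes a sampleRate field):

    #include <cstdint>

    #include <android/media/CreateRecordResponse.h>

    // Reads the server-selected sample rate out of the newly added field.
    static int32_t serverSampleRate(const android::media::CreateRecordResponse& response) {
        return response.serverConfig.sampleRate;
    }
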
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl b/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
index 014b3ca..212221e 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
@@ -18,9 +18,9 @@
 
 import android.media.AudioAttributesInternal;
 import android.media.AudioClient;
-import android.media.AudioConfig;
 import android.media.IAudioTrackCallback;
 import android.media.SharedFileRegion;
+import android.media.audio.common.AudioConfig;
 
 /**
  * CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
index 40473fa..da6f454 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 import android.media.IAudioTrack;
 
 /**
diff --git a/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl b/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
index 35a3d74..e5b5158 100644
--- a/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioUuid;
+import android.media.audio.common.AudioUuid;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl b/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl
deleted file mode 100644
index ec5b67a..0000000
--- a/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioEncapsulationType;
-import android.media.AudioStandard;
-
-/**
- * The audio descriptor that descibes playback/capture capabilities according to
- * a particular standard.
- *
- * {@hide}
- */
-parcelable ExtraAudioDescriptor {
-    AudioStandard standard;
-    byte[] audioDescriptor;
-    AudioEncapsulationType encapsulationType;
-}
diff --git a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
index 164fb9d..963877a 100644
--- a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index 7ffcc33..c55c66e 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -16,13 +16,10 @@
 
 package android.media;
 
-import android.media.AudioMode;
 import android.media.AudioPatch;
 import android.media.AudioPort;
 import android.media.AudioPortConfig;
-import android.media.AudioStreamType;
 import android.media.AudioUniqueIdUse;
-import android.media.AudioUuid;
 import android.media.AudioVibratorInfo;
 import android.media.CreateEffectRequest;
 import android.media.CreateEffectResponse;
@@ -41,7 +38,13 @@
 import android.media.MicrophoneInfoData;
 import android.media.RenderPosition;
 import android.media.TrackSecondaryOutputInfo;
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioMMapPolicyInfo;
+import android.media.audio.common.AudioMMapPolicyType;
+import android.media.audio.common.AudioMode;
+import android.media.audio.common.AudioStreamType;
+import android.media.audio.common.AudioUuid;
 
 /**
  * {@hide}
@@ -62,7 +65,7 @@
      */
     int sampleRate(int /* audio_io_handle_t */ ioHandle);
 
-    AudioFormat format(int /* audio_io_handle_t */ output);
+    AudioFormatDescription format(int /* audio_io_handle_t */ output);
 
     long frameCount(int /* audio_io_handle_t */ ioHandle);
 
@@ -115,8 +118,8 @@
     // Retrieve the audio recording buffer size in bytes.
     // FIXME This API assumes a route, and so should be deprecated.
     long getInputBufferSize(int sampleRate,
-                            AudioFormat format,
-                            int /* audio_channel_mask_t */ channelMask);
+                            in AudioFormatDescription format,
+                            in AudioChannelLayout channelMask);
 
     OpenOutputResponse openOutput(in OpenOutputRequest request);
     int /* audio_io_handle_t */ openDuplicateOutput(int /* audio_io_handle_t */ output1,
@@ -216,4 +219,10 @@
     // This usually happens when there is a dynamic policy registered.
     void updateSecondaryOutputs(
             in TrackSecondaryOutputInfo[] trackSecondaryOutputInfos);
+
+    AudioMMapPolicyInfo[] getMmapPolicyInfos(AudioMMapPolicyType policyType);
+
+    int getAAudioMixerBurstCount();
+
+    int getAAudioHardwareBurstMinUsec();
 }
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 6140a64..7895ae3 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -18,16 +18,10 @@
 
 import android.content.AttributionSourceState;
 
-import android.media.audio.common.AudioFormat;
-
 import android.media.AudioAttributesEx;
 import android.media.AudioAttributesInternal;
-import android.media.AudioConfig;
-import android.media.AudioConfigBase;
-import android.media.AudioDevice;
+import android.media.AudioDirectMode;
 import android.media.AudioMix;
-import android.media.AudioMode;
-import android.media.AudioOffloadInfo;
 import android.media.AudioOffloadMode;
 import android.media.AudioPatch;
 import android.media.AudioPolicyDeviceState;
@@ -38,10 +32,6 @@
 import android.media.AudioPortRole;
 import android.media.AudioPortType;
 import android.media.AudioProductStrategy;
-import android.media.AudioSourceType;
-import android.media.AudioStreamType;
-import android.media.AudioUsage;
-import android.media.AudioUuid;
 import android.media.AudioVolumeGroup;
 import android.media.DeviceRole;
 import android.media.EffectDescriptor;
@@ -51,8 +41,19 @@
 import android.media.IAudioPolicyServiceClient;
 import android.media.ICaptureStateListener;
 import android.media.INativeSpatializerCallback;
-import android.media.Int;
 import android.media.SoundTriggerSession;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioConfigBase;
+import android.media.audio.common.AudioDevice;
+import android.media.audio.common.AudioDeviceDescription;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioMode;
+import android.media.audio.common.AudioOffloadInfo;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioStreamType;
+import android.media.audio.common.AudioUsage;
+import android.media.audio.common.AudioUuid;
+import android.media.audio.common.Int;
 
 /**
  * IAudioPolicyService interface (see AudioPolicyInterface for method descriptions).
@@ -65,13 +66,13 @@
     void setDeviceConnectionState(in AudioDevice device,
                                   in AudioPolicyDeviceState state,
                                   @utf8InCpp String deviceName,
-                                  in AudioFormat encodedFormat);
+                                  in AudioFormatDescription encodedFormat);
 
     AudioPolicyDeviceState getDeviceConnectionState(in AudioDevice device);
 
     void handleDeviceConfigChange(in AudioDevice device,
                                   @utf8InCpp String deviceName,
-                                  in AudioFormat encodedFormat);
+                                  in AudioFormatDescription encodedFormat);
 
     void setPhoneState(AudioMode state, int /* uid_t */ uid);
 
@@ -116,18 +117,18 @@
                           int indexMax);
 
     void setStreamVolumeIndex(AudioStreamType stream,
-                              int /* audio_devices_t */ device,
+                              in AudioDeviceDescription device,
                               int index);
 
     int getStreamVolumeIndex(AudioStreamType stream,
-                             int /* audio_devices_t */ device);
+                             in AudioDeviceDescription device);
 
     void setVolumeIndexForAttributes(in AudioAttributesInternal attr,
-                                     int /* audio_devices_t */ device,
+                                     in AudioDeviceDescription device,
                                      int index);
 
     int getVolumeIndexForAttributes(in AudioAttributesInternal attr,
-                                    int /* audio_devices_t */ device);
+                                    in AudioDeviceDescription device);
 
     int getMaxVolumeIndexForAttributes(in AudioAttributesInternal attr);
 
@@ -135,7 +136,7 @@
 
     int /* product_strategy_t */ getStrategyForStream(AudioStreamType stream);
 
-    int /* bitmask of audio_devices_t */ getDevicesForStream(AudioStreamType stream);
+    AudioDeviceDescription[] getDevicesForStream(AudioStreamType stream);
 
     AudioDevice[] getDevicesForAttributes(in AudioAttributesEx attr);
 
@@ -157,7 +158,7 @@
 
     boolean isStreamActiveRemotely(AudioStreamType stream, int inPastMs);
 
-    boolean isSourceActive(AudioSourceType source);
+    boolean isSourceActive(AudioSource source);
 
     /**
      * On input, count represents the maximum length of the returned array.
@@ -172,7 +173,7 @@
                                                        @utf8InCpp String opPackageName,
                                                        in AudioUuid uuid,
                                                        int priority,
-                                                       AudioSourceType source);
+                                                       AudioSource source);
 
     int /* audio_unique_id_t */ addStreamDefaultEffect(in AudioUuid type,
                                                        @utf8InCpp String opPackageName,
@@ -270,7 +271,7 @@
 
     boolean getMasterMono();
 
-    float getStreamVolumeDB(AudioStreamType stream, int index, int /* audio_devices_t */ device);
+    float getStreamVolumeDB(AudioStreamType stream, int index, in AudioDeviceDescription device);
 
     /**
      * Populates supported surround formats and their enabled state in formats and formatsEnabled.
@@ -281,7 +282,7 @@
      * number of elements without actually retrieving them.
      */
     void getSurroundFormats(inout Int count,
-                            out AudioFormat[] formats,
+                            out AudioFormatDescription[] formats,
                             out boolean[] formatsEnabled);
 
     /**
@@ -293,11 +294,12 @@
      * number of elements without actually retrieving them.
      */
     void getReportedSurroundFormats(inout Int count,
-                                    out AudioFormat[] formats);
+                                    out AudioFormatDescription[] formats);
 
-    AudioFormat[] getHwOffloadFormatsSupportedForBluetoothMedia(int /* audio_devices_t */ device);
+    AudioFormatDescription[] getHwOffloadFormatsSupportedForBluetoothMedia(
+                                    in AudioDeviceDescription device);
 
-    void setSurroundFormatEnabled(AudioFormat audioFormat, boolean enabled);
+    void setSurroundFormatEnabled(in AudioFormatDescription audioFormat, boolean enabled);
 
     void setAssistantUid(int /* uid_t */ uid);
 
@@ -331,22 +333,22 @@
     AudioDevice[] getDevicesForRoleAndStrategy(int /* product_strategy_t */ strategy,
                                                DeviceRole role);
 
-    void setDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void setDevicesRoleForCapturePreset(AudioSource audioSource,
                                         DeviceRole role,
                                         in AudioDevice[] devices);
 
-    void addDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void addDevicesRoleForCapturePreset(AudioSource audioSource,
                                         DeviceRole role,
                                         in AudioDevice[] devices);
 
-    void removeDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void removeDevicesRoleForCapturePreset(AudioSource audioSource,
                                            DeviceRole role,
                                            in AudioDevice[] devices);
 
-    void clearDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void clearDevicesRoleForCapturePreset(AudioSource audioSource,
                                           DeviceRole role);
 
-    AudioDevice[] getDevicesForRoleAndCapturePreset(AudioSourceType audioSource,
+    AudioDevice[] getDevicesForRoleAndCapturePreset(AudioSource audioSource,
                                                     DeviceRole role);
 
     boolean registerSoundTriggerCaptureStateListener(ICaptureStateListener listener);
@@ -375,4 +377,10 @@
     boolean canBeSpatialized(in @nullable AudioAttributesInternal attr,
                              in @nullable AudioConfig config,
                              in AudioDevice[] devices);
+
+    /**
+     * Query which direct playback modes are currently supported on the device.
+     */
+     AudioDirectMode getDirectPlaybackSupport(in AudioAttributesInternal attr,
+                                              in AudioConfig config);
 }
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
index a7782b8..d93a59d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
@@ -16,10 +16,10 @@
 
 package android.media;
 
-import android.media.AudioConfigBase;
-import android.media.AudioSourceType;
 import android.media.EffectDescriptor;
 import android.media.RecordClientInfo;
+import android.media.audio.common.AudioConfigBase;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -43,7 +43,7 @@
                                         in AudioConfigBase deviceConfig,
                                         in EffectDescriptor[] effects,
                                         int /* audio_patch_handle_t */ patchHandle,
-                                        AudioSourceType source);
+                                        AudioSource source);
      /** Notifies a change of audio routing */
      void onRoutingUpdated();
 }
diff --git a/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
index 2e55526..75ff8e9 100644
--- a/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
-import android.media.AudioSourceType;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -30,7 +30,7 @@
     int input;
     AudioConfig config;
     AudioDevice device;
-    AudioSourceType source;
+    AudioSource source;
     /** Bitmask, indexed by AudioInputFlag. */
     int flags;
 }
diff --git a/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl b/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
index b613ba5..41bc38a 100644
--- a/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
@@ -16,8 +16,8 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
index 1541948..90e7ea6 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioConfigBase;
 import android.media.AudioPort;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl b/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
index a051969..451a0bf 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioConfig;
+import android.media.audio.common.AudioConfig;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl b/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
index 3280460..7dad58d 100644
--- a/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioSourceType;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -28,7 +28,7 @@
     int uid;
     /** Interpreted as audio_session_t. */
     int session;
-    AudioSourceType source;
+    AudioSource source;
     /** Interpreted as audio_port_handle_t. */
     int portId;
     boolean silenced;
diff --git a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
index a829e59..4b540a9 100644
--- a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
+++ b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
@@ -16,6 +16,8 @@
 
 package android.media;
 
+import android.media.audio.common.AudioDeviceDescription;
+
 /**
  * {@hide}
  */
@@ -24,6 +26,6 @@
     int session;
     /** Interpreted as audio_io_handle_t. */
     int ioHandle;
-    /** Interpreted as audio_devices_t. */
-    int device;
+    /** Device type. */
+    AudioDeviceDescription device;
 }
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index b290aa8..969e3e6 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -46,6 +46,7 @@
     ],
     shared_libs: [
         "android.hardware.audio.common-util",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index bd9e158..4c89249 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -58,7 +58,8 @@
 
 constexpr audio_mode_t kModes[] = {
     AUDIO_MODE_INVALID, AUDIO_MODE_CURRENT,          AUDIO_MODE_NORMAL,     AUDIO_MODE_RINGTONE,
-    AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN};
+    AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN,
+    AUDIO_MODE_CALL_REDIRECT, AUDIO_MODE_COMMUNICATION_REDIRECT};
 
 constexpr audio_session_t kSessionId[] = {AUDIO_SESSION_NONE, AUDIO_SESSION_OUTPUT_STAGE,
                                           AUDIO_SESSION_DEVICE};
@@ -231,7 +232,7 @@
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
     attributionSource.token = sp<BBinder>::make();
     track->set(AUDIO_STREAM_DEFAULT, sampleRate, format, channelMask, frameCount, flags, nullptr,
-               nullptr, notificationFrames, sharedBuffer, false, sessionId,
+               notificationFrames, sharedBuffer, false, sessionId,
                ((fast && sharedBuffer == 0) || offload) ? AudioTrack::TRANSFER_CALLBACK
                                                         : AudioTrack::TRANSFER_DEFAULT,
                offload ? &offloadInfo : nullptr, attributionSource, &attributes, false, 1.0f,
@@ -601,9 +602,10 @@
     media::OpenInputRequest request{};
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
     request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
-    request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+    request.config = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
-    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSource(source));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
 
     media::OpenInputResponse response{};
@@ -658,9 +660,10 @@
     media::OpenOutputResponse response{};
 
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
-    request.halConfig = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
-    request.mixerConfig =
-            VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_base_t_AudioConfigBase(mixerConfig));
+    request.halConfig = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(config, false /*isInput*/));
+    request.mixerConfig = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(mixerConfig, false /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
 
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 4ec69c7..e769303 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -23,34 +23,43 @@
 
 #include <android/media/AudioAttributesInternal.h>
 #include <android/media/AudioClient.h>
-#include <android/media/AudioConfig.h>
-#include <android/media/AudioConfigBase.h>
+#include <android/media/AudioDirectMode.h>
 #include <android/media/AudioDualMonoMode.h>
-#include <android/media/AudioEncapsulationMode.h>
-#include <android/media/AudioEncapsulationMetadataType.h>
-#include <android/media/AudioEncapsulationType.h>
 #include <android/media/AudioFlag.h>
-#include <android/media/AudioGain.h>
-#include <android/media/AudioGainMode.h>
-#include <android/media/AudioInputFlags.h>
 #include <android/media/AudioIoConfigEvent.h>
 #include <android/media/AudioIoDescriptor.h>
-#include <android/media/AudioMixLatencyClass.h>
-#include <android/media/AudioMode.h>
-#include <android/media/AudioOutputFlags.h>
 #include <android/media/AudioPlaybackRate.h>
 #include <android/media/AudioPort.h>
-#include <android/media/AudioPortConfigType.h>
-#include <android/media/AudioPortDeviceExt.h>
-#include <android/media/AudioPortExt.h>
-#include <android/media/AudioPortMixExt.h>
-#include <android/media/AudioPortSessionExt.h>
-#include <android/media/AudioProfile.h>
+#include <android/media/AudioPortConfig.h>
+#include <android/media/AudioPortDeviceExtSys.h>
 #include <android/media/AudioTimestampInternal.h>
 #include <android/media/AudioUniqueIdUse.h>
 #include <android/media/EffectDescriptor.h>
-#include <android/media/ExtraAudioDescriptor.h>
 #include <android/media/TrackSecondaryOutputInfo.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioConfig.h>
+#include <android/media/audio/common/AudioConfigBase.h>
+#include <android/media/audio/common/AudioContentType.h>
+#include <android/media/audio/common/AudioDeviceDescription.h>
+#include <android/media/audio/common/AudioEncapsulationMetadataType.h>
+#include <android/media/audio/common/AudioEncapsulationMode.h>
+#include <android/media/audio/common/AudioEncapsulationType.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
+#include <android/media/audio/common/AudioGain.h>
+#include <android/media/audio/common/AudioGainConfig.h>
+#include <android/media/audio/common/AudioGainMode.h>
+#include <android/media/audio/common/AudioInputFlags.h>
+#include <android/media/audio/common/AudioMode.h>
+#include <android/media/audio/common/AudioOffloadInfo.h>
+#include <android/media/audio/common/AudioOutputFlags.h>
+#include <android/media/audio/common/AudioPortExt.h>
+#include <android/media/audio/common/AudioPortMixExt.h>
+#include <android/media/audio/common/AudioProfile.h>
+#include <android/media/audio/common/AudioSource.h>
+#include <android/media/audio/common/AudioStandard.h>
+#include <android/media/audio/common/AudioUsage.h>
+#include <android/media/audio/common/AudioUuid.h>
+#include <android/media/audio/common/ExtraAudioDescriptor.h>
 
 #include <android/media/SharedFileRegion.h>
 #include <binder/IMemory.h>
@@ -86,19 +95,9 @@
 ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
 
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
-        media::AudioPortConfigType aidl);
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
-        int32_t legacy);
-
 ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
 
-ConversionResult<audio_channel_mask_t> aidl2legacy_int32_t_audio_channel_mask_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_channel_mask_t_int32_t(audio_channel_mask_t legacy);
-
 ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_pid_t_int32_t(pid_t legacy);
 
@@ -116,10 +115,10 @@
 ConversionResult<std::optional<std::string_view>>
 legacy2aidl_optional_String16_optional_string(std::optional<String16> legacy);
 
-ConversionResult<audio_io_config_event> aidl2legacy_AudioIoConfigEvent_audio_io_config_event(
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
         media::AudioIoConfigEvent aidl);
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
-        audio_io_config_event legacy);
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+        audio_io_config_event_t legacy);
 
 ConversionResult<audio_port_role_t> aidl2legacy_AudioPortRole_audio_port_role_t(
         media::AudioPortRole aidl);
@@ -131,36 +130,59 @@
 ConversionResult<media::AudioPortType> legacy2aidl_audio_port_type_t_AudioPortType(
         audio_port_type_t legacy);
 
-ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
-        media::audio::common::AudioFormat aidl);
-ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
-        audio_format_t legacy);
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+        const media::audio::common::AudioChannelLayout& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioChannelLayout>
+legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+        const media::audio::common::AudioDeviceDescription& aidl);
+ConversionResult<media::audio::common::AudioDeviceDescription>
+legacy2aidl_audio_devices_t_AudioDeviceDescription(audio_devices_t legacy);
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, char* legacyAddress);
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, String8* legacyAddress);
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, std::string* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const char* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const String8& legacyAddress);
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+        const media::audio::common::AudioFormatDescription& aidl);
+ConversionResult<media::audio::common::AudioFormatDescription>
+legacy2aidl_audio_format_t_AudioFormatDescription(audio_format_t legacy);
 
 ConversionResult<audio_gain_mode_t>
-aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl);
-ConversionResult<media::AudioGainMode>
+aidl2legacy_AudioGainMode_audio_gain_mode_t(media::audio::common::AudioGainMode aidl);
+ConversionResult<media::audio::common::AudioGainMode>
 legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
 
-ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy);
-
 ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
-        const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type);
-ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
-        const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type);
+        const media::audio::common::AudioGainConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGainConfig>
+legacy2aidl_audio_gain_config_AudioGainConfig(const audio_gain_config& legacy, bool isInput);
 
-ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
-        media::AudioInputFlags aidl);
-ConversionResult<media::AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
-        audio_input_flags_t legacy);
+ConversionResult<audio_input_flags_t>
+aidl2legacy_AudioInputFlags_audio_input_flags_t(media::audio::common::AudioInputFlags aidl);
+ConversionResult<media::audio::common::AudioInputFlags>
+legacy2aidl_audio_input_flags_t_AudioInputFlags(audio_input_flags_t legacy);
 
-ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
-        media::AudioOutputFlags aidl);
-ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
-        audio_output_flags_t legacy);
+ConversionResult<audio_output_flags_t>
+aidl2legacy_AudioOutputFlags_audio_output_flags_t(media::audio::common::AudioOutputFlags aidl);
+ConversionResult<media::audio::common::AudioOutputFlags>
+legacy2aidl_audio_output_flags_t_AudioOutputFlags(audio_output_flags_t legacy);
 
 ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
         int32_t aidl);
@@ -173,40 +195,43 @@
         audio_output_flags_t legacy);
 
 ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
-        const media::AudioIoFlags& aidl, media::AudioPortRole role, media::AudioPortType type);
-ConversionResult<media::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
-        const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type);
+        const media::audio::common::AudioIoFlags& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
+        const audio_io_flags& legacy, bool isInput);
 
 ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-        const media::AudioPortConfigDeviceExt& aidl);
-ConversionResult<media::AudioPortConfigDeviceExt>
-legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-        const audio_port_config_device_ext& legacy);
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+        const media::audio::common::AudioPortDeviceExt& aidl,
+        const media::AudioPortDeviceExtSys& aidlDeviceExt);
+status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+        const audio_port_config_device_ext& legacy,
+        media::audio::common::AudioPortDeviceExt* aidl,
+        media::AudioPortDeviceExtSys* aidlDeviceExt);
 
 ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
-        media::AudioStreamType aidl);
-ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
-        audio_stream_type_t legacy);
+        media::audio::common::AudioStreamType aidl);
+ConversionResult<media::audio::common::AudioStreamType>
+legacy2aidl_audio_stream_type_t_AudioStreamType(audio_stream_type_t legacy);
 
-ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
-        media::AudioSourceType aidl);
-ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+        media::audio::common::AudioSource aidl);
+ConversionResult<media::audio::common::AudioSource>
+legacy2aidl_audio_source_t_AudioSource(
         audio_source_t legacy);
 
 ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_session_t_int32_t(audio_session_t legacy);
 
-ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
-        const media::AudioPortConfigMixExt& aidl, media::AudioPortRole role);
-ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
-        const audio_port_config_mix_ext& legacy, audio_port_role_t role);
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
+        const media::audio::common::AudioPortMixExt& aidl, media::AudioPortRole role,
+        const media::AudioPortMixExtSys& aidlMixExt);
+status_t legacy2aidl_AudioPortMixExt(
+        const audio_port_config_mix_ext& legacy, audio_port_role_t role,
+        media::audio::common::AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt);
 
 ConversionResult<audio_port_config_session_ext>
-aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
-        const media::AudioPortConfigSessionExt& aidl);
-ConversionResult<media::AudioPortConfigSessionExt>
-legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+aidl2legacy_int32_t_audio_port_config_session_ext(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
         const audio_port_config_session_ext& legacy);
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
@@ -221,7 +246,6 @@
 
 ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
         const media::AudioIoDescriptor& aidl);
-
 ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
         const sp<AudioIoDescriptor>& legacy);
 
@@ -231,13 +255,14 @@
         const AudioClient& legacy);
 
 ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl);
-ConversionResult<media::AudioContentType>
+aidl2legacy_AudioContentType_audio_content_type_t(
+        media::audio::common::AudioContentType aidl);
+ConversionResult<media::audio::common::AudioContentType>
 legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy);
 
 ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::AudioUsage aidl);
-ConversionResult<media::AudioUsage>
+aidl2legacy_AudioUsage_audio_usage_t(media::audio::common::AudioUsage aidl);
+ConversionResult<media::audio::common::AudioUsage>
 legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy);
 
 ConversionResult<audio_flags_mask_t>
@@ -256,24 +281,27 @@
 legacy2aidl_audio_attributes_t_AudioAttributesInternal(const audio_attributes_t& legacy);
 
 ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(media::AudioEncapsulationMode aidl);
-ConversionResult<media::AudioEncapsulationMode>
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(
+        media::audio::common::AudioEncapsulationMode aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMode>
 legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy);
 
 ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const media::AudioOffloadInfo& aidl);
-ConversionResult<media::AudioOffloadInfo>
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(
+        const media::audio::common::AudioOffloadInfo& aidl);
+ConversionResult<media::audio::common::AudioOffloadInfo>
 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy);
 
 ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl);
-ConversionResult<media::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy);
+aidl2legacy_AudioConfig_audio_config_t(const media::audio::common::AudioConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfig>
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput);
 
 ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl);
-ConversionResult<media::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy);
+aidl2legacy_AudioConfigBase_audio_config_base_t(
+        const media::audio::common::AudioConfigBase& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfigBase>
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput);
 
 ConversionResult<sp<IMemory>>
 aidl2legacy_SharedFileRegion_IMemory(const media::SharedFileRegion& aidl);
@@ -291,8 +319,8 @@
 legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy);
 
 ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl);
-ConversionResult<media::AudioUuid>
+aidl2legacy_AudioUuid_audio_uuid_t(const media::audio::common::AudioUuid& aidl);
+ConversionResult<media::audio::common::AudioUuid>
 legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy);
 
 ConversionResult<effect_descriptor_t>
@@ -302,8 +330,8 @@
 
 ConversionResult<audio_encapsulation_metadata_type_t>
 aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
-        media::AudioEncapsulationMetadataType aidl);
-ConversionResult<media::AudioEncapsulationMetadataType>
+        media::audio::common::AudioEncapsulationMetadataType aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMetadataType>
 legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
         audio_encapsulation_metadata_type_t legacy);
 
@@ -317,37 +345,39 @@
 ConversionResult<int32_t>
 legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy);
 
-ConversionResult<audio_mix_latency_class_t>
-aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
-        media::AudioMixLatencyClass aidl);
-ConversionResult<media::AudioMixLatencyClass>
-legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
-        audio_mix_latency_class_t legacy);
-
 ConversionResult<audio_port_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl);
-ConversionResult<media::AudioPortDeviceExt>
-legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy);
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+        const media::audio::common::AudioPortDeviceExt& aidl,
+        const media::AudioPortDeviceExtSys& aidlDeviceExt);
+status_t legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+        const audio_port_device_ext& legacy,
+        media::audio::common::AudioPortDeviceExt* aidl,
+        media::AudioPortDeviceExtSys* aidlDeviceExt);
 
 ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl);
-ConversionResult<media::AudioPortMixExt>
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy);
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+        const media::audio::common::AudioPortMixExt& aidl,
+        const media::AudioPortMixExtSys& aidlMixExt);
+status_t legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+        const audio_port_mix_ext& legacy,
+        media::audio::common::AudioPortMixExt* aidl,
+        media::AudioPortMixExtSys* aidlMixExt);
 
 ConversionResult<audio_port_session_ext>
-aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl);
-ConversionResult<media::AudioPortSessionExt>
-legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy);
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl);
+ConversionResult<int32_t>
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy);
 
 ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl);
-ConversionResult<media::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy);
+aidl2legacy_AudioProfile_audio_profile(
+        const media::audio::common::AudioProfile& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput);
 
 ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl);
-ConversionResult<media::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy);
+aidl2legacy_AudioGain_audio_gain(const media::audio::common::AudioGain& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput);
 
 ConversionResult<audio_port_v7>
 aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl);
@@ -355,8 +385,8 @@
 legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
 
 ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl);
-ConversionResult<media::AudioMode>
+aidl2legacy_AudioMode_audio_mode_t(media::audio::common::AudioMode aidl);
+ConversionResult<media::audio::common::AudioMode>
 legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy);
 
 ConversionResult<audio_unique_id_use_t>
@@ -390,21 +420,21 @@
 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(const audio_playback_rate_t& legacy);
 
 ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::AudioStandard aidl);
-ConversionResult<media::AudioStandard>
+aidl2legacy_AudioStandard_audio_standard_t(media::audio::common::AudioStandard aidl);
+ConversionResult<media::audio::common::AudioStandard>
 legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy);
 
 ConversionResult<audio_extra_audio_descriptor>
 aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
-        const media::ExtraAudioDescriptor& aidl);
-ConversionResult<media::ExtraAudioDescriptor>
+        const media::audio::common::ExtraAudioDescriptor& aidl);
+ConversionResult<media::audio::common::ExtraAudioDescriptor>
 legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
         const audio_extra_audio_descriptor& legacy);
 
 ConversionResult<audio_encapsulation_type_t>
 aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
-        const media::AudioEncapsulationType& aidl);
-ConversionResult<media::AudioEncapsulationType>
+        const media::audio::common::AudioEncapsulationType& aidl);
+ConversionResult<media::audio::common::AudioEncapsulationType>
 legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
         const audio_encapsulation_type_t & legacy);
 
@@ -416,5 +446,13 @@
 legacy2aidl_TrackSecondaryOutputInfoPair_TrackSecondaryOutputInfo(
         const TrackSecondaryOutputInfoPair& legacy);
 
+ConversionResult<audio_direct_mode_t>
+aidl2legacy_AudioDirectMode_audio_direct_mode_t(media::AudioDirectMode aidl);
+ConversionResult<media::AudioDirectMode>
+legacy2aidl_audio_direct_mode_t_AudioDirectMode(audio_direct_mode_t legacy);
+
+ConversionResult<audio_direct_mode_t> aidl2legacy_int32_t_audio_direct_mode_t_mask(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_direct_mode_t_int32_t_mask(audio_direct_mode_t legacy);
+
 
 }  // namespace android
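
Most of the converters above now take an explicit isInput flag because channel masks and related fields are interpreted differently for input and output streams in the new common types. A minimal sketch of the calling convention (assuming AidlConversion.h also pulls in the VALUE_OR_RETURN_STATUS helper from AidlConversionUtil.h, as the fuzzer code above does):

    #include <media/AidlConversion.h>
    #include <system/audio.h>

    // Converts a legacy capture configuration to the AIDL type; the explicit
    // isInput flag selects the input interpretation of the channel mask.
    android::status_t captureConfigToAidl(
            const audio_config_base_t& legacy,
            android::media::audio::common::AudioConfigBase* aidl) {
        *aidl = VALUE_OR_RETURN_STATUS(
                android::legacy2aidl_audio_config_base_t_AudioConfigBase(
                        legacy, true /*isInput*/));
        return android::OK;
    }
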
diff --git a/media/libaudioclient/include/media/AidlConversionUtil.h b/media/libaudioclient/include/media/AidlConversionUtil.h
index c1a2be3..dfabd55 100644
--- a/media/libaudioclient/include/media/AidlConversionUtil.h
+++ b/media/libaudioclient/include/media/AidlConversionUtil.h
@@ -41,6 +41,9 @@
 #define RETURN_IF_ERROR(result) \
     if (status_t _tmp = (result); _tmp != OK) return base::unexpected(_tmp);
 
+#define RETURN_STATUS_IF_ERROR(result) \
+    if (status_t _tmp = (result); _tmp != OK) return _tmp;
+
 #define VALUE_OR_RETURN_STATUS(x)           \
     ({                                      \
        auto _tmp = (x);                     \
@@ -119,6 +122,47 @@
     return output;
 }
 
+/**
+ * A generic template that helps to "zip" two input containers of the same size
+ * into a single output container of converted elements. The conversion function
+ * must therefore accept two arguments.
+ */
+template<typename OutputContainer, typename InputContainer1,
+        typename InputContainer2, typename Func>
+ConversionResult<OutputContainer>
+convertContainers(const InputContainer1& input1, const InputContainer2& input2,
+        const Func& itemConversion) {
+    auto iter2 = input2.begin();
+    OutputContainer output;
+    auto ins = std::inserter(output, output.begin());
+    for (const auto& item1 : input1) {
+        RETURN_IF_ERROR(iter2 != input2.end() ? OK : BAD_VALUE);
+        *ins = VALUE_OR_RETURN(itemConversion(item1, *iter2++));
+    }
+    return output;
+}
+
+/**
+ * A generic template that helps to "unzip" a per-element conversion into
+ * a pair of elements into a pair of containers. The conversion function
+ * must emit a pair of elements.
+ */
+template<typename OutputContainer1, typename OutputContainer2,
+        typename InputContainer, typename Func>
+ConversionResult<std::pair<OutputContainer1, OutputContainer2>>
+convertContainerSplit(const InputContainer& input, const Func& itemConversion) {
+    OutputContainer1 output1;
+    OutputContainer2 output2;
+    auto ins1 = std::inserter(output1, output1.begin());
+    auto ins2 = std::inserter(output2, output2.begin());
+    for (const auto& item : input) {
+        auto out_pair = VALUE_OR_RETURN(itemConversion(item));
+        *ins1 = out_pair.first;
+        *ins2 = out_pair.second;
+    }
+    return std::make_pair(output1, output2);
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // The code below establishes:
 // IntegralTypeOf<T>, which works for either integral types (in which case it evaluates to T), or
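
The two container helpers added above mirror the parallel-list split used throughout this change (common AIDL type plus framework-only *Sys type). A minimal usage sketch of convertContainers (assuming these helpers live in namespace android and ConversionResult<T> is the expected-like alias used by this header):

    #include <string>
    #include <vector>

    #include <media/AidlConversionUtil.h>

    // Zips two parallel containers into one; the conversion fails with BAD_VALUE
    // if the second container is shorter than the first.
    android::ConversionResult<std::vector<std::string>> labelSizes(
            const std::vector<std::string>& names, const std::vector<int>& sizes) {
        return android::convertContainers<std::vector<std::string>>(
                names, sizes,
                [](const std::string& name, int size)
                        -> android::ConversionResult<std::string> {
                    return name + ":" + std::to_string(size);
                });
    }

convertContainerSplit goes the other way: the per-element conversion returns a pair, and the helper collects the first and second elements into two separate containers.
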
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 5f0c590..862a0f9 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -17,9 +17,75 @@
 
 #pragma once
 
+#include <functional>
+
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioDeviceDescription.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
+#include <binder/Parcelable.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
-#include <binder/Parcelable.h>
+
+namespace {
+// see boost::hash_combine
+#if defined(__clang__)
+__attribute__((no_sanitize("unsigned-integer-overflow")))
+#endif
+static size_t hash_combine(size_t seed, size_t v) {
+    return std::hash<size_t>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+}
+
+namespace std {
+
+// Note: when extending the types hashed below we need to account for the
+// possibility of processing instances belonging to different versions of the
+// same type, e.g. a HAL may be using a previous version of the AIDL interface.
+
+template<> struct hash<android::media::audio::common::AudioChannelLayout>
+{
+    std::size_t operator()(
+            const android::media::audio::common::AudioChannelLayout& acl) const noexcept {
+        using Tag = android::media::audio::common::AudioChannelLayout::Tag;
+        const size_t seed = std::hash<Tag>{}(acl.getTag());
+        switch (acl.getTag()) {
+            case Tag::none:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::none>()));
+            case Tag::invalid:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::invalid>()));
+            case Tag::indexMask:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::indexMask>()));
+            case Tag::layoutMask:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::layoutMask>()));
+            case Tag::voiceMask:
+                return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::voiceMask>()));
+        }
+        return seed;
+    }
+};
+
+template<> struct hash<android::media::audio::common::AudioDeviceDescription>
+{
+    std::size_t operator()(
+            const android::media::audio::common::AudioDeviceDescription& add) const noexcept {
+        return hash_combine(
+                std::hash<android::media::audio::common::AudioDeviceType>{}(add.type),
+                std::hash<std::string>{}(add.connection));
+    }
+};
+
+template<> struct hash<android::media::audio::common::AudioFormatDescription>
+{
+    std::size_t operator()(
+            const android::media::audio::common::AudioFormatDescription& afd) const noexcept {
+        return hash_combine(
+                std::hash<android::media::audio::common::AudioFormatType>{}(afd.type),
+                hash_combine(
+                        std::hash<android::media::audio::common::PcmType>{}(afd.pcm),
+                        std::hash<std::string>{}(afd.encoding)));
+    }
+};
+}  // namespace std
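+
+// With the std::hash specializations above these AIDL types can be used as keys
+// of unordered containers. A sketch (assumes <unordered_set> is included and the
+// AIDL-generated operator==, which structured parcelables provide):
+//
+//   std::unordered_set<android::media::audio::common::AudioDeviceDescription> devices;
+//   devices.insert(android::media::audio::common::AudioDeviceDescription{});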
 
 namespace android {
 
@@ -81,4 +147,3 @@
 static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);
 
 } // namespace android
-
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index dd4d2da..ee262f3 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -40,7 +40,7 @@
 
 // ----------------------------------------------------------------------------
 
-class AudioEffect : public RefBase
+class AudioEffect : public virtual RefBase
 {
 public:
 
diff --git a/media/libaudioclient/include/media/AudioIoDescriptor.h b/media/libaudioclient/include/media/AudioIoDescriptor.h
index 981d33a..405ec7d 100644
--- a/media/libaudioclient/include/media/AudioIoDescriptor.h
+++ b/media/libaudioclient/include/media/AudioIoDescriptor.h
@@ -17,9 +17,15 @@
 #ifndef ANDROID_AUDIO_IO_DESCRIPTOR_H
 #define ANDROID_AUDIO_IO_DESCRIPTOR_H
 
+#include <sstream>
+#include <string>
+
+#include <system/audio.h>
+#include <utils/RefBase.h>
+
 namespace android {
 
-enum audio_io_config_event {
+enum audio_io_config_event_t {
     AUDIO_OUTPUT_REGISTERED,
     AUDIO_OUTPUT_OPENED,
     AUDIO_OUTPUT_CLOSED,
@@ -33,41 +39,70 @@
 
 // audio input/output descriptor used to cache output configurations in client process to avoid
 // frequent calls through IAudioFlinger
-class AudioIoDescriptor : public RefBase {
+class AudioIoDescriptor : public virtual RefBase {
 public:
-    AudioIoDescriptor() :
-        mIoHandle(AUDIO_IO_HANDLE_NONE),
-        mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(AUDIO_CHANNEL_NONE),
-        mFrameCount(0), mFrameCountHAL(0), mLatency(0), mPortId(AUDIO_PORT_HANDLE_NONE)
-    {
-        memset(&mPatch, 0, sizeof(struct audio_patch));
-    }
+    AudioIoDescriptor() = default;
+    // For AUDIO_{INPUT|OUTPUT}_CLOSED events.
+    AudioIoDescriptor(audio_io_handle_t ioHandle) : mIoHandle(ioHandle) {}
+    // For AUDIO_CLIENT_STARTED events.
+    AudioIoDescriptor(
+            audio_io_handle_t ioHandle, const audio_patch& patch, audio_port_handle_t portId) :
+            mIoHandle(ioHandle), mPatch(patch), mPortId(portId) {}
+    // For everything else.
+    AudioIoDescriptor(
+            audio_io_handle_t ioHandle, const audio_patch& patch, bool isInput,
+            uint32_t samplingRate, audio_format_t format, audio_channel_mask_t channelMask,
+            size_t frameCount, size_t frameCountHal, uint32_t latency = 0,
+            audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE) :
+            mIoHandle(ioHandle), mPatch(patch), mIsInput(isInput),
+            mSamplingRate(samplingRate), mFormat(format), mChannelMask(channelMask),
+            mFrameCount(frameCount), mFrameCountHAL(frameCountHal), mLatency(latency),
+            mPortId(portId) {}
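+
+    // Illustrative only (not part of this change): which constructor matches which
+    // event type; the values below are placeholders.
+    //
+    //   sp<AudioIoDescriptor> closed = new AudioIoDescriptor(ioHandle);
+    //   sp<AudioIoDescriptor> started = new AudioIoDescriptor(ioHandle, patch, portId);
+    //   sp<AudioIoDescriptor> opened = new AudioIoDescriptor(
+    //           ioHandle, patch, false /*isInput*/, 48000, AUDIO_FORMAT_PCM_16_BIT,
+    //           AUDIO_CHANNEL_OUT_STEREO, 960 /*frameCount*/, 960 /*frameCountHal*/);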
 
-    virtual ~AudioIoDescriptor() {}
-
-    audio_port_handle_t getDeviceId() {
+    audio_io_handle_t getIoHandle() const { return mIoHandle; }
+    const audio_patch& getPatch() const { return mPatch; }
+    bool getIsInput() const { return mIsInput; }
+    uint32_t getSamplingRate() const { return mSamplingRate; }
+    audio_format_t getFormat() const { return mFormat; }
+    audio_channel_mask_t getChannelMask() const { return mChannelMask; }
+    size_t getFrameCount() const { return mFrameCount; }
+    size_t getFrameCountHAL() const { return mFrameCountHAL; }
+    uint32_t getLatency() const { return mLatency; }
+    audio_port_handle_t getPortId() const { return mPortId; }
+    audio_port_handle_t getDeviceId() const {
         if (mPatch.num_sources != 0 && mPatch.num_sinks != 0) {
-            if (mPatch.sources[0].type == AUDIO_PORT_TYPE_MIX) {
-                // this is an output mix
-                // FIXME: the API only returns the first device in case of multiple device selection
-                return mPatch.sinks[0].id;
-            } else {
-                // this is an input mix
-                return mPatch.sources[0].id;
-            }
+            // FIXME: the API only returns the first device in case of multiple device selection
+            return mIsInput ? mPatch.sources[0].id : mPatch.sinks[0].id;
         }
         return AUDIO_PORT_HANDLE_NONE;
     }
+    void setPatch(const audio_patch& patch) { mPatch = patch; }
 
-    audio_io_handle_t       mIoHandle;
-    struct audio_patch      mPatch;
-    uint32_t                mSamplingRate;
-    audio_format_t          mFormat;
-    audio_channel_mask_t    mChannelMask;
-    size_t                  mFrameCount;
-    size_t                  mFrameCountHAL;
-    uint32_t                mLatency;   // only valid for output
-    audio_port_handle_t     mPortId;    // valid for event AUDIO_CLIENT_STARTED
+    std::string toDebugString() const {
+        std::ostringstream ss;
+        ss << mIoHandle << ", samplingRate " << mSamplingRate << ", "
+           << audio_format_to_string(mFormat) << ", "
+           << (audio_channel_mask_get_representation(mChannelMask) ==
+                   AUDIO_CHANNEL_REPRESENTATION_INDEX ?
+                   audio_channel_index_mask_to_string(mChannelMask) :
+                   (mIsInput ? audio_channel_in_mask_to_string(mChannelMask) :
+                           audio_channel_out_mask_to_string(mChannelMask)))
+           << ", frameCount " << mFrameCount << ", frameCountHAL " << mFrameCountHAL
+           << ", deviceId " << getDeviceId();
+        return ss.str();
+    }
+
+  private:
+    const audio_io_handle_t    mIoHandle = AUDIO_IO_HANDLE_NONE;
+          struct audio_patch   mPatch = {};
+    const bool                 mIsInput = false;
+    const uint32_t             mSamplingRate = 0;
+    const audio_format_t       mFormat = AUDIO_FORMAT_DEFAULT;
+    const audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+    const size_t               mFrameCount = 0;
+    const size_t               mFrameCountHAL = 0;
+    const uint32_t             mLatency = 0;
+    const audio_port_handle_t  mPortId = AUDIO_PORT_HANDLE_NONE;
 };
 
 
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index f17ee3a..3cfcbf3 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -47,7 +47,7 @@
 {
 public:
 
-    /* Events used by AudioRecord callback function (callback_t).
+    /* Events used by AudioRecord callback function (legacy_callback_t).
      * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
      */
     enum event_type {
@@ -65,7 +65,7 @@
     };
 
     /* Client should declare a Buffer and pass address to obtainBuffer()
-     * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
+     * and releaseBuffer().  See also legacy_callback_t for EVENT_MORE_DATA.
      */
 
     class Buffer
@@ -117,7 +117,28 @@
      *          - EVENT_NEW_IAUDIORECORD: unused.
      */
 
-    typedef void (*callback_t)(int event, void* user, void *info);
+    typedef void (*legacy_callback_t)(int event, void* user, void *info);
+
+    class IAudioRecordCallback : public virtual RefBase {
+        friend AudioRecord;
+     protected:
+        // Request for client to read newly available data.
+        // Used for TRANSFER_CALLBACK mode.
+        // Parameters:
+        //  - buffer : Buffer to read from
+        // Returns:
+        //  - Number of bytes actually consumed.
+        virtual size_t onMoreData([[maybe_unused]] const AudioRecord::Buffer& buffer) { return 0; }
+        // A buffer overrun occurred.
+        virtual void onOverrun() {}
+        // Record head is at the specified marker (see setMarkerPosition()).
+        virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
+        // Record head is at a new position (see setPositionUpdatePeriod()).
+        virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+        // IAudioRecord was recreated due to re-routing, server invalidation or
+        // server crash.
+        virtual void onNewIAudioRecord() {}
+    };
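+
+    // A minimal client-side callback sketch (illustrative only; the class name is
+    // hypothetical):
+    //
+    //   class MyRecordCallback : public AudioRecord::IAudioRecordCallback {
+    //     protected:
+    //       size_t onMoreData(const AudioRecord::Buffer& buffer) override {
+    //           // Consume the newly available data described by 'buffer' and return
+    //           // the number of bytes actually read.
+    //           return 0;  // nothing consumed in this sketch
+    //       }
+    //       void onOverrun() override { /* count or log overruns */ }
+    //   };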
 
     /* Returns the minimum frame count required for the successful creation of
      * an AudioRecord object.
@@ -182,20 +203,37 @@
      * pAttributes:        If not NULL, supersedes inputSource for use case selection.
      * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
-
                         AudioRecord(audio_source_t inputSource,
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
                                     const android::content::AttributionSourceState& client,
                                     size_t frameCount = 0,
-                                    callback_t cbf = NULL,
-                                    void* user = NULL,
+                                    const wp<IAudioRecordCallback> &callback = nullptr,
                                     uint32_t notificationFrames = 0,
                                     audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                                    audio_microphone_direction_t
+                                        selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+                                    float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
+
+
+                        AudioRecord(audio_source_t inputSource,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    const android::content::AttributionSourceState& client,
+                                    size_t frameCount,
+                                    legacy_callback_t callback,
+                                    void* user,
+                                    uint32_t notificationFrames = 0,
+                                    audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                                     audio_microphone_direction_t
                                         selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
@@ -223,13 +261,12 @@
      *
      * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
      */
-            status_t    set(audio_source_t inputSource,
+            status_t    set(audio_source_t inputSource,
                             uint32_t sampleRate,
                             audio_format_t format,
                             audio_channel_mask_t channelMask,
                             size_t frameCount = 0,
-                            callback_t cbf = NULL,
-                            void* user = NULL,
+                            const wp<IAudioRecordCallback> &callback = nullptr,
                             uint32_t notificationFrames = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
@@ -237,7 +274,28 @@
                             audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
                             uid_t uid = AUDIO_UID_INVALID,
                             pid_t pid = -1,
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
+                            audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                            audio_microphone_direction_t
+                                selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+                            float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT,
+                            int32_t maxSharedAudioHistoryMs = 0);
+
+           status_t    set(audio_source_t inputSource,
+                            uint32_t sampleRate,
+                            audio_format_t format,
+                            audio_channel_mask_t channelMask,
+                            size_t frameCount,
+                            legacy_callback_t callback,
+                            void* user,
+                            uint32_t notificationFrames = 0,
+                            bool threadCanCallJava = false,
+                            audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                            uid_t uid = AUDIO_UID_INVALID,
+                            pid_t pid = -1,
+                            const audio_attributes_t* pAttributes = nullptr,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                             audio_microphone_direction_t
                                 selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
@@ -673,8 +731,9 @@
     bool                    mActive;
 
     // for client callback handler
-    callback_t              mCbf;                   // callback handler for events, or NULL
-    void*                   mUserData;
+
+    wp<IAudioRecordCallback> mCallback;
+    sp<IAudioRecordCallback> mLegacyCallbackWrapper;
 
     // for notification APIs
     uint32_t                mNotificationFramesReq; // requested number of frames between each
@@ -760,6 +819,13 @@
     bool                    mTimestampRetrogradePositionReported = false; // reduce log spam
     bool                    mTimestampRetrogradeTimeReported = false;     // reduce log spam
 
+    // Format conversion. May be needed for fast tracks whose format differs from the server's.
+    audio_config_base_t     mServerConfig;
+    size_t                  mServerFrameSize;
+    size_t                  mServerSampleSize;
+    std::unique_ptr<uint8_t[]> mFormatConversionBufRaw;
+    Buffer                  mFormatConversionBuffer;
+
 private:
     class DeathNotifier : public IBinder::DeathRecipient {
     public:
@@ -824,6 +890,8 @@
     MediaMetrics mMediaMetrics;
     std::string mMetricsId;  // GUARDED_BY(mLock), could change in createRecord_l().
     std::string mCallerName; // for example "aaudio"
+
+    void reportError(status_t status, const char *event, const char *message) const;
 };
 
 }; // namespace android
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 4d85f7a..11eb070 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -19,26 +19,30 @@
 
 #include <sys/types.h>
 
+#include <set>
+#include <vector>
+
+#include <android/content/AttributionSourceState.h>
 #include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerClient.h>
 #include <android/media/BnAudioPolicyServiceClient.h>
 #include <android/media/INativeSpatializerCallback.h>
 #include <android/media/ISpatializer.h>
-#include <android/content/AttributionSourceState.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/AidlConversionUtil.h>
+#include <media/AudioContainers.h>
 #include <media/AudioDeviceTypeAddr.h>
 #include <media/AudioPolicy.h>
 #include <media/AudioProductStrategy.h>
 #include <media/AudioVolumeGroup.h>
 #include <media/AudioIoDescriptor.h>
 #include <media/MicrophoneInfo.h>
-#include <set>
 #include <system/audio.h>
 #include <system/audio_effect.h>
 #include <system/audio_policy.h>
 #include <utils/Errors.h>
 #include <utils/Mutex.h>
-#include <vector>
 
 using android::content::AttributionSourceState;
 
@@ -323,7 +327,7 @@
     static status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
 
     static product_strategy_t getStrategyForStream(audio_stream_type_t stream);
-    static audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+    static DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
     static status_t getDevicesForAttributes(const AudioAttributes &aa,
                                             AudioDeviceTypeAddrVector *devices);
 
@@ -532,9 +536,22 @@
                                      const AudioDeviceTypeAddrVector &devices,
                                      bool *canBeSpatialized);
 
+    /**
+     * Query how direct playback is currently supported on the device.
+     * @param attr audio attributes describing the playback use case
+     * @param config audio configuration for the playback
+     * @param directMode out: a set of flags describing how direct playback is currently
+     *        supported on the device
+     * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, BAD_VALUE, PERMISSION_DENIED
+     *         in case of error.
+     */
+    static status_t getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                             const audio_config_t *config,
+                                             audio_direct_mode_t *directMode);
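+
+    // Illustrative call (a sketch; 'attributes' and 'config' are assumed to be
+    // pre-filled by the caller):
+    //
+    //   audio_direct_mode_t mode;
+    //   if (AudioSystem::getDirectPlaybackSupport(&attributes, &config, &mode) == NO_ERROR) {
+    //       // inspect the flags in 'mode'
+    //   }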
+
 
     // A listener for capture state changes.
-    class CaptureStateListener : public RefBase {
+    class CaptureStateListener : public virtual RefBase {
     public:
         // Called whenever capture state changes.
         virtual void onStateChanged(bool active) = 0;
@@ -559,7 +576,7 @@
 
     // ----------------------------------------------------------------------------
 
-    class AudioVolumeGroupCallback : public RefBase
+    class AudioVolumeGroupCallback : public virtual RefBase
     {
     public:
 
@@ -574,7 +591,7 @@
     static status_t addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
     static status_t removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
 
-    class AudioPortCallback : public RefBase
+    class AudioPortCallback : public virtual RefBase
     {
     public:
 
@@ -590,7 +607,7 @@
     static status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
     static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
 
-    class AudioDeviceCallback : public RefBase
+    class AudioDeviceCallback : public virtual RefBase
     {
     public:
 
@@ -612,6 +629,14 @@
 
     static status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
 
+    static status_t getMmapPolicyInfo(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+    static int32_t getAAudioMixerBurstCount();
+
+    static int32_t getAAudioHardwareBurstMinUsec();
+
 private:
 
     class AudioFlingerClient: public IBinder::DeathRecipient, public media::BnAudioFlingerClient
@@ -688,12 +713,12 @@
         binder::Status onRecordingConfigurationUpdate(
                 int32_t event,
                 const media::RecordClientInfo& clientInfo,
-                const media::AudioConfigBase& clientConfig,
+                const media::audio::common::AudioConfigBase& clientConfig,
                 const std::vector<media::EffectDescriptor>& clientEffects,
-                const media::AudioConfigBase& deviceConfig,
+                const media::audio::common::AudioConfigBase& deviceConfig,
                 const std::vector<media::EffectDescriptor>& effects,
                 int32_t patchHandle,
-                media::AudioSourceType source) override;
+                media::audio::common::AudioSource source) override;
         binder::Status onRoutingUpdated();
 
     private:
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index fa21265..16e10b5 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -146,7 +146,79 @@
      *          - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
      */
 
-    typedef void (*callback_t)(int event, void* user, void *info);
+    typedef void (*legacy_callback_t)(int event, void* user, void* info);
+    class IAudioTrackCallback : public virtual RefBase {
+      friend AudioTrack;
+      protected:
+       /* Request to write more data to buffer.
+        * This event only occurs for TRANSFER_CALLBACK.
+        * If this event is delivered but the callback handler does not want to write more data,
+        * the handler must ignore the event by returning zero.
+        * This might occur, for example, if the application is waiting for source data or is at
+        * the end of stream.
+        * For data filling, it is preferred that the callback does not block and instead returns
+        * a short count of the amount of data actually delivered.
+        * Parameters:
+        *  - buffer: Buffer to fill
+        * Returns:
+        * Amount of data actually written in bytes.
+        */
+        virtual size_t onMoreData([[maybe_unused]] const AudioTrack::Buffer& buffer) { return 0; }
+
+        // Buffer underrun occurred. This will not occur for static tracks.
+        virtual void onUnderrun() {}
+
+       /* Sample loop end was reached; playback restarted from loop start if loop count was not 0
+        * for a static track.
+        * Parameters:
+        *  - loopsRemaining: Number of loops remaining to be played. -1 if infinite looping.
+        */
+        virtual void onLoopEnd([[maybe_unused]] int32_t loopsRemaining) {}
+
+       /* Playback head is at the specified marker (See setMarkerPosition()).
+        * Parameters:
+        *  - markerPosition: Marker position in frames
+        */
+        virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
+
+       /* Playback head is at a new position (See setPositionUpdatePeriod()).
+        * Parameters:
+        *  - newPos: New position in frames
+        */
+        virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+
+        // Playback has completed for a static track.
+        virtual void onBufferEnd() {}
+
+        // IAudioTrack was re-created, either due to re-routing, voluntary invalidation
+        // by mediaserver, or a mediaserver crash.
+        virtual void onNewIAudioTrack() {}
+
+        // Sent after all the buffers queued in AF and HW are played back (after stop is called)
+        // for an offloaded track.
+        virtual void onStreamEnd() {}
+
+       /* Delivered periodically and when there's a significant change
+        * in the mapping from frame position to presentation time.
+        * See AudioTimestamp for the information included with event.
+        * TODO not yet implemented.
+        * Parameters:
+        *  - timestamp: New frame position and presentation time mapping.
+        */
+        virtual void onNewTimestamp([[maybe_unused]] AudioTimestamp timestamp) {}
+
+       /* Notification that more data can be given by write().
+        * This event only occurs for TRANSFER_SYNC_NOTIF_CALLBACK.
+        * Similar to onMoreData(); returns the amount of data actually written, in bytes.
+        * Parameters:
+        *  - buffer: Buffer to fill
+        * Returns:
+        * Amount of data actually written in bytes.
+        */
+        virtual size_t onCanWriteMoreData([[maybe_unused]] const AudioTrack::Buffer& buffer) {
+            return 0;
+        }
+    };
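+
+    // A minimal TRANSFER_CALLBACK client sketch (illustrative only; the class name
+    // is hypothetical):
+    //
+    //   class MyTrackCallback : public AudioTrack::IAudioTrackCallback {
+    //     protected:
+    //       size_t onMoreData(const AudioTrack::Buffer& buffer) override {
+    //           // Fill 'buffer' with audio data and return the number of bytes
+    //           // actually written.
+    //           return 0;  // nothing written in this sketch
+    //       }
+    //       void onUnderrun() override { /* count or log underruns */ }
+    //   };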
 
     /* Returns the minimum frame count required for the successful creation of
      * an AudioTrack object.
@@ -257,15 +329,34 @@
                                     audio_channel_mask_t channelMask,
                                     size_t frameCount    = 0,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                    callback_t cbf       = NULL,
-                                    void* user           = NULL,
+                                    const wp<IAudioTrackCallback>& callback = nullptr,
                                     int32_t notificationFrames = 0,
                                     audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
-                                    const audio_offload_info_t *offloadInfo = NULL,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
                                     const AttributionSourceState& attributionSource =
                                         AttributionSourceState(),
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    bool doNotReconnect = false,
+                                    float maxRequiredSpeed = 1.0f,
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
+
+                        AudioTrack( audio_stream_type_t streamType,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    size_t frameCount,
+                                    audio_output_flags_t flags,
+                                    legacy_callback_t cbf,
+                                    void* user = nullptr,
+                                    int32_t notificationFrames = 0,
+                                    audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
+                                    const AttributionSourceState& attributionSource =
+                                        AttributionSourceState(),
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     bool doNotReconnect = false,
                                     float maxRequiredSpeed = 1.0f,
                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
@@ -281,22 +372,39 @@
      * It is recommended to pass a callback function to be notified of playback end by an
      * EVENT_UNDERRUN event.
      */
-
                         AudioTrack( audio_stream_type_t streamType,
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
                                     const sp<IMemory>& sharedBuffer,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                    callback_t cbf      = NULL,
-                                    void* user          = NULL,
+                                    const wp<IAudioTrackCallback>& callback = nullptr,
                                     int32_t notificationFrames = 0,
                                     audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
-                                    const audio_offload_info_t *offloadInfo = NULL,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
                                     const AttributionSourceState& attributionSource =
                                         AttributionSourceState(),
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    bool doNotReconnect = false,
+                                    float maxRequiredSpeed = 1.0f);
+
+
+                        AudioTrack( audio_stream_type_t streamType,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    const sp<IMemory>& sharedBuffer,
+                                    audio_output_flags_t flags,
+                                    legacy_callback_t cbf,
+                                    void* user          = nullptr,
+                                    int32_t notificationFrames = 0,
+                                    audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
+                                    const AttributionSourceState& attributionSource =
+                                        AttributionSourceState(),
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     bool doNotReconnect = false,
                                     float maxRequiredSpeed = 1.0f);
 
@@ -334,20 +442,41 @@
                             audio_channel_mask_t channelMask,
                             size_t frameCount   = 0,
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                            callback_t cbf      = NULL,
-                            void* user          = NULL,
+                            const wp<IAudioTrackCallback>& callback = nullptr,
                             int32_t notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
-                            const audio_offload_info_t *offloadInfo = NULL,
+                            const audio_offload_info_t *offloadInfo = nullptr,
                             const AttributionSourceState& attributionSource =
                                 AttributionSourceState(),
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
                             bool doNotReconnect = false,
                             float maxRequiredSpeed = 1.0f,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
+            status_t    set(audio_stream_type_t streamType,
+                            uint32_t sampleRate,
+                            audio_format_t format,
+                            audio_channel_mask_t channelMask,
+                            size_t frameCount,
+                            audio_output_flags_t flags,
+                            legacy_callback_t callback,
+                            void * user = nullptr,
+                            int32_t notificationFrames = 0,
+                            const sp<IMemory>& sharedBuffer = 0,
+                            bool threadCanCallJava = false,
+                            audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            const audio_offload_info_t *offloadInfo = nullptr,
+                            const AttributionSourceState& attributionSource =
+                                AttributionSourceState(),
+                            const audio_attributes_t* pAttributes = nullptr,
+                            bool doNotReconnect = false,
+                            float maxRequiredSpeed = 1.0f,
+                            audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
     // FIXME(b/169889714): Vendor code depends on the old method signature at link time
             status_t    set(audio_stream_type_t streamType,
                             uint32_t sampleRate,
@@ -355,17 +484,17 @@
                             uint32_t channelMask,
                             size_t frameCount   = 0,
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                            callback_t cbf      = NULL,
-                            void* user          = NULL,
+                            legacy_callback_t cbf = nullptr,
+                            void* user          = nullptr,
                             int32_t notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
-                            const audio_offload_info_t *offloadInfo = NULL,
+                            const audio_offload_info_t *offloadInfo = nullptr,
                             uid_t uid = AUDIO_UID_INVALID,
                             pid_t pid = -1,
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
                             bool doNotReconnect = false,
                             float maxRequiredSpeed = 1.0f,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
@@ -429,8 +558,7 @@
      * less than or equal to the getBufferCapacityInFrames().
      * It may also be adjusted slightly for internal reasons.
      *
-     * Return the final size or a negative error if the track is unitialized
-     * or does not support variable sizes.
+     * Return the final size or a negative value (NO_INIT) if the track is uninitialized.
      */
             ssize_t     setBufferSizeInFrames(size_t size);
 
@@ -1216,9 +1344,8 @@
     }
 
     // for client callback handler
-    callback_t              mCbf;                   // callback handler for events, or NULL
-    void*                   mUserData;
-
+    wp<IAudioTrackCallback> mCallback;                   // callback handler for events, may be null
+    sp<IAudioTrackCallback> mLegacyCallbackWrapper;      // wrapper for legacy callback interface
     // for notification APIs
 
     // next 2 fields are const after constructor or set()
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 9e5019e..b4ee4dc 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -37,10 +37,12 @@
 #include <string>
 #include <vector>
 
+#include <android/content/AttributionSourceState.h>
 #include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerService.h>
 #include <android/media/BpAudioFlingerService.h>
-#include <android/content/AttributionSourceState.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include "android/media/CreateEffectRequest.h"
 #include "android/media/CreateEffectResponse.h"
 #include "android/media/CreateRecordRequest.h"
@@ -63,7 +65,7 @@
 
 // ----------------------------------------------------------------------------
 
-class IAudioFlinger : public RefBase {
+class IAudioFlinger : public virtual RefBase {
 public:
     static constexpr char DEFAULT_SERVICE_NAME[] = "media.audio_flinger";
 
@@ -166,6 +168,7 @@
         sp<IMemory> buffers;
         audio_port_handle_t portId;
         sp<media::IAudioRecord> audioRecord;
+        audio_config_base_t serverConfig;
 
         ConversionResult<media::CreateRecordResponse> toAidl() const;
         static ConversionResult<CreateRecordOutput>
@@ -347,6 +350,14 @@
 
     virtual status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
+
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
+
+    virtual int32_t getAAudioMixerBurstCount() = 0;
+
+    virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
 };
 
 /**
@@ -444,6 +455,14 @@
     status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs) override;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) override;
+
+    int32_t getAAudioMixerBurstCount() override;
+
+    int32_t getAAudioHardwareBurstMinUsec() override;
+
 private:
     const sp<media::IAudioFlingerService> mDelegate;
 };
@@ -528,6 +547,9 @@
             SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
             SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
             UPDATE_SECONDARY_OUTPUTS = media::BnAudioFlingerService::TRANSACTION_updateSecondaryOutputs,
+            GET_MMAP_POLICY_INFOS = media::BnAudioFlingerService::TRANSACTION_getMmapPolicyInfos,
+            GET_AAUDIO_MIXER_BURST_COUNT = media::BnAudioFlingerService::TRANSACTION_getAAudioMixerBurstCount,
+            GET_AAUDIO_HARDWARE_BURST_MIN_USEC = media::BnAudioFlingerService::TRANSACTION_getAAudioHardwareBurstMinUsec,
         };
 
         /**
@@ -569,7 +591,8 @@
     Status createRecord(const media::CreateRecordRequest& request,
                         media::CreateRecordResponse* _aidl_return) override;
     Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
-    Status format(int32_t output, media::audio::common::AudioFormat* _aidl_return) override;
+    Status format(int32_t output,
+                  media::audio::common::AudioFormatDescription* _aidl_return) override;
     Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
     Status latency(int32_t output, int32_t* _aidl_return) override;
     Status setMasterVolume(float value) override;
@@ -578,12 +601,13 @@
     Status masterMute(bool* _aidl_return) override;
     Status setMasterBalance(float balance) override;
     Status getMasterBalance(float* _aidl_return) override;
-    Status setStreamVolume(media::AudioStreamType stream, float value, int32_t output) override;
-    Status setStreamMute(media::AudioStreamType stream, bool muted) override;
-    Status
-    streamVolume(media::AudioStreamType stream, int32_t output, float* _aidl_return) override;
-    Status streamMute(media::AudioStreamType stream, bool* _aidl_return) override;
-    Status setMode(media::AudioMode mode) override;
+    Status setStreamVolume(media::audio::common::AudioStreamType stream,
+                           float value, int32_t output) override;
+    Status setStreamMute(media::audio::common::AudioStreamType stream, bool muted) override;
+    Status streamVolume(media::audio::common::AudioStreamType stream,
+                        int32_t output, float* _aidl_return) override;
+    Status streamMute(media::audio::common::AudioStreamType stream, bool* _aidl_return) override;
+    Status setMode(media::audio::common::AudioMode mode) override;
     Status setMicMute(bool state) override;
     Status getMicMute(bool* _aidl_return) override;
     Status setRecordSilenced(int32_t portId, bool silenced) override;
@@ -591,8 +615,10 @@
     Status
     getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
     Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
-    Status getInputBufferSize(int32_t sampleRate, media::audio::common::AudioFormat format,
-                              int32_t channelMask, int64_t* _aidl_return) override;
+    Status getInputBufferSize(int32_t sampleRate,
+                              const media::audio::common::AudioFormatDescription& format,
+                              const media::audio::common::AudioChannelLayout& channelMask,
+                              int64_t* _aidl_return) override;
     Status openOutput(const media::OpenOutputRequest& request,
                       media::OpenOutputResponse* _aidl_return) override;
     Status openDuplicateOutput(int32_t output1, int32_t output2, int32_t* _aidl_return) override;
@@ -602,7 +628,7 @@
     Status openInput(const media::OpenInputRequest& request,
                      media::OpenInputResponse* _aidl_return) override;
     Status closeInput(int32_t input) override;
-    Status invalidateStream(media::AudioStreamType stream) override;
+    Status invalidateStream(media::audio::common::AudioStreamType stream) override;
     Status setVoiceVolume(float volume) override;
     Status getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) override;
     Status getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) override;
@@ -611,7 +637,8 @@
     Status releaseAudioSessionId(int32_t audioSession, int32_t pid) override;
     Status queryNumberEffects(int32_t* _aidl_return) override;
     Status queryEffect(int32_t index, media::EffectDescriptor* _aidl_return) override;
-    Status getEffectDescriptor(const media::AudioUuid& effectUUID, const media::AudioUuid& typeUUID,
+    Status getEffectDescriptor(const media::audio::common::AudioUuid& effectUUID,
+                               const media::audio::common::AudioUuid& typeUUID,
                                int32_t preferredTypeFlag,
                                media::EffectDescriptor* _aidl_return) override;
     Status createEffect(const media::CreateEffectRequest& request,
@@ -637,6 +664,11 @@
     Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
     Status updateSecondaryOutputs(
             const std::vector<media::TrackSecondaryOutputInfo>& trackSecondaryOutputInfos) override;
+    Status getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *_aidl_return) override;
+    Status getAAudioMixerBurstCount(int32_t* _aidl_return) override;
+    Status getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) override;
 
 private:
     const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
diff --git a/media/libaudioclient/include/media/PolicyAidlConversion.h b/media/libaudioclient/include/media/PolicyAidlConversion.h
index 873f27a..2296fdb 100644
--- a/media/libaudioclient/include/media/PolicyAidlConversion.h
+++ b/media/libaudioclient/include/media/PolicyAidlConversion.h
@@ -23,10 +23,8 @@
 
 #include <android/media/AudioMix.h>
 #include <android/media/AudioMixCallbackFlag.h>
-#include <android/media/AudioMixLatencyClass.h>
 #include <android/media/AudioMixRouteFlag.h>
 #include <android/media/AudioMixType.h>
-#include <android/media/AudioMode.h>
 #include <android/media/AudioOffloadMode.h>
 #include <android/media/AudioPolicyForceUse.h>
 #include <android/media/AudioPolicyForcedConfig.h>
diff --git a/media/libaudioclient/include/media/ToneGenerator.h b/media/libaudioclient/include/media/ToneGenerator.h
index a575616..43c0100 100644
--- a/media/libaudioclient/include/media/ToneGenerator.h
+++ b/media/libaudioclient/include/media/ToneGenerator.h
@@ -28,7 +28,7 @@
 
 namespace android {
 
-class ToneGenerator {
+class ToneGenerator : public AudioTrack::IAudioTrackCallback {
 public:
 
     // List of all available tones
@@ -156,6 +156,9 @@
 
     ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava = false,
             std::string opPackageName = {});
+
+    void onFirstRef() override;
+
     ~ToneGenerator();
 
     bool startTone(tone_type toneType, int durationMs = -1);
@@ -311,6 +314,7 @@
     unsigned int mProcessSize;  // Size of audio blocks generated at a time by audioCallback() (in PCM frames).
     struct timespec mStartTime; // tone start time: needed to guaranty actual tone duration
 
+    size_t onMoreData(const AudioTrack::Buffer& buffer) override;
     bool initAudioTrack();
     static void audioCallback(int event, void* user, void *info);
     bool prepareWave();
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index def7ca6..891293e 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -9,10 +9,35 @@
 
 cc_defaults {
     name: "libaudioclient_tests_defaults",
+    test_suites: ["device-tests"],
     cflags: [
         "-Wall",
         "-Werror",
     ],
+    sanitize: {
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+    },
+}
+
+cc_test {
+    name: "audio_aidl_conversion_tests",
+    defaults: ["libaudioclient_tests_defaults"],
+    srcs: ["audio_aidl_legacy_conversion_tests.cpp"],
+    shared_libs: [
+        "libbinder",
+        "libcutils",
+        "liblog",
+        "libutils",
+    ],
+    static_libs: [
+        "android.media.audio.common.types-V1-cpp",
+        "audioclient-types-aidl-cpp",
+        "libaudioclient_aidl_conversion",
+        "libstagefright_foundation",
+    ],
 }
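+// To run the conversion tests locally (assuming a standard atest setup):
+//   atest audio_aidl_conversion_tests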
 
 cc_test {
@@ -30,8 +55,10 @@
 cc_test {
     name: "test_create_audiotrack",
     defaults: ["libaudioclient_tests_defaults"],
-    srcs: ["test_create_audiotrack.cpp",
-           "test_create_utils.cpp"],
+    srcs: [
+        "test_create_audiotrack.cpp",
+        "test_create_utils.cpp",
+    ],
     header_libs: [
         "libmedia_headers",
         "libmediametrics_headers",
@@ -49,8 +76,10 @@
 cc_test {
     name: "test_create_audiorecord",
     defaults: ["libaudioclient_tests_defaults"],
-    srcs: ["test_create_audiorecord.cpp",
-           "test_create_utils.cpp"],
+    srcs: [
+        "test_create_audiorecord.cpp",
+        "test_create_utils.cpp",
+    ],
     header_libs: [
         "libmedia_headers",
         "libmediametrics_headers",
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
new file mode 100644
index 0000000..997f62a
--- /dev/null
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include <media/AudioCommonTypes.h>
+#include <media/AidlConversion.h>
+
+using namespace android;
+using namespace android::aidl_utils;
+
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::PcmType;
+
+namespace {
+
+template<typename T> size_t hash(const T& t) {
+    return std::hash<T>{}(t);
+}
+
+AudioChannelLayout make_ACL_None() {
+    return AudioChannelLayout{};
+}
+
+AudioChannelLayout make_ACL_Invalid() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::invalid>(0);
+}
+
+AudioChannelLayout make_ACL_Stereo() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+            AudioChannelLayout::LAYOUT_STEREO);
+}
+
+AudioChannelLayout make_ACL_LayoutArbitrary() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+            // Use channels that exist both for input and output,
+            // but don't form a known layout mask.
+            AudioChannelLayout::CHANNEL_FRONT_LEFT |
+            AudioChannelLayout::CHANNEL_FRONT_RIGHT |
+            AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT |
+            AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT);
+}
+
+AudioChannelLayout make_ACL_ChannelIndex2() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(
+            AudioChannelLayout::INDEX_MASK_2);
+}
+
+AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
+    // Use channels 1 and 3.
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(5);
+}
+
+AudioChannelLayout make_ACL_VoiceCall() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(
+            AudioChannelLayout::VOICE_CALL_MONO);
+}
+
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
+        const std::string& connection = "") {
+    AudioDeviceDescription result;
+    result.type = type;
+    result.connection = connection;
+    return result;
+}
+
+AudioDeviceDescription make_ADD_None() {
+    return AudioDeviceDescription{};
+}
+
+AudioDeviceDescription make_ADD_DefaultIn() {
+    return make_AudioDeviceDescription(AudioDeviceType::IN_DEFAULT);
+}
+
+AudioDeviceDescription make_ADD_DefaultOut() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_DEFAULT);
+}
+
+AudioDeviceDescription make_ADD_WiredHeadset() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
+            AudioDeviceDescription::CONNECTION_ANALOG());
+}
+
+AudioDeviceDescription make_ADD_BtScoHeadset() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
+            AudioDeviceDescription::CONNECTION_BT_SCO());
+}
+
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+    AudioFormatDescription result;
+    result.type = type;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+    auto result = make_AudioFormatDescription(AudioFormatType::PCM);
+    result.pcm = pcm;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+    AudioFormatDescription result;
+    result.encoding = encoding;
+    return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
+        const std::string& encoding) {
+    auto result = make_AudioFormatDescription(encoding);
+    result.pcm = transport;
+    return result;
+}
+
+AudioFormatDescription make_AFD_Default() {
+    return AudioFormatDescription{};
+}
+
+AudioFormatDescription make_AFD_Invalid() {
+    return make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID);
+}
+
+AudioFormatDescription make_AFD_Pcm16Bit() {
+    return make_AudioFormatDescription(PcmType::INT_16_BIT);
+}
+
+AudioFormatDescription make_AFD_Bitstream() {
+    return make_AudioFormatDescription("example");
+}
+
+AudioFormatDescription make_AFD_Encap() {
+    return make_AudioFormatDescription(PcmType::INT_16_BIT, "example.encap");
+}
+
+AudioFormatDescription make_AFD_Encap_with_Enc() {
+    auto afd = make_AFD_Encap();
+    afd.encoding += "+example";
+    return afd;
+}
+
+}  // namespace
+
+// Verify that two independently constructed ADDs/AFDs have the same hash.
+// This ensures that regardless of where the ADD/AFD instance originates from,
+// it can be correctly compared to other ADD/AFD instances. Thus, for example,
+// a 16-bit integer format description provided by the HAL is identical to the
+// same format description constructed by the framework.
+class HashIdentityTest : public ::testing::Test {
+  public:
+    template<typename T> void verifyHashIdentity(const std::vector<std::function<T()>>& valueGens) {
+        for (size_t i = 0; i < valueGens.size(); ++i) {
+            for (size_t j = 0; j < valueGens.size(); ++j) {
+                if (i == j) {
+                    EXPECT_EQ(hash(valueGens[i]()), hash(valueGens[i]())) << i;
+                } else {
+                    EXPECT_NE(hash(valueGens[i]()), hash(valueGens[j]())) << i << ", " << j;
+                }
+            }
+        }
+    }
+};
+
+TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
+    verifyHashIdentity<AudioChannelLayout>({
+            make_ACL_None, make_ACL_Invalid, make_ACL_Stereo,
+            make_ACL_LayoutArbitrary, make_ACL_ChannelIndex2,
+            make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
+}
+
+TEST_F(HashIdentityTest, AudioDeviceDescriptionHashIdentity) {
+    verifyHashIdentity<AudioDeviceDescription>({
+            make_ADD_None, make_ADD_DefaultIn, make_ADD_DefaultOut, make_ADD_WiredHeadset,
+            make_ADD_BtScoHeadset});
+}
+
+TEST_F(HashIdentityTest, AudioFormatDescriptionHashIdentity) {
+    verifyHashIdentity<AudioFormatDescription>({
+            make_AFD_Default, make_AFD_Invalid, make_AFD_Pcm16Bit, make_AFD_Bitstream,
+            make_AFD_Encap, make_AFD_Encap_with_Enc});
+}
+
+using ChannelLayoutParam = std::tuple<AudioChannelLayout, bool /*isInput*/>;
+class AudioChannelLayoutRoundTripTest :
+        public testing::TestWithParam<ChannelLayoutParam> {};
+TEST_P(AudioChannelLayoutRoundTripTest, Aidl2Legacy2Aidl) {
+    const auto initial = std::get<0>(GetParam());
+    const bool isInput = std::get<1>(GetParam());
+    auto conv = aidl2legacy_AudioChannelLayout_audio_channel_mask_t(initial, isInput);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(conv.value(), isInput);
+    ASSERT_TRUE(convBack.ok());
+    EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip,
+        AudioChannelLayoutRoundTripTest,
+        testing::Combine(
+                testing::Values(AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
+                        make_ACL_LayoutArbitrary(), make_ACL_ChannelIndex2(),
+                        make_ACL_ChannelIndexArbitrary()),
+                testing::Values(false, true)));
+INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
+        AudioChannelLayoutRoundTripTest,
+        // In legacy constants the voice call is only defined for input.
+        testing::Combine(testing::Values(make_ACL_VoiceCall()), testing::Values(true)));
+
+using ChannelLayoutEdgeCaseParam = std::tuple<int /*legacy*/, bool /*isInput*/, bool /*isValid*/>;
+class AudioChannelLayoutEdgeCaseTest :
+        public testing::TestWithParam<ChannelLayoutEdgeCaseParam> {};
+TEST_P(AudioChannelLayoutEdgeCaseTest, Legacy2Aidl) {
+    const audio_channel_mask_t legacy = static_cast<audio_channel_mask_t>(std::get<0>(GetParam()));
+    const bool isInput = std::get<1>(GetParam());
+    const bool isValid = std::get<2>(GetParam());
+    auto conv = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy, isInput);
+    EXPECT_EQ(isValid, conv.ok());
+}
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutEdgeCase,
+        AudioChannelLayoutEdgeCaseTest,
+        testing::Values(
+                // Valid legacy input masks.
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO, true, true),
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO, true, true),
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_CALL_MONO, true, true),
+                // Valid legacy output masks.
+                std::make_tuple(
+                        // This has the same numerical representation as Mask 'A' below
+                        AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+                        AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT, false, true),
+                std::make_tuple(
+                        // This has the same numerical representation as Mask 'B' below
+                        AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+                        AUDIO_CHANNEL_OUT_TOP_BACK_LEFT, false, true),
+                // Invalid legacy input masks.
+                std::make_tuple(AUDIO_CHANNEL_IN_6, true, false),
+                std::make_tuple(
+                        AUDIO_CHANNEL_IN_6 | AUDIO_CHANNEL_IN_FRONT_PROCESSED, true, false),
+                std::make_tuple(
+                        AUDIO_CHANNEL_IN_PRESSURE | AUDIO_CHANNEL_IN_X_AXIS |
+                        AUDIO_CHANNEL_IN_Y_AXIS | AUDIO_CHANNEL_IN_Z_AXIS, true, false),
+                std::make_tuple(  // Mask 'A'
+                        AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_UPLINK, true, false),
+                std::make_tuple(  // Mask 'B'
+                        AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_DNLINK, true, false)));
+
+class AudioDeviceDescriptionRoundTripTest :
+        public testing::TestWithParam<AudioDeviceDescription> {};
+TEST_P(AudioDeviceDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
+    const auto initial = GetParam();
+    auto conv = aidl2legacy_AudioDeviceDescription_audio_devices_t(initial);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = legacy2aidl_audio_devices_t_AudioDeviceDescription(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioDeviceDescriptionRoundTrip,
+        AudioDeviceDescriptionRoundTripTest,
+        testing::Values(AudioDeviceDescription{}, make_ADD_DefaultIn(),
+                make_ADD_DefaultOut(), make_ADD_WiredHeadset(), make_ADD_BtScoHeadset()));
+
+class AudioFormatDescriptionRoundTripTest :
+        public testing::TestWithParam<AudioFormatDescription> {};
+TEST_P(AudioFormatDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
+    const auto initial = GetParam();
+    auto conv = aidl2legacy_AudioFormatDescription_audio_format_t(initial);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = legacy2aidl_audio_format_t_AudioFormatDescription(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip,
+        AudioFormatDescriptionRoundTripTest,
+        testing::Values(make_AFD_Invalid(), AudioFormatDescription{}, make_AFD_Pcm16Bit()));
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index 3bef55b..727b86f 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -24,9 +24,11 @@
         "libmedia_helper_headers",
     ],
     static_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
     ],
     export_static_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
     ],
     host_supported: true,
@@ -52,6 +54,7 @@
     ],
 
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "libaudioutils",
@@ -63,6 +66,7 @@
     ],
 
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
     ],
diff --git a/media/libaudiofoundation/AudioContainers.cpp b/media/libaudiofoundation/AudioContainers.cpp
index 3df9378..553a319 100644
--- a/media/libaudiofoundation/AudioContainers.cpp
+++ b/media/libaudiofoundation/AudioContainers.cpp
@@ -70,48 +70,39 @@
     return audioDeviceOutAllBleSet;
 }
 
-bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+std::string deviceTypesToString(const DeviceTypeSet &deviceTypes) {
     if (deviceTypes.empty()) {
-        str = "Empty device types";
-        return true;
+        return "Empty device types";
     }
-    bool ret = true;
-    for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
-        std::string deviceTypeStr;
-        ret = audio_is_output_device(*it) ?
-              OutputDeviceConverter::toString(*it, deviceTypeStr) :
-              InputDeviceConverter::toString(*it, deviceTypeStr);
-        if (!ret) {
-            break;
+    std::stringstream ss;
+    for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+        if (it != deviceTypes.begin()) {
+            ss << ", ";
         }
-        str.append(deviceTypeStr);
-        if (++it != deviceTypes.end()) {
-            str.append(" , ");
+        const char* strType = audio_device_to_string(*it);
+        if (strlen(strType) != 0) {
+            ss << strType;
+        } else {
+            ss << "unknown type:0x" << std::hex << *it;
         }
     }
-    if (!ret) {
-        str = "Unknown values";
-    }
-    return ret;
+    return ss.str();
+}
+
+bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+    str = deviceTypesToString(deviceTypes);
+    return true;
 }
 
 std::string dumpDeviceTypes(const DeviceTypeSet &deviceTypes) {
-    std::string ret;
-    for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
-        std::stringstream ss;
-        ss << "0x" << std::hex << (*it);
-        ret.append(ss.str());
-        if (++it != deviceTypes.end()) {
-            ret.append(" , ");
+    std::stringstream ss;
+    for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+        if (it != deviceTypes.begin()) {
+            ss << ", ";
         }
+        ss << "0x" << std::hex << (*it);
     }
-    return ret;
-}
-
-std::string toString(const DeviceTypeSet& deviceTypes) {
-    std::string ret;
-    deviceTypesToString(deviceTypes, ret);
-    return ret;
+    return ss.str();
 }
 
 } // namespace android
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index c5d7da8..4a7e956 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -25,6 +25,9 @@
 
 namespace android {
 
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+
 namespace {
 
 static const std::string SUPPRESSED = "SUPPRESSED";
@@ -97,10 +100,13 @@
 
 std::string AudioDeviceTypeAddr::toString(bool includeSensitiveInfo) const {
     std::stringstream sstream;
-    sstream << "type:0x" << std::hex << mType;
+    sstream << audio_device_to_string(mType);
+    if (sstream.str().empty()) {
+        sstream << "unknown type:0x" << std::hex << mType;
+    }
     // IP and MAC addresses are sensitive information. The sensitive information will be suppressed
     // if `includeSensitiveInfo` is false.
-    sstream << ",@:"
+    sstream << ", @:"
             << (!includeSensitiveInfo && mIsAddressSensitive ? SUPPRESSED : mAddress);
     return sstream.str();
 }
@@ -157,17 +163,16 @@
 }
 
 ConversionResult<AudioDeviceTypeAddr>
-aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl) {
-    audio_devices_t type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.type));
-    return AudioDeviceTypeAddr(type, aidl.address);
+aidl2legacy_AudioDeviceTypeAddress(const AudioDevice& aidl) {
+    audio_devices_t type;
+    std::string address;
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(aidl, &type, &address));
+    return AudioDeviceTypeAddr(type, address);
 }
 
-ConversionResult<media::AudioDevice>
+ConversionResult<AudioDevice>
 legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy) {
-    media::AudioDevice aidl;
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.mType));
-    aidl.address = legacy.getAddress();
-    return aidl;
+    return legacy2aidl_audio_device_AudioDevice(legacy.mType, legacy.getAddress());
 }
 
 } // namespace android
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index 1dee938..47e0edb 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -24,22 +24,18 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
+#include <math.h>
+
 #include <algorithm>
 
 #include <android-base/stringprintf.h>
 #include <media/AudioGain.h>
 #include <utils/Log.h>
 
-#include <math.h>
-
 namespace android {
 
-AudioGain::AudioGain(int index, bool useInChannelMask)
-{
-    mIndex = index;
-    mUseInChannelMask = useInChannelMask;
-    memset(&mGain, 0, sizeof(struct audio_gain));
-}
+AudioGain::AudioGain(int index, bool isInput)
+        : mIndex(index), mIsInput(isInput) {}
 
 void AudioGain::getDefaultConfig(struct audio_gain_config *config)
 {
@@ -49,12 +45,9 @@
     if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
         config->values[0] = mGain.default_value;
     } else {
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
-        }
+        const uint32_t numValues = mIsInput ?
+                audio_channel_count_from_in_mask(mGain.channel_mask) :
+                audio_channel_count_from_out_mask(mGain.channel_mask);
         for (size_t i = 0; i < numValues; i++) {
             config->values[i] = mGain.default_value;
         }
@@ -78,12 +71,9 @@
         if ((config->channel_mask & ~mGain.channel_mask) != 0) {
             return BAD_VALUE;
         }
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(config->channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(config->channel_mask);
-        }
+        const uint32_t numValues = mIsInput ?
+                audio_channel_count_from_in_mask(config->channel_mask) :
+                audio_channel_count_from_out_mask(config->channel_mask);
         for (size_t i = 0; i < numValues; i++) {
             if ((config->values[i] < mGain.min_value) ||
                     (config->values[i] > mGain.max_value)) {
@@ -116,7 +106,7 @@
 bool AudioGain::equals(const sp<AudioGain>& other) const
 {
     return other != nullptr &&
-           mUseInChannelMask == other->mUseInChannelMask &&
+           mIsInput == other->mIsInput &&
            mUseForVolume == other->mUseForVolume &&
            // Compare audio gain
            mGain.mode == other->mGain.mode &&
@@ -129,51 +119,24 @@
            mGain.max_ramp_ms == other->mGain.max_ramp_ms;
 }
 
-status_t AudioGain::writeToParcel(android::Parcel *parcel) const {
-    media::AudioGain parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
+ConversionResult<AudioGain::Aidl> AudioGain::toParcelable() const {
+    media::audio::common::AudioGain aidl = VALUE_OR_RETURN(
+            legacy2aidl_audio_gain_AudioGain(mGain, mIsInput));
+    aidl.useForVolume = mUseForVolume;
+    media::AudioGainSys aidlSys;
+    aidlSys.index = VALUE_OR_RETURN(convertIntegral<int32_t>(mIndex));
+    aidlSys.isInput = mIsInput;
+    return std::make_pair(aidl, aidlSys);
 }
 
-status_t AudioGain::writeToParcelable(media::AudioGain* parcelable) const {
-    parcelable->index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mIndex));
-    parcelable->useInChannelMask = mUseInChannelMask;
-    parcelable->useForVolume = mUseForVolume;
-    parcelable->mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
-    parcelable->channelMask = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
-    parcelable->minValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_value));
-    parcelable->maxValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_value));
-    parcelable->defaultValue = VALUE_OR_RETURN_STATUS(
-            convertIntegral<int32_t>(mGain.default_value));
-    parcelable->stepValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.step_value));
-    parcelable->minRampMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_ramp_ms));
-    parcelable->maxRampMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_ramp_ms));
-    return OK;
-}
-
-status_t AudioGain::readFromParcel(const android::Parcel *parcel) {
-    media::AudioGain parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
-status_t AudioGain::readFromParcelable(const media::AudioGain& parcelable) {
-    mIndex = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.index));
-    mUseInChannelMask = parcelable.useInChannelMask;
-    mUseForVolume = parcelable.useForVolume;
-    mGain.mode = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.mode));
-    mGain.channel_mask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
-    mGain.min_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.minValue));
-    mGain.max_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.maxValue));
-    mGain.default_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.defaultValue));
-    mGain.step_value = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.stepValue));
-    mGain.min_ramp_ms = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.minRampMs));
-    mGain.max_ramp_ms = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.maxRampMs));
-    return OK;
+ConversionResult<sp<AudioGain>> AudioGain::fromParcelable(const AudioGain::Aidl& aidl) {
+    const media::audio::common::AudioGain& hal = aidl.first;
+    const media::AudioGainSys& sys = aidl.second;
+    auto index = VALUE_OR_RETURN(convertIntegral<int>(sys.index));
+    sp<AudioGain> legacy = sp<AudioGain>::make(index, sys.isInput);
+    legacy->mGain = VALUE_OR_RETURN(aidl2legacy_AudioGain_audio_gain(hal, sys.isInput));
+    legacy->mUseForVolume = hal.useForVolume;
+    return legacy;
 }
 
 bool AudioGains::equals(const AudioGains &other) const
@@ -184,59 +147,30 @@
                       });
 }
 
-status_t AudioGains::writeToParcel(android::Parcel *parcel) const {
-    status_t status = NO_ERROR;
-    if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
-    for (const auto &audioGain : *this) {
-        if ((status = parcel->writeParcelable(*audioGain)) != NO_ERROR) {
-            break;
-        }
-    }
-    return status;
-}
-
-status_t AudioGains::readFromParcel(const android::Parcel *parcel) {
-    status_t status = NO_ERROR;
-    this->clear();
-    if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
-    for (size_t i = 0; i < this->size(); i++) {
-        this->at(i) = new AudioGain(0, false);
-        if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
-            this->clear();
-            break;
-        }
-    }
-    return status;
-}
-
 ConversionResult<sp<AudioGain>>
-aidl2legacy_AudioGain(const media::AudioGain& aidl) {
-    sp<AudioGain> legacy = new AudioGain(0, false);
-    status_t status = legacy->readFromParcelable(aidl);
-    if (status != OK) {
-        return base::unexpected(status);
-    }
-    return legacy;
+aidl2legacy_AudioGain(const AudioGain::Aidl& aidl) {
+    return AudioGain::fromParcelable(aidl);
 }
 
-ConversionResult<media::AudioGain>
+ConversionResult<AudioGain::Aidl>
 legacy2aidl_AudioGain(const sp<AudioGain>& legacy) {
-    media::AudioGain aidl;
-    status_t status = legacy->writeToParcelable(&aidl);
-    if (status != OK) {
-        return base::unexpected(status);
-    }
-    return aidl;
+    return legacy->toParcelable();
 }
 
 ConversionResult<AudioGains>
-aidl2legacy_AudioGains(const std::vector<media::AudioGain>& aidl) {
-    return convertContainer<AudioGains>(aidl, aidl2legacy_AudioGain);
+aidl2legacy_AudioGains(const AudioGains::Aidl& aidl) {
+    return convertContainers<AudioGains>(aidl.first, aidl.second,
+            [](const media::audio::common::AudioGain& g,
+               const media::AudioGainSys& gs) {
+                return aidl2legacy_AudioGain(std::make_pair(g, gs));
+            });
 }
 
-ConversionResult<std::vector<media::AudioGain>>
+ConversionResult<AudioGains::Aidl>
 legacy2aidl_AudioGains(const AudioGains& legacy) {
-    return convertContainer<std::vector<media::AudioGain>>(legacy, legacy2aidl_AudioGain);
+    return convertContainerSplit<
+            std::vector<media::audio::common::AudioGain>,
+            std::vector<media::AudioGainSys>>(legacy, legacy2aidl_AudioGain);
 }
 
 } // namespace android
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index fafabd9..4513323 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -18,13 +18,28 @@
 #include <algorithm>
 #include <utility>
 
-#include <android/media/ExtraAudioDescriptor.h>
 #include <android-base/stringprintf.h>
 #include <media/AudioPort.h>
 #include <utils/Log.h>
 
 namespace android {
 
+void AudioPort::setFlags(uint32_t flags)
+{
+    // Force the direct flag if the offload flag is set: offloading implies a direct output
+    // stream, and all common behaviors are driven by checking only the direct flag.
+    // This should normally be set appropriately in the policy configuration file.
+    if (mRole == AUDIO_PORT_ROLE_SOURCE &&
+            (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        flags |= AUDIO_OUTPUT_FLAG_DIRECT;
+    }
+    if (useInputChannelMask()) {
+        mFlags.input = static_cast<audio_input_flags_t>(flags);
+    } else {
+        mFlags.output = static_cast<audio_output_flags_t>(flags);
+    }
+}
+
 void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
 {
     for (const auto& profileToImport : port->mProfiles) {
@@ -147,9 +162,16 @@
     }
 }
 
-void AudioPort::dump(std::string *dst, int spaces, bool verbose) const {
+void AudioPort::dump(std::string *dst, int spaces, const char* extraInfo, bool verbose) const {
     if (!mName.empty()) {
-        dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
+        dst->append(base::StringPrintf("\"%s\"%s", mName.c_str(),
+                        extraInfo != nullptr ? "; " : ""));
+    }
+    if (extraInfo != nullptr) {
+        dst->append(base::StringPrintf("%s", extraInfo));
+    }
+    if (!mName.empty() || extraInfo != nullptr) {
+        dst->append("\n");
     }
     if (verbose) {
         std::string profilesStr;
@@ -196,39 +218,59 @@
            mType == other->getType() &&
            mRole == other->getRole() &&
            mProfiles.equals(other->getAudioProfiles()) &&
+           getFlags() == other->getFlags() &&
            mExtraAudioDescriptors == other->getExtraAudioDescriptors();
 }
 
-status_t AudioPort::writeToParcel(Parcel *parcel) const
-{
-    media::AudioPort parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
-}
-
 status_t AudioPort::writeToParcelable(media::AudioPort* parcelable) const {
-    parcelable->name = mName;
-    parcelable->type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_type_t_AudioPortType(mType));
-    parcelable->role = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
-    parcelable->profiles = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioProfileVector(mProfiles));
-    parcelable->extraAudioDescriptors = mExtraAudioDescriptors;
-    parcelable->gains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
+    parcelable->hal.name = mName;
+    parcelable->sys.type = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_port_type_t_AudioPortType(mType));
+    parcelable->sys.role = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
+    auto aidlProfiles = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_AudioProfileVector(mProfiles, useInputChannelMask()));
+    parcelable->hal.profiles = aidlProfiles.first;
+    parcelable->sys.profiles = aidlProfiles.second;
+    parcelable->hal.flags = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_io_flags_AudioIoFlags(mFlags, useInputChannelMask()));
+    parcelable->hal.extraAudioDescriptors = mExtraAudioDescriptors;
+    auto aidlGains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
+    parcelable->hal.gains = aidlGains.first;
+    parcelable->sys.gains = aidlGains.second;
+    if (mType == AUDIO_PORT_TYPE_MIX) {
+        media::audio::common::AudioPortMixExt mixExt{};
+        mixExt.maxOpenStreamCount = maxOpenCount;
+        mixExt.maxActiveStreamCount = maxActiveCount;
+        mixExt.recommendedMuteDurationMs = recommendedMuteDurationMs;
+        parcelable->hal.ext = media::audio::common::AudioPortExt::make<
+                media::audio::common::AudioPortExt::mix>(mixExt);
+    }
     return OK;
 }
 
-status_t AudioPort::readFromParcel(const Parcel *parcel) {
-    media::AudioPort parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
 status_t AudioPort::readFromParcelable(const media::AudioPort& parcelable) {
-    mName = parcelable.name;
-    mType = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortType_audio_port_type_t(parcelable.type));
-    mRole = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.role));
-    mProfiles = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioProfileVector(parcelable.profiles));
-    mExtraAudioDescriptors = parcelable.extraAudioDescriptors;
-    mGains = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioGains(parcelable.gains));
+    mName = parcelable.hal.name;
+    mType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioPortType_audio_port_type_t(parcelable.sys.type));
+    mRole = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.sys.role));
+    mProfiles = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioProfileVector(
+                    std::make_pair(parcelable.hal.profiles, parcelable.sys.profiles),
+                    useInputChannelMask()));
+    mFlags = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioIoFlags_audio_io_flags(parcelable.hal.flags, useInputChannelMask()));
+    mExtraAudioDescriptors = parcelable.hal.extraAudioDescriptors;
+    mGains = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioGains(std::make_pair(parcelable.hal.gains, parcelable.sys.gains)));
+    if (mType == AUDIO_PORT_TYPE_MIX) {
+        const media::audio::common::AudioPortMixExt& mixExt =
+                parcelable.hal.ext.get<media::audio::common::AudioPortExt::mix>();
+        maxOpenCount = mixExt.maxOpenStreamCount;
+        maxActiveCount = mixExt.maxActiveStreamCount;
+        recommendedMuteDurationMs = mixExt.recommendedMuteDurationMs;
+    }
     return OK;
 }
 
@@ -250,6 +292,9 @@
     if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
         mGain = config->gain;
     }
+    if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+        mFlags = config->flags;
+    }
 
     return NO_ERROR;
 }
@@ -303,6 +348,9 @@
     } else {
         dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
     }
+
+    updateField(mFlags, &audio_port_config::flags,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
 }
 
 bool AudioPortConfig::hasGainController(bool canUseForVolume) const
@@ -315,12 +363,14 @@
                            : audioport->getGains().size() > 0;
 }
 
-bool AudioPortConfig::equals(const sp<AudioPortConfig> &other) const
+bool AudioPortConfig::equals(const sp<AudioPortConfig> &other, bool isInput) const
 {
     return other != nullptr &&
            mSamplingRate == other->getSamplingRate() &&
            mFormat == other->getFormat() &&
            mChannelMask == other->getChannelMask() &&
+           (isInput ? mFlags.input == other->getFlags().input :
+                   mFlags.output == other->getFlags().output) &&
            // Compare audio gain config
            mGain.index == other->mGain.index &&
            mGain.mode == other->mGain.mode &&
@@ -330,54 +380,47 @@
            mGain.ramp_duration_ms == other->mGain.ramp_duration_ms;
 }
 
-status_t AudioPortConfig::writeToParcel(Parcel *parcel) const {
-    media::AudioPortConfig parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
-}
-
-status_t AudioPortConfig::writeToParcelable(media::AudioPortConfig* parcelable) const {
-    parcelable->sampleRate = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
-    parcelable->format = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(mFormat));
+status_t AudioPortConfig::writeToParcelable(
+        media::audio::common::AudioPortConfig* parcelable, bool isInput) const {
+    media::audio::common::Int aidl_sampleRate;
+    aidl_sampleRate.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
+    parcelable->sampleRate = aidl_sampleRate;
+    parcelable->format = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
     parcelable->channelMask = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_channel_mask_t_int32_t(mChannelMask));
+            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(mChannelMask, isInput));
     parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
-    parcelable->gain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.index));
-    parcelable->gain.mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
-    parcelable->gain.channelMask = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
-    parcelable->gain.rampDurationMs = VALUE_OR_RETURN_STATUS(
-            convertIntegral<int32_t>(mGain.ramp_duration_ms));
-    parcelable->gain.values = VALUE_OR_RETURN_STATUS(convertContainer<std::vector<int32_t>>(
-            mGain.values, convertIntegral<int32_t, int>));
+    media::audio::common::AudioGainConfig aidl_gain = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_gain_config_AudioGainConfig(mGain, isInput));
+    parcelable->gain = aidl_gain;
+    parcelable->flags = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_io_flags_AudioIoFlags(mFlags, isInput));
     return OK;
 }
 
-status_t AudioPortConfig::readFromParcel(const Parcel *parcel) {
-    media::AudioPortConfig parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
-status_t AudioPortConfig::readFromParcelable(const media::AudioPortConfig& parcelable) {
-    mSamplingRate = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.sampleRate));
-    mFormat = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioFormat_audio_format_t(parcelable.format));
-    mChannelMask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
-    mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
-    mGain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.index));
-    mGain.mode = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.gain.mode));
-    mGain.channel_mask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_channel_mask_t(parcelable.gain.channelMask));
-    mGain.ramp_duration_ms = VALUE_OR_RETURN_STATUS(
-            convertIntegral<unsigned int>(parcelable.gain.rampDurationMs));
-    if (parcelable.gain.values.size() > std::size(mGain.values)) {
-        return BAD_VALUE;
+status_t AudioPortConfig::readFromParcelable(
+        const media::audio::common::AudioPortConfig& parcelable, bool isInput) {
+    if (parcelable.sampleRate.has_value()) {
+        mSamplingRate = VALUE_OR_RETURN_STATUS(
+                convertIntegral<unsigned int>(parcelable.sampleRate.value().value));
     }
-    for (size_t i = 0; i < parcelable.gain.values.size(); ++i) {
-        mGain.values[i] = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.values[i]));
+    if (parcelable.format.has_value()) {
+        mFormat = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format.value()));
+    }
+    if (parcelable.channelMask.has_value()) {
+        mChannelMask = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+                        parcelable.channelMask.value(), isInput));
+    }
+    mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
+    if (parcelable.gain.has_value()) {
+        mGain = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioGainConfig_audio_gain_config(parcelable.gain.value(), isInput));
+    }
+    if (parcelable.flags.has_value()) {
+        mFlags = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioIoFlags_audio_io_flags(parcelable.flags.value(), isInput));
     }
     return OK;
 }
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 8ac3f73..9a67bb7 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -27,6 +27,8 @@
 
 namespace android {
 
+using media::audio::common::AudioChannelLayout;
+
 bool operator == (const AudioProfile &left, const AudioProfile &right)
 {
     return (left.getFormat() == right.getFormat()) &&
@@ -97,18 +99,14 @@
 
 void AudioProfile::dump(std::string *dst, int spaces) const
 {
-    dst->append(base::StringPrintf("%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
+    dst->append(base::StringPrintf("\"%s\"; ", mName.c_str()));
+    dst->append(base::StringPrintf("%s%s%s%s", mIsDynamicFormat ? "[dynamic format]" : "",
              mIsDynamicChannels ? "[dynamic channels]" : "",
-             mIsDynamicRate ? "[dynamic rates]" : ""));
-    if (mName.length() != 0) {
-        dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
-    }
-    std::string formatLiteral;
-    if (FormatConverter::toString(mFormat, formatLiteral)) {
-        dst->append(base::StringPrintf("%*s- format: %s\n", spaces, "", formatLiteral.c_str()));
-    }
+             mIsDynamicRate ? "[dynamic rates]" : "", isDynamic() ? "; " : ""));
+    dst->append(base::StringPrintf("%s (0x%x)\n", audio_format_to_string(mFormat), mFormat));
+
     if (!mSamplingRates.empty()) {
-        dst->append(base::StringPrintf("%*s- sampling rates:", spaces, ""));
+        dst->append(base::StringPrintf("%*ssampling rates: ", spaces, ""));
         for (auto it = mSamplingRates.begin(); it != mSamplingRates.end();) {
             dst->append(base::StringPrintf("%d", *it));
             dst->append(++it == mSamplingRates.end() ? "" : ", ");
@@ -117,7 +115,7 @@
     }
 
     if (!mChannelMasks.empty()) {
-        dst->append(base::StringPrintf("%*s- channel masks:", spaces, ""));
+        dst->append(base::StringPrintf("%*schannel masks: ", spaces, ""));
         for (auto it = mChannelMasks.begin(); it != mChannelMasks.end();) {
             dst->append(base::StringPrintf("0x%04x", *it));
             dst->append(++it == mChannelMasks.end() ? "" : ", ");
@@ -126,7 +124,7 @@
     }
 
     dst->append(base::StringPrintf(
-            "%*s- encapsulation type: %#x\n", spaces, "", mEncapsulationType));
+             "%*s%s\n", spaces, "", audio_encapsulation_type_to_string(mEncapsulationType)));
 }
 
 bool AudioProfile::equals(const sp<AudioProfile>& other) const
@@ -154,67 +152,67 @@
     return *this;
 }
 
-status_t AudioProfile::writeToParcel(Parcel *parcel) const {
-    media::AudioProfile parcelable = VALUE_OR_RETURN_STATUS(toParcelable());
-    return parcelable.writeToParcel(parcel);
- }
-
-ConversionResult<media::AudioProfile>
-AudioProfile::toParcelable() const {
-    media::AudioProfile parcelable;
+ConversionResult<AudioProfile::Aidl>
+AudioProfile::toParcelable(bool isInput) const {
+    media::audio::common::AudioProfile parcelable;
     parcelable.name = mName;
-    parcelable.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(mFormat));
+    parcelable.format = VALUE_OR_RETURN(
+            legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
+    // Note: legacy 'audio_profile' imposes a limit on the number of
+    // channel masks and sampling rates. That's why it's not used here
+    // and conversions are performed directly on the fields instead
+    // of using 'legacy2aidl_audio_profile_AudioProfile' from AidlConversion.
     parcelable.channelMasks = VALUE_OR_RETURN(
-            convertContainer<std::vector<int32_t>>(mChannelMasks,
-                                                   legacy2aidl_audio_channel_mask_t_int32_t));
-    parcelable.samplingRates = VALUE_OR_RETURN(
+            convertContainer<std::vector<AudioChannelLayout>>(
+                    mChannelMasks,
+                    [isInput](audio_channel_mask_t m) {
+                        return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+                    }));
+    parcelable.sampleRates = VALUE_OR_RETURN(
             convertContainer<std::vector<int32_t>>(mSamplingRates,
                                                    convertIntegral<int32_t, uint32_t>));
-    parcelable.isDynamicFormat = mIsDynamicFormat;
-    parcelable.isDynamicChannels = mIsDynamicChannels;
-    parcelable.isDynamicRate = mIsDynamicRate;
     parcelable.encapsulationType = VALUE_OR_RETURN(
             legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(mEncapsulationType));
-    return parcelable;
+    media::AudioProfileSys parcelableSys;
+    parcelableSys.isDynamicFormat = mIsDynamicFormat;
+    parcelableSys.isDynamicChannels = mIsDynamicChannels;
+    parcelableSys.isDynamicRate = mIsDynamicRate;
+    return std::make_pair(parcelable, parcelableSys);
 }
 
-status_t AudioProfile::readFromParcel(const Parcel *parcel) {
-    media::AudioProfile parcelable;
-    if (status_t status = parcelable.readFromParcel(parcel); status != OK) {
-        return status;
-    }
-    *this = *VALUE_OR_RETURN_STATUS(fromParcelable(parcelable));
-    return OK;
-}
-
-ConversionResult<sp<AudioProfile>>
-AudioProfile::fromParcelable(const media::AudioProfile& parcelable) {
+ConversionResult<sp<AudioProfile>> AudioProfile::fromParcelable(
+        const AudioProfile::Aidl& aidl, bool isInput) {
     sp<AudioProfile> legacy = new AudioProfile();
+    const auto& parcelable = aidl.first;
     legacy->mName = parcelable.name;
-    legacy->mFormat = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(parcelable.format));
+    legacy->mFormat = VALUE_OR_RETURN(
+            aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format));
     legacy->mChannelMasks = VALUE_OR_RETURN(
             convertContainer<ChannelMaskSet>(parcelable.channelMasks,
-                                             aidl2legacy_int32_t_audio_channel_mask_t));
+                    [isInput](const AudioChannelLayout& l) {
+                        return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+                    }));
     legacy->mSamplingRates = VALUE_OR_RETURN(
-            convertContainer<SampleRateSet>(parcelable.samplingRates,
+            convertContainer<SampleRateSet>(parcelable.sampleRates,
                                             convertIntegral<uint32_t, int32_t>));
-    legacy->mIsDynamicFormat = parcelable.isDynamicFormat;
-    legacy->mIsDynamicChannels = parcelable.isDynamicChannels;
-    legacy->mIsDynamicRate = parcelable.isDynamicRate;
     legacy->mEncapsulationType = VALUE_OR_RETURN(
             aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
                     parcelable.encapsulationType));
+    const auto& parcelableSys = aidl.second;
+    legacy->mIsDynamicFormat = parcelableSys.isDynamicFormat;
+    legacy->mIsDynamicChannels = parcelableSys.isDynamicChannels;
+    legacy->mIsDynamicRate = parcelableSys.isDynamicRate;
     return legacy;
 }
 
 ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl) {
-    return AudioProfile::fromParcelable(aidl);
+aidl2legacy_AudioProfile(const AudioProfile::Aidl& aidl, bool isInput) {
+    return AudioProfile::fromParcelable(aidl, isInput);
 }
 
-ConversionResult<media::AudioProfile>
-legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy) {
-    return legacy->toParcelable();
+ConversionResult<AudioProfile::Aidl>
+legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput) {
+    return legacy->toParcelable(isInput);
 }
 
 ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
@@ -319,42 +317,16 @@
 
 void AudioProfileVector::dump(std::string *dst, int spaces) const
 {
-    dst->append(base::StringPrintf("%*s- Profiles:\n", spaces, ""));
+    dst->append(base::StringPrintf("%*s- Profiles (%zu):\n", spaces - 2, "", size()));
     for (size_t i = 0; i < size(); i++) {
-        dst->append(base::StringPrintf("%*sProfile %zu:", spaces + 4, "", i));
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+        dst->append(prefix);
         std::string profileStr;
-        at(i)->dump(&profileStr, spaces + 8);
+        at(i)->dump(&profileStr, prefix.size());
         dst->append(profileStr);
     }
 }
 
-status_t AudioProfileVector::writeToParcel(Parcel *parcel) const
-{
-    status_t status = NO_ERROR;
-    if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
-    for (const auto &audioProfile : *this) {
-        if ((status = parcel->writeParcelable(*audioProfile)) != NO_ERROR) {
-            break;
-        }
-    }
-    return status;
-}
-
-status_t AudioProfileVector::readFromParcel(const Parcel *parcel)
-{
-    status_t status = NO_ERROR;
-    this->clear();
-    if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
-    for (size_t i = 0; i < this->size(); ++i) {
-        this->at(i) = new AudioProfile(AUDIO_FORMAT_DEFAULT, AUDIO_CHANNEL_NONE, 0 /*sampleRate*/);
-        if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
-            this->clear();
-            break;
-        }
-    }
-    return status;
-}
-
 bool AudioProfileVector::equals(const AudioProfileVector& other) const
 {
     return std::equal(begin(), end(), other.begin(), other.end(),
@@ -364,13 +336,22 @@
 }
 
 ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl) {
-    return convertContainer<AudioProfileVector>(aidl, aidl2legacy_AudioProfile);
+aidl2legacy_AudioProfileVector(const AudioProfileVector::Aidl& aidl, bool isInput) {
+    return convertContainers<AudioProfileVector>(aidl.first, aidl.second,
+            [isInput](const media::audio::common::AudioProfile& p,
+                      const media::AudioProfileSys& ps) {
+                return aidl2legacy_AudioProfile(std::make_pair(p, ps), isInput);
+            });
 }
 
-ConversionResult<std::vector<media::AudioProfile>>
-legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy) {
-    return convertContainer<std::vector<media::AudioProfile>>(legacy, legacy2aidl_AudioProfile);
+ConversionResult<AudioProfileVector::Aidl>
+legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput) {
+    return convertContainerSplit<
+            std::vector<media::audio::common::AudioProfile>,
+            std::vector<media::AudioProfileSys>>(legacy,
+            [isInput](const sp<AudioProfile>& p) {
+                return legacy2aidl_AudioProfile(p, isInput);
+            });
 }
 
 AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 5cfea81..5ffbffc 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -30,16 +30,20 @@
 {
 }
 
-DeviceDescriptorBase::DeviceDescriptorBase(audio_devices_t type, const std::string& address) :
-        DeviceDescriptorBase(AudioDeviceTypeAddr(type, address))
+DeviceDescriptorBase::DeviceDescriptorBase(
+        audio_devices_t type, const std::string& address,
+        const FormatVector &encodedFormats) :
+        DeviceDescriptorBase(AudioDeviceTypeAddr(type, address), encodedFormats)
 {
 }
 
-DeviceDescriptorBase::DeviceDescriptorBase(const AudioDeviceTypeAddr &deviceTypeAddr) :
+DeviceDescriptorBase::DeviceDescriptorBase(
+        const AudioDeviceTypeAddr &deviceTypeAddr, const FormatVector &encodedFormats) :
         AudioPort("", AUDIO_PORT_TYPE_DEVICE,
                   audio_is_output_device(deviceTypeAddr.mType) ? AUDIO_PORT_ROLE_SINK :
                                          AUDIO_PORT_ROLE_SOURCE),
-        mDeviceTypeAddr(deviceTypeAddr)
+        mDeviceTypeAddr(deviceTypeAddr),
+        mEncodedFormats(encodedFormats)
 {
     if (mDeviceTypeAddr.address().empty() && audio_is_remote_submix_device(mDeviceTypeAddr.mType)) {
         mDeviceTypeAddr.setAddress("0");
@@ -106,32 +110,23 @@
     return NO_ERROR;
 }
 
-void DeviceDescriptorBase::dump(std::string *dst, int spaces, int index,
+void DeviceDescriptorBase::dump(std::string *dst, int spaces,
                                 const char* extraInfo, bool verbose) const
 {
-    dst->append(base::StringPrintf("%*sDevice %d:\n", spaces, "", index + 1));
     if (mId != 0) {
-        dst->append(base::StringPrintf("%*s- id: %2d\n", spaces, "", mId));
+        dst->append(base::StringPrintf("Port ID: %d; ", mId));
     }
-
     if (extraInfo != nullptr) {
-        dst->append(extraInfo);
+        dst->append(base::StringPrintf("%s; ", extraInfo));
     }
-
-    dst->append(base::StringPrintf("%*s- type: %-48s\n",
-            spaces, "", ::android::toString(mDeviceTypeAddr.mType).c_str()));
+    dst->append(base::StringPrintf("{%s}\n",
+                    mDeviceTypeAddr.toString(true /*includeSensitiveInfo*/).c_str()));
 
     dst->append(base::StringPrintf(
-            "%*s- supported encapsulation modes: %u\n", spaces, "", mEncapsulationModes));
-    dst->append(base::StringPrintf(
-            "%*s- supported encapsulation metadata types: %u\n",
-            spaces, "", mEncapsulationMetadataTypes));
+                    "%*sEncapsulation modes: %u, metadata types: %u\n", spaces, "",
+                    mEncapsulationModes, mEncapsulationMetadataTypes));
 
-    if (mDeviceTypeAddr.address().size() != 0) {
-        dst->append(base::StringPrintf(
-                "%*s- address: %-32s\n", spaces, "", mDeviceTypeAddr.getAddress()));
-    }
-    AudioPort::dump(dst, spaces, verbose);
+    AudioPort::dump(dst, spaces, nullptr, verbose);
 }
 
 std::string DeviceDescriptorBase::toString(bool includeSensitiveInfo) const
@@ -148,60 +143,83 @@
     AudioPort::log("  ");
 }
 
+template<typename T>
+bool checkEqual(const T& f1, const T& f2)
+{
+    std::set<typename T::value_type> s1(f1.begin(), f1.end());
+    std::set<typename T::value_type> s2(f2.begin(), f2.end());
+    return s1 == s2;
+}
+
 bool DeviceDescriptorBase::equals(const sp<DeviceDescriptorBase> &other) const
 {
     return other != nullptr &&
            static_cast<const AudioPort*>(this)->equals(other) &&
-           static_cast<const AudioPortConfig*>(this)->equals(other) &&
-           mDeviceTypeAddr.equals(other->mDeviceTypeAddr);
+           static_cast<const AudioPortConfig*>(this)->equals(other, useInputChannelMask()) &&
+           mDeviceTypeAddr.equals(other->mDeviceTypeAddr) &&
+           checkEqual(mEncodedFormats, other->mEncodedFormats);
 }
 
-
-status_t DeviceDescriptorBase::writeToParcel(Parcel *parcel) const
+bool DeviceDescriptorBase::supportsFormat(audio_format_t format)
 {
-    media::AudioPort parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
+    if (mEncodedFormats.empty()) {
+        return true;
+    }
+
+    for (const auto& devFormat : mEncodedFormats) {
+        if (devFormat == format) {
+            return true;
+        }
+    }
+    return false;
 }
 
 status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
     AudioPort::writeToParcelable(parcelable);
-    AudioPortConfig::writeToParcelable(&parcelable->activeConfig);
-    parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+    AudioPortConfig::writeToParcelable(&parcelable->sys.activeConfig.hal, useInputChannelMask());
+    parcelable->hal.id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+    parcelable->sys.activeConfig.hal.portId = parcelable->hal.id;
 
-    media::AudioPortDeviceExt ext;
-    ext.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
-    ext.encapsulationModes = VALUE_OR_RETURN_STATUS(
+    media::audio::common::AudioPortDeviceExt deviceExt;
+    deviceExt.device = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
+    deviceExt.encodedFormats = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<media::audio::common::AudioFormatDescription>>(
+                    mEncodedFormats, legacy2aidl_audio_format_t_AudioFormatDescription));
+    UNION_SET(parcelable->hal.ext, device, deviceExt);
+    media::AudioPortDeviceExtSys deviceSys;
+    deviceSys.encapsulationModes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMode_mask(mEncapsulationModes));
-    ext.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
+    deviceSys.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMetadataType_mask(mEncapsulationMetadataTypes));
-    UNION_SET(parcelable->ext, device, std::move(ext));
+    UNION_SET(parcelable->sys.ext, device, deviceSys);
     return OK;
 }
 
-status_t DeviceDescriptorBase::readFromParcel(const Parcel *parcel) {
-    media::AudioPort parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
 status_t DeviceDescriptorBase::readFromParcelable(const media::AudioPort& parcelable) {
-    if (parcelable.type != media::AudioPortType::DEVICE) {
+    if (parcelable.sys.type != media::AudioPortType::DEVICE) {
         return BAD_VALUE;
     }
     status_t status = AudioPort::readFromParcelable(parcelable)
-                      ?: AudioPortConfig::readFromParcelable(parcelable.activeConfig);
+            ?: AudioPortConfig::readFromParcelable(
+                    parcelable.sys.activeConfig.hal, useInputChannelMask());
     if (status != OK) {
         return status;
     }
 
-    media::AudioPortDeviceExt ext = VALUE_OR_RETURN_STATUS(UNION_GET(parcelable.ext, device));
+    media::audio::common::AudioPortDeviceExt deviceExt = VALUE_OR_RETURN_STATUS(
+            UNION_GET(parcelable.hal.ext, device));
     mDeviceTypeAddr = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioDeviceTypeAddress(ext.device));
+            aidl2legacy_AudioDeviceTypeAddress(deviceExt.device));
+    mEncodedFormats = VALUE_OR_RETURN_STATUS(
+            convertContainer<FormatVector>(deviceExt.encodedFormats,
+                    aidl2legacy_AudioFormatDescription_audio_format_t));
+    media::AudioPortDeviceExtSys deviceSys = VALUE_OR_RETURN_STATUS(
+            UNION_GET(parcelable.sys.ext, device));
     mEncapsulationModes = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioEncapsulationMode_mask(ext.encapsulationModes));
+            aidl2legacy_AudioEncapsulationMode_mask(deviceSys.encapsulationModes));
     mEncapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioEncapsulationMetadataType_mask(ext.encapsulationMetadataTypes));
+            aidl2legacy_AudioEncapsulationMetadataType_mask(deviceSys.encapsulationMetadataTypes));
     return OK;
 }
 
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 60b42fb..a9c7824 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -111,25 +111,7 @@
     return types;
 }
 
-// FIXME: This is temporary helper function. Remove this when getting rid of all
-//  bit mask usages of audio device types.
-static inline DeviceTypeSet deviceTypesFromBitMask(audio_devices_t types) {
-    DeviceTypeSet deviceTypes;
-    if ((types & AUDIO_DEVICE_BIT_IN) == 0) {
-        for (auto deviceType : AUDIO_DEVICE_OUT_ALL_ARRAY) {
-            if ((types & deviceType) == deviceType) {
-                deviceTypes.insert(deviceType);
-            }
-        }
-    } else {
-        for (auto deviceType : AUDIO_DEVICE_IN_ALL_ARRAY) {
-            if ((types & deviceType) == deviceType) {
-                deviceTypes.insert(deviceType);
-            }
-        }
-    }
-    return deviceTypes;
-}
+std::string deviceTypesToString(const DeviceTypeSet& deviceTypes);
 
 bool deviceTypesToString(const DeviceTypeSet& deviceTypes, std::string &str);
 
@@ -138,7 +120,9 @@
 /**
  * Return human readable string for device types.
  */
-std::string toString(const DeviceTypeSet& deviceTypes);
+inline std::string toString(const DeviceTypeSet& deviceTypes) {
+    return deviceTypesToString(deviceTypes);
+}
 
 
 } // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
index 8edcc58..11aa222 100644
--- a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
+++ b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
@@ -19,7 +19,7 @@
 #include <string>
 #include <vector>
 
-#include <android/media/AudioDevice.h>
+#include <android/media/audio/common/AudioDevice.h>
 #include <binder/Parcelable.h>
 #include <binder/Parcel.h>
 #include <media/AudioContainers.h>
@@ -32,6 +32,7 @@
 class AudioDeviceTypeAddr : public Parcelable {
 public:
     AudioDeviceTypeAddr() = default;
+    AudioDeviceTypeAddr(const AudioDeviceTypeAddr&) = default;
 
     AudioDeviceTypeAddr(audio_devices_t type, const std::string& address);
 
@@ -88,8 +89,8 @@
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioDeviceTypeAddr>
-aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl);
-ConversionResult<media::AudioDevice>
+aidl2legacy_AudioDeviceTypeAddress(const media::audio::common::AudioDevice& aidl);
+ConversionResult<media::audio::common::AudioDevice>
 legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy);
 
 } // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
index a06b686..10088f2 100644
--- a/media/libaudiofoundation/include/media/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -16,23 +16,23 @@
 
 #pragma once
 
-#include <android/media/AudioGain.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android/media/AudioGainSys.h>
 #include <media/AidlConversion.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <system/audio.h>
-#include <string>
-#include <vector>
 
 namespace android {
 
-class AudioGain: public RefBase, public Parcelable
+class AudioGain: public RefBase
 {
 public:
-    AudioGain(int index, bool useInChannelMask);
-    virtual ~AudioGain() {}
+    AudioGain(int index, bool isInput);
+    virtual ~AudioGain() = default;
 
     void setMode(audio_gain_mode_t mode) { mGain.mode = mode; }
     const audio_gain_mode_t &getMode() const { return mGain.mode; }
@@ -71,26 +71,24 @@
 
     bool equals(const sp<AudioGain>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-
-    status_t writeToParcelable(media::AudioGain* parcelable) const;
-    status_t readFromParcelable(const media::AudioGain& parcelable);
+    using Aidl = std::pair<media::audio::common::AudioGain, media::AudioGainSys>;
+    ConversionResult<Aidl> toParcelable() const;
+    static ConversionResult<sp<AudioGain>> fromParcelable(const Aidl& aidl);
 
 private:
     int               mIndex;
-    struct audio_gain mGain;
-    bool              mUseInChannelMask;
+    bool              mIsInput;
+    struct audio_gain mGain = {};
     bool              mUseForVolume = false;
 };
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<sp<AudioGain>>
-aidl2legacy_AudioGain(const media::AudioGain& aidl);
-ConversionResult<media::AudioGain>
+aidl2legacy_AudioGain(const AudioGain::Aidl& aidl);
+ConversionResult<AudioGain::Aidl>
 legacy2aidl_AudioGain(const sp<AudioGain>& legacy);
 
-class AudioGains : public std::vector<sp<AudioGain> >, public Parcelable
+class AudioGains : public std::vector<sp<AudioGain>>
 {
 public:
     bool canUseForVolume() const
@@ -103,7 +101,7 @@
         return false;
     }
 
-    int32_t add(const sp<AudioGain> gain)
+    int32_t add(const sp<AudioGain>& gain)
     {
         push_back(gain);
         return 0;
@@ -111,14 +109,15 @@
 
     bool equals(const AudioGains& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
+    using Aidl = std::pair<
+            std::vector<media::audio::common::AudioGain>,
+            std::vector<media::AudioGainSys>>;
 };
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioGains>
-aidl2legacy_AudioGains(const std::vector<media::AudioGain>& aidl);
-ConversionResult<std::vector<media::AudioGain>>
+aidl2legacy_AudioGains(const AudioGains::Aidl& aidl);
+ConversionResult<AudioGains::Aidl>
 legacy2aidl_AudioGains(const AudioGains& legacy);
 
 } // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
index 1cee1c9..d6a098f 100644
--- a/media/libaudiofoundation/include/media/AudioPort.h
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -21,7 +21,7 @@
 
 #include <android/media/AudioPort.h>
 #include <android/media/AudioPortConfig.h>
-#include <android/media/ExtraAudioDescriptor.h>
+#include <android/media/audio/common/ExtraAudioDescriptor.h>
 #include <binder/Parcel.h>
 #include <binder/Parcelable.h>
 #include <media/AudioGain.h>
@@ -33,7 +33,7 @@
 
 namespace android {
 
-class AudioPort : public virtual RefBase, public virtual Parcelable
+class AudioPort : public virtual RefBase
 {
 public:
     AudioPort(const std::string& name, audio_port_type_t type,  audio_port_role_t role) :
@@ -47,6 +47,9 @@
     audio_port_type_t getType() const { return mType; }
     audio_port_role_t getRole() const { return mRole; }
 
+    virtual void setFlags(uint32_t flags);
+    uint32_t getFlags() const { return useInputChannelMask() ? mFlags.input : mFlags.output; }
+
     void setGains(const AudioGains &gains) { mGains = gains; }
     const AudioGains &getGains() const { return mGains; }
 
@@ -69,10 +72,10 @@
     AudioProfileVector &getAudioProfiles() { return mProfiles; }
 
     void setExtraAudioDescriptors(
-            const std::vector<media::ExtraAudioDescriptor> extraAudioDescriptors) {
+            const std::vector<media::audio::common::ExtraAudioDescriptor> extraAudioDescriptors) {
         mExtraAudioDescriptors = extraAudioDescriptors;
     }
-    std::vector<media::ExtraAudioDescriptor> &getExtraAudioDescriptors() {
+    std::vector<media::audio::common::ExtraAudioDescriptor> &getExtraAudioDescriptors() {
         return mExtraAudioDescriptors;
     }
 
@@ -93,19 +96,47 @@
                 ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SINK));
     }
 
-    void dump(std::string *dst, int spaces, bool verbose = true) const;
+    bool isDirectOutput() const
+    {
+        return (mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+                ((mFlags.output & AUDIO_OUTPUT_FLAG_DIRECT) != 0);
+    }
+
+    bool isMmap() const
+    {
+        return (mType == AUDIO_PORT_TYPE_MIX)
+                && (((mRole == AUDIO_PORT_ROLE_SOURCE) &&
+                        ((mFlags.output & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0))
+                    || ((mRole == AUDIO_PORT_ROLE_SINK) &&
+                        ((mFlags.input & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0)));
+    }
+
+    void dump(std::string *dst, int spaces,
+              const char* extraInfo = nullptr, bool verbose = true) const;
 
     void log(const char* indent) const;
 
     bool equals(const sp<AudioPort>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-
     status_t writeToParcelable(media::AudioPort* parcelable) const;
     status_t readFromParcelable(const media::AudioPort& parcelable);
 
     AudioGains mGains; // gain controllers
+    // Maximum number of input or output streams that can be simultaneously
+    // opened for this profile. By convention 0 means no limit. To respect
+    // legacy behavior, initialized to 1 for output profiles and 0 for input
+    // profiles
+    // FIXME: IOProfile code used the same value for both cases.
+    uint32_t maxOpenCount = 1;
+    // Maximum number of input or output streams that can be simultaneously
+    // active for this profile. By convention 0 means no limit. To respect
+    // legacy behavior, initialized to 0 for output profiles and 1 for input
+    // profiles
+    // FIXME: IOProfile code used the same value for both cases.
+    uint32_t maxActiveCount = 1;
+    // Mute duration while changing device on this output profile.
+    uint32_t recommendedMuteDurationMs = 0;
+
 protected:
     std::string  mName;
     audio_port_type_t mType;
@@ -114,7 +145,8 @@
 
     // Audio capabilities that are defined by hardware descriptors when the format is unrecognized
     // by the platform, e.g. short audio descriptor in EDID for HDMI.
-    std::vector<media::ExtraAudioDescriptor> mExtraAudioDescriptors;
+    std::vector<media::audio::common::ExtraAudioDescriptor> mExtraAudioDescriptors;
+    union audio_io_flags mFlags = { .output = AUDIO_OUTPUT_FLAG_NONE };
 private:
     template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
                                         || std::is_same<T, struct audio_port_v7>::value, int> = 0>
@@ -130,7 +162,7 @@
 };
 
 
-class AudioPortConfig : public virtual RefBase, public virtual Parcelable
+class AudioPortConfig : public virtual RefBase
 {
 public:
     virtual ~AudioPortConfig() = default;
@@ -147,15 +179,16 @@
     audio_format_t getFormat() const { return mFormat; }
     audio_channel_mask_t getChannelMask() const { return mChannelMask; }
     audio_port_handle_t getId() const { return mId; }
+    audio_io_flags getFlags() const { return mFlags; }
 
     bool hasGainController(bool canUseForVolume = false) const;
 
-    bool equals(const sp<AudioPortConfig>& other) const;
+    bool equals(const sp<AudioPortConfig>& other, bool isInput) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-    status_t writeToParcelable(media::AudioPortConfig* parcelable) const;
-    status_t readFromParcelable(const media::AudioPortConfig& parcelable);
+    status_t writeToParcelable(
+            media::audio::common::AudioPortConfig* parcelable, bool isInput) const;
+    status_t readFromParcelable(
+            const media::audio::common::AudioPortConfig& parcelable, bool isInput);
 
 protected:
     unsigned int mSamplingRate = 0u;
@@ -163,6 +196,7 @@
     audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
     audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
     struct audio_gain_config mGain = { .index = -1 };
+    union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
 };
 
 } // namespace android
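The flag handling added above is direction-dependent: getFlags() selects the union member via useInputChannelMask(), and the new isDirectOutput()/isMmap() helpers test role-specific bits. A hedged sketch using only the declarations shown here; setFlags() is assumed (its definition is not in this diff) to store into the direction-appropriate member of the audio_io_flags union:

    #include <media/AudioPort.h>

    void inspectPortFlags() {
        android::sp<android::AudioPort> port = new android::AudioPort(
                "direct output", AUDIO_PORT_TYPE_MIX, AUDIO_PORT_ROLE_SOURCE);

        // A mix/source port is an output, so the flags are expected to land in mFlags.output.
        port->setFlags(AUDIO_OUTPUT_FLAG_DIRECT);

        bool direct = port->isDirectOutput();  // true: MIX source with the DIRECT bit set
        bool mmap = port->isMmap();            // false: no MMAP_NOIRQ bit in either direction
        (void)direct;
        (void)mmap;
    }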
diff --git a/media/libaudiofoundation/include/media/AudioProfile.h b/media/libaudiofoundation/include/media/AudioProfile.h
index 6a36e78..d7cddb7 100644
--- a/media/libaudiofoundation/include/media/AudioProfile.h
+++ b/media/libaudiofoundation/include/media/AudioProfile.h
@@ -17,11 +17,10 @@
 #pragma once
 
 #include <string>
+#include <utility>
 #include <vector>
 
-#include <android/media/AudioProfile.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <android/media/AudioProfileSys.h>
 #include <media/AidlConversion.h>
 #include <media/AudioContainers.h>
 #include <system/audio.h>
@@ -29,7 +28,7 @@
 
 namespace android {
 
-class AudioProfile final : public RefBase, public Parcelable
+class AudioProfile final : public RefBase
 {
 public:
     static sp<AudioProfile> createFullDynamic(audio_format_t dynamicFormat = AUDIO_FORMAT_DEFAULT);
@@ -70,7 +69,7 @@
     void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
     bool isDynamicFormat() const { return mIsDynamicFormat; }
 
-    bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
+    bool isDynamic() const { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
 
     audio_encapsulation_type_t getEncapsulationType() const { return mEncapsulationType; }
     void setEncapsulationType(audio_encapsulation_type_t encapsulationType) {
@@ -81,11 +80,10 @@
 
     bool equals(const sp<AudioProfile>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-
-    ConversionResult<media::AudioProfile> toParcelable() const;
-    static ConversionResult<sp<AudioProfile>> fromParcelable(const media::AudioProfile& parcelable);
+    using Aidl = std::pair<media::audio::common::AudioProfile, media::AudioProfileSys>;
+    ConversionResult<Aidl> toParcelable(bool isInput) const;
+    static ConversionResult<sp<AudioProfile>> fromParcelable(
+            const Aidl& aidl, bool isInput);
 
 private:
 
@@ -106,11 +104,11 @@
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl);
-ConversionResult<media::AudioProfile>
-legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy);
+aidl2legacy_AudioProfile(const AudioProfile::Aidl& aidl, bool isInput);
+ConversionResult<AudioProfile::Aidl>
+legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput);
 
-class AudioProfileVector : public std::vector<sp<AudioProfile>>, public Parcelable
+class AudioProfileVector : public std::vector<sp<AudioProfile>>
 {
 public:
     virtual ~AudioProfileVector() = default;
@@ -137,17 +135,18 @@
 
     bool equals(const AudioProfileVector& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
+    using Aidl = std::pair<
+            std::vector<media::audio::common::AudioProfile>,
+            std::vector<media::AudioProfileSys>>;
 };
 
 bool operator == (const AudioProfile &left, const AudioProfile &right);
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl);
-ConversionResult<std::vector<media::AudioProfile>>
-legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy);
+aidl2legacy_AudioProfileVector(const AudioProfileVector::Aidl& aidl, bool isInput);
+ConversionResult<AudioProfileVector::Aidl>
+legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput);
 
 AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
                                           const AudioProfileVector& profiles2);
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
index 140ce36..1f0c768 100644
--- a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -36,16 +36,21 @@
 public:
      // Note that empty name refers by convention to a generic device.
     explicit DeviceDescriptorBase(audio_devices_t type);
-    DeviceDescriptorBase(audio_devices_t type, const std::string& address);
-    explicit DeviceDescriptorBase(const AudioDeviceTypeAddr& deviceTypeAddr);
+    DeviceDescriptorBase(audio_devices_t type, const std::string& address,
+            const FormatVector &encodedFormats = FormatVector{});
+    DeviceDescriptorBase(const AudioDeviceTypeAddr& deviceTypeAddr,
+            const FormatVector &encodedFormats = FormatVector{});
 
-    virtual ~DeviceDescriptorBase() {}
+    virtual ~DeviceDescriptorBase() = default;
 
     audio_devices_t type() const { return mDeviceTypeAddr.mType; }
     const std::string& address() const { return mDeviceTypeAddr.address(); }
     void setAddress(const std::string &address);
     const AudioDeviceTypeAddr& getDeviceTypeAddr() const { return mDeviceTypeAddr; }
 
+    const FormatVector& encodedFormats() const { return mEncodedFormats; }
+    bool supportsFormat(audio_format_t format);
+
     // AudioPortConfig
     virtual sp<AudioPort> getAudioPort() const {
         return static_cast<AudioPort*>(const_cast<DeviceDescriptorBase*>(this));
@@ -60,7 +65,7 @@
     status_t setEncapsulationModes(uint32_t encapsulationModes);
     status_t setEncapsulationMetadataTypes(uint32_t encapsulationMetadataTypes);
 
-    void dump(std::string *dst, int spaces, int index,
+    void dump(std::string *dst, int spaces,
               const char* extraInfo = nullptr, bool verbose = true) const;
     void log() const;
 
@@ -74,14 +79,12 @@
 
     bool equals(const sp<DeviceDescriptorBase>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-
     status_t writeToParcelable(media::AudioPort* parcelable) const;
     status_t readFromParcelable(const media::AudioPort& parcelable);
 
 protected:
     AudioDeviceTypeAddr mDeviceTypeAddr;
+    FormatVector        mEncodedFormats;
     uint32_t mEncapsulationModes = 0;
     uint32_t mEncapsulationMetadataTypes = 0;
 private:
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
index bb9a5f2..3f1fbea 100644
--- a/media/libaudiofoundation/tests/Android.bp
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -11,12 +11,20 @@
     name: "audiofoundation_parcelable_test",
 
     shared_libs: [
-        "libaudiofoundation",
+        "libbase",
         "libbinder",
         "liblog",
         "libutils",
     ],
 
+    static_libs: [
+        "android.media.audio.common.types-V1-cpp",
+        "audioclient-types-aidl-cpp",
+        "libaudioclient_aidl_conversion",
+        "libaudiofoundation",
+        "libstagefright_foundation",
+    ],
+
     header_libs: [
         "libaudio_system_headers",
     ],
diff --git a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
index 068b5d8..50d8dc8 100644
--- a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
+++ b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
@@ -53,7 +53,7 @@
 
 AudioGains getAudioGainsForTest() {
     AudioGains audioGains;
-    sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*useInChannelMask*/);
+    sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*isInput*/);
     audioGain->setMode(AUDIO_GAIN_MODE_JOINT);
     audioGain->setChannelMask(AUDIO_CHANNEL_OUT_STEREO);
     audioGain->setMinValueInMb(-3200);
@@ -75,57 +75,74 @@
     return audioProfiles;
 }
 
-TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
-    Parcel data;
-    AudioGains audioGains = getAudioGainsForTest();
-
-    ASSERT_EQ(data.writeParcelable(audioGains), NO_ERROR);
-    data.setDataPosition(0);
-    AudioGains audioGainsFromParcel;
-    ASSERT_EQ(data.readParcelable(&audioGainsFromParcel), NO_ERROR);
-    ASSERT_TRUE(audioGainsFromParcel.equals(audioGains));
+TEST(AudioFoundationParcelableTest, ParcelingAudioProfile) {
+    sp<AudioProfile> profile = getAudioProfileVectorForTest()[0];
+    auto conv = legacy2aidl_AudioProfile(profile, false /*isInput*/);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioProfile(conv.value(), false /*isInput*/);
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(profile->equals(convBack.value()));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingAudioProfileVector) {
-    Parcel data;
-    AudioProfileVector audioProfiles = getAudioProfileVectorForTest();
+    AudioProfileVector profiles = getAudioProfileVectorForTest();
+    auto conv = legacy2aidl_AudioProfileVector(profiles, false /*isInput*/);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioProfileVector(conv.value(), false /*isInput*/);
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(profiles.equals(convBack.value()));
+}
 
-    ASSERT_EQ(data.writeParcelable(audioProfiles), NO_ERROR);
-    data.setDataPosition(0);
-    AudioProfileVector audioProfilesFromParcel;
-    ASSERT_EQ(data.readParcelable(&audioProfilesFromParcel), NO_ERROR);
-    ASSERT_TRUE(audioProfilesFromParcel.equals(audioProfiles));
+TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
+    sp<AudioGain> audioGain = getAudioGainsForTest()[0];
+    auto conv = legacy2aidl_AudioGain(audioGain);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioGain(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(audioGain->equals(convBack.value()));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioGains) {
+    AudioGains audioGains = getAudioGainsForTest();
+    auto conv = legacy2aidl_AudioGains(audioGains);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioGains(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(audioGains.equals(convBack.value()));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingAudioPort) {
-    Parcel data;
     sp<AudioPort> audioPort = new AudioPort(
             "AudioPortName", AUDIO_PORT_TYPE_DEVICE, AUDIO_PORT_ROLE_SINK);
     audioPort->setGains(getAudioGainsForTest());
     audioPort->setAudioProfiles(getAudioProfileVectorForTest());
 
-    ASSERT_EQ(data.writeParcelable(*audioPort), NO_ERROR);
-    data.setDataPosition(0);
+    media::AudioPort parcelable;
+    ASSERT_EQ(NO_ERROR, audioPort->writeToParcelable(&parcelable));
     sp<AudioPort> audioPortFromParcel = new AudioPort(
             "", AUDIO_PORT_TYPE_NONE, AUDIO_PORT_ROLE_NONE);
-    ASSERT_EQ(data.readParcelable(audioPortFromParcel.get()), NO_ERROR);
+    ASSERT_EQ(NO_ERROR, audioPortFromParcel->readFromParcelable(parcelable));
     ASSERT_TRUE(audioPortFromParcel->equals(audioPort));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingAudioPortConfig) {
+    const bool isInput = false;
     Parcel data;
     sp<AudioPortConfig> audioPortConfig = new AudioPortConfigTestStub();
     audioPortConfig->applyAudioPortConfig(&TEST_AUDIO_PORT_CONFIG);
-
-    ASSERT_EQ(data.writeParcelable(*audioPortConfig), NO_ERROR);
+    media::audio::common::AudioPortConfig parcelable{};
+    ASSERT_EQ(NO_ERROR, audioPortConfig->writeToParcelable(&parcelable, isInput));
+    ASSERT_EQ(NO_ERROR, data.writeParcelable(parcelable));
     data.setDataPosition(0);
+    media::audio::common::AudioPortConfig parcelableFromParcel{};
+    ASSERT_EQ(NO_ERROR, data.readParcelable(&parcelableFromParcel));
     sp<AudioPortConfig> audioPortConfigFromParcel = new AudioPortConfigTestStub();
-    ASSERT_EQ(data.readParcelable(audioPortConfigFromParcel.get()), NO_ERROR);
-    ASSERT_TRUE(audioPortConfigFromParcel->equals(audioPortConfig));
+    ASSERT_EQ(NO_ERROR, audioPortConfigFromParcel->readFromParcelable(
+                    parcelableFromParcel, isInput));
+    ASSERT_TRUE(audioPortConfigFromParcel->equals(audioPortConfig, isInput));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingDeviceDescriptorBase) {
-    Parcel data;
     sp<DeviceDescriptorBase> desc = new DeviceDescriptorBase(AUDIO_DEVICE_OUT_SPEAKER);
     desc->setGains(getAudioGainsForTest());
     desc->setAudioProfiles(getAudioProfileVectorForTest());
@@ -135,10 +152,10 @@
     ASSERT_EQ(desc->setEncapsulationMetadataTypes(
             AUDIO_ENCAPSULATION_METADATA_TYPE_ALL_POSITION_BITS), NO_ERROR);
 
-    ASSERT_EQ(data.writeParcelable(*desc), NO_ERROR);
-    data.setDataPosition(0);
+    media::AudioPort parcelable;
+    ASSERT_EQ(NO_ERROR, desc->writeToParcelable(&parcelable));
     sp<DeviceDescriptorBase> descFromParcel = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
-    ASSERT_EQ(data.readParcelable(descFromParcel.get()), NO_ERROR);
+    ASSERT_EQ(NO_ERROR, descFromParcel->readFromParcelable(parcelable));
     ASSERT_TRUE(descFromParcel->equals(desc));
 }
 
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index bd24c84..5fe74f9 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -30,6 +30,7 @@
     ],
 
     shared_libs: [
+        "audioclient-types-aidl-cpp",
         "libdl",
         "libhidlbase",
         "liblog",
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index a2c6e8a..d6576f5 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -50,6 +50,7 @@
         "libmedia_helper",
         "libmediautils",
         "libutils",
+        "audioclient-types-aidl-cpp",
     ],
     header_libs: [
         "android.hardware.audio.common.util@all-versions",
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index aa94eea..47acb19 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -20,6 +20,7 @@
 //#define LOG_NDEBUG 0
 
 #include <cutils/native_handle.h>
+#include <cutils/properties.h>
 #include <hwbinder/IPCThreadState.h>
 #include <media/AudioContainers.h>
 #include <utils/Log.h>
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index 2694ab3..9fd0ac0 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -119,6 +119,23 @@
     status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
     status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType __unused,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos __unused) override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioMixerBurstCount() override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioHardwareBurstMinUsec() override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
     status_t dump(int fd, const Vector<String16>& args) override;
 
   private:
diff --git a/media/libaudiohal/impl/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
index 2fde936..ee1d2c5 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -112,6 +112,23 @@
     status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
     status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType __unused,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos __unused) override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioMixerBurstCount() override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioHardwareBurstMinUsec() override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
     status_t dump(int fd, const Vector<String16>& args) override;
 
     void closeOutputStream(struct audio_stream_out *stream_out);
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 6f84efe..b46259b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -46,6 +46,8 @@
 
     status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
 
+    float getHalVersion() const override { return MAJOR_VERSION + (float)MINOR_VERSION / 10; }
+
   private:
     friend class ServiceNotificationListener;
     void addDeviceFactory(sp<IDevicesFactory> factory, bool needToNotify);
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index 568a1fb..5baefa4b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -40,6 +40,10 @@
 
             status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
 
+            float getHalVersion() const override {
+                return MAJOR_VERSION + (float)MINOR_VERSION / 10;
+            }
+
   private:
     sp<DevicesFactoryHalInterface> mLocalFactory;
     sp<DevicesFactoryHalInterface> mHidlFactory;
diff --git a/media/libaudiohal/impl/DevicesFactoryHalLocal.h b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
index 32bf362..d2b9104 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalLocal.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
@@ -41,6 +41,10 @@
                 return INVALID_OPERATION;
             }
 
+            float getHalVersion() const override {
+                return MAJOR_VERSION + (float)MINOR_VERSION / 10;
+            }
+
   private:
     friend class DevicesFactoryHalHybrid;
 
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index 69cbcec..70c3199 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
 #define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
 
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/audiohal/EffectHalInterface.h>
 #include <media/MicrophoneInfo.h>
 #include <system/audio.h>
@@ -120,6 +122,12 @@
     virtual status_t removeDeviceEffect(
             audio_port_handle_t device, sp<EffectHalInterface> effect) = 0;
 
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
+    virtual int32_t getAAudioMixerBurstCount() = 0;
+    virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
+
     virtual status_t dump(int fd, const Vector<String16>& args) = 0;
 
   protected:
diff --git a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
index 5091558..17010e6 100644
--- a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
@@ -43,6 +43,8 @@
     // The callback can be only set once.
     virtual status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) = 0;
 
+    virtual float getHalVersion() const = 0;
+
     static sp<DevicesFactoryHalInterface> create();
 
   protected:
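The new getHalVersion() accessor encodes the HIDL HAL version as a single float, MAJOR + MINOR/10 (so a 7.1 HAL reports 7.1f), which lets callers gate features on the running HAL. A small hypothetical caller; the 7.0 threshold is only an example:

    #include <media/audiohal/DevicesFactoryHalInterface.h>
    #include <utils/StrongPointer.h>

    bool audioHalIsNewerThanSeven() {
        android::sp<android::DevicesFactoryHalInterface> factory =
                android::DevicesFactoryHalInterface::create();
        if (factory == nullptr) return false;
        // e.g. 7 + 1/10.0f == 7.1f for a 7.1 HAL.
        return factory->getHalVersion() > 7.0f;
    }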
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 19a8b2f..61a2bf5 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -150,6 +150,7 @@
 
 bool sHasAuxChannels[PREPROC_NUM_EFFECTS] = {
         false,  // PREPROC_AGC
+        false,  // PREPROC_AGC2
         true,   // PREPROC_AEC
         true,   // PREPROC_NS
 };
diff --git a/media/libeffects/testlibs/Android.bp b/media/libeffects/testlibs/Android.bp
new file mode 100644
index 0000000..5ba56bb
--- /dev/null
+++ b/media/libeffects/testlibs/Android.bp
@@ -0,0 +1,77 @@
+// Test Reverb library
+package {
+    default_applicable_licenses: [
+        "frameworks_av_media_libeffects_testlibs_license",
+    ],
+}
+
+license {
+    name: "frameworks_av_media_libeffects_testlibs_license",
+    visibility: [":__subpackages__"],
+    license_kinds: [
+        "SPDX-license-identifier-Apache-2.0",
+    ],
+    license_text: [
+        "NOTICE",
+    ],
+}
+
+cc_library {
+    name: "libreverbtest",
+    host_supported: true,
+    vendor: true,
+    srcs: [
+        "EffectReverb.c",
+        "EffectsMath.c",
+    ],
+
+    shared_libs: [
+        "libcutils",
+        "liblog",
+    ],
+
+    relative_install_path: "soundfx",
+
+    cflags: [
+        "-fvisibility=hidden",
+        "-Wall",
+        "-Werror",
+        "-Wno-address-of-packed-member",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+    ],
+}
+
+cc_library {
+    name: "libequalizertest",
+    host_supported: true,
+    vendor: true,
+    srcs: [
+        "AudioBiquadFilter.cpp",
+        "AudioCoefInterpolator.cpp",
+        "AudioEqualizer.cpp",
+        "AudioPeakingFilter.cpp",
+        "AudioShelvingFilter.cpp",
+        "EffectEqualizer.cpp",
+        "EffectsMath.c",
+    ],
+
+    shared_libs: [
+        "libcutils",
+        "liblog",
+    ],
+
+    relative_install_path: "soundfx",
+
+    cflags: [
+        "-fvisibility=hidden",
+        "-Wall",
+        "-Werror",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+    ],
+}
diff --git a/media/libeffects/testlibs/Android.mk_ b/media/libeffects/testlibs/Android.mk_
deleted file mode 100644
index 14c373f..0000000
--- a/media/libeffects/testlibs/Android.mk_
+++ /dev/null
@@ -1,55 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-# Test Reverb library
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
-	EffectReverb.c.arm \
-	EffectsMath.c.arm
-
-LOCAL_CFLAGS := -O2
-
-LOCAL_SHARED_LIBRARIES := \
-	libcutils \
-	libdl
-
-LOCAL_MODULE_RELATIVE_PATH := soundfx
-LOCAL_MODULE := libreverbtest
-
-LOCAL_C_INCLUDES := \
-	$(call include-path-for, audio-effects) \
-	$(call include-path-for, graphics corecg)
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-
-# Test Equalizer library
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
-	EffectsMath.c.arm \
-	EffectEqualizer.cpp \
-	AudioBiquadFilter.cpp.arm \
-	AudioCoefInterpolator.cpp.arm \
-	AudioPeakingFilter.cpp.arm \
-	AudioShelvingFilter.cpp.arm \
-	AudioEqualizer.cpp.arm
-
-LOCAL_CFLAGS := -O2
-
-LOCAL_SHARED_LIBRARIES := \
-	libcutils \
-	libdl
-
-LOCAL_MODULE_RELATIVE_PATH := soundfx
-LOCAL_MODULE := libequalizertest
-
-LOCAL_C_INCLUDES := \
-	$(call include-path-for, graphics corecg) \
-	$(call include-path-for, audio-effects)
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-
diff --git a/media/libeffects/testlibs/AudioEqualizer.cpp b/media/libeffects/testlibs/AudioEqualizer.cpp
index 4f3a308..141750b 100644
--- a/media/libeffects/testlibs/AudioEqualizer.cpp
+++ b/media/libeffects/testlibs/AudioEqualizer.cpp
@@ -19,7 +19,7 @@
 #include <assert.h>
 #include <stdlib.h>
 #include <new>
-#include <utils/Log.h>
+#include <log/log.h>
 
 #include "AudioEqualizer.h"
 #include "AudioPeakingFilter.h"
diff --git a/media/libeffects/testlibs/AudioPeakingFilter.cpp b/media/libeffects/testlibs/AudioPeakingFilter.cpp
index 99323ac..4257eca 100644
--- a/media/libeffects/testlibs/AudioPeakingFilter.cpp
+++ b/media/libeffects/testlibs/AudioPeakingFilter.cpp
@@ -87,9 +87,9 @@
 void AudioPeakingFilter::commit(bool immediate) {
     audio_coef_t coefs[5];
     int intCoord[3] = {
-        mFrequency >> FREQ_PRECISION_BITS,
+        (int)(mFrequency >> FREQ_PRECISION_BITS),
         mGain >> GAIN_PRECISION_BITS,
-        mBandwidth >> BANDWIDTH_PRECISION_BITS
+        (int)(mBandwidth >> BANDWIDTH_PRECISION_BITS)
     };
     uint32_t fracCoord[3] = {
         mFrequency << (32 - FREQ_PRECISION_BITS),
diff --git a/media/libeffects/testlibs/AudioShelvingFilter.cpp b/media/libeffects/testlibs/AudioShelvingFilter.cpp
index e031287..ad43c5a 100644
--- a/media/libeffects/testlibs/AudioShelvingFilter.cpp
+++ b/media/libeffects/testlibs/AudioShelvingFilter.cpp
@@ -89,8 +89,8 @@
 void AudioShelvingFilter::commit(bool immediate) {
     audio_coef_t coefs[5];
     int intCoord[2] = {
-        mFrequency >> FREQ_PRECISION_BITS,
-        mGain >> GAIN_PRECISION_BITS
+        (int)(mFrequency >> FREQ_PRECISION_BITS),
+        (int)(mGain >> GAIN_PRECISION_BITS)
     };
     uint32_t fracCoord[2] = {
         mFrequency << (32 - FREQ_PRECISION_BITS),
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index db4d009..72b530d 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -131,7 +131,8 @@
                             int32_t ioId,
                             effect_handle_t *pHandle) {
     int ret;
-    int i;
+    (void)sessionId;
+    (void)ioId;
 
     ALOGV("EffectLibCreateEffect start");
 
@@ -160,7 +161,7 @@
     pContext->state = EQUALIZER_STATE_INITIALIZED;
 
     ALOGV("EffectLibCreateEffect %p, size %d",
-         pContext, AudioEqualizer::GetInstanceSize(kNumBands)+sizeof(EqualizerContext));
+         pContext, (int)(AudioEqualizer::GetInstanceSize(kNumBands)+sizeof(EqualizerContext)));
 
     return 0;
 
@@ -294,7 +295,6 @@
 
 int Equalizer_init(EqualizerContext *pContext)
 {
-    int status;
 
     ALOGV("Equalizer_init start");
 
@@ -630,7 +630,6 @@
         void *pCmdData, uint32_t *replySize, void *pReplyData) {
 
     android::EqualizerContext * pContext = (android::EqualizerContext *) self;
-    int retsize;
 
     if (pContext == NULL || pContext->state == EQUALIZER_STATE_UNINITIALIZED) {
         return -EINVAL;
@@ -750,13 +749,13 @@
         NULL
 };
 
-
+__attribute__ ((visibility ("default")))
 audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
-    tag : AUDIO_EFFECT_LIBRARY_TAG,
-    version : EFFECT_LIBRARY_API_VERSION,
-    name : "Test Equalizer Library",
-    implementor : "The Android Open Source Project",
-    create_effect : android::EffectCreate,
-    release_effect : android::EffectRelease,
-    get_descriptor : android::EffectGetDescriptor,
+    .tag = AUDIO_EFFECT_LIBRARY_TAG,
+    .version = EFFECT_LIBRARY_API_VERSION,
+    .name = "Test Equalizer Library",
+    .implementor = "The Android Open Source Project",
+    .create_effect = android::EffectCreate,
+    .release_effect = android::EffectRelease,
+    .get_descriptor = android::EffectGetDescriptor,
 };
diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c
index fce9bed..efba4f4 100644
--- a/media/libeffects/testlibs/EffectReverb.c
+++ b/media/libeffects/testlibs/EffectReverb.c
@@ -107,6 +107,8 @@
     const effect_descriptor_t *desc;
     int aux = 0;
     int preset = 0;
+    (void)sessionId;
+    (void)ioId;
 
     ALOGV("EffectLibCreateEffect start");
 
@@ -149,7 +151,7 @@
 
     module->context.mState = REVERB_STATE_INITIALIZED;
 
-    ALOGV("EffectLibCreateEffect %p ,size %d", module, sizeof(reverb_module_t));
+    ALOGV("EffectLibCreateEffect %p ,size %zu", module, sizeof(reverb_module_t));
 
     return 0;
 }
@@ -283,7 +285,6 @@
         void *pCmdData, uint32_t *replySize, void *pReplyData) {
     reverb_module_t *pRvbModule = (reverb_module_t *) self;
     reverb_object_t *pReverb;
-    int retsize;
 
     if (pRvbModule == NULL ||
             pRvbModule->context.mState == REVERB_STATE_UNINITIALIZED) {
@@ -758,7 +759,6 @@
     int32_t *pValue32;
     int16_t *pValue16;
     t_reverb_settings *pProperties;
-    int32_t i;
     int32_t temp;
     int32_t temp2;
     uint32_t size;
@@ -1654,7 +1654,6 @@
     int32_t nApOut;
 
     int32_t j;
-    int32_t nEarlyOut;
 
     int32_t tempValue;
 
@@ -2203,6 +2202,7 @@
     return 0;
 }
 
+__attribute__ ((visibility ("default")))
 audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
     .tag = AUDIO_EFFECT_LIBRARY_TAG,
     .version = EFFECT_LIBRARY_API_VERSION,
diff --git a/media/libeffects/testlibs/EffectReverb.h b/media/libeffects/testlibs/EffectReverb.h
index 756c5ea..8f405d4 100644
--- a/media/libeffects/testlibs/EffectReverb.h
+++ b/media/libeffects/testlibs/EffectReverb.h
@@ -443,7 +443,4 @@
 */
 static int ReverbUpdateRoom(reverb_object_t* pReverbData, bool fullUpdate);
 
-
-static int ReverbComputeConstants(reverb_object_t *pReverbData, uint32_t samplingRate);
-
 #endif /*ANDROID_EFFECTREVERB_H_*/
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
index 63b769e..b0563e2 100644
--- a/media/libheadtracking/Android.bp
+++ b/media/libheadtracking/Android.bp
@@ -18,6 +18,7 @@
       "PoseRateLimiter.cpp",
       "QuaternionUtil.cpp",
       "ScreenHeadFusion.cpp",
+      "StillnessDetector.cpp",
       "Twist.cpp",
     ],
     export_include_dirs: [
@@ -70,6 +71,7 @@
         "PoseRateLimiter-test.cpp",
         "QuaternionUtil-test.cpp",
         "ScreenHeadFusion-test.cpp",
+        "StillnessDetector-test.cpp",
         "Twist-test.cpp",
     ],
     shared_libs: [
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
index 47f7cf0..f2f15df 100644
--- a/media/libheadtracking/HeadTrackingProcessor.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -20,6 +20,7 @@
 #include "PoseDriftCompensator.h"
 #include "QuaternionUtil.h"
 #include "ScreenHeadFusion.h"
+#include "StillnessDetector.h"
 
 namespace android {
 namespace media {
@@ -40,6 +41,18 @@
                   .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
                   .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
           }),
+          mHeadStillnessDetector(StillnessDetector::Options{
+                  .defaultValue = false,
+                  .windowDuration = options.autoRecenterWindowDuration,
+                  .translationalThreshold = options.autoRecenterTranslationalThreshold,
+                  .rotationalThreshold = options.autoRecenterRotationalThreshold,
+          }),
+          mScreenStillnessDetector(StillnessDetector::Options{
+                  .defaultValue = true,
+                  .windowDuration = options.screenStillnessWindowDuration,
+                  .translationalThreshold = options.screenStillnessTranslationalThreshold,
+                  .rotationalThreshold = options.screenStillnessRotationalThreshold,
+          }),
           mModeSelector(ModeSelector::Options{.freshnessTimeout = options.freshnessTimeout},
                         initialMode),
           mRateLimiter(PoseRateLimiter::Options{
@@ -53,6 +66,7 @@
         Pose3f predictedWorldToHead =
                 worldToHead * integrate(headTwist, mOptions.predictionDuration);
         mHeadPoseDriftCompensator.setInput(timestamp, predictedWorldToHead);
+        mHeadStillnessDetector.setInput(timestamp, predictedWorldToHead);
         mWorldToHeadTimestamp = timestamp;
     }
 
@@ -63,8 +77,9 @@
             mPhysicalToLogicalAngle = mPendingPhysicalToLogicalAngle;
         }
 
-        mScreenPoseDriftCompensator.setInput(
-                timestamp, worldToScreen * Pose3f(rotateY(-mPhysicalToLogicalAngle)));
+        Pose3f worldToLogicalScreen = worldToScreen * Pose3f(rotateY(-mPhysicalToLogicalAngle));
+        mScreenPoseDriftCompensator.setInput(timestamp, worldToLogicalScreen);
+        mScreenStillnessDetector.setInput(timestamp, worldToLogicalScreen);
         mWorldToScreenTimestamp = timestamp;
     }
 
@@ -77,18 +92,32 @@
     }
 
     void calculate(int64_t timestamp) override {
-        if (mWorldToHeadTimestamp.has_value()) {
-            const Pose3f worldToHead = mHeadPoseDriftCompensator.getOutput();
-            mScreenHeadFusion.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
-            mModeSelector.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
-        }
-
+        // Handle the screen first, since it might trigger a recentering of the head.
         if (mWorldToScreenTimestamp.has_value()) {
             const Pose3f worldToLogicalScreen = mScreenPoseDriftCompensator.getOutput();
+            bool screenStable = mScreenStillnessDetector.calculate(timestamp);
+            mModeSelector.setScreenStable(mWorldToScreenTimestamp.value(), screenStable);
+            // Whenever the screen is unstable, recenter the head pose.
+            if (!screenStable) {
+                recenter(true, false);
+            }
             mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
                                                    worldToLogicalScreen);
         }
 
+        // Handle head.
+        if (mWorldToHeadTimestamp.has_value()) {
+            Pose3f worldToHead = mHeadPoseDriftCompensator.getOutput();
+            // Auto-recenter.
+            if (mHeadStillnessDetector.calculate(timestamp)) {
+                recenter(true, false);
+                worldToHead = mHeadPoseDriftCompensator.getOutput();
+            }
+
+            mScreenHeadFusion.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+            mModeSelector.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+        }
+
         auto maybeScreenToHead = mScreenHeadFusion.calculate();
         if (maybeScreenToHead.has_value()) {
             mModeSelector.setScreenToHeadPose(maybeScreenToHead->timestamp,
@@ -114,9 +143,11 @@
     void recenter(bool recenterHead, bool recenterScreen) override {
         if (recenterHead) {
             mHeadPoseDriftCompensator.recenter();
+            mHeadStillnessDetector.reset();
         }
         if (recenterScreen) {
             mScreenPoseDriftCompensator.recenter();
+            mScreenStillnessDetector.reset();
         }
 
         // If a sensor being recentered is included in the current mode, apply rate limiting to
@@ -140,6 +171,8 @@
     Pose3f mHeadToStagePose;
     PoseDriftCompensator mHeadPoseDriftCompensator;
     PoseDriftCompensator mScreenPoseDriftCompensator;
+    StillnessDetector mHeadStillnessDetector;
+    StillnessDetector mScreenStillnessDetector;
     ScreenHeadFusion mScreenHeadFusion;
     ModeSelector mModeSelector;
     PoseRateLimiter mRateLimiter;
diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp
index 6247d84..a136e6b 100644
--- a/media/libheadtracking/ModeSelector-test.cpp
+++ b/media/libheadtracking/ModeSelector-test.cpp
@@ -44,6 +44,7 @@
     ModeSelector selector(options, HeadTrackingMode::WORLD_RELATIVE);
 
     selector.setWorldToHeadPose(0, worldToHead);
+    selector.setScreenStable(0, true);
     selector.calculate(0);
     EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
     EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse());
@@ -69,14 +70,46 @@
     ModeSelector selector(options);
 
     selector.setScreenToStagePose(screenToStage);
-
     selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
     selector.setWorldToHeadPose(0, worldToHead);
+    selector.setScreenStable(0, true);
     selector.calculate(0);
     EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
     EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
 }
 
+TEST(ModeSelector, WorldRelativeUnstable) {
+    const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options{.freshnessTimeout = 100};
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+    selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+    selector.setWorldToHeadPose(0, worldToHead);
+    selector.setScreenStable(0, false);
+    selector.calculate(10);
+    EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), screenToStage);
+}
+
+TEST(ModeSelector, WorldRelativeStableStale) {
+    const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+    ModeSelector::Options options{.freshnessTimeout = 100};
+    ModeSelector selector(options);
+
+    selector.setScreenToStagePose(screenToStage);
+    selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+    selector.setWorldToHeadPose(100, worldToHead);
+    selector.setScreenStable(0, true);
+    selector.calculate(101);
+    EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+    EXPECT_EQ(selector.getHeadToStagePose(), screenToStage);
+}
+
 TEST(ModeSelector, WorldRelativeStale) {
     const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
     const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
@@ -85,7 +118,6 @@
     ModeSelector selector(options);
 
     selector.setScreenToStagePose(screenToStage);
-
     selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
     selector.setWorldToHeadPose(0, worldToHead);
     selector.calculate(101);
@@ -101,7 +133,6 @@
     ModeSelector selector(options);
 
     selector.setScreenToStagePose(screenToStage);
-
     selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
     selector.setScreenToHeadPose(0, screenToHead);
     selector.calculate(0);
@@ -118,10 +149,10 @@
     ModeSelector selector(options);
 
     selector.setScreenToStagePose(screenToStage);
-
     selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
     selector.setScreenToHeadPose(0, screenToHead);
     selector.setWorldToHeadPose(50, worldToHead);
+    selector.setScreenStable(50, true);
     selector.calculate(101);
     EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
     EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
@@ -139,6 +170,7 @@
     selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
     selector.setScreenToHeadPose(50, std::nullopt);
     selector.setWorldToHeadPose(50, worldToHead);
+    selector.setScreenStable(50, true);
     selector.calculate(101);
     EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
     EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
diff --git a/media/libheadtracking/ModeSelector.cpp b/media/libheadtracking/ModeSelector.cpp
index 16e1712..cb3a27f 100644
--- a/media/libheadtracking/ModeSelector.cpp
+++ b/media/libheadtracking/ModeSelector.cpp
@@ -41,11 +41,18 @@
     mWorldToHeadTimestamp = timestamp;
 }
 
+void ModeSelector::setScreenStable(int64_t timestamp, bool stable) {
+    mScreenStable = stable;
+    mScreenStableTimestamp = timestamp;
+}
+
 void ModeSelector::calculateActualMode(int64_t timestamp) {
     bool isValidScreenToHead = mScreenToHead.has_value() &&
                                timestamp - mScreenToHeadTimestamp < mOptions.freshnessTimeout;
     bool isValidWorldToHead = mWorldToHead.has_value() &&
                               timestamp - mWorldToHeadTimestamp < mOptions.freshnessTimeout;
+    bool isValidScreenStable = mScreenStable.has_value() &&
+                              timestamp - mScreenStableTimestamp < mOptions.freshnessTimeout;
 
     HeadTrackingMode mode = mDesiredMode;
 
@@ -58,7 +65,7 @@
 
     // Optional downgrade from world-relative to static.
     if (mode == HeadTrackingMode::WORLD_RELATIVE) {
-        if (!isValidWorldToHead) {
+        if (!isValidWorldToHead || !isValidScreenStable || !mScreenStable.value()) {
             mode = HeadTrackingMode::STATIC;
         }
     }
diff --git a/media/libheadtracking/ModeSelector.h b/media/libheadtracking/ModeSelector.h
index 17a5142..e537040 100644
--- a/media/libheadtracking/ModeSelector.h
+++ b/media/libheadtracking/ModeSelector.h
@@ -56,6 +56,7 @@
  *   from screen-relative to world-relative.
  * - When we cannot get a fresh estimate of the world-to-head pose, we will fall back from
  *   world-relative to static.
+ * - In world-relative mode, if the screen is unstable, we will fall back to static.
  *
  * All the timestamps used here are of arbitrary units and origin. They just need to be consistent
  * between all the calls and with the Options provided for determining freshness and rate limiting.
@@ -92,6 +93,12 @@
     void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
 
     /**
+     * Set whether the screen is considered stable.
+     * The timestamp needs to reflect how fresh the sample is.
+     */
+    void setScreenStable(int64_t timestamp, bool stable);
+
+    /**
      * Process all the previous inputs and update the outputs.
      */
     void calculate(int64_t timestamp);
@@ -116,6 +123,8 @@
     int64_t mScreenToHeadTimestamp;
     std::optional<Pose3f> mWorldToHead;
     int64_t mWorldToHeadTimestamp;
+    std::optional<bool> mScreenStable;
+    int64_t mScreenStableTimestamp;
 
     HeadTrackingMode mActualMode;
     Pose3f mHeadToStage;
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
index 47241ce..ae39512 100644
--- a/media/libheadtracking/Pose.cpp
+++ b/media/libheadtracking/Pose.cpp
@@ -43,7 +43,7 @@
         return {to, false};
     }
     // Always rate limit if t is 0 (required to avoid division by 0).
-    if (t == 0) {
+    if (t == 0 || maxTranslationalVelocity == 0 || maxRotationalVelocity == 0) {
         return {from, true};
     }
 
diff --git a/media/libheadtracking/StillnessDetector-test.cpp b/media/libheadtracking/StillnessDetector-test.cpp
new file mode 100644
index 0000000..b6cd479
--- /dev/null
+++ b/media/libheadtracking/StillnessDetector-test.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "StillnessDetector.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = StillnessDetector::Options;
+
+class StillnessDetectorTest : public testing::TestWithParam<bool> {
+  public:
+    void SetUp() override { mDefaultValue = GetParam(); }
+
+  protected:
+    bool mDefaultValue;
+};
+
+TEST_P(StillnessDetectorTest, Still) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f withinThreshold =
+            baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.01) * rotateY(-0.01));
+
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(0, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(300, withinThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(300));
+    detector.setInput(600, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(600));
+    detector.setInput(999, withinThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(999));
+    detector.setInput(1000, baseline);
+    EXPECT_TRUE(detector.calculate(1000));
+}
+
+TEST_P(StillnessDetectorTest, ZeroDuration) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue, .windowDuration = 0});
+    EXPECT_TRUE(detector.calculate(0));
+    EXPECT_TRUE(detector.calculate(1000));
+}
+
+TEST_P(StillnessDetectorTest, NotStillTranslation) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f withinThreshold =
+            baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.01) * rotateY(-0.01));
+    const Pose3f outsideThreshold = baseline * Pose3f(Vector3f(1, 1, 0));
+
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(0, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(300, outsideThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(300));
+    detector.setInput(600, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(600));
+    detector.setInput(1299, withinThreshold);
+    EXPECT_FALSE(detector.calculate(1299));
+    detector.setInput(1300, baseline);
+    EXPECT_TRUE(detector.calculate(1300));
+}
+
+TEST_P(StillnessDetectorTest, NotStillRotation) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f withinThreshold =
+            baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.03) * rotateY(-0.03));
+    const Pose3f outsideThreshold = baseline * Pose3f(rotateZ(0.06));
+
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(0, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(300, outsideThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(300));
+    detector.setInput(600, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(600));
+    detector.setInput(1299, withinThreshold);
+    EXPECT_FALSE(detector.calculate(1299));
+    detector.setInput(1300, baseline);
+    EXPECT_TRUE(detector.calculate(1300));
+}
+
+TEST_P(StillnessDetectorTest, Suppression) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f outsideThreshold = baseline * Pose3f(Vector3f(1.1, 0, 0));
+    const Pose3f middlePoint = baseline * Pose3f(Vector3f(0.55, 0, 0));
+
+    detector.setInput(0, baseline);
+    detector.setInput(1000, baseline);
+    EXPECT_TRUE(detector.calculate(1000));
+    detector.setInput(1100, outsideThreshold);
+    EXPECT_FALSE(detector.calculate(1100));
+    detector.setInput(1500, middlePoint);
+    EXPECT_FALSE(detector.calculate(1500));
+    EXPECT_FALSE(detector.calculate(1999));
+    EXPECT_TRUE(detector.calculate(2000));
+}
+
+TEST_P(StillnessDetectorTest, Reset) {
+    StillnessDetector detector(Options{.defaultValue = mDefaultValue,
+                                       .windowDuration = 1000,
+                                       .translationalThreshold = 1,
+                                       .rotationalThreshold = 0.05});
+
+    const Pose3f baseline(Vector3f{1, 2, 3}, Quaternionf::UnitRandom());
+    const Pose3f withinThreshold =
+            baseline * Pose3f(Vector3f(0.3, -0.3, 0), rotateX(0.01) * rotateY(-0.01));
+    EXPECT_EQ(mDefaultValue, detector.calculate(0));
+    detector.setInput(300, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(300));
+    detector.reset();
+    detector.setInput(600, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(600));
+    detector.setInput(900, withinThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(900));
+    detector.setInput(1200, baseline);
+    EXPECT_EQ(mDefaultValue, detector.calculate(1200));
+    detector.setInput(1599, withinThreshold);
+    EXPECT_EQ(mDefaultValue, detector.calculate(1599));
+    detector.setInput(1600, baseline);
+    EXPECT_TRUE(detector.calculate(1600));
+}
+
+INSTANTIATE_TEST_SUITE_P(StillnessDetectorTestParametrized, StillnessDetectorTest,
+                         testing::Values(false, true));
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/StillnessDetector.cpp b/media/libheadtracking/StillnessDetector.cpp
new file mode 100644
index 0000000..be7c893
--- /dev/null
+++ b/media/libheadtracking/StillnessDetector.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StillnessDetector.h"
+
+namespace android {
+namespace media {
+
+StillnessDetector::StillnessDetector(const Options& options)
+    : mOptions(options), mCosHalfRotationalThreshold(cos(mOptions.rotationalThreshold / 2)) {}
+
+void StillnessDetector::reset() {
+    mFifo.clear();
+    mWindowFull = false;
+    mSuppressionDeadline.reset();
+}
+
+void StillnessDetector::setInput(int64_t timestamp, const Pose3f& input) {
+    mFifo.push_back(TimestampedPose{timestamp, input});
+    discardOld(timestamp);
+}
+
+bool StillnessDetector::calculate(int64_t timestamp) {
+    discardOld(timestamp);
+
+    // Check whether all the poses in the queue are in the proximity of the newest one. We want to
+    // do this before checking the overriding conditions below, in order to update the suppression
+    // deadline correctly. We always go from newest to oldest, to find the most recent pose that
+    // violates stillness, and we update the suppression deadline if it has not been set yet or if
+    // the new deadline ends after the current one.
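+    // (mFifo.back() is the most recent sample, i.e. the one most recently passed to setInput();
+    // every older sample in the window is compared against it.)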
+    bool moved = false;
+
+    if (!mFifo.empty()) {
+        for (auto iter = mFifo.rbegin() + 1; iter != mFifo.rend(); ++iter) {
+            const auto& event = *iter;
+            if (!areNear(event.pose, mFifo.back().pose)) {
+                // Enable suppression for the duration of the window.
+                int64_t deadline = event.timestamp + mOptions.windowDuration;
+                if (!mSuppressionDeadline.has_value() || mSuppressionDeadline.value() < deadline) {
+                    mSuppressionDeadline = deadline;
+                }
+                moved = true;
+                break;
+            }
+        }
+    }
+
+    // If the window has not yet been filled, return the default value.
+    if (!mWindowFull) {
+        return mOptions.defaultValue;
+    }
+
+    // Force "in motion" while the suppression deadline is active.
+    if (mSuppressionDeadline.has_value()) {
+        return false;
+    }
+
+    return !moved;
+}
+
+void StillnessDetector::discardOld(int64_t timestamp) {
+    // Handle the special case of the window duration being zero (always considered full).
+    if (mOptions.windowDuration == 0) {
+        mFifo.clear();
+        mWindowFull = true;
+    }
+
+    // Remove any events from the queue that are older than the window. If there were any such
+    // events we consider the window full.
+    const int64_t windowStart = timestamp - mOptions.windowDuration;
+    while (!mFifo.empty() && mFifo.front().timestamp <= windowStart) {
+        mWindowFull = true;
+        mFifo.pop_front();
+    }
+
+    // Expire the suppression deadline.
+    if (mSuppressionDeadline.has_value() && mSuppressionDeadline <= timestamp) {
+        mSuppressionDeadline.reset();
+    }
+}
+
+bool StillnessDetector::areNear(const Pose3f& pose1, const Pose3f& pose2) const {
+    // Check translation. We use the L1 norm to reduce computational load at the expense of accuracy.
+    // The L1 norm is an upper bound for the actual (L2) norm, so this approach will err on the side
+    // of "not near".
+    if ((pose1.translation() - pose2.translation()).lpNorm<1>() > mOptions.translationalThreshold) {
+        return false;
+    }
+
+    // Check orientation.
+    // The angle x between the quaternions is greater than the rotational threshold iff
+    // cos(x/2) < cos(threshold/2).
+    // cos(x/2) can be efficiently calculated as the dot product of the two quaternions.
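+    // For example, with rotationalThreshold = 0.05 rad (as in the unit tests),
+    // mCosHalfRotationalThreshold = cos(0.025) ~= 0.9997, so two orientations only count as near
+    // when the dot product of their unit quaternions is at least ~0.9997.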
+    if (pose1.rotation().dot(pose2.rotation()) < mCosHalfRotationalThreshold) {
+        return false;
+    }
+
+    return true;
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/StillnessDetector.h b/media/libheadtracking/StillnessDetector.h
new file mode 100644
index 0000000..ee4b2d8
--- /dev/null
+++ b/media/libheadtracking/StillnessDetector.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <deque>
+
+#include <media/Pose.h>
+
+namespace android {
+namespace media {
+
+/**
+ * Given a stream of poses, determines if the pose is stable ("still").
+ * Stillness is defined as all poses in the recent history ("window") being near the most recent
+ * sample.
+ *
+ * Typical usage:
+ *
+ * StillnessDetector detector(StillnessDetector::Options{...});
+ *
+ * while (...) {
+ *    detector.setInput(timestamp, pose);
+ *    bool still = detector.calculate(timestamp);
+ * }
+ *
+ * The detection is not considered reliable until a sufficient number of samples has been provided
+ * for an initial fill-up of the window. During that time, the detector will return whatever default
+ * value has been configured.
+ * The reset() method can be used to empty the window again and get back to this initial state.
+ * In the special case of the window duration being 0, the state will always be considered "still".
+ */
+class StillnessDetector {
+  public:
+    /**
+     * Configuration options for the detector.
+     */
+    struct Options {
+        /**
+         * During the initial fill of the window, should we consider the state still?
+         */
+        bool defaultValue;
+        /**
+         * How long is the window, in ticks. The special value of 0 indicates that the stream is
+         * always considered still.
+         */
+        int64_t windowDuration;
+        /**
+         * How much of a translational deviation from the target (in meters) is considered motion.
+         * This is an approximate quantity - the actual threshold might be a little different as we
+         * trade off accuracy for computational efficiency.
+         */
+        float translationalThreshold;
+        /**
+         * How much of a rotational deviation from the target (in radians) is considered motion.
+         * This is an approximate quantity - the actual threshold might be a little different as we
+         * trade off accuracy for computational efficiency.
+         */
+        float rotationalThreshold;
+    };
+
+    /** Ctor. */
+    explicit StillnessDetector(const Options& options);
+
+    /** Clear the window. */
+    void reset();
+    /** Push a new sample. */
+    void setInput(int64_t timestamp, const Pose3f& input);
+    /** Calculate whether the stream is still at the given timestamp. */
+    bool calculate(int64_t timestamp);
+
+  private:
+    struct TimestampedPose {
+        int64_t timestamp;
+        Pose3f pose;
+    };
+
+    const Options mOptions;
+    // Precalculated cos(mOptions.rotationalThreshold / 2)
+    const float mCosHalfRotationalThreshold;
+    std::deque<TimestampedPose> mFifo;
+    bool mWindowFull = false;
+    // As soon as motion is detected, this will be set to the time of detection + window duration,
+    // and until that deadline we will always consider ourselves in motion without checking. This
+    // provides hysteresis: because of the approximate method we use for determining stillness, we
+    // might otherwise toggle back and forth at a rate faster than the window size.
+    std::optional<int64_t> mSuppressionDeadline;
+
+    bool areNear(const Pose3f& pose1, const Pose3f& pose2) const;
+    void discardOld(int64_t timestamp);
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
index 9fea273..2af560e 100644
--- a/media/libheadtracking/include/media/HeadTrackingProcessor.h
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -42,6 +42,12 @@
         float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
         int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
         float predictionDuration = 0;
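+        // Stillness-detection parameters for auto-recentering and screen stillness
+        // (cf. StillnessDetector::Options for the corresponding field semantics).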
+        int64_t autoRecenterWindowDuration = std::numeric_limits<int64_t>::max();
+        float autoRecenterTranslationalThreshold = std::numeric_limits<float>::infinity();
+        float autoRecenterRotationalThreshold = std::numeric_limits<float>::infinity();
+        int64_t screenStillnessWindowDuration = 0;
+        float screenStillnessTranslationalThreshold = std::numeric_limits<float>::infinity();
+        float screenStillnessRotationalThreshold = std::numeric_limits<float>::infinity();
     };
 
     /** Sets the desired head-tracking mode. */
diff --git a/media/libheif/Android.bp b/media/libheif/Android.bp
index 6a3427e..55ba61a 100644
--- a/media/libheif/Android.bp
+++ b/media/libheif/Android.bp
@@ -26,7 +26,5 @@
         "-Wall",
     ],
 
-    include_dirs: [],
-
     export_include_dirs: ["include"],
 }
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 9c1b563..4a2523f 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -347,6 +347,7 @@
 
     shared_libs: [
         "android.hidl.token@1.0-utils",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "av-types-aidl-cpp",
         "liblog",
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index c89c023..c9f361e 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -17,7 +17,6 @@
 
 #include <arpa/inet.h>
 #include <stdint.h>
-#include <sys/types.h>
 
 #include <android/IDataSource.h>
 #include <binder/IPCThreadState.h>
diff --git a/media/libmedia/tests/codeclist/Android.bp b/media/libmedia/tests/codeclist/Android.bp
index 7dd0caa..2ed3126 100644
--- a/media/libmedia/tests/codeclist/Android.bp
+++ b/media/libmedia/tests/codeclist/Android.bp
@@ -25,9 +25,25 @@
 
 cc_test {
     name: "CodecListTest",
-    test_suites: ["device-tests"],
+    test_suites: ["device-tests", "mts"],
     gtest: true,
 
+    // Support multilib variants (using different suffix per sub-architecture), which is needed on
+    // build targets with secondary architectures, as the MTS test suite packaging logic flattens
+    // all test artifacts into a single `testcases` directory.
+    compile_multilib: "both",
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+
+    // Used within mainline MTS; supported back to R (API 30), but not to Q.
+    min_sdk_version: "30",
+
     srcs: [
         "CodecListTest.cpp",
     ],
@@ -35,7 +51,7 @@
     shared_libs: [
         "libbinder",
         "liblog",
-        "libmedia_codeclist",
+        "libmedia_codeclist", // available >= R
         "libstagefright",
         "libstagefright_foundation",
         "libstagefright_xmlparser",
diff --git a/media/libmedia/tests/codeclist/AndroidTest.xml b/media/libmedia/tests/codeclist/AndroidTest.xml
new file mode 100644
index 0000000..eeaab8e
--- /dev/null
+++ b/media/libmedia/tests/codeclist/AndroidTest.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Test module config for CodecList unit tests">
+    <option name="test-suite-tag" value="CodecListTest" />
+    <object type="module_controller" class="com.android.tradefed.testtype.suite.module.Sdk30ModuleController" />
+
+    <target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
+        <option name="cleanup" value="true" />
+        <option name="append-bitness" value="true" />
+        <option name="push" value="CodecListTest->/data/local/tmp/CodecListTest" />
+    </target_preparer>
+
+    <test class="com.android.tradefed.testtype.GTest" >
+        <option name="native-test-device-path" value="/data/local/tmp" />
+        <option name="module-name" value="CodecListTest" />
+    </test>
+
+</configuration>
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index 97b5b95..e29364c 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -30,6 +30,8 @@
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_CALL_SCREEN),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_CALL_REDIRECT),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_COMMUNICATION_REDIRECT),
     TERMINATOR
 };
 
diff --git a/media/libmediametrics/IMediaMetricsService.cpp b/media/libmediametrics/IMediaMetricsService.cpp
deleted file mode 100644
index b5675e6..0000000
--- a/media/libmediametrics/IMediaMetricsService.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaMetrics"
-
-#include <stdint.h>
-#include <inttypes.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <binder/IPCThreadState.h>
-
-#include <utils/Errors.h>  // for status_t
-#include <utils/List.h>
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <media/MediaMetricsItem.h>
-#include <media/IMediaMetricsService.h>
-
-namespace android {
-
-// TODO: Currently ONE_WAY transactions, make both ONE_WAY and synchronous options.
-
-enum {
-    SUBMIT_ITEM = IBinder::FIRST_CALL_TRANSACTION,
-    SUBMIT_BUFFER,
-};
-
-class BpMediaMetricsService: public BpInterface<IMediaMetricsService>
-{
-public:
-    explicit BpMediaMetricsService(const sp<IBinder>& impl)
-        : BpInterface<IMediaMetricsService>(impl)
-    {
-    }
-
-    status_t submit(mediametrics::Item *item) override
-    {
-        if (item == nullptr) {
-            return BAD_VALUE;
-        }
-        ALOGV("%s: (ONEWAY) item=%s", __func__, item->toString().c_str());
-
-        Parcel data;
-        data.writeInterfaceToken(IMediaMetricsService::getInterfaceDescriptor());
-
-        status_t status = item->writeToParcel(&data);
-        if (status != NO_ERROR) { // assume failure logged in item
-            return status;
-        }
-
-        status = remote()->transact(
-                SUBMIT_ITEM, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
-        ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
-                __func__, status);
-        return status;
-    }
-
-    status_t submitBuffer(const char *buffer, size_t length) override
-    {
-        if (buffer == nullptr || length > INT32_MAX) {
-            return BAD_VALUE;
-        }
-        ALOGV("%s: (ONEWAY) length:%zu", __func__, length);
-
-        Parcel data;
-        data.writeInterfaceToken(IMediaMetricsService::getInterfaceDescriptor());
-
-        status_t status = data.writeInt32(length)
-                ?: data.write((uint8_t*)buffer, length);
-        if (status != NO_ERROR) {
-            return status;
-        }
-
-        status = remote()->transact(
-                SUBMIT_BUFFER, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
-        ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
-                __func__, status);
-        return status;
-    }
-};
-
-IMPLEMENT_META_INTERFACE(MediaMetricsService, "android.media.IMediaMetricsService");
-
-// ----------------------------------------------------------------------
-
-status_t BnMediaMetricsService::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    switch (code) {
-    case SUBMIT_ITEM: {
-        CHECK_INTERFACE(IMediaMetricsService, data, reply);
-
-        mediametrics::Item * const item = mediametrics::Item::create();
-        status_t status = item->readFromParcel(data);
-        if (status != NO_ERROR) { // assume failure logged in item
-            return status;
-        }
-        status = submitInternal(item, true /* release */);
-        // assume failure logged by submitInternal
-        return NO_ERROR;
-    }
-    case SUBMIT_BUFFER: {
-        CHECK_INTERFACE(IMediaMetricsService, data, reply);
-        int32_t length;
-        status_t status = data.readInt32(&length);
-        if (status != NO_ERROR || length <= 0) {
-            return BAD_VALUE;
-        }
-        const void *ptr = data.readInplace(length);
-        if (ptr == nullptr) {
-            return BAD_VALUE;
-        }
-        status = submitBuffer(static_cast<const char *>(ptr), length);
-        // assume failure logged by submitBuffer
-        return NO_ERROR;
-    }
-
-    default:
-        return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-} // namespace android
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index 2bf72a7..7f0a045 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -63,6 +63,7 @@
 
 // Error keys
 #define AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR   AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "error"
+#define AMEDIAMETRICS_KEY_AUDIO_RECORD_ERROR  AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD "error"
 
 /*
  * MediaMetrics Properties are unified space for consistency and readability.
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index c416a51..a23d1d9 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -70,6 +70,8 @@
 
     header_libs: [
         "media_plugin_headers",
+        "libmediautils_headers",
+        "libstagefright_rtsp_headers",
     ],
 
     static_libs: [
@@ -77,6 +79,9 @@
         "libstagefright_nuplayer",
         "libstagefright_rtsp",
         "libstagefright_timedtext",
+        // libmediaplayerservice needs this, and although it could be pulled in transitively
+        // through libstagefright, we keep the explicit dependency here.
+        "libstagefright_webm",
         "framework-permission-aidl-cpp",
     ],
 
@@ -85,13 +90,16 @@
         "framework-permission-aidl-cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/rtsp",
-        "frameworks/av/media/libstagefright/webm",
+    export_header_lib_headers: [
+        "libmediautils_headers",
     ],
 
     local_include_dirs: ["include"],
 
+    export_include_dirs: [
+        ".",
+    ],
+
     cflags: [
         "-Werror",
         "-Wno-error=deprecated-declarations",
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index f85887e..c7a7a3a 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1831,7 +1831,6 @@
 {
     close();
     free(mAttributes);
-    delete mCallbackData;
 }
 
 //static
@@ -2052,8 +2051,7 @@
 
         mRecycledTrack.clear();
         close_l();
-        delete mCallbackData;
-        mCallbackData = NULL;
+        mCallbackData.clear();
     }
 }
 
@@ -2174,7 +2172,7 @@
     }
 
     sp<AudioTrack> t;
-    CallbackData *newcbd = NULL;
+    sp<CallbackData> newcbd;
 
     // We don't attempt to create a new track if we are recycling an
     // offloaded track. But, if we are recycling a non-offloaded or we
@@ -2184,8 +2182,8 @@
     if (!(reuse && bothOffloaded)) {
         ALOGV("creating new AudioTrack");
 
-        if (mCallback != NULL) {
-            newcbd = new CallbackData(this);
+        if (mCallback != nullptr) {
+            newcbd = sp<CallbackData>::make(wp<AudioOutput>::fromExisting(this));
             t = new AudioTrack(
                     mStreamType,
                     sampleRate,
@@ -2193,7 +2191,6 @@
                     channelMask,
                     frameCount,
                     flags,
-                    CallbackWrapper,
                     newcbd,
                     0,  // notification frames
                     mSessionId,
@@ -2220,8 +2217,7 @@
                     channelMask,
                     frameCount,
                     flags,
-                    NULL, // callback
-                    NULL, // user data
+                    nullptr, // callback
                     0, // notification frames
                     mSessionId,
                     AudioTrack::TRANSFER_DEFAULT,
@@ -2237,8 +2233,7 @@
         t->setCallerName("media");
         if ((t == 0) || (t->initCheck() != NO_ERROR)) {
             ALOGE("Unable to create audio track");
-            delete newcbd;
-            // t goes out of scope, so reference count drops to zero
+            // t and newcbd go out of scope, so their reference counts drop to zero
             return NO_INIT;
         } else {
             // successful AudioTrack initialization implies a legacy stream type was generated
@@ -2272,7 +2267,6 @@
             if (mCallbackData != NULL) {
                 mCallbackData->setOutput(this);
             }
-            delete newcbd;
             return updateTrack();
         }
     }
@@ -2378,7 +2372,7 @@
             if (mCallbackData != NULL) {
                 // two alternative approaches
 #if 1
-                CallbackData *callbackData = mCallbackData;
+                sp<CallbackData> callbackData = mCallbackData;
                 mLock.unlock();
                 // proper acquisition sequence
                 callbackData->lock();
@@ -2415,9 +2409,8 @@
             // for example, the next player could be prepared and seeked.
             //
             // Presuming it isn't advisable to force the track over.
-             if (mNextOutput->mTrack == NULL) {
+             if (mNextOutput->mTrack == nullptr) {
                 ALOGD("Recycling track for gapless playback");
-                delete mNextOutput->mCallbackData;
                 mNextOutput->mCallbackData = mCallbackData;
                 mNextOutput->mRecycledTrack = mTrack;
                 mNextOutput->mSampleRateHz = mSampleRateHz;
@@ -2425,11 +2418,11 @@
                 mNextOutput->mFlags = mFlags;
                 mNextOutput->mFrameSize = mFrameSize;
                 close_l();
-                mCallbackData = NULL;  // destruction handled by mNextOutput
+                mCallbackData.clear();
             } else {
                 ALOGW("Ignoring gapless playback because next player has already started");
                 // remove track in case resource needed for future players.
-                if (mCallbackData != NULL) {
+                if (mCallbackData != nullptr) {
                     mCallbackData->endTrackSwitch();  // release lock for callbacks before close.
                 }
                 close_l();
@@ -2656,76 +2649,71 @@
     }
 }
 
-// static
-void MediaPlayerService::AudioOutput::CallbackWrapper(
-        int event, void *cookie, void *info) {
-    //ALOGV("callbackwrapper");
-    CallbackData *data = (CallbackData*)cookie;
-    // lock to ensure we aren't caught in the middle of a track switch.
-    data->lock();
-    AudioOutput *me = data->getOutput();
-    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-    if (me == NULL) {
-        // no output set, likely because the track was scheduled to be reused
-        // by another player, but the format turned out to be incompatible.
-        data->unlock();
-        if (buffer != NULL) {
-            buffer->size = 0;
-        }
+size_t MediaPlayerService::AudioOutput::CallbackData::onMoreData(const AudioTrack::Buffer& buffer) {
+    ALOGD("data callback");
+    lock();
+    sp<AudioOutput> me = getOutput();
+    if (me == nullptr) {
+        unlock();
+        return 0;
+    }
+    size_t actualSize = (*me->mCallback)(
+            me.get(), buffer.raw, buffer.size, me->mCallbackCookie,
+            CB_EVENT_FILL_BUFFER);
+
+    // Log when no data is returned from the callback.
+    // (1) We may have no data (especially with network streaming sources).
+    // (2) We may have reached the EOS and the audio track is not stopped yet.
+    // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
+    // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
+    //
+    // This is a benign busy-wait, with the next data request generated 10 ms or more later;
+    // nevertheless for power reasons, we don't want to see too many of these.
+
+    ALOGV_IF(actualSize == 0 && buffer.size > 0, "callbackwrapper: empty buffer returned");
+    unlock();
+    return actualSize;
+}
+
+void MediaPlayerService::AudioOutput::CallbackData::onStreamEnd() {
+    lock();
+    sp<AudioOutput> me = getOutput();
+    if (me == nullptr) {
+        unlock();
         return;
     }
+    ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
+    (*me->mCallback)(me.get(), NULL /* buffer */, 0 /* size */,
+            me->mCallbackCookie, CB_EVENT_STREAM_END);
+    unlock();
+}
 
-    switch(event) {
-    case AudioTrack::EVENT_MORE_DATA: {
-        size_t actualSize = (*me->mCallback)(
-                me, buffer->raw, buffer->size, me->mCallbackCookie,
-                CB_EVENT_FILL_BUFFER);
 
-        // Log when no data is returned from the callback.
-        // (1) We may have no data (especially with network streaming sources).
-        // (2) We may have reached the EOS and the audio track is not stopped yet.
-        // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
-        // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
-        //
-        // This is a benign busy-wait, with the next data request generated 10 ms or more later;
-        // nevertheless for power reasons, we don't want to see too many of these.
-
-        ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
-
-        buffer->size = actualSize;
-        } break;
-
-    case AudioTrack::EVENT_STREAM_END:
-        // currently only occurs for offloaded callbacks
-        ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
-        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
-                me->mCallbackCookie, CB_EVENT_STREAM_END);
-        break;
-
-    case AudioTrack::EVENT_NEW_IAUDIOTRACK :
-        ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
-        (*me->mCallback)(me,  NULL /* buffer */, 0 /* size */,
-                me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
-        break;
-
-    case AudioTrack::EVENT_UNDERRUN:
-        // This occurs when there is no data available, typically
-        // when there is a failure to supply data to the AudioTrack.  It can also
-        // occur in non-offloaded mode when the audio device comes out of standby.
-        //
-        // If an AudioTrack underruns it outputs silence. Since this happens suddenly
-        // it may sound like an audible pop or glitch.
-        //
-        // The underrun event is sent once per track underrun; the condition is reset
-        // when more data is sent to the AudioTrack.
-        ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
-        break;
-
-    default:
-        ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
+void MediaPlayerService::AudioOutput::CallbackData::onNewIAudioTrack() {
+    lock();
+    sp<AudioOutput> me = getOutput();
+    if (me == nullptr) {
+        unlock();
+        return;
     }
+    ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
+    (*me->mCallback)(me.get(),  NULL /* buffer */, 0 /* size */,
+            me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
+    unlock();
+}
 
-    data->unlock();
+void MediaPlayerService::AudioOutput::CallbackData::onUnderrun() {
+    // This occurs when there is no data available, typically
+    // when there is a failure to supply data to the AudioTrack.  It can also
+    // occur in non-offloaded mode when the audio device comes out of standby.
+    //
+    // If an AudioTrack underruns it outputs silence. Since this happens suddenly
+    // it may sound like an audible pop or glitch.
+    //
+    // The underrun event is sent once per track underrun; the condition is reset
+    // when more data is sent to the AudioTrack.
+    ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
 }
 
 audio_session_t MediaPlayerService::AudioOutput::getSessionId() const
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 98091be..86be3fe 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -30,9 +30,11 @@
 #include <media/AidlConversion.h>
 #include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/Metadata.h>
 #include <media/stagefright/foundation/ABase.h>
+#include <mediautils/Synchronization.h>
 #include <android/content/AttributionSourceState.h>
 
 #include <system/audio.h>
@@ -41,7 +43,6 @@
 
 using content::AttributionSourceState;
 
-class AudioTrack;
 struct AVSyncSettings;
 class DeathNotifier;
 class IDataSource;
@@ -161,7 +162,7 @@
         sp<AudioOutput>         mNextOutput;
         AudioCallback           mCallback;
         void *                  mCallbackCookie;
-        CallbackData *          mCallbackData;
+        sp<CallbackData>        mCallbackData;
         audio_stream_type_t     mStreamType;
         audio_attributes_t *    mAttributes;
         float                   mLeftVolume;
@@ -189,15 +190,15 @@
         // CallbackData is what is passed to the AudioTrack as the "user" data.
         // We need to be able to target this to a different Output on the fly,
         // so we can't use the Output itself for this.
-        class CallbackData {
+        class CallbackData : public AudioTrack::IAudioTrackCallback {
             friend AudioOutput;
         public:
-            explicit CallbackData(AudioOutput *cookie) {
+            explicit CallbackData(const wp<AudioOutput>& cookie) {
                 mData = cookie;
                 mSwitching = false;
             }
-            AudioOutput *   getOutput() const { return mData; }
-            void            setOutput(AudioOutput* newcookie) { mData = newcookie; }
+            sp<AudioOutput> getOutput() const { return mData.load().promote(); }
+            void            setOutput(const wp<AudioOutput>& newcookie) { mData.store(newcookie); }
             // lock/unlock are used by the callback before accessing the payload of this object
             void            lock() const { mLock.lock(); }
             void            unlock() const { mLock.unlock(); }
@@ -220,8 +221,13 @@
                 }
                 mSwitching = false;
             }
+        protected:
+            size_t onMoreData(const AudioTrack::Buffer& buffer) override;
+            void onUnderrun() override;
+            void onStreamEnd() override;
+            void onNewIAudioTrack() override;
         private:
-            AudioOutput *   mData;
+            mediautils::atomic_wp<AudioOutput> mData;
             mutable Mutex   mLock; // a recursive mutex might make this unnecessary.
             bool            mSwitching;
             DISALLOW_EVIL_CONSTRUCTORS(CallbackData);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index a914006..4aa80be 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -126,8 +126,13 @@
     }
 
     if ((as == AUDIO_SOURCE_FM_TUNER
-            && !(captureAudioOutputAllowed(mAttributionSource)
+                && !(captureAudioOutputAllowed(mAttributionSource)
                     || captureTunerAudioInputAllowed(mAttributionSource)))
+            || (as == AUDIO_SOURCE_REMOTE_SUBMIX
+                && !(captureAudioOutputAllowed(mAttributionSource)
+                    || modifyAudioRoutingAllowed(mAttributionSource)))
+            || (as == AUDIO_SOURCE_ECHO_REFERENCE
+                && !captureAudioOutputAllowed(mAttributionSource))
             || !recordingAllowed(mAttributionSource, (audio_source_t)as)) {
         return PERMISSION_DENIED;
     }
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 6347b7a..ea1fdf4 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -24,7 +24,8 @@
 #include <android-base/logging.h>
 #include <utils/Log.h>
 
-#include "WebmWriter.h"
+#include <webm/WebmWriter.h>
+
 #include "StagefrightRecorder.h"
 
 #include <algorithm>
@@ -66,7 +67,7 @@
 
 #include <system/audio.h>
 
-#include "ARTPWriter.h"
+#include <media/stagefright/rtsp/ARTPWriter.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index d6de47f..d7785da 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -26,6 +26,7 @@
 #include <system/audio.h>
 
 #include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/AString.h>
 #include <android/content/AttributionSourceState.h>
 
 namespace android {
diff --git a/media/libmediaplayerservice/fuzzer/Android.bp b/media/libmediaplayerservice/fuzzer/Android.bp
new file mode 100644
index 0000000..a36f1d6
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/Android.bp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_media_libmediaplayerservice_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: [
+        "frameworks_av_media_libmediaplayerservice_license",
+    ],
+}
+
+cc_defaults {
+    name: "libmediaplayerserviceFuzzer_defaults",
+    static_libs: [
+        "libmediaplayerservice",
+        "liblog",
+    ],
+    shared_libs: [
+        "framework-permission-aidl-cpp",
+        "libbinder",
+        "libcutils",
+        "libmedia",
+        "libstagefright",
+        "libutils",
+        "libstagefright_foundation",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
+
+cc_fuzz {
+    name: "mediarecorder_fuzzer",
+    srcs: [
+        "mediarecorder_fuzzer.cpp",
+    ],
+    defaults: [
+        "libmediaplayerserviceFuzzer_defaults",
+    ],
+    static_libs: [
+        "libstagefright_rtsp",
+        "libbase",
+    ],
+    shared_libs: [
+        "av-types-aidl-cpp",
+        "media_permission-aidl-cpp",
+        "libaudioclient_aidl_conversion",
+        "libandroid_net",
+        "libcamera_client",
+        "libgui",
+        "libmediametrics",
+    ],
+}
+
+cc_fuzz {
+    name: "metadataretriever_fuzzer",
+    srcs: [
+        "metadataretriever_fuzzer.cpp",
+    ],
+    defaults: [
+        "libmediaplayerserviceFuzzer_defaults",
+    ],
+    static_libs: [
+        "libplayerservice_datasource",
+    ],
+    shared_libs: [
+        "libdatasource",
+        "libdrmframework",
+    ],
+}
+
+cc_fuzz {
+    name: "mediaplayer_fuzzer",
+    srcs: [
+        "mediaplayer_fuzzer.cpp",
+    ],
+    defaults: [
+        "libmediaplayerserviceFuzzer_defaults",
+    ],
+    static_libs: [
+        "libplayerservice_datasource",
+        "libstagefright_nuplayer",
+        "libstagefright_rtsp",
+        "libstagefright_timedtext",
+    ],
+    shared_libs: [
+        "android.hardware.media.c2@1.0",
+        "android.hardware.media.omx@1.0",
+        "av-types-aidl-cpp",
+        "libaudioclient_aidl_conversion",
+        "libbase",
+        "libactivitymanager_aidl",
+        "libandroid_net",
+        "libaudioclient",
+        "libcamera_client",
+        "libcodec2_client",
+        "libcrypto",
+        "libdatasource",
+        "libdrmframework",
+        "libgui",
+        "libhidlbase",
+        "liblog",
+        "libmedia_codeclist",
+        "libmedia_omx",
+        "libmediadrm",
+        "libmediametrics",
+        "libmediautils",
+        "libmemunreachable",
+        "libnetd_client",
+        "libpowermanager",
+        "libstagefright_httplive",
+    ],
+}
diff --git a/media/libmediaplayerservice/fuzzer/README.md b/media/libmediaplayerservice/fuzzer/README.md
new file mode 100644
index 0000000..a93c809
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/README.md
@@ -0,0 +1,83 @@
+# Fuzzer for libmediaplayerservice
+## Table of contents
++ [StagefrightMediaRecorder](#StagefrightMediaRecorder)
++ [StagefrightMetadataRetriever](#StagefrightMetadataRetriever)
++ [MediaPlayer](#MediaPlayer)
+
+# <a name="StagefrightMediaRecorder"></a> Fuzzer for StagefrightMediaRecorder
+
+StagefrightMediaRecorder supports the following parameters:
+1. Output Formats (parameter name: `setOutputFormat`)
+2. Audio Encoders (parameter name: `setAudioEncoder`)
+3. Video Encoders (parameter name: `setVideoEncoder`)
+4. Audio Sources (parameter name: `setAudioSource`)
+5. Video Sources (parameter name: `setVideoSource`)
+6. Microphone Direction (parameter name: `setMicrophoneDirection`)
+
+You can find the possible values in the fuzzer's source code.
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) mediarecorder_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/mediarecorder_fuzzer/mediarecorder_fuzzer
+```
+
+# <a name="StagefrightMetadataRetriever"></a> Fuzzer for StagefrightMetadataRetriever
+
+StagefrightMetadataRetriever supports the following data sources:
+1. Url (parameter name: `url`)
+2. File descriptor (parameter name: `fd`)
+3. DataSource (parameter name: `source`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `url` | Url of data source | Value obtained from FuzzedDataProvider |
+| `fd` | File descriptor value of input file | Value obtained from FuzzedDataProvider |
+| `source` | DataSource object | Data obtained from FuzzedDataProvider |
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) metadataretriever_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/metadataretriever_fuzzer/metadataretriever_fuzzer
+```
+
+# <a name="MediaPlayer"></a> Fuzzer for MediaPlayer
+
+MediaPlayerService supports the following data sources:
+1. Url (parameter name: `url`)
+2. File descriptor (parameter name: `fd`)
+3. IStreamSource  (parameter name: `source`)
+4. IDataSource (parameter name: `source`)
+5. RTP Parameters  (parameter name: `rtpParams`)
+
+MediaPlayerService supports the following parameters:
+1. Audio sessions (parameter name: `audioSessionId`)
+2. Audio stretch modes (parameter name: `mStretchMode`)
+3. Audio fallback modes  (parameter name: `mFallbackMode`)
+4. Media parameter keys (parameter name: `key`)
+5. Audio Stream Types (parameter name: `streamType`)
+6. Media Event Types (parameter name: `msg`)
+7. Media Info Types (parameter name: `ext1`)
+
+You can find the possible values in the fuzzer's source code.
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) mediaplayer_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/mediaplayer_fuzzer/mediaplayer_fuzzer
+```
diff --git a/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
new file mode 100644
index 0000000..7799f44
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <MediaPlayerService.h>
+#include <camera/Camera.h>
+#include <datasource/FileSource.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/IMediaCodecList.h>
+#include <media/IMediaHTTPService.h>
+#include <media/IMediaPlayer.h>
+#include <media/IMediaRecorder.h>
+#include <media/IRemoteDisplay.h>
+#include <media/IRemoteDisplayClient.h>
+#include <media/stagefright/RemoteDataSource.h>
+#include <media/stagefright/foundation/base64.h>
+#include <thread>
+#include "fuzzer/FuzzedDataProvider.h"
+
+constexpr int32_t kUuidSize = 16;
+constexpr int32_t kMaxSleepTimeInMs = 100;
+constexpr int32_t kMinSleepTimeInMs = 0;
+constexpr int32_t kPlayCountMin = 1;
+constexpr int32_t kPlayCountMax = 10;
+constexpr int32_t kMaxDimension = 8192;
+constexpr int32_t kMinDimension = 0;
+
+using namespace std;
+using namespace android;
+
+constexpr audio_session_t kSupportedAudioSessions[] = {
+    AUDIO_SESSION_DEVICE, AUDIO_SESSION_OUTPUT_STAGE, AUDIO_SESSION_OUTPUT_MIX};
+
+constexpr audio_timestretch_stretch_mode_t kAudioStretchModes[] = {
+    AUDIO_TIMESTRETCH_STRETCH_DEFAULT, AUDIO_TIMESTRETCH_STRETCH_VOICE};
+
+constexpr audio_timestretch_fallback_mode_t kAudioFallbackModes[] = {
+    AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT, AUDIO_TIMESTRETCH_FALLBACK_DEFAULT,
+    AUDIO_TIMESTRETCH_FALLBACK_MUTE, AUDIO_TIMESTRETCH_FALLBACK_FAIL};
+
+constexpr media_parameter_keys kMediaParamKeys[] = {
+    KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS, KEY_PARAMETER_AUDIO_CHANNEL_COUNT,
+    KEY_PARAMETER_PLAYBACK_RATE_PERMILLE, KEY_PARAMETER_AUDIO_ATTRIBUTES,
+    KEY_PARAMETER_RTP_ATTRIBUTES};
+
+constexpr audio_stream_type_t kAudioStreamTypes[] = {
+    AUDIO_STREAM_DEFAULT,      AUDIO_STREAM_VOICE_CALL,    AUDIO_STREAM_SYSTEM,
+    AUDIO_STREAM_RING,         AUDIO_STREAM_MUSIC,         AUDIO_STREAM_ALARM,
+    AUDIO_STREAM_NOTIFICATION, AUDIO_STREAM_BLUETOOTH_SCO, AUDIO_STREAM_ENFORCED_AUDIBLE,
+    AUDIO_STREAM_DTMF,         AUDIO_STREAM_TTS,           AUDIO_STREAM_ASSISTANT};
+
+constexpr media_event_type kMediaEventTypes[] = {MEDIA_NOP,
+                                                 MEDIA_PREPARED,
+                                                 MEDIA_PLAYBACK_COMPLETE,
+                                                 MEDIA_BUFFERING_UPDATE,
+                                                 MEDIA_SEEK_COMPLETE,
+                                                 MEDIA_SET_VIDEO_SIZE,
+                                                 MEDIA_STARTED,
+                                                 MEDIA_PAUSED,
+                                                 MEDIA_STOPPED,
+                                                 MEDIA_SKIPPED,
+                                                 MEDIA_NOTIFY_TIME,
+                                                 MEDIA_TIMED_TEXT,
+                                                 MEDIA_ERROR,
+                                                 MEDIA_INFO,
+                                                 MEDIA_SUBTITLE_DATA,
+                                                 MEDIA_META_DATA,
+                                                 MEDIA_DRM_INFO,
+                                                 MEDIA_TIME_DISCONTINUITY,
+                                                 MEDIA_IMS_RX_NOTICE,
+                                                 MEDIA_AUDIO_ROUTING_CHANGED};
+
+constexpr media_info_type kMediaInfoTypes[] = {
+    MEDIA_INFO_UNKNOWN,           MEDIA_INFO_STARTED_AS_NEXT,
+    MEDIA_INFO_RENDERING_START,   MEDIA_INFO_VIDEO_TRACK_LAGGING,
+    MEDIA_INFO_BUFFERING_START,   MEDIA_INFO_BUFFERING_END,
+    MEDIA_INFO_NETWORK_BANDWIDTH, MEDIA_INFO_BAD_INTERLEAVING,
+    MEDIA_INFO_NOT_SEEKABLE,      MEDIA_INFO_METADATA_UPDATE,
+    MEDIA_INFO_PLAY_AUDIO_ERROR,  MEDIA_INFO_PLAY_VIDEO_ERROR,
+    MEDIA_INFO_TIMED_TEXT_ERROR};
+
+const char *kUrlPrefix[] = {"data:", "http://", "https://", "rtsp://", "content://", "test://"};
+
+struct TestStreamSource : public IStreamSource {
+    void setListener(const sp<IStreamListener> & /*listener*/) override{};
+    void setBuffers(const Vector<sp<IMemory>> & /*buffers*/) override{};
+    void onBufferAvailable(size_t /*index*/) override{};
+    IBinder *onAsBinder() { return nullptr; };
+};
+
+class BinderDeathNotifier : public IBinder::DeathRecipient {
+   public:
+    void binderDied(const wp<IBinder> &) { abort(); }
+};
+
+class MediaPlayerServiceFuzzer {
+   public:
+    MediaPlayerServiceFuzzer(const uint8_t *data, size_t size)
+        : mFdp(data, size), mDataSourceFd(memfd_create("InputFile", MFD_ALLOW_SEALING)){};
+    ~MediaPlayerServiceFuzzer() { close(mDataSourceFd); };
+    void process(const uint8_t *data, size_t size);
+
+   private:
+    bool setDataSource(const uint8_t *data, size_t size);
+    void invokeMediaPlayer();
+    FuzzedDataProvider mFdp;
+    sp<IMediaPlayer> mMediaPlayer = nullptr;
+    sp<IMediaPlayerClient> mMediaPlayerClient = nullptr;
+    const int32_t mDataSourceFd;
+};
+
+bool MediaPlayerServiceFuzzer::setDataSource(const uint8_t *data, size_t size) {
+    status_t status = -1;
+    enum DataSourceType {http, fd, stream, file, socket, kMaxValue = socket};
+    switch (mFdp.ConsumeEnum<DataSourceType>()) {
+        case http: {
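+            // Build a URI by appending base64-encoded fuzz data to a randomly chosen scheme
+            // prefix; only the "data:" prefix yields a well-formed data URI, while the other
+            // prefixes produce arbitrary URLs.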
+            KeyedVector<String8, String8> headers;
+            headers.add(String8(mFdp.ConsumeRandomLengthString().c_str()),
+                        String8(mFdp.ConsumeRandomLengthString().c_str()));
+
+            uint32_t dataBlobSize = mFdp.ConsumeIntegralInRange<uint16_t>(0, size);
+            vector<uint8_t> uriSuffix = mFdp.ConsumeBytes<uint8_t>(dataBlobSize);
+
+            string uri(mFdp.PickValueInArray(kUrlPrefix));
+            uri += ";base64,";
+            AString out;
+            encodeBase64(uriSuffix.data(), uriSuffix.size(), &out);
+            uri += out.c_str();
+            status = mMediaPlayer->setDataSource(nullptr /*httpService*/, uri.c_str(), &headers);
+            break;
+        }
+        case fd: {
+            write(mDataSourceFd, data, size);
+
+            status = mMediaPlayer->setDataSource(mDataSourceFd, 0, size);
+            break;
+        }
+        case stream: {
+            sp<IStreamSource> streamSource = sp<TestStreamSource>::make();
+            status = mMediaPlayer->setDataSource(streamSource);
+            break;
+        }
+        case file: {
+            write(mDataSourceFd, data, size);
+
+            sp<DataSource> dataSource = new FileSource(dup(mDataSourceFd), 0, size);
+            sp<IDataSource> iDataSource = RemoteDataSource::wrap(dataSource);
+            if (!iDataSource) {
+                return false;
+            }
+            status = mMediaPlayer->setDataSource(iDataSource);
+            break;
+        }
+        case socket: {
+            String8 rtpParams = String8(mFdp.ConsumeRandomLengthString().c_str());
+            struct sockaddr_in endpoint;
+            endpoint.sin_family = mFdp.ConsumeIntegral<unsigned short>();
+            endpoint.sin_port = mFdp.ConsumeIntegral<uint16_t>();
+            mMediaPlayer->setRetransmitEndpoint(&endpoint);
+            status = mMediaPlayer->setDataSource(rtpParams);
+            break;
+        }
+    }
+
+    if (status != 0) {
+        return false;
+    }
+    return true;
+}
+
+void MediaPlayerServiceFuzzer::invokeMediaPlayer() {
+    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+    String8 name = String8(mFdp.ConsumeRandomLengthString().c_str());
+    uint32_t width = mFdp.ConsumeIntegralInRange<uint32_t>(kMinDimension, kMaxDimension);
+    uint32_t height = mFdp.ConsumeIntegralInRange<uint32_t>(kMinDimension, kMaxDimension);
+    uint32_t pixelFormat = mFdp.ConsumeIntegral<int32_t>();
+    uint32_t flags = mFdp.ConsumeIntegral<int32_t>();
+    sp<SurfaceControl> surfaceControl =
+        composerClient->createSurface(name, width, height, pixelFormat, flags);
+    if (surfaceControl) {
+        sp<Surface> surface = surfaceControl->getSurface();
+        mMediaPlayer->setVideoSurfaceTexture(surface->getIGraphicBufferProducer());
+    }
+
+    BufferingSettings buffering;
+    buffering.mInitialMarkMs = mFdp.ConsumeIntegral<int32_t>();
+    buffering.mResumePlaybackMarkMs = mFdp.ConsumeIntegral<int32_t>();
+    mMediaPlayer->setBufferingSettings(buffering);
+    mMediaPlayer->getBufferingSettings(&buffering);
+
+    mMediaPlayer->prepareAsync();
+    size_t playCount = mFdp.ConsumeIntegralInRange<size_t>(kPlayCountMin, kPlayCountMax);
+    for (size_t Idx = 0; Idx < playCount; ++Idx) {
+        mMediaPlayer->start();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mMediaPlayer->pause();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mMediaPlayer->stop();
+    }
+    bool state;
+    mMediaPlayer->isPlaying(&state);
+
+    AudioPlaybackRate rate;
+    rate.mSpeed = mFdp.ConsumeFloatingPoint<float>();
+    rate.mPitch = mFdp.ConsumeFloatingPoint<float>();
+    rate.mStretchMode = mFdp.PickValueInArray(kAudioStretchModes);
+    rate.mFallbackMode = mFdp.PickValueInArray(kAudioFallbackModes);
+    mMediaPlayer->setPlaybackSettings(rate);
+    mMediaPlayer->getPlaybackSettings(&rate);
+
+    AVSyncSettings *avSyncSettings = new AVSyncSettings();
+    float videoFpsHint = mFdp.ConsumeFloatingPoint<float>();
+    mMediaPlayer->setSyncSettings(*avSyncSettings, videoFpsHint);
+    mMediaPlayer->getSyncSettings(avSyncSettings, &videoFpsHint);
+    delete avSyncSettings;
+
+    mMediaPlayer->seekTo(mFdp.ConsumeIntegral<int32_t>());
+
+    int32_t msec;
+    mMediaPlayer->getCurrentPosition(&msec);
+    mMediaPlayer->getDuration(&msec);
+    mMediaPlayer->reset();
+
+    mMediaPlayer->notifyAt(mFdp.ConsumeIntegral<int64_t>());
+
+    mMediaPlayer->setAudioStreamType(mFdp.PickValueInArray(kAudioStreamTypes));
+    mMediaPlayer->setLooping(mFdp.ConsumeIntegral<int32_t>());
+    float left = mFdp.ConsumeFloatingPoint<float>();
+    float right = mFdp.ConsumeFloatingPoint<float>();
+    mMediaPlayer->setVolume(left, right);
+
+    Parcel request, reply;
+    request.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    request.setDataPosition(0);
+    mMediaPlayer->invoke(request, &reply);
+
+    Parcel filter;
+    filter.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    filter.setDataPosition(0);
+    mMediaPlayer->setMetadataFilter(filter);
+
+    bool updateOnly = mFdp.ConsumeBool();
+    bool applyFilter = mFdp.ConsumeBool();
+    mMediaPlayer->getMetadata(updateOnly, applyFilter, &reply);
+    mMediaPlayer->setAuxEffectSendLevel(mFdp.ConsumeFloatingPoint<float>());
+    mMediaPlayer->attachAuxEffect(mFdp.ConsumeIntegral<int32_t>());
+
+    int32_t key = mFdp.PickValueInArray(kMediaParamKeys);
+    request.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    request.setDataPosition(0);
+    mMediaPlayer->setParameter(key, request);
+    key = mFdp.PickValueInArray(kMediaParamKeys);
+    mMediaPlayer->getParameter(key, &reply);
+
+    struct sockaddr_in endpoint;
+    mMediaPlayer->getRetransmitEndpoint(&endpoint);
+
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    const sp<IMediaPlayerService> mpService(IMediaDeathNotifier::getMediaPlayerService());
+    sp<IMediaPlayer> mNextMediaPlayer = mpService->create(
+        mMediaPlayerClient, mFdp.PickValueInArray(kSupportedAudioSessions), attributionSource);
+    mMediaPlayer->setNextPlayer(mNextMediaPlayer);
+
+    const sp<media::VolumeShaper::Configuration> configuration =
+        sp<media::VolumeShaper::Configuration>::make();
+    const sp<media::VolumeShaper::Operation> operation = sp<media::VolumeShaper::Operation>::make();
+    mMediaPlayer->applyVolumeShaper(configuration, operation);
+
+    mMediaPlayer->getVolumeShaperState(mFdp.ConsumeIntegral<int32_t>());
+    uint8_t uuid[kUuidSize];
+    for (int32_t index = 0; index < kUuidSize; ++index) {
+        uuid[index] = mFdp.ConsumeIntegral<uint8_t>();
+    }
+    Vector<uint8_t> drmSessionId;
+    drmSessionId.push_back(mFdp.ConsumeIntegral<uint8_t>());
+    mMediaPlayer->prepareDrm(uuid, drmSessionId);
+    mMediaPlayer->releaseDrm();
+
+    audio_port_handle_t deviceId = mFdp.ConsumeIntegral<int32_t>();
+    mMediaPlayer->setOutputDevice(deviceId);
+    mMediaPlayer->getRoutedDeviceId(&deviceId);
+
+    mMediaPlayer->enableAudioDeviceCallback(mFdp.ConsumeBool());
+
+    sp<MediaPlayer> mediaPlayer = (MediaPlayer *)mMediaPlayer.get();
+
+    int32_t msg = mFdp.PickValueInArray(kMediaEventTypes);
+    int32_t ext1 = mFdp.PickValueInArray(kMediaInfoTypes);
+    int32_t ext2 = mFdp.ConsumeIntegral<int32_t>();
+    Parcel obj;
+    obj.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    obj.setDataPosition(0);
+    mediaPlayer->notify(msg, ext1, ext2, &obj);
+
+    int32_t mediaPlayerDumpFd = memfd_create("OutputDumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mediaPlayer->dump(mediaPlayerDumpFd, args);
+    close(mediaPlayerDumpFd);
+
+    mMediaPlayer->disconnect();
+}
+
+void MediaPlayerServiceFuzzer::process(const uint8_t *data, size_t size) {
+    MediaPlayerService::instantiate();
+
+    const sp<IMediaPlayerService> mpService(IMediaDeathNotifier::getMediaPlayerService());
+    if (!mpService) {
+        return;
+    }
+
+    sp<IMediaCodecList> mediaCodecList = mpService->getCodecList();
+
+    sp<IRemoteDisplayClient> remoteDisplayClient;
+    sp<IRemoteDisplay> remoteDisplay = mpService->listenForRemoteDisplay(
+        String16(mFdp.ConsumeRandomLengthString().c_str()) /*opPackageName*/, remoteDisplayClient,
+        String8(mFdp.ConsumeRandomLengthString().c_str()) /*iface*/);
+
+    mpService->addBatteryData(mFdp.ConsumeIntegral<uint32_t>());
+    Parcel reply;
+    mpService->pullBatteryData(&reply);
+
+    sp<MediaPlayerService> mediaPlayerService = (MediaPlayerService *)mpService.get();
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    mMediaPlayer = mediaPlayerService->create(
+        mMediaPlayerClient, mFdp.PickValueInArray(kSupportedAudioSessions), attributionSource);
+
+    int32_t mediaPlayerServiceDumpFd = memfd_create("OutputDumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mediaPlayerService->dump(mediaPlayerServiceDumpFd, args);
+    close(mediaPlayerServiceDumpFd);
+
+    if (!mMediaPlayer) {
+        return;
+    }
+
+    if (setDataSource(data, size)) {
+        invokeMediaPlayer();
+    }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    MediaPlayerServiceFuzzer mpsFuzzer(data, size);
+    ProcessState::self()->startThreadPool();
+    mpsFuzzer.process(data, size);
+    return 0;
+};
diff --git a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
new file mode 100644
index 0000000..b0040fe
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <media/stagefright/foundation/AString.h>
+#include "fuzzer/FuzzedDataProvider.h"
+
+#include <StagefrightRecorder.h>
+#include <camera/Camera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/stagefright/PersistentSurface.h>
+#include <thread>
+
+using namespace std;
+using namespace android;
+using namespace android::hardware;
+
+constexpr video_source kSupportedVideoSources[] = {VIDEO_SOURCE_DEFAULT, VIDEO_SOURCE_CAMERA,
+                                                   VIDEO_SOURCE_SURFACE};
+
+constexpr audio_source_t kSupportedAudioSources[] = {
+    AUDIO_SOURCE_DEFAULT,           AUDIO_SOURCE_MIC,
+    AUDIO_SOURCE_VOICE_UPLINK,      AUDIO_SOURCE_VOICE_DOWNLINK,
+    AUDIO_SOURCE_VOICE_CALL,        AUDIO_SOURCE_CAMCORDER,
+    AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_SOURCE_VOICE_COMMUNICATION,
+    AUDIO_SOURCE_REMOTE_SUBMIX,     AUDIO_SOURCE_UNPROCESSED,
+    AUDIO_SOURCE_VOICE_PERFORMANCE, AUDIO_SOURCE_ECHO_REFERENCE,
+    AUDIO_SOURCE_FM_TUNER,          AUDIO_SOURCE_HOTWORD};
+
+constexpr audio_microphone_direction_t kSupportedMicrophoneDirections[] = {
+    MIC_DIRECTION_UNSPECIFIED, MIC_DIRECTION_FRONT, MIC_DIRECTION_BACK, MIC_DIRECTION_EXTERNAL};
+
+struct RecordingConfig {
+    output_format outputFormat;
+    audio_encoder audioEncoder;
+    video_encoder videoEncoder;
+};
+
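+// Container format / audio encoder / video encoder combinations exercised by the
+// fuzzer; setConfig() picks one entry at random.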
+const struct RecordingConfig kRecordingConfigList[] = {
+    {OUTPUT_FORMAT_AMR_NB, AUDIO_ENCODER_AMR_NB, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AMR_WB, AUDIO_ENCODER_AMR_WB, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_HE_AAC, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC_ELD, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_OGG, AUDIO_ENCODER_OPUS, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_RTP_AVP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_MPEG2TS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
+    {OUTPUT_FORMAT_WEBM, AUDIO_ENCODER_VORBIS, VIDEO_ENCODER_VP8},
+    {OUTPUT_FORMAT_THREE_GPP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_HEVC}};
+
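+// Parameter keys passed to StagefrightRecorder::setParameters(); setConfig()
+// appends a fuzzed integer value to a random subset of them.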
+const string kParametersList[] = {"max-duration",
+                                  "max-filesize",
+                                  "interleave-duration-us",
+                                  "param-movie-time-scale",
+                                  "param-geotag-longitude",
+                                  "param-geotag-latitude",
+                                  "param-track-time-status",
+                                  "audio-param-sampling-rate",
+                                  "audio-param-encoding-bitrate",
+                                  "audio-param-number-of-channels",
+                                  "audio-param-time-scale",
+                                  "video-param-rotation-angle-degrees",
+                                  "video-param-encoding-bitrate",
+                                  "video-param-bitrate-mode",
+                                  "video-param-i-frames-interval",
+                                  "video-param-encoder-profile",
+                                  "video-param-encoder-level",
+                                  "video-param-camera-id",
+                                  "video-param-time-scale",
+                                  "param-use-64bit-offset",
+                                  "time-lapse-enable",
+                                  "time-lapse-fps",
+                                  "rtp-param-local-ip",
+                                  "rtp-param-local-port",
+                                  "rtp-param-remote-port",
+                                  "rtp-param-self-id",
+                                  "rtp-param-opponent-id",
+                                  "rtp-param-payload-type",
+                                  "rtp-param-ext-cvo-extmap",
+                                  "rtp-param-ext-cvo-degrees",
+                                  "video-param-request-i-frame",
+                                  "rtp-param-set-socket-dscp",
+                                  "rtp-param-set-socket-network"};
+
+constexpr int32_t kMaxSleepTimeInMs = 100;
+constexpr int32_t kMinSleepTimeInMs = 0;
+constexpr int32_t kMinVideoSize = 2;
+constexpr int32_t kMaxVideoSize = 8192;
+constexpr int32_t kNumRecordMin = 1;
+constexpr int32_t kNumRecordMax = 10;
+
+class TestAudioDeviceCallback : public AudioSystem::AudioDeviceCallback {
+   public:
+    virtual ~TestAudioDeviceCallback() = default;
+
+    void onAudioDeviceUpdate(audio_io_handle_t /*audioIo*/,
+                             audio_port_handle_t /*deviceId*/) override{};
+};
+
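+// No-op ICamera implementation so that setCamera() can be exercised without real
+// camera hardware.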
+class TestCamera : public ICamera {
+   public:
+    virtual ~TestCamera() = default;
+
+    binder::Status disconnect() override { return binder::Status::ok(); };
+    status_t connect(const sp<ICameraClient> & /*client*/) override { return 0; };
+    status_t lock() override { return 0; };
+    status_t unlock() override { return 0; };
+    status_t setPreviewTarget(const sp<IGraphicBufferProducer> & /*bufferProducer*/) override {
+        return 0;
+    };
+    void setPreviewCallbackFlag(int /*flag*/) override{};
+    status_t setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer> & /*callbackProducer*/) override {
+        return 0;
+    };
+    status_t startPreview() override { return 0; };
+    void stopPreview() override{};
+    bool previewEnabled() override { return true; };
+    status_t startRecording() override { return 0; };
+    void stopRecording() override{};
+    bool recordingEnabled() override { return true; };
+    void releaseRecordingFrame(const sp<IMemory> & /*mem*/) override{};
+    void releaseRecordingFrameHandle(native_handle_t * /*handle*/) override{};
+    void releaseRecordingFrameHandleBatch(const vector<native_handle_t *> & /*handles*/) override{};
+    status_t autoFocus() override { return 0; };
+    status_t cancelAutoFocus() override { return 0; };
+    status_t takePicture(int /*msgType*/) override { return 0; };
+    status_t setParameters(const String8 & /*params*/) override { return 0; };
+    String8 getParameters() const override { return String8(); };
+    status_t sendCommand(int32_t /*cmd*/, int32_t /*arg1*/, int32_t /*arg2*/) override {
+        return 0;
+    };
+    status_t setVideoBufferMode(int32_t /*videoBufferMode*/) override { return 0; };
+    status_t setVideoTarget(const sp<IGraphicBufferProducer> & /*bufferProducer*/) override {
+        return 0;
+    };
+    status_t setAudioRestriction(int32_t /*mode*/) override { return 0; };
+    int32_t getGlobalAudioRestriction() override { return 0; };
+    IBinder *onAsBinder() override { return reinterpret_cast<IBinder *>(this); };
+};
+
+class TestMediaRecorderClient : public IMediaRecorderClient {
+   public:
+    virtual ~TestMediaRecorderClient() = default;
+
+    void notify(int /*msg*/, int /*ext1*/, int /*ext2*/) override{};
+    IBinder *onAsBinder() override { return reinterpret_cast<IBinder *>(this); };
+};
+
+class MediaRecorderClientFuzzer {
+   public:
+    MediaRecorderClientFuzzer(const uint8_t *data, size_t size);
+    ~MediaRecorderClientFuzzer() { close(mMediaRecorderOutputFd); }
+    void process();
+
+   private:
+    void setConfig();
+    void getConfig();
+    void dumpInfo();
+
+    FuzzedDataProvider mFdp;
+    unique_ptr<MediaRecorderBase> mStfRecorder = nullptr;
+    SurfaceComposerClient mComposerClient;
+    sp<SurfaceControl> mSurfaceControl = nullptr;
+    sp<Surface> mSurface = nullptr;
+    const int32_t mMediaRecorderOutputFd;
+};
+
+void MediaRecorderClientFuzzer::getConfig() {
+    int32_t max;
+    mStfRecorder->getMaxAmplitude(&max);
+
+    int32_t deviceId = mFdp.ConsumeIntegral<int32_t>();
+    mStfRecorder->setInputDevice(deviceId);
+    mStfRecorder->getRoutedDeviceId(&deviceId);
+
+    vector<android::media::MicrophoneInfo> activeMicrophones{};
+    mStfRecorder->getActiveMicrophones(&activeMicrophones);
+
+    int32_t portId;
+    mStfRecorder->getPortId(&portId);
+
+    uint64_t bytes;
+    mStfRecorder->getRtpDataUsage(&bytes);
+
+    Parcel parcel;
+    mStfRecorder->getMetrics(&parcel);
+
+    sp<IGraphicBufferProducer> buffer = mStfRecorder->querySurfaceMediaSource();
+}
+
+void MediaRecorderClientFuzzer::dumpInfo() {
+    int32_t dumpFd = memfd_create("DumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mStfRecorder->dump(dumpFd, args);
+    close(dumpFd);
+}
+
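+// Applies fuzzed sources, encoders, video size, output files and string
+// parameters to the recorder before recording starts.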
+void MediaRecorderClientFuzzer::setConfig() {
+    mStfRecorder->setOutputFile(mMediaRecorderOutputFd);
+    mStfRecorder->setAudioSource(mFdp.PickValueInArray(kSupportedAudioSources));
+    mStfRecorder->setVideoSource(mFdp.PickValueInArray(kSupportedVideoSources));
+    mStfRecorder->setPreferredMicrophoneDirection(
+        mFdp.PickValueInArray(kSupportedMicrophoneDirections));
+    mStfRecorder->setPrivacySensitive(mFdp.ConsumeBool());
+    bool isPrivacySensitive;
+    mStfRecorder->isPrivacySensitive(&isPrivacySensitive);
+    mStfRecorder->setVideoSize(mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize),
+                               mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize));
+    mStfRecorder->setVideoFrameRate(mFdp.ConsumeIntegral<int32_t>());
+    mStfRecorder->enableAudioDeviceCallback(mFdp.ConsumeBool());
+    mStfRecorder->setPreferredMicrophoneFieldDimension(mFdp.ConsumeFloatingPoint<float>());
+    mStfRecorder->setClientName(String16(mFdp.ConsumeRandomLengthString().c_str()));
+
+    int32_t Idx = mFdp.ConsumeIntegralInRange<int32_t>(0, size(kRecordingConfigList) - 1);
+    mStfRecorder->setOutputFormat(kRecordingConfigList[Idx].outputFormat);
+    mStfRecorder->setAudioEncoder(kRecordingConfigList[Idx].audioEncoder);
+    mStfRecorder->setVideoEncoder(kRecordingConfigList[Idx].videoEncoder);
+
+    int32_t nextOutputFd = memfd_create("NextOutputFile", MFD_ALLOW_SEALING);
+    mStfRecorder->setNextOutputFile(nextOutputFd);
+    close(nextOutputFd);
+
+    for (Idx = 0; Idx < size(kParametersList); ++Idx) {
+        if (mFdp.ConsumeBool()) {
+            int32_t value = mFdp.ConsumeIntegral<int32_t>();
+            mStfRecorder->setParameters(
+                String8((kParametersList[Idx] + "=" + to_string(value)).c_str()));
+        }
+    }
+}
+
+MediaRecorderClientFuzzer::MediaRecorderClientFuzzer(const uint8_t *data, size_t size)
+    : mFdp(data, size), mMediaRecorderOutputFd(memfd_create("OutputFile", MFD_ALLOW_SEALING)) {
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    mStfRecorder = make_unique<StagefrightRecorder>(attributionSource);
+
+    mSurfaceControl = mComposerClient.createSurface(
+        String8(mFdp.ConsumeRandomLengthString().c_str()), mFdp.ConsumeIntegral<uint32_t>(),
+        mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<int32_t>(),
+        mFdp.ConsumeIntegral<int32_t>());
+    if (mSurfaceControl) {
+        mSurface = mSurfaceControl->getSurface();
+        mStfRecorder->setPreviewSurface(mSurface->getIGraphicBufferProducer());
+    }
+
+    sp<TestMediaRecorderClient> listener = sp<TestMediaRecorderClient>::make();
+    mStfRecorder->setListener(listener);
+
+    sp<TestCamera> testCamera = sp<TestCamera>::make();
+    sp<Camera> camera = Camera::create(testCamera);
+    mStfRecorder->setCamera(camera->remote(), camera->getRecordingProxy());
+
+    sp<PersistentSurface> persistentSurface = sp<PersistentSurface>::make();
+    mStfRecorder->setInputSurface(persistentSurface);
+
+    sp<TestAudioDeviceCallback> callback = sp<TestAudioDeviceCallback>::make();
+    mStfRecorder->setAudioDeviceCallback(callback);
+}
+
+void MediaRecorderClientFuzzer::process() {
+    setConfig();
+
+    mStfRecorder->init();
+    mStfRecorder->prepare();
+    size_t numRecord = mFdp.ConsumeIntegralInRange<size_t>(kNumRecordMin, kNumRecordMax);
+    for (size_t Idx = 0; Idx < numRecord; ++Idx) {
+        mStfRecorder->start();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->pause();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->resume();
+        this_thread::sleep_for(chrono::milliseconds(
+            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->stop();
+    }
+    dumpInfo();
+    getConfig();
+
+    mStfRecorder->close();
+    mStfRecorder->reset();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    MediaRecorderClientFuzzer mrcFuzzer(data, size);
+    mrcFuzzer.process();
+    return 0;
+}
diff --git a/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp
new file mode 100644
index 0000000..a7cb689
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <StagefrightMetadataRetriever.h>
+#include <binder/ProcessState.h>
+#include <datasource/FileSource.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/foundation/base64.h>
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+using namespace std;
+using namespace android;
+
+const char *kMimeTypes[] = {MEDIA_MIMETYPE_IMAGE_JPEG,         MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC,
+                            MEDIA_MIMETYPE_VIDEO_VP8,          MEDIA_MIMETYPE_VIDEO_VP9,
+                            MEDIA_MIMETYPE_VIDEO_AV1,          MEDIA_MIMETYPE_VIDEO_AVC,
+                            MEDIA_MIMETYPE_VIDEO_HEVC,         MEDIA_MIMETYPE_VIDEO_MPEG4,
+                            MEDIA_MIMETYPE_VIDEO_H263,         MEDIA_MIMETYPE_VIDEO_MPEG2,
+                            MEDIA_MIMETYPE_VIDEO_RAW,          MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+                            MEDIA_MIMETYPE_VIDEO_SCRAMBLED,    MEDIA_MIMETYPE_VIDEO_DIVX,
+                            MEDIA_MIMETYPE_VIDEO_DIVX3,        MEDIA_MIMETYPE_VIDEO_XVID,
+                            MEDIA_MIMETYPE_VIDEO_MJPEG,        MEDIA_MIMETYPE_AUDIO_AMR_NB,
+                            MEDIA_MIMETYPE_AUDIO_AMR_WB,       MEDIA_MIMETYPE_AUDIO_MPEG,
+                            MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
+                            MEDIA_MIMETYPE_AUDIO_MIDI,         MEDIA_MIMETYPE_AUDIO_AAC,
+                            MEDIA_MIMETYPE_AUDIO_QCELP,        MEDIA_MIMETYPE_AUDIO_VORBIS,
+                            MEDIA_MIMETYPE_AUDIO_OPUS,         MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+                            MEDIA_MIMETYPE_AUDIO_G711_MLAW,    MEDIA_MIMETYPE_AUDIO_RAW,
+                            MEDIA_MIMETYPE_AUDIO_FLAC,         MEDIA_MIMETYPE_AUDIO_AAC_ADTS,
+                            MEDIA_MIMETYPE_AUDIO_MSGSM,        MEDIA_MIMETYPE_AUDIO_AC3,
+                            MEDIA_MIMETYPE_AUDIO_EAC3,         MEDIA_MIMETYPE_AUDIO_EAC3_JOC,
+                            MEDIA_MIMETYPE_AUDIO_AC4,          MEDIA_MIMETYPE_AUDIO_SCRAMBLED,
+                            MEDIA_MIMETYPE_AUDIO_ALAC,         MEDIA_MIMETYPE_AUDIO_WMA,
+                            MEDIA_MIMETYPE_AUDIO_MS_ADPCM,     MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG4,    MEDIA_MIMETYPE_CONTAINER_WAV,
+                            MEDIA_MIMETYPE_CONTAINER_OGG,      MEDIA_MIMETYPE_CONTAINER_MATROSKA,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG2TS,  MEDIA_MIMETYPE_CONTAINER_AVI,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG2PS,  MEDIA_MIMETYPE_CONTAINER_HEIF,
+                            MEDIA_MIMETYPE_TEXT_3GPP,          MEDIA_MIMETYPE_TEXT_SUBRIP,
+                            MEDIA_MIMETYPE_TEXT_VTT,           MEDIA_MIMETYPE_TEXT_CEA_608,
+                            MEDIA_MIMETYPE_TEXT_CEA_708,       MEDIA_MIMETYPE_DATA_TIMED_ID3};
+
+class MetadataRetrieverFuzzer {
+   public:
+    MetadataRetrieverFuzzer(const uint8_t *data, size_t size)
+        : mFdp(data, size),
+          mMdRetriever(new StagefrightMetadataRetriever()),
+          mDataSourceFd(memfd_create("InputFile", MFD_ALLOW_SEALING)) {}
+    ~MetadataRetrieverFuzzer() { close(mDataSourceFd); }
+    bool setDataSource(const uint8_t *data, size_t size);
+    void getData();
+
+   private:
+    FuzzedDataProvider mFdp;
+    sp<StagefrightMetadataRetriever> mMdRetriever = nullptr;
+    const int32_t mDataSourceFd;
+};
+
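+// Exercises the frame, image, album art and metadata extraction entry points with
+// fuzzed arguments.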
+void MetadataRetrieverFuzzer::getData() {
+    int64_t timeUs = mFdp.ConsumeIntegral<int64_t>();
+    int32_t option = mFdp.ConsumeIntegral<int32_t>();
+    int32_t colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    bool metaOnly = mFdp.ConsumeBool();
+    mMdRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
+
+    int32_t index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    metaOnly = mFdp.ConsumeBool();
+    bool thumbnail = mFdp.ConsumeBool();
+    mMdRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
+
+    index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    int32_t left = mFdp.ConsumeIntegral<int32_t>();
+    int32_t top = mFdp.ConsumeIntegral<int32_t>();
+    int32_t right = mFdp.ConsumeIntegral<int32_t>();
+    int32_t bottom = mFdp.ConsumeIntegral<int32_t>();
+    mMdRetriever->getImageRectAtIndex(index, colorFormat, left, top, right, bottom);
+
+    index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    metaOnly = mFdp.ConsumeBool();
+    mMdRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
+
+    mMdRetriever->extractAlbumArt();
+
+    int32_t keyCode = mFdp.ConsumeIntegral<int32_t>();
+    mMdRetriever->extractMetadata(keyCode);
+}
+
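+// Routes the fuzzer input into the retriever through one of three paths: a base64
+// data URI, a plain file descriptor, or a FileSource wrapped around that
+// descriptor with a fuzzed MIME type.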
+bool MetadataRetrieverFuzzer::setDataSource(const uint8_t *data, size_t size) {
+    status_t status = -1;
+
+    enum DataSourceChoice {FromHttp, FromFd, FromFileSource, kMaxValue = FromFileSource};
+    switch (mFdp.ConsumeEnum<DataSourceChoice>()) {
+        case FromHttp: {
+            KeyedVector<String8, String8> mHeaders;
+            mHeaders.add(String8(mFdp.ConsumeRandomLengthString().c_str()),
+                         String8(mFdp.ConsumeRandomLengthString().c_str()));
+
+            uint32_t dataBlobSize = mFdp.ConsumeIntegralInRange<uint16_t>(0, size);
+            vector<uint8_t> uriSuffix = mFdp.ConsumeBytes<uint8_t>(dataBlobSize);
+
+            string uri("data:");
+            uri += ";base64,";
+            AString out;
+            encodeBase64(uriSuffix.data(), uriSuffix.size(), &out);
+            uri += out.c_str();
+            status = mMdRetriever->setDataSource(nullptr /*httpService*/, uri.c_str(), &mHeaders);
+            break;
+        }
+        case FromFd: {
+            write(mDataSourceFd, data, size);
+
+            status = mMdRetriever->setDataSource(mDataSourceFd, 0, size);
+            break;
+        }
+        case FromFileSource: {
+            write(mDataSourceFd, data, size);
+
+            sp<DataSource> dataSource = new FileSource(dup(mDataSourceFd), 0, size);
+            status = mMdRetriever->setDataSource(dataSource, mFdp.PickValueInArray(kMimeTypes));
+            break;
+        }
+    }
+
+    if (status != 0) {
+        return false;
+    }
+    return true;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    MetadataRetrieverFuzzer mrtFuzzer(data, size);
+    ProcessState::self()->startThreadPool();
+    if (mrtFuzzer.setDataSource(data, size)) {
+        mrtFuzzer.getData();
+    }
+    return 0;
+}
diff --git a/media/libmediaplayerservice/nuplayer/Android.bp b/media/libmediaplayerservice/nuplayer/Android.bp
index 6d338db..89ba584 100644
--- a/media/libmediaplayerservice/nuplayer/Android.bp
+++ b/media/libmediaplayerservice/nuplayer/Android.bp
@@ -41,16 +41,10 @@
         "libmediadrm_headers",
         "libmediametrics_headers",
         "media_plugin_headers",
-    ],
-
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/av/media/libstagefright/httplive",
-        "frameworks/av/media/libstagefright/include",
-        "frameworks/av/media/libstagefright/mpeg2ts",
-        "frameworks/av/media/libstagefright/rtsp",
-        "frameworks/av/media/libstagefright/timedtext",
-        "frameworks/native/include/android",
+        "libstagefright_headers",
+        "libstagefright_httplive_headers",
+        "libstagefright_mpeg2support_headers",
+        "libstagefright_rtsp_headers",
     ],
 
     cflags: [
@@ -78,6 +72,7 @@
 
     static_libs: [
         "libplayerservice_datasource",
+        "libstagefright_timedtext",
     ],
 
     name: "libstagefright_nuplayer",
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 439dbe8..36e4d4a 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -20,7 +20,6 @@
 #include "GenericSource.h"
 #include "NuPlayerDrm.h"
 
-#include "AnotherPacketSource.h"
 #include <binder/IServiceManager.h>
 #include <cutils/properties.h>
 #include <datasource/PlayerServiceDataSourceFactory.h>
@@ -44,6 +43,7 @@
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 7a2ab8f..80e06f1 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -21,11 +21,10 @@
 #include "NuPlayer.h"
 #include "NuPlayerSource.h"
 
-#include "ATSParser.h"
-
 #include <android-base/unique_fd.h>
 #include <media/mediaplayer.h>
 #include <media/stagefright/MediaBuffer.h>
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 77e7885..4e71e89 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -19,8 +19,6 @@
 #include <utils/Log.h>
 
 #include "HTTPLiveSource.h"
-
-#include "AnotherPacketSource.h"
 #include "LiveDataSource.h"
 
 #include <media/IMediaHTTPService.h>
@@ -31,6 +29,7 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 // default buffer prepare/ready/underflow marks
 static const int kReadyMarkMs     = 5000;  // 5 seconds
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 9ae7ddb..c6b22a6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -35,9 +35,7 @@
 #include "RTSPSource.h"
 #include "StreamingSource.h"
 #include "GenericSource.h"
-#include "TextDescriptions.h"
-
-#include "ATSParser.h"
+#include <timedtext/TextDescriptions.h>
 
 #include <cutils/properties.h>
 
@@ -56,6 +54,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 
+#include <mpeg2ts/ATSParser.h>
+
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 2c1f158..52b2041 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -40,10 +40,9 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/SurfaceUtils.h>
+#include <mpeg2ts/ATSParser.h>
 #include <gui/Surface.h>
 
-#include "ATSParser.h"
-
 namespace android {
 
 static float kDisplayRefreshingRate = 60.f; // TODO: get this from the display
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 793014e..cb91fd9 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -30,8 +30,7 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaErrors.h>
-
-#include "ATSParser.h"
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 4a65f71..2828d44 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1673,24 +1673,18 @@
 
         mDrainAudioQueuePending = false;
 
-        if (offloadingAudio()) {
-            mAudioSink->pause();
-            mAudioSink->flush();
-            if (!mPaused) {
-                mAudioSink->start();
-            }
-        } else {
-            mAudioSink->pause();
-            mAudioSink->flush();
+        mAudioSink->pause();
+        mAudioSink->flush();
+        if (!offloadingAudio()) {
             // Call stop() to signal to the AudioSink to completely fill the
             // internal buffer before resuming playback.
             // FIXME: this is ignored after flush().
             mAudioSink->stop();
-            if (!mPaused) {
-                mAudioSink->start();
-            }
             mNumFramesWritten = 0;
         }
+        if (!mPaused) {
+            mAudioSink->start();
+        }
         mNextAudioClockUpdateTimeUs = -1;
     } else {
         flushQueue(&mVideoQueue);
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.cpp b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
index 4d6a483..6a17972 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
@@ -336,7 +336,7 @@
     *durationUs = 0ll;
 
     int64_t audioDurationUs;
-    if (mAudioTrack != NULL
+    if (mAudioTrack != NULL && mAudioTrack->getFormat() != NULL
             && mAudioTrack->getFormat()->findInt64(
                 kKeyDuration, &audioDurationUs)
             && audioDurationUs > *durationUs) {
@@ -344,7 +344,7 @@
     }
 
     int64_t videoDurationUs;
-    if (mVideoTrack != NULL
+    if (mVideoTrack != NULL && mVideoTrack->getFormat() != NULL
             && mVideoTrack->getFormat()->findInt64(
                 kKeyDuration, &videoDurationUs)
             && videoDurationUs > *durationUs) {
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.h b/media/libmediaplayerservice/nuplayer/RTPSource.h
index 3b4f9e9..7d9bb8f 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.h
@@ -23,25 +23,20 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaSource.h>
+#include <media/stagefright/rtsp/APacketSource.h>
+#include <media/stagefright/rtsp/ARTPConnection.h>
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 #include <media/stagefright/Utils.h>
 #include <media/BufferingSettings.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
 #include <utils/RefBase.h>
 
-#include "AnotherPacketSource.h"
-#include "APacketSource.h"
-#include "ARTPConnection.h"
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
 #include "NuPlayerSource.h"
 
-
-
-
-
-
 namespace android {
 
 struct ALooper;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 8e05de8..75cedcc 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -20,13 +20,12 @@
 
 #include "RTSPSource.h"
 
-#include "AnotherPacketSource.h"
-#include "MyHandler.h"
-#include "SDPLoader.h"
-
 #include <media/IMediaHTTPService.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/rtsp/MyHandler.h>
+#include <media/stagefright/rtsp/SDPLoader.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 03fce08..7497e41 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -20,7 +20,7 @@
 
 #include "NuPlayerSource.h"
 
-#include "ATSParser.h"
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index bec27d3..9d67ca4 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -20,8 +20,6 @@
 
 #include "StreamingSource.h"
 
-#include "ATSParser.h"
-#include "AnotherPacketSource.h"
 #include "NuPlayerStreamListener.h"
 
 #include <media/stagefright/MediaSource.h>
@@ -31,6 +29,8 @@
 #include <media/stagefright/foundation/MediaKeys.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <mpeg2ts/AnotherPacketSource.h>
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp b/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
index 92236ea..6eb8c6f 100644
--- a/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
+++ b/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
@@ -33,10 +33,6 @@
         "StagefrightRecorderTest.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libmediaplayerservice",
-    ],
-
     static_libs: [
         "libmediaplayerservice",
         "libstagefright_httplive",
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index e20f7ab..411b6ef 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -55,8 +55,8 @@
     AMediaFormat_getString(srcFormat, AMEDIAFORMAT_KEY_MIME, &srcMime);
     if (!AMediaFormat_getString(options, AMEDIAFORMAT_KEY_MIME, &dstMime) ||
         strcmp(srcMime, dstMime) == 0) {
-        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, String));
-        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, String));
+        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, Int32));
+        srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, Int32));
     }
 
     // ------- Define parameters to copy from the caller's options -------
diff --git a/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h b/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
index 348b4f8..635f67f 100644
--- a/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
+++ b/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
@@ -42,10 +42,6 @@
     //virtual size_t framesUnderrun() const;
     //virtual size_t underruns() const;
 
-    // This is an over-estimate, and could dupe the caller into making a blocking write()
-    // FIXME Use an audio HAL API to query the buffer emptying status when it's available.
-    virtual ssize_t availableToWrite() { return mStreamBufferSizeBytes / mFrameSize; }
-
     virtual ssize_t write(const void *buffer, size_t count);
 
     virtual status_t getTimestamp(ExtendedTimestamp &timestamp);
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 7c7fcac..3784dde 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -470,6 +470,7 @@
     void writeHdlrBox();
     void writeTkhdBox(uint32_t now);
     void writeColrBox();
+    void writeMdcvAndClliBoxes();
     void writeMp4aEsdsBox();
     void writeMp4vEsdsBox();
     void writeAudioFourCCBox();
@@ -4341,6 +4342,7 @@
 
     writePaspBox();
     writeColrBox();
+    writeMdcvAndClliBoxes();
     mOwner->endBox();  // mp4v, s263 or avc1
 }
 
@@ -4375,6 +4377,54 @@
     }
 }
 
+void MPEG4Writer::Track::writeMdcvAndClliBoxes() {
+    sp<MetaData> meta = mSource->getFormat();
+    uint32_t type;
+    const uint8_t* data;
+    size_t size = 0;
+    bool found =
+            meta->findData(kKeyHdrStaticInfo, &type, reinterpret_cast<const void**>(&data), &size);
+    if (found && size == 25) {
+        uint16_t displayPrimariesRX = U16LE_AT(&data[1]);
+        uint16_t displayPrimariesRY = U16LE_AT(&data[3]);
+
+        uint16_t displayPrimariesGX = U16LE_AT(&data[5]);
+        uint16_t displayPrimariesGY = U16LE_AT(&data[7]);
+
+        uint16_t displayPrimariesBX = U16LE_AT(&data[9]);
+        uint16_t displayPrimariesBY = U16LE_AT(&data[11]);
+
+        uint16_t whitePointX = U16LE_AT(&data[13]);
+        uint16_t whitePointY = U16LE_AT(&data[15]);
+
+        uint16_t maxDisplayMasteringLuminance = U16LE_AT(&data[17]);
+        uint16_t minDisplayMasteringLuminance = U16LE_AT(&data[19]);
+
+        uint16_t maxContentLightLevel = U16LE_AT(&data[21]);
+        uint16_t maxPicAverageLightLevel = U16LE_AT(&data[23]);
+
+        mOwner->beginBox("mdcv");
+        mOwner->writeInt16(displayPrimariesGX);
+        mOwner->writeInt16(displayPrimariesGY);
+        mOwner->writeInt16(displayPrimariesBX);
+        mOwner->writeInt16(displayPrimariesBY);
+        mOwner->writeInt16(displayPrimariesRX);
+        mOwner->writeInt16(displayPrimariesRY);
+        mOwner->writeInt16(whitePointX);
+        mOwner->writeInt16(whitePointY);
+        mOwner->writeInt32(maxDisplayMasteringLuminance * 10000);
+        mOwner->writeInt32(minDisplayMasteringLuminance * 10000);
+        mOwner->endBox();  // mdcv.
+
+        mOwner->beginBox("clli");
+        mOwner->writeInt16(maxContentLightLevel);
+        mOwner->writeInt16(maxPicAverageLightLevel);
+        mOwner->endBox();  // clli.
+    } else if (found) {
+        ALOGW("Ignoring HDR static info with unexpected size %d", (int)size);
+    }
+}
+
 void MPEG4Writer::Track::writeAudioFourCCBox() {
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 6b5ba16..1ea3f99 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -4794,8 +4794,8 @@
     }
     const CryptoPlugin::SubSample *subSamples;
     size_t numSubSamples;
-    const uint8_t *key;
-    const uint8_t *iv;
+    const uint8_t *key = NULL;
+    const uint8_t *iv = NULL;
     CryptoPlugin::Mode mode = CryptoPlugin::kMode_Unencrypted;
 
     // We allow the simpler queueInputBuffer API to be used even in
@@ -4810,8 +4810,6 @@
 
             subSamples = &ss;
             numSubSamples = 1;
-            key = NULL;
-            iv = NULL;
             pattern.mEncryptBlocks = 0;
             pattern.mSkipBlocks = 0;
         }
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 2ffe728..a3040f4 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -18,8 +18,6 @@
 #define LOG_TAG "MediaCodecList"
 #include <utils/Log.h>
 
-#include "MediaCodecListOverrides.h"
-
 #include <binder/IServiceManager.h>
 
 #include <media/IMediaCodecList.h>
@@ -34,6 +32,7 @@
 #include <media/stagefright/CCodec.h>
 #include <media/stagefright/Codec2InfoBuilder.h>
 #include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaCodecListOverrides.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/OmxInfoBuilder.h>
 #include <media/stagefright/PersistentSurface.h>
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index 4a167d1..9304e45 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -18,8 +18,6 @@
 #define LOG_TAG "MediaCodecListOverrides"
 #include <utils/Log.h>
 
-#include "MediaCodecListOverrides.h"
-
 #include <cutils/properties.h>
 #include <gui/Surface.h>
 #include <mediadrm/ICrypto.h>
@@ -30,6 +28,7 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaCodecListOverrides.h>
 
 namespace android {
 
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index 0bc5976..0f5e95e 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -67,7 +67,11 @@
         mFd = -1;
     }
 
-    free(mOs);
+    if (mOs != nullptr) {
+        ogg_stream_clear(mOs);
+        free(mOs);
+        mOs = nullptr;
+    }
 }
 
 status_t OggWriter::initCheck() const {
diff --git a/media/libstagefright/colorconversion/Android.bp b/media/libstagefright/colorconversion/Android.bp
index 06cebd3..7ff9b10 100644
--- a/media/libstagefright/colorconversion/Android.bp
+++ b/media/libstagefright/colorconversion/Android.bp
@@ -25,10 +25,6 @@
         "SoftwareRenderer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/native/include/media/openmax",
-    ],
-
     shared_libs: [
         "libui",
         "libnativewindow",
@@ -37,6 +33,7 @@
     header_libs: [
         "libstagefright_headers",
         "libstagefright_foundation_headers",
+        "media_plugin_headers",
     ],
 
     static_libs: ["libyuv_static"],
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index acc9e87..e6d59ad 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -22,8 +22,12 @@
         "ZeroFilter.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/native/include/media/openmax",
+    export_include_dirs: [
+        "include",
+    ],
+
+    local_include_dirs: [
+        "include/filters",
     ],
 
     cflags: [
diff --git a/media/libstagefright/filters/ColorConvert.h b/media/libstagefright/filters/include/filters/ColorConvert.h
similarity index 100%
rename from media/libstagefright/filters/ColorConvert.h
rename to media/libstagefright/filters/include/filters/ColorConvert.h
diff --git a/media/libstagefright/filters/GraphicBufferListener.h b/media/libstagefright/filters/include/filters/GraphicBufferListener.h
similarity index 100%
rename from media/libstagefright/filters/GraphicBufferListener.h
rename to media/libstagefright/filters/include/filters/GraphicBufferListener.h
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.h b/media/libstagefright/filters/include/filters/IntrinsicBlurFilter.h
similarity index 100%
rename from media/libstagefright/filters/IntrinsicBlurFilter.h
rename to media/libstagefright/filters/include/filters/IntrinsicBlurFilter.h
diff --git a/media/libstagefright/filters/RSFilter.h b/media/libstagefright/filters/include/filters/RSFilter.h
similarity index 100%
rename from media/libstagefright/filters/RSFilter.h
rename to media/libstagefright/filters/include/filters/RSFilter.h
diff --git a/media/libstagefright/filters/SaturationFilter.h b/media/libstagefright/filters/include/filters/SaturationFilter.h
similarity index 100%
rename from media/libstagefright/filters/SaturationFilter.h
rename to media/libstagefright/filters/include/filters/SaturationFilter.h
diff --git a/media/libstagefright/filters/SimpleFilter.h b/media/libstagefright/filters/include/filters/SimpleFilter.h
similarity index 100%
rename from media/libstagefright/filters/SimpleFilter.h
rename to media/libstagefright/filters/include/filters/SimpleFilter.h
diff --git a/media/libstagefright/filters/ZeroFilter.h b/media/libstagefright/filters/include/filters/ZeroFilter.h
similarity index 100%
rename from media/libstagefright/filters/ZeroFilter.h
rename to media/libstagefright/filters/include/filters/ZeroFilter.h
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index dd2c66f..5f86c22 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -33,18 +33,13 @@
     },
     host_supported: true,
     double_loadable: true,
-    include_dirs: [
-        "frameworks/av/include",
-        "frameworks/native/include",
-        "frameworks/native/libs/arect/include",
-        "frameworks/native/libs/nativebase/include",
-    ],
 
     local_include_dirs: [
         "include/media/stagefright/foundation",
     ],
 
     header_libs: [
+        "av-headers",
         // this is only needed for the vendor variant that removes libbinder, but vendor
         // target below does not allow adding header_libs.
         "libbinder_headers",
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index ada5d81..5c4ec17 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -60,12 +60,66 @@
 const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
 const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1 = "audio/mha1";
 const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1 = "audio/mhm1";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3 = "audio/mhm1.03";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4 = "audio/mhm1.04";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3 = "audio/mhm1.0d";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4 = "audio/mhm1.0e";
 const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
 const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
 const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
 const char *MEDIA_MIMETYPE_AUDIO_MS_ADPCM = "audio/x-adpcm-ms";
 const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM = "audio/x-adpcm-dvi-ima";
-
+const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/vnd.dts";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_HD = "audio/vnd.dts.hd";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD = "audio/vnd.dts.uhd";
+const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCB = "audio/evrcb";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCWB = "audio/evrcwb";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCNW = "audio/evrcnw";
+const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb+";
+const char *MEDIA_MIMETYPE_AUDIO_APTX = "audio/aptx";
+const char *MEDIA_MIMETYPE_AUDIO_DRA = "audio/vnd.dra";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT = "audio/vnd.dolby.mat";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0 = "audio/vnd.dolby.mat.1.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0 = "audio/vnd.dolby.mat.2.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1 = "audio/vnd.dolby.mat.2.1";
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD = "audio/vnd.dolby.mlp";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4 = "audio/mp4a.40";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN = "audio/mp4a.40.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LC = "audio/mp4a.40.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR = "audio/mp4a.40.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP = "audio/mp4a.40.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1 = "audio/mp4a.40.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE = "audio/mp4a.40.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC = "audio/mp4a.40.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LD = "audio/mp4a.40.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2 = "audio/mp4a.40.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD = "audio/mp4a.40.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE = "audio/mp4a.40.42";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF = "audio/aac-adif";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN = "audio/aac-adts.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC = "audio/aac-adts.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR = "audio/aac-adts.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP = "audio/aac-adts.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1 = "audio/aac-adts.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE = "audio/aac-adts.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC = "audio/aac-adts.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD = "audio/aac-adts.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2 = "audio/aac-adts.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD = "audio/aac-adts.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE = "audio/aac-adts.42";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC = "audio/mp4a-latm.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1 = "audio/mp4a-latm.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2 = "audio/mp4a-latm.29";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC61937 = "audio/x-iec61937";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC60958 = "audio/x-iec60958";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
 const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index f5cecef..fb8c299 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -62,12 +62,59 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
 extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
 extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4;
 extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
 extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
 extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
 extern const char *MEDIA_MIMETYPE_AUDIO_MS_ADPCM;
 extern const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM;
-
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_HD;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRC;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCB;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCWB;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCNW;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_APTX;
+extern const char *MEDIA_MIMETYPE_AUDIO_DRA;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC61937;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC60958;
 
 extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
 extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/media/libstagefright/foundation/tests/AVCUtils/Android.bp b/media/libstagefright/foundation/tests/AVCUtils/Android.bp
index 594da56..ee7db21 100644
--- a/media/libstagefright/foundation/tests/AVCUtils/Android.bp
+++ b/media/libstagefright/foundation/tests/AVCUtils/Android.bp
@@ -43,10 +43,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/foundation",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/foundation/tests/Android.bp b/media/libstagefright/foundation/tests/Android.bp
index e50742e..e72ce43 100644
--- a/media/libstagefright/foundation/tests/Android.bp
+++ b/media/libstagefright/foundation/tests/Android.bp
@@ -18,10 +18,6 @@
         "-Wall",
     ],
 
-    include_dirs: [
-        "frameworks/av/include",
-    ],
-
     shared_libs: [
         "liblog",
         "libstagefright_foundation",
diff --git a/media/libstagefright/http/Android.bp b/media/libstagefright/http/Android.bp
index f4d6d99..f25318d 100644
--- a/media/libstagefright/http/Android.bp
+++ b/media/libstagefright/http/Android.bp
@@ -12,10 +12,8 @@
 
     srcs: ["HTTPHelper.cpp"],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/native/include/media/openmax",
-        "frameworks/base/core/jni",
+    header_libs: [
+        "libstagefright_headers",
     ],
 
     shared_libs: [
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 7acf735..7e26bd6 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -28,10 +28,6 @@
         "PlaylistFetcher.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/native/include/media/openmax",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 0d7cadd..09ca1c9 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -23,7 +23,7 @@
 #include "M3UParser.h"
 #include "PlaylistFetcher.h"
 
-#include <AnotherPacketSource.h>
+#include <mpeg2ts/AnotherPacketSource.h>
 
 #include <cutils/properties.h>
 #include <media/MediaHTTPService.h>
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index ceea41d..ed38a2e 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -24,7 +24,7 @@
 
 #include <utils/String8.h>
 
-#include <ATSParser.h>
+#include <mpeg2ts/ATSParser.h>
 
 namespace android {
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 907b326..b339fd2 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -25,8 +25,8 @@
 #include "LiveSession.h"
 #include "M3UParser.h"
 #include <ID3.h>
-#include <AnotherPacketSource.h>
-#include <HlsSampleDecryptor.h>
+#include <mpeg2ts/AnotherPacketSource.h>
+#include <mpeg2ts/HlsSampleDecryptor.h>
 
 #include <datasource/DataURISource.h>
 #include <media/stagefright/foundation/ABitReader.h>
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 2e28164..716df63 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -21,7 +21,7 @@
 #include <media/stagefright/foundation/AHandler.h>
 #include <openssl/aes.h>
 
-#include <ATSParser.h>
+#include <mpeg2ts/ATSParser.h>
 #include "LiveSession.h"
 
 namespace android {
diff --git a/media/libstagefright/MediaCodecListOverrides.h b/media/libstagefright/include/media/stagefright/MediaCodecListOverrides.h
similarity index 100%
rename from media/libstagefright/MediaCodecListOverrides.h
rename to media/libstagefright/include/media/stagefright/MediaCodecListOverrides.h
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index a970224..283df1e 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -27,11 +27,6 @@
         "ESQueue.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/native/include/media/openmax",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
@@ -59,16 +54,21 @@
         "libstagefright_foundation_headers",
     ],
 
-    export_include_dirs: ["."],
+    export_include_dirs: ["include"],
+
+    local_include_dirs: ["include/mpeg2ts"],
 
     whole_static_libs: [
         "libstagefright_metadatautils",
     ],
 
+}
+
+cc_defaults {
+    name: "libstagefright_mpeg2support_sdk_defaults",
+
     min_sdk_version: "29",
-
     host_supported: true,
-
     target: {
         darwin: {
             enabled: false,
@@ -76,11 +76,19 @@
     },
 }
 
+cc_library_headers {
+    name: "libstagefright_mpeg2support_headers",
+    defaults: [
+        "libstagefright_mpeg2support_sdk_defaults",
+    ],
+    export_include_dirs: ["include"],
+}
 
 cc_library_static {
     name: "libstagefright_mpeg2support",
     defaults: [
         "libstagefright_mpeg2support_defaults",
+        "libstagefright_mpeg2support_sdk_defaults",
     ],
     cflags: [
         "-DENABLE_CRYPTO",
@@ -97,6 +105,7 @@
     name: "libstagefright_mpeg2support_nocrypto",
     defaults: [
         "libstagefright_mpeg2support_defaults",
+        "libstagefright_mpeg2support_sdk_defaults",
     ],
     apex_available: [
         "com.android.media",
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/ATSParser.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/ATSParser.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/ATSParser.h
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/AnotherPacketSource.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/AnotherPacketSource.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/AnotherPacketSource.h
diff --git a/media/libstagefright/mpeg2ts/CasManager.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/CasManager.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/CasManager.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/CasManager.h
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/ESQueue.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/ESQueue.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/ESQueue.h
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/HlsSampleDecryptor.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/HlsSampleDecryptor.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/HlsSampleDecryptor.h
diff --git a/media/libstagefright/mpeg2ts/SampleDecryptor.h b/media/libstagefright/mpeg2ts/include/mpeg2ts/SampleDecryptor.h
similarity index 100%
rename from media/libstagefright/mpeg2ts/SampleDecryptor.h
rename to media/libstagefright/mpeg2ts/include/mpeg2ts/SampleDecryptor.h
diff --git a/media/libstagefright/mpeg2ts/test/Android.bp b/media/libstagefright/mpeg2ts/test/Android.bp
index 464b039..34a8d3e 100644
--- a/media/libstagefright/mpeg2ts/test/Android.bp
+++ b/media/libstagefright/mpeg2ts/test/Android.bp
@@ -57,11 +57,6 @@
         "libstagefright_mpeg2support",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/",
-        "frameworks/av/media/libstagefright/",
-    ],
-
     header_libs: [
         "libmedia_headers",
         "libaudioclient_headers",
diff --git a/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp b/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
index 79c233b..9e24a99 100644
--- a/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
+++ b/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
@@ -26,9 +26,8 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <media/stagefright/foundation/AUtils.h>
-
-#include "mpeg2ts/ATSParser.h"
-#include "mpeg2ts/AnotherPacketSource.h"
+#include <mpeg2ts/AnotherPacketSource.h>
+#include <mpeg2ts/ATSParser.h>
 
 #include "Mpeg2tsUnitTestEnvironment.h"
 
diff --git a/media/libstagefright/rtsp/AAMRAssembler.cpp b/media/libstagefright/rtsp/AAMRAssembler.cpp
index bb2a238..e773031 100644
--- a/media/libstagefright/rtsp/AAMRAssembler.cpp
+++ b/media/libstagefright/rtsp/AAMRAssembler.cpp
@@ -18,9 +18,8 @@
 #define LOG_TAG "AAMRAssembler"
 #include <utils/Log.h>
 
-#include "AAMRAssembler.h"
-
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/AAMRAssembler.h>
+#include <media/stagefright/rtsp/ARTPSource.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 30cdbc9..2f516d5 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -18,9 +18,9 @@
 #define LOG_TAG "AAVCAssembler"
 #include <utils/Log.h>
 
-#include "AAVCAssembler.h"
+#include <media/stagefright/rtsp/AAVCAssembler.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AH263Assembler.cpp b/media/libstagefright/rtsp/AH263Assembler.cpp
index 3436e95..584b4de 100644
--- a/media/libstagefright/rtsp/AH263Assembler.cpp
+++ b/media/libstagefright/rtsp/AH263Assembler.cpp
@@ -17,9 +17,9 @@
 #define LOG_TAG "AH263Assembler"
 #include <utils/Log.h>
 
-#include "AH263Assembler.h"
+#include <media/stagefright/rtsp/AH263Assembler.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.cpp b/media/libstagefright/rtsp/AHEVCAssembler.cpp
index b240339..bb42d1f 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AHEVCAssembler.cpp
@@ -18,14 +18,14 @@
 #define LOG_TAG "AHEVCAssembler"
 #include <utils/Log.h>
 
-#include "AHEVCAssembler.h"
+#include <media/stagefright/rtsp/AHEVCAssembler.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
+#include <HevcUtils.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <include/HevcUtils.h>
 #include <media/stagefright/foundation/hexdump.h>
 
 #include <stdint.h>
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
index 0988774..2101de1 100644
--- a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
@@ -18,10 +18,10 @@
 #define LOG_TAG "AMPEG2TSAssembler"
 #include <utils/Log.h>
 
-#include "AMPEG2TSAssembler.h"
+#include <media/stagefright/rtsp/AMPEG2TSAssembler.h>
 
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
index 4302aee..0fc03ae 100644
--- a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
@@ -17,9 +17,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AMPEG4AudioAssembler"
 
-#include "AMPEG4AudioAssembler.h"
+#include <media/stagefright/rtsp/AMPEG4AudioAssembler.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABitReader.h>
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index 7bd33c1..6b1d2a1 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -18,10 +18,10 @@
 #define LOG_TAG "AMPEG4ElementaryAssembler"
 #include <utils/Log.h>
 
-#include "AMPEG4ElementaryAssembler.h"
+#include <media/stagefright/rtsp/AMPEG4ElementaryAssembler.h>
 
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index 169df46..db63183 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -18,10 +18,9 @@
 #define LOG_TAG "APacketSource"
 #include <utils/Log.h>
 
-#include "APacketSource.h"
-
-#include "ARawAudioAssembler.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/APacketSource.h>
+#include <media/stagefright/rtsp/ARawAudioAssembler.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <ctype.h>
 
diff --git a/media/libstagefright/rtsp/ARTPAssembler.cpp b/media/libstagefright/rtsp/ARTPAssembler.cpp
index 52aa3a0..b9869de 100644
--- a/media/libstagefright/rtsp/ARTPAssembler.cpp
+++ b/media/libstagefright/rtsp/ARTPAssembler.cpp
@@ -15,7 +15,7 @@
  */
 
 #define LOG_TAG "ARTPAssembler"
-#include "ARTPAssembler.h"
+#include <media/stagefright/rtsp/ARTPAssembler.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 0bd342a..5a8f471 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -18,9 +18,9 @@
 #define LOG_TAG "ARTPConnection"
 #include <utils/Log.h>
 
-#include "ARTPConnection.h"
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ARTPConnection.h>
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/ARTPSession.cpp b/media/libstagefright/rtsp/ARTPSession.cpp
index e5acb06..dae46f9 100644
--- a/media/libstagefright/rtsp/ARTPSession.cpp
+++ b/media/libstagefright/rtsp/ARTPSession.cpp
@@ -18,7 +18,10 @@
 #define LOG_TAG "ARTPSession"
 #include <utils/Log.h>
 
-#include "ARTPSession.h"
+#include <media/stagefright/rtsp/APacketSource.h>
+#include <media/stagefright/rtsp/ARTPConnection.h>
+#include <media/stagefright/rtsp/ARTPSession.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -29,9 +32,6 @@
 #include <arpa/inet.h>
 #include <sys/socket.h>
 
-#include "APacketSource.h"
-#include "ARTPConnection.h"
-#include "ASessionDescription.h"
 
 namespace android {
 
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index 38a370b..5f62b9d 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -18,17 +18,17 @@
 #define LOG_TAG "ARTPSource"
 #include <utils/Log.h>
 
-#include "ARTPSource.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
 
-#include "AAMRAssembler.h"
-#include "AAVCAssembler.h"
-#include "AHEVCAssembler.h"
-#include "AH263Assembler.h"
-#include "AMPEG2TSAssembler.h"
-#include "AMPEG4AudioAssembler.h"
-#include "AMPEG4ElementaryAssembler.h"
-#include "ARawAudioAssembler.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/AAMRAssembler.h>
+#include <media/stagefright/rtsp/AAVCAssembler.h>
+#include <media/stagefright/rtsp/AHEVCAssembler.h>
+#include <media/stagefright/rtsp/AH263Assembler.h>
+#include <media/stagefright/rtsp/AMPEG2TSAssembler.h>
+#include <media/stagefright/rtsp/AMPEG4AudioAssembler.h>
+#include <media/stagefright/rtsp/AMPEG4ElementaryAssembler.h>
+#include <media/stagefright/rtsp/ARawAudioAssembler.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index 11c7aeb..8990f0c 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "ARTPWriter"
 #include <utils/Log.h>
 
-#include "ARTPWriter.h"
+#include <media/stagefright/rtsp/ARTPWriter.h>
 
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/foundation/ABuffer.h>
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index c33bf3f..aab63a8 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -18,8 +18,8 @@
 #define LOG_TAG "ARTSPConnection"
 #include <utils/Log.h>
 
-#include "ARTSPConnection.h"
-#include "NetworkUtils.h"
+#include <media/stagefright/rtsp/ARTSPConnection.h>
+#include <media/stagefright/rtsp/NetworkUtils.h>
 
 #include <datasource/HTTPBase.h>
 #include <media/stagefright/foundation/ABuffer.h>
diff --git a/media/libstagefright/rtsp/ARawAudioAssembler.cpp b/media/libstagefright/rtsp/ARawAudioAssembler.cpp
index 167f7a4..9210af3 100644
--- a/media/libstagefright/rtsp/ARawAudioAssembler.cpp
+++ b/media/libstagefright/rtsp/ARawAudioAssembler.cpp
@@ -18,10 +18,10 @@
 #define LOG_TAG "ARawAudioAssembler"
 #include <utils/Log.h>
 
-#include "ARawAudioAssembler.h"
+#include <media/stagefright/rtsp/ARawAudioAssembler.h>
 
-#include "ARTPSource.h"
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ARTPSource.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 5b5b4b1..217eca7 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "ASessionDescription"
 #include <utils/Log.h>
 
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AString.h>
diff --git a/media/libstagefright/rtsp/Android.bp b/media/libstagefright/rtsp/Android.bp
index 34d1788..97d4abe 100644
--- a/media/libstagefright/rtsp/Android.bp
+++ b/media/libstagefright/rtsp/Android.bp
@@ -47,10 +47,9 @@
         "libmedia",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/native/include/media/openmax",
-        "frameworks/native/include/android",
+    header_libs: [
+        "libstagefright_headers",
+        "libstagefright_rtsp_headers",
     ],
 
     arch: {
@@ -73,6 +72,18 @@
     },
 }
 
+cc_library_headers {
+    name: "libstagefright_rtsp_headers",
+    export_include_dirs: ["include"],
+    vendor_available: true,
+    host_supported: true,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+}
+
 cc_library_static {
     name: "libstagefright_rtsp",
 
diff --git a/media/libstagefright/rtsp/JitterCalculator.cpp b/media/libstagefright/rtsp/JitterCalculator.cpp
index 7e60be2..93afe9c 100644
--- a/media/libstagefright/rtsp/JitterCalculator.cpp
+++ b/media/libstagefright/rtsp/JitterCalculator.cpp
@@ -17,7 +17,7 @@
 #define LOG_TAG "JitterCalc"
 #include <utils/Log.h>
 
-#include "JitterCalculator.h"
+#include <media/stagefright/rtsp/JitterCalculator.h>
 
 #include <stdlib.h>
 
diff --git a/media/libstagefright/rtsp/MyTransmitter.h b/media/libstagefright/rtsp/MyTransmitter.h
deleted file mode 100644
index bf44aff..0000000
--- a/media/libstagefright/rtsp/MyTransmitter.h
+++ /dev/null
@@ -1,984 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MY_TRANSMITTER_H_
-
-#define MY_TRANSMITTER_H_
-
-#include "ARTPConnection.h"
-
-#include <arpa/inet.h>
-#include <sys/socket.h>
-
-#include <openssl/md5.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/base64.h>
-#include <media/stagefright/foundation/hexdump.h>
-
-#ifdef ANDROID
-#include "VideoSource.h"
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaCodecSource.h>
-#endif
-
-namespace android {
-
-#define TRACK_SUFFIX    "trackid=1"
-#define PT              96
-#define PT_STR          "96"
-
-#define USERNAME        "bcast"
-#define PASSWORD        "test"
-
-static int uniformRand(int limit) {
-    return ((double)rand() * limit) / RAND_MAX;
-}
-
-static bool GetAttribute(const char *s, const char *key, AString *value) {
-    value->clear();
-
-    size_t keyLen = strlen(key);
-
-    for (;;) {
-        const char *colonPos = strchr(s, ';');
-
-        size_t len =
-            (colonPos == NULL) ? strlen(s) : colonPos - s;
-
-        if (len >= keyLen + 1 && s[keyLen] == '=' && !strncmp(s, key, keyLen)) {
-            value->setTo(&s[keyLen + 1], len - keyLen - 1);
-            return true;
-        }
-
-        if (colonPos == NULL) {
-            return false;
-        }
-
-        s = colonPos + 1;
-    }
-}
-
-struct MyTransmitter : public AHandler {
-    MyTransmitter(const char *url, const sp<ALooper> &looper)
-        : mServerURL(url),
-          mLooper(looper),
-          mConn(new ARTSPConnection),
-          mConnected(false),
-          mAuthType(NONE),
-          mRTPSocket(-1),
-          mRTCPSocket(-1),
-          mSourceID(rand()),
-          mSeqNo(uniformRand(65536)),
-          mRTPTimeBase(rand()),
-          mNumSamplesSent(0),
-          mNumRTPSent(0),
-          mNumRTPOctetsSent(0),
-          mLastRTPTime(0),
-          mLastNTPTime(0) {
-        mStreamURL = mServerURL;
-        mStreamURL.append("/bazong.sdp");
-
-        mTrackURL = mStreamURL;
-        mTrackURL.append("/");
-        mTrackURL.append(TRACK_SUFFIX);
-
-        mLooper->registerHandler(this);
-        mLooper->registerHandler(mConn);
-
-        sp<AMessage> reply = new AMessage('conn', this);
-        mConn->connect(mServerURL.c_str(), reply);
-
-#ifdef ANDROID
-        int width = 640;
-        int height = 480;
-
-        sp<MediaSource> source = new VideoSource(width, height);
-
-        sp<AMessage> encMeta = new AMessage;
-        encMeta->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
-        encMeta->setInt32("width", width);
-        encMeta->setInt32("height", height);
-        encMeta->setInt32("frame-rate", 30);
-        encMeta->setInt32("bitrate", 256000);
-        encMeta->setInt32("i-frame-interval", 10);
-
-        sp<ALooper> encLooper = new ALooper;
-        encLooper->setName("rtsp_transmitter");
-        encLooper->start();
-
-        mEncoder = MediaCodecSource::Create(encLooper, encMeta, source);
-
-        mEncoder->start();
-
-        MediaBuffer *buffer;
-        CHECK_EQ(mEncoder->read(&buffer), (status_t)OK);
-        CHECK(buffer != NULL);
-
-        makeH264SPropParamSets(buffer);
-
-        buffer->release();
-        buffer = NULL;
-#endif
-    }
-
-    uint64_t ntpTime() {
-        struct timeval tv;
-        gettimeofday(&tv, NULL);
-
-        uint64_t nowUs = tv.tv_sec * 1000000ll + tv.tv_usec;
-
-        nowUs += ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll;
-
-        uint64_t hi = nowUs / 1000000ll;
-        uint64_t lo = ((1ll << 32) * (nowUs % 1000000ll)) / 1000000ll;
-
-        return (hi << 32) | lo;
-    }
-
-    void issueAnnounce() {
-        AString sdp;
-        sdp = "v=0\r\n";
-
-        sdp.append("o=- ");
-
-        uint64_t ntp = ntpTime();
-        sdp.append(ntp);
-        sdp.append(" ");
-        sdp.append(ntp);
-        sdp.append(" IN IP4 127.0.0.0\r\n");
-
-        sdp.append(
-              "s=Sample\r\n"
-              "i=Playing around with ANNOUNCE\r\n"
-              "c=IN IP4 ");
-
-        struct in_addr addr;
-        addr.s_addr = htonl(mServerIP);
-
-        sdp.append(inet_ntoa(addr));
-
-        sdp.append(
-              "\r\n"
-              "t=0 0\r\n"
-              "a=range:npt=now-\r\n");
-
-#ifdef ANDROID
-        sp<MetaData> meta = mEncoder->getFormat();
-        int32_t width, height;
-        CHECK(meta->findInt32(kKeyWidth, &width));
-        CHECK(meta->findInt32(kKeyHeight, &height));
-
-        sdp.append(
-              "m=video 0 RTP/AVP " PT_STR "\r\n"
-              "b=AS 320000\r\n"
-              "a=rtpmap:" PT_STR " H264/90000\r\n");
-
-        sdp.append("a=cliprect 0,0,");
-        sdp.append(height);
-        sdp.append(",");
-        sdp.append(width);
-        sdp.append("\r\n");
-
-        sdp.append(
-              "a=framesize:" PT_STR " ");
-        sdp.append(width);
-        sdp.append("-");
-        sdp.append(height);
-        sdp.append("\r\n");
-
-        sdp.append(
-              "a=fmtp:" PT_STR " profile-level-id=42C015;sprop-parameter-sets=");
-
-        sdp.append(mSeqParamSet);
-        sdp.append(",");
-        sdp.append(mPicParamSet);
-        sdp.append(";packetization-mode=1\r\n");
-#else
-        sdp.append(
-                "m=audio 0 RTP/AVP " PT_STR "\r\n"
-                "a=rtpmap:" PT_STR " L8/8000/1\r\n");
-#endif
-
-        sdp.append("a=control:" TRACK_SUFFIX "\r\n");
-
-        AString request;
-        request.append("ANNOUNCE ");
-        request.append(mStreamURL);
-        request.append(" RTSP/1.0\r\n");
-
-        addAuthentication(&request, "ANNOUNCE", mStreamURL.c_str());
-
-        request.append("Content-Type: application/sdp\r\n");
-        request.append("Content-Length: ");
-        request.append(sdp.size());
-        request.append("\r\n");
-
-        request.append("\r\n");
-        request.append(sdp);
-
-        sp<AMessage> reply = new AMessage('anno', this);
-        mConn->sendRequest(request.c_str(), reply);
-    }
-
-    void H(const AString &s, AString *out) {
-        out->clear();
-
-        MD5_CTX m;
-        MD5_Init(&m);
-        MD5_Update(&m, s.c_str(), s.size());
-
-        uint8_t key[16];
-        MD5_Final(key, &m);
-
-        for (size_t i = 0; i < 16; ++i) {
-            char nibble = key[i] >> 4;
-            if (nibble <= 9) {
-                nibble += '0';
-            } else {
-                nibble += 'a' - 10;
-            }
-            out->append(&nibble, 1);
-
-            nibble = key[i] & 0x0f;
-            if (nibble <= 9) {
-                nibble += '0';
-            } else {
-                nibble += 'a' - 10;
-            }
-            out->append(&nibble, 1);
-        }
-    }
-
-    void authenticate(const sp<ARTSPResponse> &response) {
-        ssize_t i = response->mHeaders.indexOfKey("www-authenticate");
-        CHECK_GE(i, 0);
-
-        AString value = response->mHeaders.valueAt(i);
-
-        if (!strncmp(value.c_str(), "Basic", 5)) {
-            mAuthType = BASIC;
-        } else {
-            CHECK(!strncmp(value.c_str(), "Digest", 6));
-            mAuthType = DIGEST;
-
-            i = value.find("nonce=");
-            CHECK_GE(i, 0);
-            CHECK_EQ(value.c_str()[i + 6], '\"');
-            ssize_t j = value.find("\"", i + 7);
-            CHECK_GE(j, 0);
-
-            mNonce.setTo(value, i + 7, j - i - 7);
-        }
-
-        issueAnnounce();
-    }
-
-    void addAuthentication(
-            AString *request, const char *method, const char *url) {
-        if (mAuthType == NONE) {
-            return;
-        }
-
-        if (mAuthType == BASIC) {
-            request->append("Authorization: Basic YmNhc3Q6dGVzdAo=\r\n");
-            return;
-        }
-
-        CHECK_EQ((int)mAuthType, (int)DIGEST);
-
-        AString A1;
-        A1.append(USERNAME);
-        A1.append(":");
-        A1.append("Streaming Server");
-        A1.append(":");
-        A1.append(PASSWORD);
-
-        AString A2;
-        A2.append(method);
-        A2.append(":");
-        A2.append(url);
-
-        AString HA1, HA2;
-        H(A1, &HA1);
-        H(A2, &HA2);
-
-        AString tmp;
-        tmp.append(HA1);
-        tmp.append(":");
-        tmp.append(mNonce);
-        tmp.append(":");
-        tmp.append(HA2);
-
-        AString digest;
-        H(tmp, &digest);
-
-        request->append("Authorization: Digest ");
-        request->append("nonce=\"");
-        request->append(mNonce);
-        request->append("\", ");
-        request->append("username=\"" USERNAME "\", ");
-        request->append("uri=\"");
-        request->append(url);
-        request->append("\", ");
-        request->append("response=\"");
-        request->append(digest);
-        request->append("\"");
-        request->append("\r\n");
-    }
-
-    virtual void onMessageReceived(const sp<AMessage> &msg) {
-        switch (msg->what()) {
-            case 'conn':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "connection request completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                if (result != OK) {
-                    (new AMessage('quit', this))->post();
-                    break;
-                }
-
-                mConnected = true;
-
-                CHECK(msg->findInt32("server-ip", (int32_t *)&mServerIP));
-
-                issueAnnounce();
-                break;
-            }
-
-            case 'anno':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "ANNOUNCE completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                if (result == OK) {
-                    response = static_cast<ARTSPResponse *>(obj.get());
-                    CHECK(response != NULL);
-
-                    if (response->mStatusCode == 401) {
-                        if (mAuthType != NONE) {
-                            LOG(INFO) << "FAILED to authenticate";
-                            (new AMessage('quit', this))->post();
-                            break;
-                        }
-
-                        authenticate(response);
-                        break;
-                    }
-                }
-
-                if (result != OK || response->mStatusCode != 200) {
-                    (new AMessage('quit', this))->post();
-                    break;
-                }
-
-                unsigned rtpPort;
-                ARTPConnection::MakePortPair(&mRTPSocket, &mRTCPSocket, &rtpPort);
-
-                // (new AMessage('poll', this))->post();
-
-                AString request;
-                request.append("SETUP ");
-                request.append(mTrackURL);
-                request.append(" RTSP/1.0\r\n");
-
-                addAuthentication(&request, "SETUP", mTrackURL.c_str());
-
-                request.append("Transport: RTP/AVP;unicast;client_port=");
-                request.append(rtpPort);
-                request.append("-");
-                request.append(rtpPort + 1);
-                request.append(";mode=record\r\n");
-                request.append("\r\n");
-
-                sp<AMessage> reply = new AMessage('setu', this);
-                mConn->sendRequest(request.c_str(), reply);
-                break;
-            }
-
-#if 0
-            case 'poll':
-            {
-                fd_set rs;
-                FD_ZERO(&rs);
-                FD_SET(mRTCPSocket, &rs);
-
-                struct timeval tv;
-                tv.tv_sec = 0;
-                tv.tv_usec = 0;
-
-                int res = select(mRTCPSocket + 1, &rs, NULL, NULL, &tv);
-
-                if (res == 1) {
-                    sp<ABuffer> buffer = new ABuffer(65536);
-                    ssize_t n = recv(mRTCPSocket, buffer->data(), buffer->size(), 0);
-
-                    if (n <= 0) {
-                        LOG(ERROR) << "recv returned " << n;
-                    } else {
-                        LOG(INFO) << "recv returned " << n << " bytes of data.";
-
-                        hexdump(buffer->data(), n);
-                    }
-                }
-
-                msg->post(50000);
-                break;
-            }
-#endif
-
-            case 'setu':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "SETUP completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                if (result == OK) {
-                    response = static_cast<ARTSPResponse *>(obj.get());
-                    CHECK(response != NULL);
-                }
-
-                if (result != OK || response->mStatusCode != 200) {
-                    (new AMessage('quit', this))->post();
-                    break;
-                }
-
-                ssize_t i = response->mHeaders.indexOfKey("session");
-                CHECK_GE(i, 0);
-                mSessionID = response->mHeaders.valueAt(i);
-                i = mSessionID.find(";");
-                if (i >= 0) {
-                    // Remove options, i.e. ";timeout=90"
-                    mSessionID.erase(i, mSessionID.size() - i);
-                }
-
-                i = response->mHeaders.indexOfKey("transport");
-                CHECK_GE(i, 0);
-                AString transport = response->mHeaders.valueAt(i);
-
-                LOG(INFO) << "transport = '" << transport << "'";
-
-                AString value;
-                CHECK(GetAttribute(transport.c_str(), "server_port", &value));
-
-                unsigned rtpPort, rtcpPort;
-                CHECK_EQ(sscanf(value.c_str(), "%u-%u", &rtpPort, &rtcpPort), 2);
-
-                CHECK(GetAttribute(transport.c_str(), "source", &value));
-
-                memset(mRemoteAddr.sin_zero, 0, sizeof(mRemoteAddr.sin_zero));
-                mRemoteAddr.sin_family = AF_INET;
-                mRemoteAddr.sin_addr.s_addr = inet_addr(value.c_str());
-                mRemoteAddr.sin_port = htons(rtpPort);
-
-                mRemoteRTCPAddr = mRemoteAddr;
-                mRemoteRTCPAddr.sin_port = htons(rtpPort + 1);
-
-                CHECK_EQ(0, connect(mRTPSocket,
-                                    (const struct sockaddr *)&mRemoteAddr,
-                                    sizeof(mRemoteAddr)));
-
-                CHECK_EQ(0, connect(mRTCPSocket,
-                                    (const struct sockaddr *)&mRemoteRTCPAddr,
-                                    sizeof(mRemoteRTCPAddr)));
-
-                uint32_t x = ntohl(mRemoteAddr.sin_addr.s_addr);
-                LOG(INFO) << "sending data to "
-                     << (x >> 24)
-                     << "."
-                     << ((x >> 16) & 0xff)
-                     << "."
-                     << ((x >> 8) & 0xff)
-                     << "."
-                     << (x & 0xff)
-                     << ":"
-                     << rtpPort;
-
-                AString request;
-                request.append("RECORD ");
-                request.append(mStreamURL);
-                request.append(" RTSP/1.0\r\n");
-
-                addAuthentication(&request, "RECORD", mStreamURL.c_str());
-
-                request.append("Session: ");
-                request.append(mSessionID);
-                request.append("\r\n");
-                request.append("\r\n");
-
-                sp<AMessage> reply = new AMessage('reco', this);
-                mConn->sendRequest(request.c_str(), reply);
-                break;
-            }
-
-            case 'reco':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "RECORD completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                if (result == OK) {
-                    response = static_cast<ARTSPResponse *>(obj.get());
-                    CHECK(response != NULL);
-                }
-
-                if (result != OK) {
-                    (new AMessage('quit', this))->post();
-                    break;
-                }
-
-                (new AMessage('more', this))->post();
-                (new AMessage('sr  ', this))->post();
-                (new AMessage('aliv', this))->post(30000000ll);
-                break;
-            }
-
-            case 'aliv':
-            {
-                if (!mConnected) {
-                    break;
-                }
-
-                AString request;
-                request.append("OPTIONS ");
-                request.append(mStreamURL);
-                request.append(" RTSP/1.0\r\n");
-
-                addAuthentication(&request, "RECORD", mStreamURL.c_str());
-
-                request.append("Session: ");
-                request.append(mSessionID);
-                request.append("\r\n");
-                request.append("\r\n");
-
-                sp<AMessage> reply = new AMessage('opts', this);
-                mConn->sendRequest(request.c_str(), reply);
-                break;
-            }
-
-            case 'opts':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "OPTIONS completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                if (!mConnected) {
-                    break;
-                }
-
-                (new AMessage('aliv', this))->post(30000000ll);
-                break;
-            }
-
-            case 'more':
-            {
-                if (!mConnected) {
-                    break;
-                }
-
-                sp<ABuffer> buffer = new ABuffer(65536);
-                uint8_t *data = buffer->data();
-                data[0] = 0x80;
-                data[1] = (1 << 7) | PT;  // M-bit
-                data[2] = (mSeqNo >> 8) & 0xff;
-                data[3] = mSeqNo & 0xff;
-                data[8] = mSourceID >> 24;
-                data[9] = (mSourceID >> 16) & 0xff;
-                data[10] = (mSourceID >> 8) & 0xff;
-                data[11] = mSourceID & 0xff;
-
-#ifdef ANDROID
-                MediaBuffer *mediaBuf = NULL;
-                for (;;) {
-                    CHECK_EQ(mEncoder->read(&mediaBuf), (status_t)OK);
-                    if (mediaBuf->range_length() > 0) {
-                        break;
-                    }
-                    mediaBuf->release();
-                    mediaBuf = NULL;
-                }
-
-                int64_t timeUs;
-                CHECK(mediaBuf->meta_data()->findInt64(kKeyTime, &timeUs));
-
-                uint32_t rtpTime = mRTPTimeBase + (timeUs * 9 / 100ll);
-
-                const uint8_t *mediaData =
-                    (const uint8_t *)mediaBuf->data() + mediaBuf->range_offset();
-
-                CHECK(!memcmp("\x00\x00\x00\x01", mediaData, 4));
-
-                CHECK_LE(mediaBuf->range_length() - 4 + 12, buffer->size());
-
-                memcpy(&data[12],
-                       mediaData + 4, mediaBuf->range_length() - 4);
-
-                buffer->setRange(0, mediaBuf->range_length() - 4 + 12);
-
-                mediaBuf->release();
-                mediaBuf = NULL;
-#else
-                uint32_t rtpTime = mRTPTimeBase + mNumRTPSent * 128;
-                memset(&data[12], 0, 128);
-                buffer->setRange(0, 12 + 128);
-#endif
-
-                data[4] = rtpTime >> 24;
-                data[5] = (rtpTime >> 16) & 0xff;
-                data[6] = (rtpTime >> 8) & 0xff;
-                data[7] = rtpTime & 0xff;
-
-                ssize_t n = send(
-                        mRTPSocket, data, buffer->size(), 0);
-                if (n < 0) {
-                    LOG(ERROR) << "send failed (" << strerror(errno) << ")";
-                }
-                CHECK_EQ(n, (ssize_t)buffer->size());
-
-                ++mSeqNo;
-
-                ++mNumRTPSent;
-                mNumRTPOctetsSent += buffer->size() - 12;
-
-                mLastRTPTime = rtpTime;
-                mLastNTPTime = ntpTime();
-
-#ifdef ANDROID
-                if (mNumRTPSent < 60 * 25) {  // 60 secs worth
-                    msg->post(40000);
-#else
-                if (mNumRTPOctetsSent < 8000 * 60) {
-                    msg->post(1000000ll * 128 / 8000);
-#endif
-                } else {
-                    LOG(INFO) << "That's enough, pausing.";
-
-                    AString request;
-                    request.append("PAUSE ");
-                    request.append(mStreamURL);
-                    request.append(" RTSP/1.0\r\n");
-
-                    addAuthentication(&request, "PAUSE", mStreamURL.c_str());
-
-                    request.append("Session: ");
-                    request.append(mSessionID);
-                    request.append("\r\n");
-                    request.append("\r\n");
-
-                    sp<AMessage> reply = new AMessage('paus', this);
-                    mConn->sendRequest(request.c_str(), reply);
-                }
-                break;
-            }
-
-            case 'sr  ':
-            {
-                if (!mConnected) {
-                    break;
-                }
-
-                sp<ABuffer> buffer = new ABuffer(65536);
-                buffer->setRange(0, 0);
-
-                addSR(buffer);
-                addSDES(buffer);
-
-                uint8_t *data = buffer->data();
-                ssize_t n = send(
-                        mRTCPSocket, data, buffer->size(), 0);
-                CHECK_EQ(n, (ssize_t)buffer->size());
-
-                msg->post(3000000);
-                break;
-            }
-
-            case 'paus':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "PAUSE completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                AString request;
-                request.append("TEARDOWN ");
-                request.append(mStreamURL);
-                request.append(" RTSP/1.0\r\n");
-
-                addAuthentication(&request, "TEARDOWN", mStreamURL.c_str());
-
-                request.append("Session: ");
-                request.append(mSessionID);
-                request.append("\r\n");
-                request.append("\r\n");
-
-                sp<AMessage> reply = new AMessage('tear', this);
-                mConn->sendRequest(request.c_str(), reply);
-                break;
-            }
-
-            case 'tear':
-            {
-                int32_t result;
-                CHECK(msg->findInt32("result", &result));
-
-                LOG(INFO) << "TEARDOWN completed with result "
-                     << result << " (" << strerror(-result) << ")";
-
-                sp<RefBase> obj;
-                CHECK(msg->findObject("response", &obj));
-                sp<ARTSPResponse> response;
-
-                if (result == OK) {
-                    response = static_cast<ARTSPResponse *>(obj.get());
-                    CHECK(response != NULL);
-                }
-
-                (new AMessage('quit', this))->post();
-                break;
-            }
-
-            case 'disc':
-            {
-                LOG(INFO) << "disconnect completed";
-
-                mConnected = false;
-                (new AMessage('quit', this))->post();
-                break;
-            }
-
-            case 'quit':
-            {
-                if (mConnected) {
-                    mConn->disconnect(new AMessage('disc', this));
-                    break;
-                }
-
-                if (mRTPSocket >= 0) {
-                    close(mRTPSocket);
-                    mRTPSocket = -1;
-                }
-
-                if (mRTCPSocket >= 0) {
-                    close(mRTCPSocket);
-                    mRTCPSocket = -1;
-                }
-
-#ifdef ANDROID
-                mEncoder->stop();
-                mEncoder.clear();
-#endif
-
-                mLooper->stop();
-                break;
-            }
-
-            default:
-                TRESPASS();
-        }
-    }
-
-protected:
-    virtual ~MyTransmitter() {
-    }
-
-private:
-    enum AuthType {
-        NONE,
-        BASIC,
-        DIGEST
-    };
-
-    AString mServerURL;
-    AString mTrackURL;
-    AString mStreamURL;
-
-    sp<ALooper> mLooper;
-    sp<ARTSPConnection> mConn;
-    bool mConnected;
-    uint32_t mServerIP;
-    AuthType mAuthType;
-    AString mNonce;
-    AString mSessionID;
-    int mRTPSocket, mRTCPSocket;
-    uint32_t mSourceID;
-    uint32_t mSeqNo;
-    uint32_t mRTPTimeBase;
-    struct sockaddr_in mRemoteAddr;
-    struct sockaddr_in mRemoteRTCPAddr;
-    size_t mNumSamplesSent;
-    uint32_t mNumRTPSent;
-    uint32_t mNumRTPOctetsSent;
-    uint32_t mLastRTPTime;
-    uint64_t mLastNTPTime;
-
-#ifdef ANDROID
-    sp<MediaSource> mEncoder;
-    AString mSeqParamSet;
-    AString mPicParamSet;
-
-    void makeH264SPropParamSets(MediaBuffer *buffer) {
-        static const char kStartCode[] = "\x00\x00\x00\x01";
-
-        const uint8_t *data =
-            (const uint8_t *)buffer->data() + buffer->range_offset();
-        size_t size = buffer->range_length();
-
-        CHECK_GE(size, 0u);
-        CHECK(!memcmp(kStartCode, data, 4));
-
-        data += 4;
-        size -= 4;
-
-        size_t startCodePos = 0;
-        while (startCodePos + 3 < size
-                && memcmp(kStartCode, &data[startCodePos], 4)) {
-            ++startCodePos;
-        }
-
-        CHECK_LT(startCodePos + 3, size);
-
-        encodeBase64(data, startCodePos, &mSeqParamSet);
-
-        encodeBase64(&data[startCodePos + 4], size - startCodePos - 4,
-                     &mPicParamSet);
-    }
-#endif
-
-    void addSR(const sp<ABuffer> &buffer) {
-        uint8_t *data = buffer->data() + buffer->size();
-
-        data[0] = 0x80 | 0;
-        data[1] = 200;  // SR
-        data[2] = 0;
-        data[3] = 6;
-        data[4] = mSourceID >> 24;
-        data[5] = (mSourceID >> 16) & 0xff;
-        data[6] = (mSourceID >> 8) & 0xff;
-        data[7] = mSourceID & 0xff;
-
-        data[8] = mLastNTPTime >> (64 - 8);
-        data[9] = (mLastNTPTime >> (64 - 16)) & 0xff;
-        data[10] = (mLastNTPTime >> (64 - 24)) & 0xff;
-        data[11] = (mLastNTPTime >> 32) & 0xff;
-        data[12] = (mLastNTPTime >> 24) & 0xff;
-        data[13] = (mLastNTPTime >> 16) & 0xff;
-        data[14] = (mLastNTPTime >> 8) & 0xff;
-        data[15] = mLastNTPTime & 0xff;
-
-        data[16] = (mLastRTPTime >> 24) & 0xff;
-        data[17] = (mLastRTPTime >> 16) & 0xff;
-        data[18] = (mLastRTPTime >> 8) & 0xff;
-        data[19] = mLastRTPTime & 0xff;
-
-        data[20] = mNumRTPSent >> 24;
-        data[21] = (mNumRTPSent >> 16) & 0xff;
-        data[22] = (mNumRTPSent >> 8) & 0xff;
-        data[23] = mNumRTPSent & 0xff;
-
-        data[24] = mNumRTPOctetsSent >> 24;
-        data[25] = (mNumRTPOctetsSent >> 16) & 0xff;
-        data[26] = (mNumRTPOctetsSent >> 8) & 0xff;
-        data[27] = mNumRTPOctetsSent & 0xff;
-
-        buffer->setRange(buffer->offset(), buffer->size() + 28);
-    }
-
-    void addSDES(const sp<ABuffer> &buffer) {
-        uint8_t *data = buffer->data() + buffer->size();
-        data[0] = 0x80 | 1;
-        data[1] = 202;  // SDES
-        data[4] = mSourceID >> 24;
-        data[5] = (mSourceID >> 16) & 0xff;
-        data[6] = (mSourceID >> 8) & 0xff;
-        data[7] = mSourceID & 0xff;
-
-        size_t offset = 8;
-
-        data[offset++] = 1;  // CNAME
-
-        static const char *kCNAME = "andih@laptop";
-        data[offset++] = strlen(kCNAME);
-
-        memcpy(&data[offset], kCNAME, strlen(kCNAME));
-        offset += strlen(kCNAME);
-
-        data[offset++] = 7;  // NOTE
-
-        static const char *kNOTE = "Hell's frozen over.";
-        data[offset++] = strlen(kNOTE);
-
-        memcpy(&data[offset], kNOTE, strlen(kNOTE));
-        offset += strlen(kNOTE);
-
-        data[offset++] = 0;
-
-        if ((offset % 4) > 0) {
-            size_t count = 4 - (offset % 4);
-            switch (count) {
-                case 3:
-                    data[offset++] = 0;
-                case 2:
-                    data[offset++] = 0;
-                case 1:
-                    data[offset++] = 0;
-            }
-        }
-
-        size_t numWords = (offset / 4) - 1;
-        data[2] = numWords >> 8;
-        data[3] = numWords & 0xff;
-
-        buffer->setRange(buffer->offset(), buffer->size() + offset);
-    }
-
-    DISALLOW_EVIL_CONSTRUCTORS(MyTransmitter);
-};
-
-}  // namespace android
-
-#endif  // MY_TRANSMITTER_H_
diff --git a/media/libstagefright/rtsp/NetworkUtils.cpp b/media/libstagefright/rtsp/NetworkUtils.cpp
index c053be8..e8ec64d 100644
--- a/media/libstagefright/rtsp/NetworkUtils.cpp
+++ b/media/libstagefright/rtsp/NetworkUtils.cpp
@@ -20,7 +20,7 @@
 #define LOG_TAG "NetworkUtils"
 #include <utils/Log.h>
 
-#include "NetworkUtils.h"
+#include <media/stagefright/rtsp/NetworkUtils.h>
 #include <cutils/qtaguid.h>
 #include <NetdClient.h>
 
diff --git a/media/libstagefright/rtsp/NetworkUtilsForAppProc.cpp b/media/libstagefright/rtsp/NetworkUtilsForAppProc.cpp
index 662159c..30fc38a 100644
--- a/media/libstagefright/rtsp/NetworkUtilsForAppProc.cpp
+++ b/media/libstagefright/rtsp/NetworkUtilsForAppProc.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "NetworkUtils"
 #include <utils/Log.h>
 
-#include "NetworkUtils.h"
+#include <media/stagefright/rtsp/NetworkUtils.h>
 
 // NetworkUtils implementation for application process.
 namespace android {
diff --git a/media/libstagefright/rtsp/QualManager.cpp b/media/libstagefright/rtsp/QualManager.cpp
index 37aa326..f1f8222 100644
--- a/media/libstagefright/rtsp/QualManager.cpp
+++ b/media/libstagefright/rtsp/QualManager.cpp
@@ -21,7 +21,7 @@
 #include <sys/prctl.h>
 #include <utils/Log.h>
 
-#include "QualManager.h"
+#include <media/stagefright/rtsp/QualManager.h>
 
 namespace android {
 
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index e236267..8cd33cf 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -18,9 +18,9 @@
 #define LOG_TAG "SDPLoader"
 #include <utils/Log.h>
 
-#include "include/SDPLoader.h"
+#include <media/stagefright/rtsp/SDPLoader.h>
 
-#include "ASessionDescription.h"
+#include <media/stagefright/rtsp/ASessionDescription.h>
 
 #include <datasource/MediaHTTP.h>
 #include <media/MediaHTTPConnection.h>
diff --git a/media/libstagefright/rtsp/UDPPusher.cpp b/media/libstagefright/rtsp/UDPPusher.cpp
index 5c685a1..4e812f5 100644
--- a/media/libstagefright/rtsp/UDPPusher.cpp
+++ b/media/libstagefright/rtsp/UDPPusher.cpp
@@ -18,7 +18,7 @@
 #define LOG_TAG "UDPPusher"
 #include <utils/Log.h>
 
-#include "UDPPusher.h"
+#include <media/stagefright/rtsp/UDPPusher.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/AAMRAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AAMRAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AAMRAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AAMRAssembler.h
diff --git a/media/libstagefright/rtsp/AAVCAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AAVCAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AAVCAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AAVCAssembler.h
diff --git a/media/libstagefright/rtsp/AH263Assembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AH263Assembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AH263Assembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AH263Assembler.h
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AHEVCAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AHEVCAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AHEVCAssembler.h
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG2TSAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AMPEG2TSAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG2TSAssembler.h
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG4AudioAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AMPEG4AudioAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG4AudioAssembler.h
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG4ElementaryAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/AMPEG4ElementaryAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/AMPEG4ElementaryAssembler.h
diff --git a/media/libstagefright/rtsp/APacketSource.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/APacketSource.h
similarity index 100%
rename from media/libstagefright/rtsp/APacketSource.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/APacketSource.h
diff --git a/media/libstagefright/rtsp/ARTPAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPAssembler.h
diff --git a/media/libstagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPConnection.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPConnection.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPConnection.h
diff --git a/media/libstagefright/rtsp/ARTPSession.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSession.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPSession.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSession.h
diff --git a/media/libstagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPSource.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
diff --git a/media/libstagefright/rtsp/ARTPWriter.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPWriter.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTPWriter.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPWriter.h
diff --git a/media/libstagefright/rtsp/ARTSPConnection.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTSPConnection.h
similarity index 100%
rename from media/libstagefright/rtsp/ARTSPConnection.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTSPConnection.h
diff --git a/media/libstagefright/rtsp/ARawAudioAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARawAudioAssembler.h
similarity index 100%
rename from media/libstagefright/rtsp/ARawAudioAssembler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ARawAudioAssembler.h
diff --git a/media/libstagefright/rtsp/ASessionDescription.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ASessionDescription.h
similarity index 100%
rename from media/libstagefright/rtsp/ASessionDescription.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/ASessionDescription.h
diff --git a/media/libstagefright/rtsp/JitterCalculator.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/JitterCalculator.h
similarity index 100%
rename from media/libstagefright/rtsp/JitterCalculator.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/JitterCalculator.h
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/MyHandler.h
similarity index 100%
rename from media/libstagefright/rtsp/MyHandler.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/MyHandler.h
diff --git a/media/libstagefright/rtsp/NetworkUtils.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/NetworkUtils.h
similarity index 100%
rename from media/libstagefright/rtsp/NetworkUtils.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/NetworkUtils.h
diff --git a/media/libstagefright/rtsp/QualManager.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/QualManager.h
similarity index 100%
rename from media/libstagefright/rtsp/QualManager.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/QualManager.h
diff --git a/media/libstagefright/include/SDPLoader.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/SDPLoader.h
similarity index 100%
rename from media/libstagefright/include/SDPLoader.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/SDPLoader.h
diff --git a/media/libstagefright/rtsp/TrafficRecorder.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/TrafficRecorder.h
similarity index 100%
rename from media/libstagefright/rtsp/TrafficRecorder.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/TrafficRecorder.h
diff --git a/media/libstagefright/rtsp/UDPPusher.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/UDPPusher.h
similarity index 100%
rename from media/libstagefright/rtsp/UDPPusher.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/UDPPusher.h
diff --git a/media/libstagefright/rtsp/VideoSource.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/VideoSource.h
similarity index 100%
rename from media/libstagefright/rtsp/VideoSource.h
rename to media/libstagefright/rtsp/include/media/stagefright/rtsp/VideoSource.h
diff --git a/media/libstagefright/rtsp/rtp_test.cpp b/media/libstagefright/rtsp/rtp_test.cpp
index 4590699..1ae4a09 100644
--- a/media/libstagefright/rtsp/rtp_test.cpp
+++ b/media/libstagefright/rtsp/rtp_test.cpp
@@ -27,9 +27,9 @@
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/SimpleDecodingSource.h>
 
-#include "ARTPSession.h"
-#include "ASessionDescription.h"
-#include "UDPPusher.h"
+#include <media/stagefright/rtsp/ARTPSession.h>
+#include <media/stagefright/rtsp/ASessionDescription.h>
+#include <media/stagefright/rtsp/UDPPusher.h>
 
 using namespace android;
 
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index a799a13..e6b67ce 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -32,11 +32,6 @@
         "liblog",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/av/media/libstagefright/include",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/tests/HEVC/Android.bp b/media/libstagefright/tests/HEVC/Android.bp
index 91bf385..7a0ba52 100644
--- a/media/libstagefright/tests/HEVC/Android.bp
+++ b/media/libstagefright/tests/HEVC/Android.bp
@@ -44,10 +44,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
index 324a042..c43e1f8 100644
--- a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
+++ b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
@@ -21,7 +21,7 @@
 #include <fstream>
 
 #include <media/stagefright/foundation/ABitReader.h>
-#include "include/HevcUtils.h"
+#include <HevcUtils.h>
 
 #include "HEVCUtilsTestEnvironment.h"
 
diff --git a/media/libstagefright/tests/MediaCodecListOverrides_test.cpp b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
index 0c22a42..20737db 100644
--- a/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
+++ b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
@@ -20,11 +20,10 @@
 
 #include <gtest/gtest.h>
 
-#include "MediaCodecListOverrides.h"
-
 #include <media/MediaCodecInfo.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaCodecListOverrides.h>
 
 #include <vector>
 
diff --git a/media/libstagefright/tests/extractorFactory/Android.bp b/media/libstagefright/tests/extractorFactory/Android.bp
index 13d5b89..a067284 100644
--- a/media/libstagefright/tests/extractorFactory/Android.bp
+++ b/media/libstagefright/tests/extractorFactory/Android.bp
@@ -51,10 +51,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     // TODO: (b/150181583)
     compile_multilib: "first",
 
diff --git a/media/libstagefright/tests/fuzzers/Android.bp b/media/libstagefright/tests/fuzzers/Android.bp
index ea17a4d..2bcfd67 100644
--- a/media/libstagefright/tests/fuzzers/Android.bp
+++ b/media/libstagefright/tests/fuzzers/Android.bp
@@ -32,9 +32,6 @@
         "liblog",
         "media_permission-aidl-cpp",
     ],
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
 }
 
 cc_fuzz {
@@ -74,6 +71,7 @@
     srcs: [
         "FrameDecoderFuzzer.cpp",
     ],
+    corpus: ["corpus/*"],
     defaults: ["libstagefright_fuzzer_defaults"],
 }
 
@@ -85,6 +83,9 @@
     ],
     dictionary: "dictionaries/formats.dict",
     defaults: ["libstagefright_fuzzer_defaults"],
+    header_libs: [
+        "libstagefright_webm_headers",
+    ],
     static_libs: [
         "libdatasource",
     ],
diff --git a/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp b/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
index c251479..4218d2d 100644
--- a/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
+++ b/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "include/FrameDecoder.h"
+#include <FrameDecoder.h>
 #include <fuzzer/FuzzedDataProvider.h>
 #include <media/IMediaSource.h>
 #include <media/stagefright/MetaData.h>
@@ -46,12 +46,15 @@
     }
 
     while (fdp.remaining_bytes()) {
-        switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 3)) {
-            case 0:
-                decoder->init(/*frameTimeUs*/ fdp.ConsumeIntegral<int64_t>(),
-                              /*option*/ fdp.ConsumeIntegral<int>(),
-                              /*colorFormat*/ fdp.ConsumeIntegral<int>());
+        uint8_t switchCase = fdp.ConsumeIntegralInRange<uint8_t>(0, 3);
+        switch (switchCase) {
+            case 0: {
+                int64_t frameTimeUs = fdp.ConsumeIntegral<int64_t>();
+                int option = fdp.ConsumeIntegral<int>();
+                int colorFormat = fdp.ConsumeIntegral<int>();
+                decoder->init(frameTimeUs, option, colorFormat);
                 break;
+            }
             case 1:
                 decoder->extractFrame();
                 break;
diff --git a/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp b/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
index 810ae95..d94c8ff 100644
--- a/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
+++ b/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
@@ -23,7 +23,8 @@
 #include <media/stagefright/OggWriter.h>
 
 #include "MediaMimeTypes.h"
-#include "webm/WebmWriter.h"
+
+#include <webm/WebmWriter.h>
 
 namespace android {
 std::string genMimeType(FuzzedDataProvider *dataProvider) {
@@ -121,4 +122,4 @@
     }
     return writer;
 }
-}  // namespace android
\ No newline at end of file
+}  // namespace android
diff --git a/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat b/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat
new file mode 100644
index 0000000..698e21d
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat
Binary files differ
diff --git a/media/libstagefright/tests/writer/Android.bp b/media/libstagefright/tests/writer/Android.bp
index 38d5ecc..49fb569 100644
--- a/media/libstagefright/tests/writer/Android.bp
+++ b/media/libstagefright/tests/writer/Android.bp
@@ -52,10 +52,6 @@
         "libogg",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/timedtext/Android.bp b/media/libstagefright/timedtext/Android.bp
index 6590ef7..619e06b 100644
--- a/media/libstagefright/timedtext/Android.bp
+++ b/media/libstagefright/timedtext/Android.bp
@@ -35,8 +35,16 @@
         cfi: true,
     },
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
+    export_include_dirs: [
+        "include",
+    ],
+
+    local_include_dirs: [
+        "include/timedtext",
+    ],
+
+    header_libs: [
+        "libstagefright_headers",
     ],
 
     shared_libs: ["libmedia"],
diff --git a/media/libstagefright/timedtext/TextDescriptions.h b/media/libstagefright/timedtext/include/timedtext/TextDescriptions.h
similarity index 100%
rename from media/libstagefright/timedtext/TextDescriptions.h
rename to media/libstagefright/timedtext/include/timedtext/TextDescriptions.h
diff --git a/media/libstagefright/timedtext/test/Android.bp b/media/libstagefright/timedtext/test/Android.bp
index 0b632bf..58c68ef 100644
--- a/media/libstagefright/timedtext/test/Android.bp
+++ b/media/libstagefright/timedtext/test/Android.bp
@@ -39,8 +39,8 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
+    header_libs: [
+        "libstagefright_headers",
     ],
 
     shared_libs: [
diff --git a/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp b/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
index d85ae39..f934b54 100644
--- a/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
+++ b/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
@@ -27,7 +27,7 @@
 #include <media/stagefright/foundation/AString.h>
 #include <media/stagefright/foundation/ByteUtils.h>
 
-#include "timedtext/TextDescriptions.h"
+#include <timedtext/TextDescriptions.h>
 
 #include "TimedTextTestEnvironment.h"
 
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 32a22ba..9d5f430 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -33,7 +33,11 @@
         "WebmWriter.cpp",
     ],
 
-    include_dirs: ["frameworks/av/include"],
+    local_include_dirs: [
+        "include/webm",
+    ],
+
+    export_include_dirs: ["include"],
 
     shared_libs: [
         "libdatasource",
@@ -44,7 +48,21 @@
     ],
 
     header_libs: [
+        "av-headers",
         "libmedia_headers",
         "media_ndk_headers",
     ],
 }
+
+
+cc_library_headers {
+    name: "libstagefright_webm_headers",
+    export_include_dirs: ["include"],
+
+    host_supported: true,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+}
diff --git a/media/libstagefright/webm/EbmlUtil.h b/media/libstagefright/webm/include/webm/EbmlUtil.h
similarity index 100%
rename from media/libstagefright/webm/EbmlUtil.h
rename to media/libstagefright/webm/include/webm/EbmlUtil.h
diff --git a/media/libstagefright/webm/LinkedBlockingQueue.h b/media/libstagefright/webm/include/webm/LinkedBlockingQueue.h
similarity index 100%
rename from media/libstagefright/webm/LinkedBlockingQueue.h
rename to media/libstagefright/webm/include/webm/LinkedBlockingQueue.h
diff --git a/media/libstagefright/webm/WebmConstants.h b/media/libstagefright/webm/include/webm/WebmConstants.h
similarity index 100%
rename from media/libstagefright/webm/WebmConstants.h
rename to media/libstagefright/webm/include/webm/WebmConstants.h
diff --git a/media/libstagefright/webm/WebmElement.h b/media/libstagefright/webm/include/webm/WebmElement.h
similarity index 100%
rename from media/libstagefright/webm/WebmElement.h
rename to media/libstagefright/webm/include/webm/WebmElement.h
diff --git a/media/libstagefright/webm/WebmFrame.h b/media/libstagefright/webm/include/webm/WebmFrame.h
similarity index 100%
rename from media/libstagefright/webm/WebmFrame.h
rename to media/libstagefright/webm/include/webm/WebmFrame.h
diff --git a/media/libstagefright/webm/WebmFrameThread.h b/media/libstagefright/webm/include/webm/WebmFrameThread.h
similarity index 100%
rename from media/libstagefright/webm/WebmFrameThread.h
rename to media/libstagefright/webm/include/webm/WebmFrameThread.h
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/include/webm/WebmWriter.h
similarity index 100%
rename from media/libstagefright/webm/WebmWriter.h
rename to media/libstagefright/webm/include/webm/WebmWriter.h
diff --git a/media/libstagefright/webm/tests/Android.bp b/media/libstagefright/webm/tests/Android.bp
index 4443766..629ee47 100644
--- a/media/libstagefright/webm/tests/Android.bp
+++ b/media/libstagefright/webm/tests/Android.bp
@@ -31,8 +31,8 @@
         "WebmFrameThreadUnitTest.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
+    header_libs: [
+        "libstagefright_headers",
     ],
 
     static_libs: [
diff --git a/media/libstagefright/writer_fuzzers/Android.bp b/media/libstagefright/writer_fuzzers/Android.bp
index a33b888..b81f27e 100644
--- a/media/libstagefright/writer_fuzzers/Android.bp
+++ b/media/libstagefright/writer_fuzzers/Android.bp
@@ -119,7 +119,7 @@
         "libstagefright_webm",
         "libdatasource",
     ],
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
+    header_libs: [
+        "libstagefright_headers",
     ],
 }
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index e25658f..537df76 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -47,11 +47,6 @@
         "libregistermsext",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libmediaplayerservice",
-        "frameworks/av/services/mediaresourcemanager",
-    ],
-
     // By default mediaserver runs in 32-bit to save memory, except
     // on 64-bit-only lunch targets.
     // ****************************************************************
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index 58e2d2a..026847a 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -25,9 +25,8 @@
 #include <utils/Log.h>
 #include "RegisterExtensions.h"
 
-// from LOCAL_C_INCLUDES
-#include "MediaPlayerService.h"
-#include "ResourceManagerService.h"
+#include <MediaPlayerService.h>
+#include <ResourceManagerService.h>
 
 using namespace android;
 
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 8d527e9..6c5e6cb 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -167,7 +167,7 @@
     stubs: {
         symbol_file: "libmediandk.map.txt",
         versions: ["29"],
-    },
+    },
 }
 
 cc_library {
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 6e9945d..59c1103 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -97,6 +97,8 @@
     List<idvec_t> mIds;
     KeyedVector<String8, String8> mQueryResults;
     Vector<uint8_t> mKeyRequest;
+    String8 mDefaultUrl;
+    AMediaDrmKeyRequestType mkeyRequestType;
     Vector<uint8_t> mProvisionRequest;
     String8 mProvisionUrl;
     String8 mPropertyString;
@@ -416,6 +418,21 @@
         const AMediaDrmKeyValue *optionalParameters, size_t numOptionalParameters,
         const uint8_t **keyRequest, size_t *keyRequestSize) {
 
+    return AMediaDrm_getKeyRequestWithDefaultUrlAndType(mObj,
+        scope, init, initSize, mimeType, keyType, optionalParameters,
+        numOptionalParameters, keyRequest,
+        keyRequestSize, NULL, NULL);
+}
+
+EXPORT
+media_status_t AMediaDrm_getKeyRequestWithDefaultUrlAndType(AMediaDrm *mObj,
+        const AMediaDrmScope *scope, const uint8_t *init, size_t initSize,
+        const char *mimeType, AMediaDrmKeyType keyType,
+        const AMediaDrmKeyValue *optionalParameters,
+        size_t numOptionalParameters, const uint8_t **keyRequest,
+        size_t *keyRequestSize, const char **defaultUrl,
+        AMediaDrmKeyRequestType *keyRequestType) {
+
     if (!mObj || mObj->mDrm == NULL) {
         return AMEDIA_ERROR_INVALID_OBJECT;
     }
@@ -449,18 +466,43 @@
         mdOptionalParameters.add(String8(optionalParameters[i].mKey),
                 String8(optionalParameters[i].mValue));
     }
-    String8 defaultUrl;
-    DrmPlugin::KeyRequestType keyRequestType;
+
+    DrmPlugin::KeyRequestType requestType;
     mObj->mKeyRequest.clear();
     status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
-            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
-            &keyRequestType);
+            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, mObj->mDefaultUrl,
+            &requestType);
     if (status != OK) {
         return translateStatus(status);
     } else {
         *keyRequest = mObj->mKeyRequest.array();
         *keyRequestSize = mObj->mKeyRequest.size();
+        if (defaultUrl != NULL)
+            *defaultUrl = mObj->mDefaultUrl.string();
+        switch(requestType) {
+            case DrmPlugin::kKeyRequestType_Initial:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_INITIAL;
+                break;
+            case DrmPlugin::kKeyRequestType_Renewal:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_RENEWAL;
+                break;
+            case DrmPlugin::kKeyRequestType_Release:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_RELEASE;
+                break;
+            case DrmPlugin::kKeyRequestType_None:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_NONE;
+                break;
+            case DrmPlugin::kKeyRequestType_Update:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_UPDATE;
+                break;
+            default:
+                return AMEDIA_ERROR_UNKNOWN;
+        }
+
+        if (keyRequestType != NULL)
+            *keyRequestType = mObj->mkeyRequestType;
     }
+
     return AMEDIA_OK;
 }
 
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 849a8f9..4eca3d7 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -112,6 +112,41 @@
 } AMediaDrmKeyType;
 
 /**
+ * Introduced in API 33.
+ */
+typedef enum AMediaDrmKeyRequestType : int32_t {
+    /**
+     * Key request type is initial license request.
+     * An initial license request is necessary to load keys.
+     */
+    KEY_REQUEST_TYPE_INITIAL,
+
+    /**
+     * Key request type is license renewal.
+     * A renewal license request is necessary to prevent the keys from expiring.
+     */
+    KEY_REQUEST_TYPE_RENEWAL,
+
+    /**
+     * Key request type is license release.
+     * A license release request indicates that keys are removed.
+     */
+    KEY_REQUEST_TYPE_RELEASE,
+
+    /**
+     * Keys are already loaded and are available for use. No license request is necessary, and
+     * no key request data is returned.
+     */
+    KEY_REQUEST_TYPE_NONE,
+
+    /**
+     * Keys have been loaded but an additional license request is needed
+     * to update their values.
+     */
+    KEY_REQUEST_TYPE_UPDATE
+} AMediaDrmKeyRequestType;
+
+/**
  *  Data type containing {key, value} pair
  */
 typedef struct AMediaDrmKeyValuePair {
@@ -248,7 +283,10 @@
  * to obtain or release keys used to decrypt encrypted content.
  * AMediaDrm_getKeyRequest is used to obtain an opaque key request byte array that
  * is delivered to the license server.  The opaque key request byte array is
- * returned in KeyRequest.data.
+ * returned in *keyRequest and the number of bytes in the request is
+ * returned in *keyRequestSize.
+ * This API has the same functionality as AMediaDrm_getKeyRequestWithDefaultUrlAndType()
+ * when defaultUrl and keyRequestType are passed in as NULL.
  *
  * After the app has received the key request response from the server,
 * it should deliver the response to the DRM engine plugin using the method
@@ -280,11 +318,14 @@
  *   by the caller
  *
  * On exit:
+ *   If this returns AMEDIA_OK,
  *   1. The keyRequest pointer will reference the opaque key request data.  It
  *       will reside in memory owned by the AMediaDrm object, and will remain
- *       accessible until the next call to AMediaDrm_getKeyRequest or until the
+ *       accessible until the next call to AMediaDrm_getKeyRequest
+ *       or AMediaDrm_getKeyRequestWithDefaultUrlAndType or until the
  *       MediaDrm object is released.
  *   2. keyRequestSize will be set to the size of the request
+ *   If this does not return AMEDIA_OK, the values of these parameters should not be used.
  *
  * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
  * problem with the device certificate.
@@ -297,6 +338,72 @@
         const uint8_t **keyRequest, size_t *keyRequestSize) __INTRODUCED_IN(21);
 
 /**
+ * A key request/response exchange occurs between the app and a license server
+ * to obtain or release keys used to decrypt encrypted content.
+ * AMediaDrm_getKeyRequest is used to obtain an opaque key request byte array that
+ * is delivered to the license server.  The opaque key request byte array is
+ * returned in *keyRequest and the number of bytes in the request is
+ * returned in *keyRequestSize.
+ *
+ * After the app has received the key request response from the server,
+ * it should deliver the response to the DRM engine plugin using the method
+ * AMediaDrm_provideKeyResponse.
+ *
+ * scope may be a sessionId or a keySetId, depending on the specified keyType.
+ * When the keyType is KEY_TYPE_STREAMING or KEY_TYPE_OFFLINE, scope should be set
+ * to the sessionId the keys will be provided to.  When the keyType is
+ * KEY_TYPE_RELEASE, scope should be set to the keySetId of the keys being released.
+ * Releasing keys from a device invalidates them for all sessions.
+ *
+ * init container-specific data, its meaning is interpreted based on the mime type
+ * provided in the mimeType parameter.  It could contain, for example, the content
+ * ID, key ID or other data obtained from the content metadata that is required in
+ * generating the key request. init may be null when keyType is KEY_TYPE_RELEASE.
+ *
+ * initSize is the number of bytes of initData
+ *
+ * mimeType identifies the mime type of the content.
+ *
+ * keyType specifies the type of the request. The request may be to acquire keys for
+ *   streaming or offline content, or to release previously acquired keys, which are
+ *   identified by a keySetId.
+ *
+ * optionalParameters are included in the key request message to allow a client
+ *   application to provide additional message parameters to the server.
+ *
+ * numOptionalParameters indicates the number of optional parameters provided
+ *   by the caller
+ *
+ * On exit:
+ *   If this returns AMEDIA_OK,
+ *   1. The keyRequest pointer will reference the opaque key request data.  It
+ *       will reside in memory owned by the AMediaDrm object, and will remain
+ *       accessible until the next call to either AMediaDrm_getKeyRequest
+ *       or AMediaDrm_getKeyRequestWithDefaultUrlAndType or until the
+ *       MediaDrm object is released.
+ *   2. keyRequestSize will be set to the size of the request.
+ *   3. defaultUrl will be set to the recommended URL to deliver the key request.
+ *      The defaultUrl pointer will reference a NULL terminated URL string.
+ *      It will be UTF-8 encoded and have the same lifetime as the key request data
+ *      that the keyRequest pointer references. Passing in NULL means you don't need
+ *      it to be reported.
+ *   4. keyRequestType will be set to the key request type. Passing in NULL means
+ *      you don't need it to be reported.
+ *
+ * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
+ * problem with the device certificate.
+ *
+ * Available since API level 33.
+ */
+media_status_t AMediaDrm_getKeyRequestWithDefaultUrlAndType(AMediaDrm *,
+        const AMediaDrmScope *scope, const uint8_t *init, size_t initSize,
+        const char *mimeType, AMediaDrmKeyType keyType,
+        const AMediaDrmKeyValue *optionalParameters,
+        size_t numOptionalParameters, const uint8_t **keyRequest,
+        size_t *keyRequestSize, const char **defaultUrl,
+        AMediaDrmKeyRequestType *keyRequestType) __INTRODUCED_IN(__ANDROID_API_T__);
+
+/**
  * A key response is received from the license server by the app, then it is
  * provided to the DRM engine plugin using provideKeyResponse.  When the
  * response is for an offline key request, a keySetId is returned that can be
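
For context, a minimal sketch of how an NDK client might call the new AMediaDrm_getKeyRequestWithDefaultUrlAndType() entry point added above; the UUID, init data, MIME type and the requestKeys() helper are illustrative placeholders, not part of this change.

#include <cstdio>
#include <media/NdkMediaDrm.h>

// Hypothetical helper: opens a session and fetches a streaming key request,
// also retrieving the recommended license URL and the request type.
static media_status_t requestKeys(const uint8_t uuid[16],
                                  const uint8_t* initData, size_t initSize) {
    AMediaDrm* drm = AMediaDrm_createByUUID(uuid);
    if (drm == nullptr) return AMEDIA_ERROR_UNSUPPORTED;

    AMediaDrmSessionId sessionId;
    media_status_t status = AMediaDrm_openSession(drm, &sessionId);
    if (status != AMEDIA_OK) {
        AMediaDrm_release(drm);
        return status;
    }

    const uint8_t* keyRequest = nullptr;
    size_t keyRequestSize = 0;
    const char* defaultUrl = nullptr;      // pass NULL instead of &defaultUrl if not needed
    AMediaDrmKeyRequestType requestType;   // pass NULL instead of &requestType if not needed
    status = AMediaDrm_getKeyRequestWithDefaultUrlAndType(
            drm, &sessionId, initData, initSize, "video/mp4", KEY_TYPE_STREAMING,
            nullptr /*optionalParameters*/, 0 /*numOptionalParameters*/,
            &keyRequest, &keyRequestSize, &defaultUrl, &requestType);
    if (status == AMEDIA_OK) {
        // keyRequest/keyRequestSize would be delivered to defaultUrl (or the app's
        // own license server); requestType distinguishes initial/renewal/release.
        printf("key request: %zu bytes, type %d, url %s\n",
               keyRequestSize, (int)requestType, defaultUrl ? defaultUrl : "(none)");
    }

    AMediaDrm_closeSession(drm, &sessionId);
    AMediaDrm_release(drm);
    return status;
}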
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 6f275c7..b228945 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -229,6 +229,7 @@
     AMediaDrm_decrypt;
     AMediaDrm_encrypt;
     AMediaDrm_getKeyRequest;
+    AMediaDrm_getKeyRequestWithDefaultUrlAndType; # introduced=Tiramisu
     AMediaDrm_getPropertyByteArray;
     AMediaDrm_getPropertyString;
     AMediaDrm_getProvisionRequest;
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 88b822d..c1c7df5 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -34,6 +34,7 @@
         "SchedulingPolicyService.cpp",
         "ServiceUtilities.cpp",
         "TimeCheck.cpp",
+        "TimerThread.cpp",
     ],
     static_libs: [
         "libc_malloc_debug_backtrace",
@@ -118,3 +119,12 @@
 
     export_include_dirs: ["include"],
 }
+
+cc_test {
+    name: "libmediautils_test",
+    srcs: ["TimerThread-test.cpp"],
+    shared_libs: [
+      "libmediautils",
+      "libutils",
+    ]
+}
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 42f48a5..1ab5bc1 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -45,6 +45,7 @@
 static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
 static const String16 sModifyPhoneState("android.permission.MODIFY_PHONE_STATE");
 static const String16 sModifyAudioRouting("android.permission.MODIFY_AUDIO_ROUTING");
+static const String16 sCallAudioInterception("android.permission.CALL_AUDIO_INTERCEPTION");
 
 static String16 resolveCallingPackage(PermissionController& permissionController,
         const std::optional<String16> opPackageName, uid_t uid) {
@@ -71,6 +72,7 @@
   switch (source) {
     case AUDIO_SOURCE_HOTWORD:
       return AppOpsManager::OP_RECORD_AUDIO_HOTWORD;
+    case AUDIO_SOURCE_ECHO_REFERENCE: // fallthrough
     case AUDIO_SOURCE_REMOTE_SUBMIX:
       return AppOpsManager::OP_RECORD_AUDIO_OUTPUT;
     case AUDIO_SOURCE_VOICE_DOWNLINK:
@@ -218,6 +220,17 @@
     return ok;
 }
 
+bool accessUltrasoundAllowed(const AttributionSourceState& attributionSource) {
+    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+    uid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
+    if (isAudioServerOrRootUid(uid)) return true;
+    static const String16 sAccessUltrasound(
+        "android.permission.ACCESS_ULTRASOUND");
+    bool ok = PermissionCache::checkPermission(sAccessUltrasound, pid, uid);
+    if (!ok) ALOGE("Request requires android.permission.ACCESS_ULTRASOUND");
+    return ok;
+}
+
 bool captureHotwordAllowed(const AttributionSourceState& attributionSource) {
     // CAPTURE_AUDIO_HOTWORD permission implies RECORD_AUDIO permission
     bool ok = recordingAllowed(attributionSource);
@@ -308,6 +321,17 @@
     return ok;
 }
 
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource) {
+    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
+
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkPermission(sCallAudioInterception, pid, uid);
+    if (!ok) ALOGV("%s(): android.permission.CALL_AUDIO_INTERCEPTION denied for uid %d",
+        __func__, uid);
+    return ok;
+}
+
 AttributionSourceState getCallingAttributionSource() {
     AttributionSourceState attributionSource = AttributionSourceState();
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
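
As a rough illustration of how a service entry point might consult the permission helpers added above, here is a minimal sketch; startCallInterception() is a hypothetical function and not part of this change.

#include <android/content/AttributionSourceState.h>
#include <mediautils/ServiceUtilities.h>
#include <utils/Errors.h>

namespace android {

// Hypothetical service-side gate for call-audio interception.
status_t startCallInterception(const content::AttributionSourceState& attributionSource) {
    if (!callAudioInterceptionAllowed(attributionSource)) {
        // Caller lacks android.permission.CALL_AUDIO_INTERCEPTION.
        return PERMISSION_DENIED;
    }
    // ... proceed to set up AUDIO_SOURCE_VOICE_UPLINK / _DOWNLINK capture ...
    return NO_ERROR;
}

}  // namespace android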
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 819e146..2b765cc 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -16,13 +16,25 @@
 
 #define LOG_TAG "TimeCheck"
 
-#include <utils/Log.h>
-#include <mediautils/TimeCheck.h>
+#include <optional>
+#include <sstream>
+
 #include <mediautils/EventLog.h>
+#include <mediautils/TimeCheck.h>
+#include <utils/Log.h>
 #include "debuggerd/handler.h"
 
 namespace android {
 
+namespace {
+
+std::string formatTime(std::chrono::system_clock::time_point t) {
+    auto msSinceEpoch = std::chrono::round<std::chrono::milliseconds>(t.time_since_epoch());
+    return (std::ostringstream() << msSinceEpoch.count()).str();
+}
+
+}  // namespace
+
 // Audio HAL server pids vector used to generate audio HAL processes tombstone
 // when audioserver watchdog triggers.
 // We use a lockless storage to avoid potential deadlocks in the context of watchdog
@@ -58,84 +70,39 @@
 }
 
 /* static */
-sp<TimeCheck::TimeCheckThread> TimeCheck::getTimeCheckThread()
-{
-    static sp<TimeCheck::TimeCheckThread> sTimeCheckThread = new TimeCheck::TimeCheckThread();
+TimerThread* TimeCheck::getTimeCheckThread() {
+    static TimerThread* sTimeCheckThread = new TimerThread();
     return sTimeCheckThread;
 }
 
-TimeCheck::TimeCheck(const char *tag, uint32_t timeoutMs)
-    : mEndTimeNs(getTimeCheckThread()->startMonitoring(tag, timeoutMs))
-{
-}
+TimeCheck::TimeCheck(const char* tag, uint32_t timeoutMs)
+    : mTimerHandle(getTimeCheckThread()->scheduleTask(
+              [tag, startTime = std::chrono::system_clock::now()] { crash(tag, startTime); },
+              std::chrono::milliseconds(timeoutMs))) {}
 
 TimeCheck::~TimeCheck() {
-    getTimeCheckThread()->stopMonitoring(mEndTimeNs);
+    getTimeCheckThread()->cancelTask(mTimerHandle);
 }
 
-TimeCheck::TimeCheckThread::~TimeCheckThread()
-{
-    AutoMutex _l(mMutex);
-    requestExit();
-    mMonitorRequests.clear();
-    mCond.signal();
-}
+/* static */
+void TimeCheck::crash(const char* tag, std::chrono::system_clock::time_point startTime) {
+    std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
 
-nsecs_t TimeCheck::TimeCheckThread::startMonitoring(const char *tag, uint32_t timeoutMs) {
-    Mutex::Autolock _l(mMutex);
-    nsecs_t endTimeNs = systemTime() + milliseconds(timeoutMs);
-    for (; mMonitorRequests.indexOfKey(endTimeNs) >= 0; ++endTimeNs);
-    mMonitorRequests.add(endTimeNs, tag);
-    mCond.signal();
-    return endTimeNs;
-}
-
-void TimeCheck::TimeCheckThread::stopMonitoring(nsecs_t endTimeNs) {
-    Mutex::Autolock _l(mMutex);
-    mMonitorRequests.removeItem(endTimeNs);
-    mCond.signal();
-}
-
-bool TimeCheck::TimeCheckThread::threadLoop()
-{
-    status_t status = TIMED_OUT;
-    {
-        AutoMutex _l(mMutex);
-
-        if (exitPending()) {
-            return false;
+    // Generate audio HAL processes tombstones and allow time to complete
+    // before forcing restart
+    std::vector<pid_t> pids = getAudioHalPids();
+    if (pids.size() != 0) {
+        for (const auto& pid : pids) {
+            ALOGI("requesting tombstone for pid: %d", pid);
+            sigqueue(pid, DEBUGGER_SIGNAL, {.sival_int = 0});
         }
-
-        nsecs_t endTimeNs = INT64_MAX;
-        const char *tag = "<unspecified>";
-        // KeyedVector mMonitorRequests is ordered so take first entry as next timeout
-        if (mMonitorRequests.size() != 0) {
-            endTimeNs = mMonitorRequests.keyAt(0);
-            tag = mMonitorRequests.valueAt(0);
-        }
-
-        const nsecs_t waitTimeNs = endTimeNs - systemTime();
-        if (waitTimeNs > 0) {
-            status = mCond.waitRelative(mMutex, waitTimeNs);
-        }
-        if (status != NO_ERROR) {
-            // Generate audio HAL processes tombstones and allow time to complete
-            // before forcing restart
-            std::vector<pid_t> pids = getAudioHalPids();
-            if (pids.size() != 0) {
-                for (const auto& pid : pids) {
-                    ALOGI("requesting tombstone for pid: %d", pid);
-                    sigqueue(pid, DEBUGGER_SIGNAL, {.sival_int = 0});
-                }
-                sleep(1);
-            } else {
-                ALOGI("No HAL process pid available, skipping tombstones");
-            }
-            LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
-            LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
-        }
+        sleep(1);
+    } else {
+        ALOGI("No HAL process pid available, skipping tombstones");
     }
-    return true;
+    LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
+    LOG_ALWAYS_FATAL("TimeCheck timeout for %s (start=%s, end=%s)", tag,
+                     formatTime(startTime).c_str(), formatTime(endTime).c_str());
 }
 
-}; // namespace android
+};  // namespace android
diff --git a/media/utils/TimerThread-test.cpp b/media/utils/TimerThread-test.cpp
new file mode 100644
index 0000000..ee8a811
--- /dev/null
+++ b/media/utils/TimerThread-test.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <chrono>
+#include <thread>
+#include <gtest/gtest.h>
+#include <mediautils/TimerThread.h>
+
+using namespace std::chrono_literals;
+
+namespace android {
+namespace {
+
+constexpr auto kJitter = 10ms;
+
+TEST(TimerThread, Basic) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms - kJitter);
+    ASSERT_FALSE(taskRan);
+    std::this_thread::sleep_for(2 * kJitter);
+    ASSERT_TRUE(taskRan);
+}
+
+TEST(TimerThread, Cancel) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    TimerThread::Handle handle = thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms - kJitter);
+    ASSERT_FALSE(taskRan);
+    thread.cancelTask(handle);
+    std::this_thread::sleep_for(2 * kJitter);
+    ASSERT_FALSE(taskRan);
+}
+
+TEST(TimerThread, CancelAfterRun) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    TimerThread::Handle handle = thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms + kJitter);
+    ASSERT_TRUE(taskRan);
+    thread.cancelTask(handle);
+}
+
+TEST(TimerThread, MultipleTasks) {
+    std::array<std::atomic<bool>, 6> taskRan{};
+    TimerThread thread;
+
+    auto startTime = std::chrono::steady_clock::now();
+
+    thread.scheduleTask([&taskRan] { taskRan[0] = true; }, 300ms);
+    thread.scheduleTask([&taskRan] { taskRan[1] = true; }, 100ms);
+    thread.scheduleTask([&taskRan] { taskRan[2] = true; }, 200ms);
+    thread.scheduleTask([&taskRan] { taskRan[3] = true; }, 400ms);
+    auto handle4 = thread.scheduleTask([&taskRan] { taskRan[4] = true; }, 200ms);
+    thread.scheduleTask([&taskRan] { taskRan[5] = true; }, 200ms);
+
+    // Task 1 should trigger around 100ms.
+    std::this_thread::sleep_until(startTime + 100ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_FALSE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 100ms + kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    // Cancel task 4 before it gets a chance to run.
+    thread.cancelTask(handle4);
+
+    // Tasks 2 and 5 should trigger around 200ms.
+    std::this_thread::sleep_until(startTime + 200ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 200ms + kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    // Task 0 should trigger around 300ms.
+    std::this_thread::sleep_until(startTime + 300ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 300ms + kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    // Task 3 should trigger around 400ms.
+    std::this_thread::sleep_until(startTime + 400ms - kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 400ms + kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_TRUE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+}
+
+
+}  // namespace
+}  // namespace android
diff --git a/media/utils/TimerThread.cpp b/media/utils/TimerThread.cpp
new file mode 100644
index 0000000..3c95798
--- /dev/null
+++ b/media/utils/TimerThread.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TimerThread"
+
+#include <optional>
+
+#include <mediautils/TimerThread.h>
+#include <utils/ThreadDefs.h>
+
+namespace android {
+
+TimerThread::TimerThread() : mThread([this] { threadFunc(); }) {
+    pthread_setname_np(mThread.native_handle(), "TimeCheckThread");
+    pthread_setschedprio(mThread.native_handle(), PRIORITY_URGENT_AUDIO);
+}
+
+TimerThread::~TimerThread() {
+    {
+        std::lock_guard _l(mMutex);
+        mShouldExit = true;
+        mCond.notify_all();
+    }
+    mThread.join();
+}
+
+TimerThread::Handle TimerThread::scheduleTaskAtDeadline(std::function<void()>&& func,
+                                                        TimePoint deadline) {
+    std::lock_guard _l(mMutex);
+
+    // To avoid key collisions, advance by 1 tick until the key is unique.
+    for (; mMonitorRequests.find(deadline) != mMonitorRequests.end();
+         deadline += TimePoint::duration(1))
+        ;
+    mMonitorRequests.emplace(deadline, std::move(func));
+    mCond.notify_all();
+    return deadline;
+}
+
+void TimerThread::cancelTask(Handle handle) {
+    std::lock_guard _l(mMutex);
+    mMonitorRequests.erase(handle);
+}
+
+void TimerThread::threadFunc() {
+    std::unique_lock _l(mMutex);
+
+    while (!mShouldExit) {
+        if (!mMonitorRequests.empty()) {
+            TimePoint nextDeadline = mMonitorRequests.begin()->first;
+            if (nextDeadline < std::chrono::steady_clock::now()) {
+                // Deadline expired.
+                mMonitorRequests.begin()->second();
+                mMonitorRequests.erase(mMonitorRequests.begin());
+            }
+            mCond.wait_until(_l, nextDeadline);
+        } else {
+            mCond.wait(_l);
+        }
+    }
+}
+
+}  // namespace android
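
A small sketch of the handle semantics implied by scheduleTaskAtDeadline() above: because each handle is the (possibly tick-adjusted) map key, two tasks scheduled with the same timeout still receive distinct, individually cancellable handles. The handlesAreUnique() function is illustrative only.

#include <cassert>
#include <chrono>

#include <mediautils/TimerThread.h>

// Two back-to-back tasks with identical timeouts get different handles, since a
// colliding deadline is advanced by one steady_clock tick before insertion.
void handlesAreUnique(android::TimerThread& timer) {
    using namespace std::chrono_literals;
    auto h1 = timer.scheduleTask([] {}, 100ms);
    auto h2 = timer.scheduleTask([] {}, 100ms);
    assert(h1 != h2);      // each handle maps to exactly one pending entry
    timer.cancelTask(h1);  // cancels only the first task
    timer.cancelTask(h2);
}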
diff --git a/media/utils/fuzzers/Android.bp b/media/utils/fuzzers/Android.bp
index c4dc24f..d26e6c2 100644
--- a/media/utils/fuzzers/Android.bp
+++ b/media/utils/fuzzers/Android.bp
@@ -32,11 +32,6 @@
         "bionic_libc_platform_headers",
         "libmedia_headers",
     ],
-
-    include_dirs: [
-        // For DEBUGGER_SIGNAL
-        "system/core/debuggerd/include",
-    ],
 }
 
 cc_fuzz {
diff --git a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
index 6e52512..51e8d7a 100644
--- a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
+++ b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
@@ -17,7 +17,7 @@
 #include <fcntl.h>
 
 #include <functional>
-#include <type_traits>
+#include <type_traits>
 
 #include <android/content/AttributionSourceState.h>
 #include "fuzzer/FuzzedDataProvider.h"
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 734313c..de20d55 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -61,6 +61,7 @@
 
 // Used for calls that should come from system server or internal.
 // Note: system server is multiprocess for multiple users.  audioserver is not.
+// Note: if this method is modified, also update the same method in SensorService.h.
 static inline bool isAudioServerOrSystemServerUid(uid_t uid) {
     return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
 }
@@ -95,6 +96,7 @@
 bool captureMediaOutputAllowed(const AttributionSourceState& attributionSource);
 bool captureTunerAudioInputAllowed(const AttributionSourceState& attributionSource);
 bool captureVoiceCommunicationOutputAllowed(const AttributionSourceState& attributionSource);
+bool accessUltrasoundAllowed(const AttributionSourceState& attributionSource);
 bool captureHotwordAllowed(const AttributionSourceState& attributionSource);
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
@@ -104,6 +106,7 @@
 bool dumpAllowed();
 bool modifyPhoneStateAllowed(const AttributionSourceState& attributionSource);
 bool bypassInterruptionPolicyAllowed(const AttributionSourceState& attributionSource);
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource);
 void purgePermissionCache();
 int32_t getOpForSource(audio_source_t source);
 
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index 5ba6d7c..0d6e80d 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -14,62 +14,33 @@
  * limitations under the License.
  */
 
+#pragma once
 
-#ifndef ANDROID_TIME_CHECK_H
-#define ANDROID_TIME_CHECK_H
-
-#include <utils/KeyedVector.h>
-#include <utils/Thread.h>
 #include <vector>
 
+#include <mediautils/TimerThread.h>
+
 namespace android {
 
 // A class monitoring execution time for a code block (scoped variable) and causing an assert
 // if it exceeds a certain time
 
 class TimeCheck {
-public:
-
+  public:
     // The default timeout is chosen to be less than system server watchdog timeout
     static constexpr uint32_t kDefaultTimeOutMs = 5000;
 
-            TimeCheck(const char *tag, uint32_t timeoutMs = kDefaultTimeOutMs);
-            ~TimeCheck();
-    static  void setAudioHalPids(const std::vector<pid_t>& pids);
-    static  std::vector<pid_t> getAudioHalPids();
+    TimeCheck(const char* tag, uint32_t timeoutMs = kDefaultTimeOutMs);
+    ~TimeCheck();
+    static void setAudioHalPids(const std::vector<pid_t>& pids);
+    static std::vector<pid_t> getAudioHalPids();
 
-private:
-
-    class TimeCheckThread : public Thread {
-    public:
-
-                            TimeCheckThread() {}
-        virtual             ~TimeCheckThread() override;
-
-                nsecs_t     startMonitoring(const char *tag, uint32_t timeoutMs);
-                void        stopMonitoring(nsecs_t endTimeNs);
-
-    private:
-
-                // RefBase
-        virtual void        onFirstRef() override { run("TimeCheckThread", PRIORITY_URGENT_AUDIO); }
-
-                // Thread
-        virtual bool        threadLoop() override;
-
-                Condition           mCond;
-                Mutex               mMutex;
-                // using the end time in ns as key is OK given the risk is low that two entries
-                // are added in such a way that <add time> + <timeout> are the same for both.
-                KeyedVector< nsecs_t, const char*>  mMonitorRequests;
-    };
-
-    static sp<TimeCheckThread> getTimeCheckThread();
+  private:
+    static TimerThread* getTimeCheckThread();
     static void accessAudioHalPids(std::vector<pid_t>* pids, bool update);
+    static void crash(const char* tag, std::chrono::system_clock::time_point startTime);
 
-    const           nsecs_t mEndTimeNs;
+    const TimerThread::Handle mTimerHandle;
 };
 
-}; // namespace android
-
-#endif  // ANDROID_TIME_CHECK_H
+};  // namespace android
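
To show how the reworked TimeCheck is meant to be used as a scoped watchdog, a minimal sketch follows; the function name and the 3000 ms timeout are illustrative, not taken from this change.

#include <mediautils/TimeCheck.h>

void blockingHalCall() {
    // Schedules a crash callback on the shared TimerThread; if this scope is still
    // alive after 3 seconds, TimeCheck::crash() aborts with start/end timestamps.
    android::TimeCheck timeCheck("blockingHalCall", 3000 /* timeoutMs */);
    // ... potentially blocking HAL or binder work ...
}   // ~TimeCheck() cancels the pending task on normal completion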
diff --git a/media/utils/include/mediautils/TimerThread.h b/media/utils/include/mediautils/TimerThread.h
new file mode 100644
index 0000000..cf457b8
--- /dev/null
+++ b/media/utils/include/mediautils/TimerThread.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <functional>
+#include <map>
+#include <mutex>
+#include <thread>
+
+#include <android-base/thread_annotations.h>
+
+namespace android {
+
+/**
+ * A thread for deferred execution of tasks, with cancellation.
+ */
+class TimerThread {
+  public:
+    using Handle = std::chrono::steady_clock::time_point;
+
+    TimerThread();
+    ~TimerThread();
+
+    /**
+     * Schedule a task to be executed in the future (`timeout` duration from now).
+     * Returns a handle that can be used for cancellation.
+     */
+    template <typename R, typename P>
+    Handle scheduleTask(std::function<void()>&& func, std::chrono::duration<R, P> timeout) {
+        auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(timeout);
+        return scheduleTaskAtDeadline(std::move(func), deadline);
+    }
+
+    /**
+     * Cancel a task, previously scheduled with scheduleTask().
+     * If the task has already executed, this is a no-op.
+     */
+    void cancelTask(Handle handle);
+
+  private:
+    using TimePoint = std::chrono::steady_clock::time_point;
+
+    std::condition_variable mCond;
+    std::mutex mMutex;
+    std::thread mThread;
+    std::map<TimePoint, std::function<void()>> mMonitorRequests GUARDED_BY(mMutex);
+    bool mShouldExit GUARDED_BY(mMutex) = false;
+
+    void threadFunc();
+    Handle scheduleTaskAtDeadline(std::function<void()>&& func, TimePoint deadline);
+};
+
+}  // namespace android
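
A brief usage sketch for the TimerThread interface declared above; the 500 ms timeout and the lambda body are placeholders.

#include <chrono>

#include <mediautils/TimerThread.h>

void scheduleWithCancel() {
    using namespace std::chrono_literals;
    android::TimerThread timer;

    // Defer a task by 500 ms; the returned Handle identifies the pending entry.
    android::TimerThread::Handle handle =
            timer.scheduleTask([] { /* timeout action */ }, 500ms);

    // ... do the guarded work ...

    // Cancel before the deadline fires; this is a no-op if the task already ran.
    timer.cancelTask(handle);
}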
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index fecc183..763c070 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -41,6 +41,7 @@
         "FastThreadState.cpp",
         "NBAIO_Tee.cpp",
         "PatchPanel.cpp",
+        "PropertyUtils.cpp",
         "SpdifStreamOut.cpp",
         "StateQueue.cpp",
         "Threads.cpp",
@@ -54,6 +55,7 @@
     ],
 
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioflinger-aidl-cpp",
         "audioclient-types-aidl-cpp",
         "av-types-aidl-cpp",
@@ -91,6 +93,7 @@
     ],
 
     header_libs: [
+        "libaaudio_headers",
         "libaudioclient_headers",
         "libaudiohal_headers",
         "libmedia_headers",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0718643..ed4666f 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -57,6 +57,7 @@
 
 #include "AudioFlinger.h"
 #include "NBAIO_Tee.h"
+#include "PropertyUtils.h"
 
 #include <media/AudioResamplerPublic.h>
 
@@ -103,7 +104,11 @@
 
 namespace android {
 
+#define MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION 7.0
+
 using media::IEffectClient;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
 using android::content::AttributionSourceState;
 
 static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
@@ -301,6 +306,11 @@
 
     mDevicesFactoryHalCallback = new DevicesFactoryHalCallbackImpl;
     mDevicesFactoryHal->setCallbackOnce(mDevicesFactoryHalCallback);
+
+    if (mDevicesFactoryHal->getHalVersion() <= MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+        mAAudioBurstsPerBuffer = getAAudioMixerBurstCountFromSystemProperty();
+        mAAudioHwBurstMinMicros = getAAudioHardwareBurstMinUsecFromSystemProperty();
+    }
 }
 
 status_t AudioFlinger::setAudioHalPids(const std::vector<pid_t>& pids) {
@@ -336,6 +346,44 @@
     return NO_ERROR;
 }
 
+status_t AudioFlinger::getMmapPolicyInfos(
+            AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    Mutex::Autolock _l(mLock);
+    if (const auto it = mPolicyInfos.find(policyType); it != mPolicyInfos.end()) {
+        *policyInfos = it->second;
+        return NO_ERROR;
+    }
+    if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+        AutoMutex lock(mHardwareLock);
+        for (size_t i = 0; i < mAudioHwDevs.size(); ++i) {
+            AudioHwDevice *dev = mAudioHwDevs.valueAt(i);
+            std::vector<AudioMMapPolicyInfo> infos;
+            status_t status = dev->getMmapPolicyInfos(policyType, &infos);
+            if (status != NO_ERROR) {
+                ALOGE("Failed to query mmap policy info of %d, error %d",
+                      mAudioHwDevs.keyAt(i), status);
+                continue;
+            }
+            policyInfos->insert(policyInfos->end(), infos.begin(), infos.end());
+        }
+        mPolicyInfos[policyType] = *policyInfos;
+    } else {
+        getMmapPolicyInfosFromSystemProperty(policyType, policyInfos);
+        mPolicyInfos[policyType] = *policyInfos;
+    }
+    return NO_ERROR;
+}
+
+int32_t AudioFlinger::getAAudioMixerBurstCount() {
+    Mutex::Autolock _l(mLock);
+    return mAAudioBurstsPerBuffer;
+}
+
+int32_t AudioFlinger::getAAudioHardwareBurstMinUsec() {
+    Mutex::Autolock _l(mLock);
+    return mAAudioHwBurstMinMicros;
+}
+
 // getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
 std::optional<media::AudioVibratorInfo> AudioFlinger::getDefaultVibratorInfo_l() {
     if (mAudioVibratorInfos.empty()) {
@@ -567,10 +615,12 @@
     String8 result;
 
     result.append("Clients:\n");
+    result.append("   pid    heap_size\n");
     for (size_t i = 0; i < mClients.size(); ++i) {
         sp<Client> client = mClients.valueAt(i).promote();
         if (client != 0) {
-            result.appendFormat("  pid: %d\n", client->pid());
+            result.appendFormat("%6d %12zu\n", client->pid(),
+                    client->heap()->getMemoryHeap()->getSize());
         }
     }
 
@@ -1875,13 +1925,13 @@
     }
 }
 
-void AudioFlinger::ioConfigChanged(audio_io_config_event event,
+void AudioFlinger::ioConfigChanged(audio_io_config_event_t event,
                                    const sp<AudioIoDescriptor>& ioDesc,
                                    pid_t pid) {
+    media::AudioIoConfigEvent eventAidl = VALUE_OR_FATAL(
+            legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(event));
     media::AudioIoDescriptor descAidl = VALUE_OR_FATAL(
             legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(ioDesc));
-    media::AudioIoConfigEvent eventAidl = VALUE_OR_FATAL(
-            legacy2aidl_audio_io_config_event_AudioIoConfigEvent(event));
 
     Mutex::Autolock _l(mClientLock);
     size_t size = mNotificationClients.size();
@@ -2144,6 +2194,20 @@
             goto Exit;
         }
 
+        if (recordTrack->isFastTrack()) {
+            output.serverConfig = {
+                    thread->sampleRate(),
+                    thread->channelMask(),
+                    thread->format()
+            };
+        } else {
+            output.serverConfig = {
+                    recordTrack->sampleRate(),
+                    recordTrack->channelMask(),
+                    recordTrack->format()
+            };
+        }
+
         // Check if one effect chain was awaiting for an AudioRecord to be created on this
         // session and move it to this thread.
         sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
@@ -2274,6 +2338,17 @@
         mHardwareStatus = AUDIO_HW_IDLE;
     }
 
+    if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+        if (int32_t mixerBursts = dev->getAAudioMixerBurstCount();
+            mixerBursts > mAAudioBurstsPerBuffer) {
+            mAAudioBurstsPerBuffer = mixerBursts;
+        }
+        if (int32_t hwBurstMinMicros = dev->getAAudioHardwareBurstMinUsec();
+            hwBurstMinMicros < mAAudioHwBurstMinMicros || mAAudioHwBurstMinMicros == 0) {
+            mAAudioHwBurstMinMicros = hwBurstMinMicros;
+        }
+    }
+
     mAudioHwDevs.add(handle, audioDevice);
 
     ALOGI("loadHwModule() Loaded %s audio interface, handle %d", name, handle);
@@ -2617,9 +2692,9 @@
     audio_module_handle_t module = VALUE_OR_RETURN_STATUS(
             aidl2legacy_int32_t_audio_module_handle_t(request.module));
     audio_config_t halConfig = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioConfig_audio_config_t(request.halConfig));
+            aidl2legacy_AudioConfig_audio_config_t(request.halConfig, false /*isInput*/));
     audio_config_base_t mixerConfig = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioConfigBase_audio_config_base_t(request.mixerConfig));
+            aidl2legacy_AudioConfigBase_audio_config_base_t(request.mixerConfig, false/*isInput*/));
     sp<DeviceDescriptorBase> device = VALUE_OR_RETURN_STATUS(
             aidl2legacy_DeviceDescriptorBase(request.device));
     audio_output_flags_t flags = VALUE_OR_RETURN_STATUS(
@@ -2672,8 +2747,8 @@
             mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
         }
         response->output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
-        response->config =
-                VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(halConfig));
+        response->config = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_config_t_AudioConfig(halConfig, false /*isInput*/));
         response->latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(latencyMs));
         response->flags = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
@@ -2759,9 +2834,7 @@
             mMmapThreads.removeItem(output);
             ALOGD("closing mmapThread %p", mmapThread.get());
         }
-        const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
-        ioDesc->mIoHandle = output;
-        ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
+        ioConfigChanged(AUDIO_OUTPUT_CLOSED, sp<AudioIoDescriptor>::make(output));
         mPatchPanel.notifyStreamClosed(output);
     }
     // The thread entity (active unit of execution) is no longer running here,
@@ -2834,16 +2907,16 @@
 {
     Mutex::Autolock _l(mLock);
 
-    if (request.device.type == AUDIO_DEVICE_NONE) {
+    AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceTypeAddress(request.device));
+    if (device.mType == AUDIO_DEVICE_NONE) {
         return BAD_VALUE;
     }
 
     audio_io_handle_t input = VALUE_OR_RETURN_STATUS(
             aidl2legacy_int32_t_audio_io_handle_t(request.input));
     audio_config_t config = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioConfig_audio_config_t(request.config));
-    AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioDeviceTypeAddress(request.device));
+            aidl2legacy_AudioConfig_audio_config_t(request.config, true /*isInput*/));
 
     sp<ThreadBase> thread = openInput_l(
             VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_module_handle_t(request.module)),
@@ -2851,13 +2924,14 @@
             &config,
             device.mType,
             device.address().c_str(),
-            VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSourceType_audio_source_t(request.source)),
+            VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSource_audio_source_t(request.source)),
             VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_input_flags_t_mask(request.flags)),
             AUDIO_DEVICE_NONE,
             String8{});
 
     response->input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
-    response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+    response->config = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
     response->device = request.device;
 
     if (thread != 0) {
@@ -3019,9 +3093,7 @@
             dumpToThreadLog_l(mmapThread);
             mMmapThreads.removeItem(input);
         }
-        const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
-        ioDesc->mIoHandle = input;
-        ioConfigChanged(AUDIO_INPUT_CLOSED, ioDesc);
+        ioConfigChanged(AUDIO_INPUT_CLOSED, sp<AudioIoDescriptor>::make(input));
     }
     // FIXME: calling thread->exit() without mLock held should not be needed anymore now that
     // we have a different lock for notification client
@@ -4012,7 +4084,6 @@
     // transfer all effects one by one so that new effect chain is created on new thread with
     // correct buffer sizes and audio parameters and effect engines reconfigured accordingly
     sp<EffectChain> dstChain;
-    uint32_t strategy = 0; // prevent compiler warning
     sp<EffectModule> effect = chain->getEffectFromId_l(0);
     Vector< sp<EffectModule> > removed;
     status_t status = NO_ERROR;
@@ -4037,7 +4108,6 @@
                 status = NO_INIT;
                 break;
             }
-            strategy = dstChain->strategy();
         }
         effect = chain->getEffectFromId_l(0);
     }
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 8fcd6e4..8c546cc 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -282,6 +282,14 @@
     virtual status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs);
 
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+    virtual int32_t getAAudioMixerBurstCount();
+
+    virtual int32_t getAAudioHardwareBurstMinUsec();
+
     status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
         const std::function<status_t()>& delegate) override;
 
@@ -750,7 +758,7 @@
               // no range check, AudioFlinger::mLock held
               bool streamMute_l(audio_stream_type_t stream) const
                                 { return mStreamTypes[stream].mute; }
-              void ioConfigChanged(audio_io_config_event event,
+              void ioConfigChanged(audio_io_config_event_t event,
                                    const sp<AudioIoDescriptor>& ioDesc,
                                    pid_t pid = 0);
 
@@ -1004,6 +1012,11 @@
 
     // Keep in sync with java definition in media/java/android/media/AudioRecord.java
     static constexpr int32_t kMaxSharedAudioHistoryMs = 5000;
+
+    std::map<media::audio::common::AudioMMapPolicyType,
+             std::vector<media::audio::common::AudioMMapPolicyInfo>> mPolicyInfos;
+    int32_t mAAudioBurstsPerBuffer = 0;
+    int32_t mAAudioHwBurstMinMicros = 0;
 };
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 16b25f6..dee6161 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -29,6 +29,9 @@
 
 namespace android {
 
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+
 // ----------------------------------------------------------------------------
 
 status_t AudioHwDevice::openOutputStream(
@@ -102,5 +105,18 @@
     return mHwDevice->getAudioPort(port);
 }
 
+status_t AudioHwDevice::getMmapPolicyInfos(
+            AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) const {
+    return mHwDevice->getMmapPolicyInfos(policyType, policyInfos);
+}
+
+int32_t AudioHwDevice::getAAudioMixerBurstCount() const {
+    return mHwDevice->getAAudioMixerBurstCount();
+}
+
+int32_t AudioHwDevice::getAAudioHardwareBurstMinUsec() const {
+    return mHwDevice->getAAudioHardwareBurstMinUsec();
+}
+
 
 }; // namespace android
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index fc2c693..8c5d239 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -22,6 +22,8 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/audiohal/DeviceHalInterface.h>
 #include <utils/Errors.h>
 #include <system/audio.h>
@@ -85,6 +87,14 @@
 
     status_t getAudioPort(struct audio_port_v7 *port) const;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) const;
+
+    int32_t getAAudioMixerBurstCount() const;
+
+    int32_t getAAudioHardwareBurstMinUsec() const;
+
 private:
     const audio_module_handle_t mHandle;
     const char * const          mModuleName;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 8d04edb..b748f9d 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -3260,6 +3260,8 @@
         } else {
             mHalEffect->setDevices({mDevice});
         }
+        mHalEffect->configure();
+
         *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
                                    mNotifyFramesProcessed);
         status = (*handle)->initCheck();
@@ -3308,8 +3310,14 @@
 }
 
 void AudioFlinger::DeviceEffectProxy::onReleasePatch(audio_patch_handle_t patchHandle) {
-    Mutex::Autolock _l(mProxyLock);
-    mEffectHandles.erase(patchHandle);
+    sp<EffectHandle> effect;
+    {
+        Mutex::Autolock _l(mProxyLock);
+        if (mEffectHandles.find(patchHandle) != mEffectHandles.end()) {
+            effect = mEffectHandles.at(patchHandle);
+            mEffectHandles.erase(patchHandle);
+        }
+    }
 }
 
 
@@ -3317,6 +3325,7 @@
 {
     Mutex::Autolock _l(mProxyLock);
     if (effect == mHalEffect) {
+        mHalEffect->release_l();
         mHalEffect.clear();
         mDevicePort.id = AUDIO_PORT_HANDLE_NONE;
     }
@@ -3464,7 +3473,7 @@
     if (proxy == nullptr) {
         return NO_INIT;
     }
-    return proxy->addEffectToHal(effect);
+    return proxy->removeEffectFromHal(effect);
 }
 
 bool AudioFlinger::DeviceEffectProxy::ProxyCallback::isOutput() const {
@@ -3516,4 +3525,22 @@
     return proxy->channelCount();
 }
 
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectEnable(
+        const sp<EffectBase>& effectBase) {
+    sp<EffectModule> effect = effectBase->asEffectModule();
+    if (effect == nullptr) {
+        return;
+    }
+    effect->start();
+}
+
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectDisable(
+        const sp<EffectBase>& effectBase) {
+    sp<EffectModule> effect = effectBase->asEffectModule();
+    if (effect == nullptr) {
+        return;
+    }
+    effect->stop();
+}
+
 } // namespace android
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 5ebf483..e2bea67 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -766,8 +766,8 @@
         void resetVolume() override {}
         product_strategy_t strategy() const override  { return static_cast<product_strategy_t>(0); }
         int32_t activeTrackCnt() const override { return 0; }
-        void onEffectEnable(const sp<EffectBase>& effect __unused) override {}
-        void onEffectDisable(const sp<EffectBase>& effect __unused) override {}
+        void onEffectEnable(const sp<EffectBase>& effect __unused) override;
+        void onEffectDisable(const sp<EffectBase>& effect __unused) override;
 
         wp<EffectChain> chain() const override { return nullptr; }
 
diff --git a/services/audioflinger/PropertyUtils.cpp b/services/audioflinger/PropertyUtils.cpp
new file mode 100644
index 0000000..65e2533
--- /dev/null
+++ b/services/audioflinger/PropertyUtils.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <cutils/properties.h>
+
+#include "PropertyUtils.h"
+
+namespace android {
+
+using media::audio::common::AudioMMapPolicy;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioMMapPolicyInfo;
+
+std::string getMmapPolicyProperty(AudioMMapPolicyType policyType) {
+    switch (policyType) {
+        case AudioMMapPolicyType::DEFAULT:
+            return "aaudio.mmap_policy";
+        case AudioMMapPolicyType::EXCLUSIVE:
+            return "aaudio.mmap_exclusive_policy";
+        default:
+            return "";
+    }
+}
+
+int getDefaultPolicyFromType(AudioMMapPolicyType policyType) {
+    switch (policyType) {
+        case AudioMMapPolicyType::EXCLUSIVE:
+            return AAUDIO_UNSPECIFIED;
+        case AudioMMapPolicyType::DEFAULT:
+        default:
+            return AAUDIO_POLICY_NEVER;
+    }
+}
+
+AudioMMapPolicy legacy2aidl_aaudio_policy_t_AudioMMapPolicy(aaudio_policy_t legacy) {
+    switch (legacy) {
+        case AAUDIO_POLICY_NEVER:
+            return AudioMMapPolicy::NEVER;
+        case AAUDIO_POLICY_AUTO:
+            return AudioMMapPolicy::AUTO;
+        case AAUDIO_POLICY_ALWAYS:
+            return AudioMMapPolicy::ALWAYS;
+        case AAUDIO_UNSPECIFIED:
+            return AudioMMapPolicy::UNSPECIFIED;
+        default:
+            ALOGE("%s unknown aaudio policy: %d", __func__, legacy);
+            return AudioMMapPolicy::UNSPECIFIED;
+    }
+}
+
+status_t getMmapPolicyInfosFromSystemProperty(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    AudioMMapPolicyInfo policyInfo;
+    const std::string propertyStr = getMmapPolicyProperty(policyType);
+    if (propertyStr.empty()) {
+        return BAD_VALUE;
+    }
+    policyInfo.mmapPolicy = legacy2aidl_aaudio_policy_t_AudioMMapPolicy(
+            property_get_int32(propertyStr.c_str(), getDefaultPolicyFromType(policyType)));
+    policyInfos->push_back(policyInfo);
+    return NO_ERROR;
+}
+
+int32_t getAAudioMixerBurstCountFromSystemProperty() {
+    static const int32_t sDefaultBursts = 2; // arbitrary, use 2 for double buffered
+    static const int32_t sMaxBursts = 1024; // arbitrary
+    static const char* sPropMixerBursts = "aaudio.mixer_bursts";
+    int32_t prop = property_get_int32(sPropMixerBursts, sDefaultBursts);
+    if (prop <= 0 || prop > sMaxBursts) {
+        ALOGE("%s: invalid value %d, use default %d", __func__, prop, sDefaultBursts);
+        prop = sDefaultBursts;
+    }
+    return prop;
+}
+
+int32_t getAAudioHardwareBurstMinUsecFromSystemProperty() {
+    static const int32_t sDefaultMicros = 1000; // arbitrary
+    static const int32_t sMaxMicros = 1000 * 1000; // arbitrary
+    static const char* sPropHwBurstMinUsec = "aaudio.hw_burst_min_usec";
+    int32_t prop = property_get_int32(sPropHwBurstMinUsec, sDefaultMicros);
+    if (prop <= 0 || prop > sMaxMicros) {
+        ALOGE("%s invalid value %d, use default %d", __func__, prop, sDefaultMicros);
+        prop = sDefaultMicros;
+    }
+    return prop;
+}
+
+} // namespace android
diff --git a/services/audioflinger/PropertyUtils.h b/services/audioflinger/PropertyUtils.h
new file mode 100644
index 0000000..fbf651a
--- /dev/null
+++ b/services/audioflinger/PropertyUtils.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+
+namespace android {
+
+status_t getMmapPolicyInfosFromSystemProperty(
+        media::audio::common::AudioMMapPolicyType policyType,
+        std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+int32_t getAAudioMixerBurstCountFromSystemProperty();
+
+int32_t getAAudioHardwareBurstMinUsecFromSystemProperty();
+
+} // namespace android
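For orientation, a minimal sketch of how these property-backed helpers could be exercised in isolation. Everything below is illustrative and not part of this change; it assumes an Android build environment providing cutils, android-base and the generated AIDL types:

    #include <vector>
    #include <android-base/logging.h>
    #include "PropertyUtils.h"

    using android::getAAudioHardwareBurstMinUsecFromSystemProperty;
    using android::getAAudioMixerBurstCountFromSystemProperty;
    using android::getMmapPolicyInfosFromSystemProperty;
    using android::media::audio::common::AudioMMapPolicyInfo;
    using android::media::audio::common::AudioMMapPolicyType;

    int main() {
        std::vector<AudioMMapPolicyInfo> infos;
        // DEFAULT reads "aaudio.mmap_policy"; EXCLUSIVE reads "aaudio.mmap_exclusive_policy".
        if (getMmapPolicyInfosFromSystemProperty(AudioMMapPolicyType::DEFAULT, &infos) ==
                android::NO_ERROR) {
            LOG(INFO) << "mmap policy entries from properties: " << infos.size();
        }
        // Both getters clamp out-of-range property values back to their defaults (2 and 1000).
        LOG(INFO) << "mixer bursts: " << getAAudioMixerBurstCountFromSystemProperty()
                  << ", hw burst min usec: " << getAAudioHardwareBurstMinUsecFromSystemProperty();
        return 0;
    }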
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e0da037..dd278f0 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -626,7 +626,7 @@
     return status;
 }
 
-void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event event, pid_t pid,
+void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event_t event, pid_t pid,
                                                  audio_port_handle_t portId)
 {
     Mutex::Autolock _l(mLock);
@@ -634,7 +634,7 @@
 }
 
 // sendIoConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid,
+void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid,
                                                    audio_port_handle_t portId)
 {
     // The audio statistics history is exponentially weighted to forget events
@@ -644,6 +644,7 @@
     mIoJitterMs.reset();
     mLatencyMs.reset();
     mProcessTimeMs.reset();
+    mMonopipePipeDepthStats.reset();
     mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);
 
     sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid, portId);
@@ -988,6 +989,12 @@
                 isOutput() ? "write" : "read",
                 mLatencyMs.toString().c_str());
     }
+
+    if (mMonopipePipeDepthStats.getN() > 0) {
+        dprintf(fd, "  Monopipe %s pipe depth stats: %s\n",
+            isOutput() ? "write" : "read",
+            mMonopipePipeDepthStats.toString().c_str());
+    }
 }
 
 void AudioFlinger::ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args)
@@ -1946,6 +1953,12 @@
         item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
         item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
     }
+    if (mMonopipePipeDepthStats.getN() > 0) {
+        item->setDouble(MM_PREFIX "monopipePipeDepthStats.mean",
+                        mMonopipePipeDepthStats.getMean());
+        item->setDouble(MM_PREFIX "monopipePipeDepthStats.std",
+                        mMonopipePipeDepthStats.getStdDev());
+    }
 
     item->selfrecord();
 }
@@ -2303,7 +2316,7 @@
                  "AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
                  frameCount, mFrameCount);
       } else {
-        ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
+        ALOGD("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
                 "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
                 "sampleRate=%u mSampleRate=%u "
                 "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
@@ -2802,36 +2815,26 @@
     return mOutput->stream->selectPresentation(presentationId, programId);
 }
 
-void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
                                                    audio_port_handle_t portId) {
-    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
     ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
-
-    desc->mIoHandle = mId;
-    struct audio_patch patch = mPatch;
-    if (isMsdDevice()) {
-        patch = mDownStreamPatch;
-    }
-
+    sp<AudioIoDescriptor> desc;
+    const struct audio_patch patch = isMsdDevice() ? mDownStreamPatch : mPatch;
     switch (event) {
     case AUDIO_OUTPUT_OPENED:
     case AUDIO_OUTPUT_REGISTERED:
     case AUDIO_OUTPUT_CONFIG_CHANGED:
-        desc->mPatch = patch;
-        desc->mChannelMask = mChannelMask;
-        desc->mSamplingRate = mSampleRate;
-        desc->mFormat = mFormat;
-        desc->mFrameCount = mNormalFrameCount; // FIXME see
-                                             // AudioFlinger::frameCount(audio_io_handle_t)
-        desc->mFrameCountHAL = mFrameCount;
-        desc->mLatency = latency_l();
+        desc = sp<AudioIoDescriptor>::make(mId, patch, false /*isInput*/,
+                mSampleRate, mFormat, mChannelMask,
+                // FIXME AudioFlinger::frameCount(audio_io_handle_t) instead of mNormalFrameCount?
+                mNormalFrameCount, mFrameCount, latency_l());
         break;
     case AUDIO_CLIENT_STARTED:
-        desc->mPatch = patch;
-        desc->mPortId = portId;
+        desc = sp<AudioIoDescriptor>::make(mId, patch, portId);
         break;
     case AUDIO_OUTPUT_CLOSED:
     default:
+        desc = sp<AudioIoDescriptor>::make(mId);
         break;
     }
     mAudioFlinger->ioConfigChanged(event, desc, pid);
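Note on the sp<AudioIoDescriptor>::make(...) calls introduced here and in the Record/Mmap thread hunks below: they presuppose AudioIoDescriptor constructor overloads roughly shaped as in the sketch that follows, inferred purely from these call sites (media/AudioIoDescriptor.h, which actually declares them, is not part of this excerpt):

    // Inferred sketch only -- see media/AudioIoDescriptor.h for the authoritative declarations.
    class AudioIoDescriptor : public virtual RefBase {
    public:
        explicit AudioIoDescriptor(audio_io_handle_t ioHandle);          // *_CLOSED / default case
        AudioIoDescriptor(audio_io_handle_t ioHandle, const struct audio_patch& patch,
                          audio_port_handle_t portId);                   // AUDIO_CLIENT_STARTED
        AudioIoDescriptor(audio_io_handle_t ioHandle, const struct audio_patch& patch, bool isInput,
                          uint32_t samplingRate, audio_format_t format,
                          audio_channel_mask_t channelMask, size_t frameCount, size_t frameCountHal,
                          uint32_t latency = 0);                // *_OPENED / *_CONFIG_CHANGED
        // ...
    };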
@@ -4112,6 +4115,18 @@
                                 Mutex::Autolock _l(mLock);
                                 mIoJitterMs.add(jitterMs);
                                 mProcessTimeMs.add(processMs);
+
+                                if (mPipeSink.get() != nullptr) {
+                                    // Use the MonoPipe sink's availableToWrite() to estimate how
+                                    // many frames are currently queued (pending) in the pipe.
+                                    MonoPipe* monoPipe = static_cast<MonoPipe*>(mPipeSink.get());
+                                    const ssize_t
+                                            availableToWrite = mPipeSink->availableToWrite();
+                                    const size_t pipeFrames = monoPipe->maxFrames();
+                                    const size_t
+                                            remainingFrames = pipeFrames - max(availableToWrite, 0);
+                                    mMonopipePipeDepthStats.add(remainingFrames);
+                                }
                             }
 
                             // write blocked detection
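As a worked instance of the depth estimate recorded above (numbers are illustrative, not from the source): with monoPipe->maxFrames() == 1024 and availableToWrite() == 256, the sample added to mMonopipePipeDepthStats is 1024 - max(256, 0) = 768 frames still queued in the pipe; a negative (error) return from availableToWrite() is clamped to 0, so the estimate saturates at the full pipe size.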
@@ -4712,12 +4727,7 @@
             break;
         case FastMixer_Static:
         case FastMixer_Dynamic:
-            // FastMixer was designed to operate with a HAL that pulls at a regular rate,
-            // where the period is less than an experimentally determined threshold that can be
-            // scheduled reliably with CFS. However, the BT A2DP HAL is
-            // bursty (does not pull at a regular rate) and so cannot operate with FastMixer.
-            initFastMixer = mFrameCount < mNormalFrameCount
-                    && Intersection(outDeviceTypes(), getAudioDeviceOutAllA2dpSet()).empty();
+            initFastMixer = mFrameCount < mNormalFrameCount;
             break;
         }
         ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
@@ -5879,6 +5889,20 @@
     return trackCount;
 }
 
+bool AudioFlinger::PlaybackThread::checkRunningTimestamp()
+{
+    uint64_t position = 0;
+    struct timespec unused;
+    const status_t ret = mOutput->getPresentationPosition(&position, &unused);
+    if (ret == NO_ERROR) {
+        if (position != mLastCheckedTimestampPosition) {
+            mLastCheckedTimestampPosition = position;
+            return true;
+        }
+    }
+    return false;
+}
+
 // isTrackAllowed_l() must be called with ThreadBase::mLock held
 bool AudioFlinger::MixerThread::isTrackAllowed_l(
         audio_channel_mask_t channelMask, audio_format_t format,
@@ -6307,19 +6331,24 @@
                 // fill a buffer, then remove it from active list.
                 // Only consider last track started for mixer state control
                 if (--(track->mRetryCount) <= 0) {
-                    ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
-                    tracksToRemove->add(track);
-                    // indicate to client process that the track was disabled because of underrun;
-                    // it will then automatically call start() when data is available
-                    track->disable();
-                    // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
-                    // unlike mixerthread, HAL can be paused for direct output
-                    ALOGW("pause because of UNDERRUN, framesReady = %zu,"
-                            "minFrames = %u, mFormat = %#x",
-                            framesReady, minFrames, mFormat);
-                    if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
-                        doHwPause = true;
-                        mHwPaused = true;
+                    const bool running = checkRunningTimestamp();
+                    if (running) { // still running, give us more time.
+                        track->mRetryCount = kMaxTrackRetriesOffload;
+                    } else {
+                        ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
+                        tracksToRemove->add(track);
+                        // indicate to client process that the track was disabled because of
+                        // underrun; it will then automatically call start() when data is available
+                        track->disable();
+                        // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
+                        // unlike mixerthread, HAL can be paused for direct output
+                        ALOGW("pause because of UNDERRUN, framesReady = %zu, "
+                                "minFrames = %u, mFormat = %#x",
+                                framesReady, minFrames, mFormat);
+                        if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
+                            doHwPause = true;
+                            mHwPaused = true;
+                        }
                     }
                 } else if (last) {
                     mixerStatus = MIXER_TRACKS_ENABLED;
@@ -6530,6 +6559,7 @@
 
 void AudioFlinger::DirectOutputThread::flushHw_l()
 {
+    PlaybackThread::flushHw_l();
     mOutput->flush();
     mHwPaused = false;
     mFlushPending = false;
@@ -6665,8 +6695,7 @@
 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
         AudioStreamOut* output, audio_io_handle_t id, bool systemReady)
     :   DirectOutputThread(audioFlinger, output, id, OFFLOAD, systemReady),
-        mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true),
-        mOffloadUnderrunPosition(~0LL)
+        mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
 {
     //FIXME: mStandby should be set to true by ThreadBase constructor
     mStandby = true;
@@ -6883,19 +6912,7 @@
                 // No buffers for this track. Give it a few chances to
                 // fill a buffer, then remove it from active list.
                 if (--(track->mRetryCount) <= 0) {
-                    bool running = false;
-                    uint64_t position = 0;
-                    struct timespec unused;
-                    // The running check restarts the retry counter at least once.
-                    status_t ret = mOutput->stream->getPresentationPosition(&position, &unused);
-                    if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
-                        running = true;
-                        mOffloadUnderrunPosition = position;
-                    }
-                    if (ret == NO_ERROR) {
-                        ALOGVV("underrun counter, running(%d): %lld vs %lld", running,
-                                (long long)position, (long long)mOffloadUnderrunPosition);
-                    }
+                    const bool running = checkRunningTimestamp();
                     if (running) { // still running, give us more time.
                         track->mRetryCount = kMaxTrackRetriesOffload;
                     } else {
@@ -6966,7 +6983,6 @@
     mPausedBytesRemaining = 0;
     // reset bytes written count to reflect that DSP buffers are empty after flush.
     mBytesWritten = 0;
-    mOffloadUnderrunPosition = ~0LL;
 
     if (mUseAsyncWrite) {
         // discard any pending drain or write ack by incrementing sequence
@@ -7761,6 +7777,7 @@
 
             const ssize_t availableToRead = mPipeSource->availableToRead();
             if (availableToRead >= 0) {
+                mMonopipePipeDepthStats.add(availableToRead);
                 // PipeSource is the primary clock.  It is up to the AudioRecord client to keep up.
                 LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
                         "more frames to read than fifo size, %zd > %zu",
@@ -8189,6 +8206,7 @@
       if (
             // we formerly checked for a callback handler (non-0 tid),
             // but that is no longer required for TRANSFER_OBTAIN mode
+            // No need to match the hardware format; conversion is done on the client side.
             //
             // Frame count is not specified (0), or is less than or equal the pipe depth.
             // It is OK to provide a higher capacity than requested.
@@ -8196,8 +8214,6 @@
             (frameCount <= mPipeFramesP2) &&
             // PCM data
             audio_is_linear_pcm(format) &&
-            // hardware format
-            (format == mFormat) &&
             // hardware channel mask
             (channelMask == mChannelMask) &&
             // hardware sample rate
@@ -8947,30 +8963,22 @@
     return String8();
 }
 
-void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
                                                  audio_port_handle_t portId) {
-    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
-
-    desc->mIoHandle = mId;
-
+    sp<AudioIoDescriptor> desc;
     switch (event) {
     case AUDIO_INPUT_OPENED:
     case AUDIO_INPUT_REGISTERED:
     case AUDIO_INPUT_CONFIG_CHANGED:
-        desc->mPatch = mPatch;
-        desc->mChannelMask = mChannelMask;
-        desc->mSamplingRate = mSampleRate;
-        desc->mFormat = mFormat;
-        desc->mFrameCount = mFrameCount;
-        desc->mFrameCountHAL = mFrameCount;
-        desc->mLatency = 0;
+        desc = sp<AudioIoDescriptor>::make(mId, mPatch, true /*isInput*/,
+                mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
         break;
     case AUDIO_CLIENT_STARTED:
-        desc->mPatch = mPatch;
-        desc->mPortId = portId;
+        desc = sp<AudioIoDescriptor>::make(mId, mPatch, portId);
         break;
     case AUDIO_INPUT_CLOSED:
     default:
+        desc = sp<AudioIoDescriptor>::make(mId);
         break;
     }
     mAudioFlinger->ioConfigChanged(event, desc, pid);
@@ -9813,31 +9821,26 @@
     return String8();
 }
 
-void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
                                                audio_port_handle_t portId __unused) {
-    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
-
-    desc->mIoHandle = mId;
-
+    sp<AudioIoDescriptor> desc;
+    bool isInput = false;
     switch (event) {
     case AUDIO_INPUT_OPENED:
     case AUDIO_INPUT_REGISTERED:
     case AUDIO_INPUT_CONFIG_CHANGED:
+        isInput = true;
+        FALLTHROUGH_INTENDED;
     case AUDIO_OUTPUT_OPENED:
     case AUDIO_OUTPUT_REGISTERED:
     case AUDIO_OUTPUT_CONFIG_CHANGED:
-        desc->mPatch = mPatch;
-        desc->mChannelMask = mChannelMask;
-        desc->mSamplingRate = mSampleRate;
-        desc->mFormat = mFormat;
-        desc->mFrameCount = mFrameCount;
-        desc->mFrameCountHAL = mFrameCount;
-        desc->mLatency = 0;
+        desc = sp<AudioIoDescriptor>::make(mId, mPatch, isInput,
+                mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
         break;
-
     case AUDIO_INPUT_CLOSED:
     case AUDIO_OUTPUT_CLOSED:
     default:
+        desc = sp<AudioIoDescriptor>::make(mId);
         break;
     }
     mAudioFlinger->ioConfigChanged(event, desc, pid);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 8561de3..61537a8 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -113,7 +113,7 @@
 
     class IoConfigEventData : public ConfigEventData {
     public:
-        IoConfigEventData(audio_io_config_event event, pid_t pid,
+        IoConfigEventData(audio_io_config_event_t event, pid_t pid,
                           audio_port_handle_t portId) :
             mEvent(event), mPid(pid), mPortId(portId) {}
 
@@ -121,14 +121,14 @@
             snprintf(buffer, size, "- IO event: event %d\n", mEvent);
         }
 
-        const audio_io_config_event mEvent;
+        const audio_io_config_event_t mEvent;
         const pid_t                 mPid;
         const audio_port_handle_t   mPortId;
     };
 
     class IoConfigEvent : public ConfigEvent {
     public:
-        IoConfigEvent(audio_io_config_event event, pid_t pid, audio_port_handle_t portId) :
+        IoConfigEvent(audio_io_config_event_t event, pid_t pid, audio_port_handle_t portId) :
             ConfigEvent(CFG_EVENT_IO) {
             mData = new IoConfigEventData(event, pid, portId);
         }
@@ -332,15 +332,15 @@
                                                     status_t& status) = 0;
     virtual     status_t    setParameters(const String8& keyValuePairs);
     virtual     String8     getParameters(const String8& keys) = 0;
-    virtual     void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+    virtual     void        ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
                                         audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE) = 0;
                 // sendConfigEvent_l() must be called with ThreadBase::mLock held
                 // Can temporarily release the lock if waiting for a reply from
                 // processConfigEvents_l().
                 status_t    sendConfigEvent_l(sp<ConfigEvent>& event);
-                void        sendIoConfigEvent(audio_io_config_event event, pid_t pid = 0,
+                void        sendIoConfigEvent(audio_io_config_event_t event, pid_t pid = 0,
                                               audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
-                void        sendIoConfigEvent_l(audio_io_config_event event, pid_t pid = 0,
+                void        sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid = 0,
                                             audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
                 void        sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp);
                 void        sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio, bool forApp);
@@ -691,6 +691,7 @@
                 audio_utils::Statistics<double> mIoJitterMs{0.995 /* alpha */};
                 audio_utils::Statistics<double> mProcessTimeMs{0.995 /* alpha */};
                 audio_utils::Statistics<double> mLatencyMs{0.995 /* alpha */};
+                audio_utils::Statistics<double> mMonopipePipeDepthStats{0.999 /* alpha */};
 
                 // Save the last count when we delivered statistics to mediametrics.
                 int64_t                 mLastRecordedTimestampVerifierN = 0;
@@ -979,7 +980,7 @@
                                 { return android_atomic_acquire_load(&mSuspended) > 0; }
 
     virtual     String8     getParameters(const String8& keys);
-    virtual     void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+    virtual     void        ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
                                             audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
                 status_t    getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
                 // Consider also removing and passing an explicit mMainBuffer initialization
@@ -1375,6 +1376,14 @@
                 struct audio_patch mDownStreamPatch;
 
                 std::atomic_bool mCheckOutputStageEffects{};
+
+                // Tracks the last presentation frame position so that checkRunningTimestamp()
+                // can detect whether the position has advanced since the previous call.
+                uint64_t mLastCheckedTimestampPosition = ~0LL;
+
+                bool checkRunningTimestamp();
+
+    virtual     void flushHw_l() { mLastCheckedTimestampPosition = ~0LL; }
 };
 
 class MixerThread : public PlaybackThread {
@@ -1492,7 +1501,7 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
 
-    virtual     void        flushHw_l();
+                void        flushHw_l() override;
 
                 void        setMasterBalance(float balance) override;
 
@@ -1557,7 +1566,7 @@
     OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
                   audio_io_handle_t id, bool systemReady);
     virtual                 ~OffloadThread() {};
-    virtual     void        flushHw_l();
+                void        flushHw_l() override;
 
 protected:
     // threadLoop snippets
@@ -1574,10 +1583,6 @@
     size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
     size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
     bool        mKeepWakeLock;          // keep wake lock while waiting for write callback
-    uint64_t    mOffloadUnderrunPosition; // Current frame position for offloaded playback
-                                          // used and valid only during underrun.  ~0 if
-                                          // no underrun has occurred during playback and
-                                          // is not reset on standby.
 };
 
 class AsyncCallbackThread : public Thread {
@@ -1801,7 +1806,7 @@
                                                status_t& status);
     virtual void        cacheParameters_l() {}
     virtual String8     getParameters(const String8& keys);
-    virtual void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+    virtual void        ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
                                         audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
     virtual status_t    createAudioPatch_l(const struct audio_patch *patch,
                                            audio_patch_handle_t *handle);
@@ -2010,7 +2015,7 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                     status_t& status);
     virtual     String8     getParameters(const String8& keys);
-    virtual     void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+    virtual     void        ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
                                             audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
                 void        readHalParameters_l();
     virtual     void        cacheParameters_l() {}
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 09523d0..f6f3b9a 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -18,6 +18,7 @@
 #define ANDROID_AUDIOPOLICY_INTERFACE_H
 
 #include <media/AudioCommonTypes.h>
+#include <media/AudioContainers.h>
 #include <media/AudioDeviceTypeAddr.h>
 #include <media/AudioSystem.h>
 #include <media/AudioPolicy.h>
@@ -31,30 +32,42 @@
 
 // ----------------------------------------------------------------------------
 
-// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces
-// between the platform specific audio policy manager and Android generic audio policy manager.
-// The platform specific audio policy manager must implement methods of the AudioPolicyInterface class.
+// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication
+// interfaces between the platform specific audio policy manager and Android generic audio policy
+// manager.
+// The platform specific audio policy manager must implement methods of the AudioPolicyInterface
+// class.
 // This implementation makes use of the AudioPolicyClientInterface to control the activity and
 // configuration of audio input and output streams.
 //
 // The platform specific audio policy manager is in charge of the audio routing and volume control
 // policies for a given platform.
 // The main roles of this module are:
-//   - keep track of current system state (removable device connections, phone state, user requests...).
-//   System state changes and user actions are notified to audio policy manager with methods of the AudioPolicyInterface.
+//   - keep track of current system state (removable device connections, phone state,
+//   user requests...).
+//   System state changes and user actions are notified to the audio policy manager through
+//   methods of the AudioPolicyInterface.
 //   - process getOutput() queries received when AudioTrack objects are created: Those queries
-//   return a handler on an output that has been selected, configured and opened by the audio policy manager and that
-//   must be used by the AudioTrack when registering to the AudioFlinger with the createTrack() method.
-//   When the AudioTrack object is released, a putOutput() query is received and the audio policy manager can decide
-//   to close or reconfigure the output depending on other streams using this output and current system state.
-//   - similarly process getInput() and putInput() queries received from AudioRecord objects and configure audio inputs.
-//   - process volume control requests: the stream volume is converted from an index value (received from UI) to a float value
-//   applicable to each output as a function of platform specific settings and current output route (destination device). It
-//   also make sure that streams are not muted if not allowed (e.g. camera shutter sound in some countries).
+//   return a handle to an output that has been selected, configured and opened by the audio
+//   policy manager; the AudioTrack must use this handle when registering with the AudioFlinger
+//   via the createTrack() method.
+//   When the AudioTrack object is released, a putOutput() query is received and the audio policy
+//   manager can decide to close or reconfigure the output depending on other streams using this
+//   output and current system state.
+//   - similarly process getInput() and putInput() queries received from AudioRecord objects and
+//   configure audio inputs.
+//   - process volume control requests: the stream volume is converted from an index value
+//   (received from UI) to a float value applicable to each output as a function of platform
+//   specific settings and current output route (destination device). It also makes sure that
+//   streams are not muted if not allowed (e.g. camera shutter sound in some countries).
 //
-// The platform specific audio policy manager is provided as a shared library by platform vendors (as for libaudio.so)
-// and is linked with libaudioflinger.so
-
+// The platform specific audio policy manager is provided as a shared library by platform vendors
+// (as for libaudio.so) and is linked with libaudioflinger.so
+//
+// NOTE: by convention, the implementation of the AudioPolicyInterface in AudioPolicyManager does
+// not have to perform any nullptr check on input arguments: The caller of this API is
+// AudioPolicyService running in the same process and in charge of validating arguments received
+// from incoming binder calls before calling AudioPolicyManager.
 
 //    Audio Policy Manager Interface
 class AudioPolicyInterface
@@ -99,7 +112,7 @@
                                               audio_format_t encodedFormat) = 0;
     // retrieve a device connection status
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
-                                                                          const char *device_address) = 0;
+                                                              const char *device_address) = 0;
     // indicate a change in device configuration
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
@@ -133,9 +146,11 @@
                                         audio_port_handle_t *portId,
                                         std::vector<audio_io_handle_t> *secondaryOutputs,
                                         output_type_t *outputType) = 0;
-    // indicates to the audio policy manager that the output starts being used by corresponding stream.
+    // indicates to the audio policy manager that the output starts being used by the
+    // corresponding stream.
     virtual status_t startOutput(audio_port_handle_t portId) = 0;
-    // indicates to the audio policy manager that the output stops being used by corresponding stream.
+    // indicates to the audio policy manager that the output stops being used by the
+    // corresponding stream.
     virtual status_t stopOutput(audio_port_handle_t portId) = 0;
     // releases the output, return true if the output descriptor is reopened.
     virtual bool releaseOutput(audio_port_handle_t portId) = 0;
@@ -198,7 +213,7 @@
     virtual product_strategy_t getStrategyForStream(audio_stream_type_t stream) = 0;
 
     // return the enabled output devices for the given stream type
-    virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
+    virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream) = 0;
 
     // retrieves the list of enabled output devices for the given audio attributes
     virtual status_t getDevicesForAttributes(const audio_attributes_t &attr,
@@ -377,6 +392,18 @@
      * @return NO_ERROR if an output was closed, INVALID_OPERATION or BAD_VALUE otherwise
      */
     virtual status_t releaseSpatializerOutput(audio_io_handle_t output) = 0;
+
+    /**
+     * Query how the direct playback is currently supported on the device.
+     * @param attr audio attributes describing the playback use case
+     * @param config audio configuration for the playback
+     * @return a set of audio_direct_mode_t flags describing how direct playback is currently
+     *         supported on the device for the given attributes and configuration. No flag set
+     *         means that neither an offloaded nor a bitstream (passthrough) direct path is
+     *         currently available for this use case.
+     */
+    virtual audio_direct_mode_t getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                                         const audio_config_t *config) = 0;
 };
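A hypothetical caller sketch for the new getDirectPlaybackSupport() query; the attribute and configuration values are illustrative, and 'apm' is assumed to be a pointer to an AudioPolicyInterface implementation:

    // Illustrative only: can 5.1 AC3 media content use a direct (offload/bitstream) path right now?
    audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
    attr.usage = AUDIO_USAGE_MEDIA;
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = 48000;
    config.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
    config.format = AUDIO_FORMAT_AC3;
    const audio_direct_mode_t modes = apm->getDirectPlaybackSupport(&attr, &config);
    if (modes != AUDIO_DIRECT_NOT_SUPPORTED) {
        // At least one direct mode (offload and/or bitstream) is currently available.
    }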
 
 
@@ -397,10 +424,13 @@
     // Audio output Control functions
     //
 
-    // opens an audio output with the requested parameters. The parameter values can indicate to use the default values
-    // in case the audio policy manager has no specific requirements for the output being opened.
-    // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
-    // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
+    // opens an audio output with the requested parameters. The parameter values can indicate to
+    // use the default values in case the audio policy manager has no specific requirements for the
+    // output being opened.
+    // When the function returns, the parameter values reflect the actual values used by the audio
+    // hardware output stream.
+    // The audio policy manager can check if the proposed parameters are suitable or not and act
+    // accordingly.
     virtual status_t openOutput(audio_module_handle_t module,
                                 audio_io_handle_t *output,
                                 audio_config_t *halConfig,
@@ -408,13 +438,15 @@
                                 const sp<DeviceDescriptorBase>& device,
                                 uint32_t *latencyMs,
                                 audio_output_flags_t flags) = 0;
-    // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
-    // a special mixer thread in the AudioFlinger.
-    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0;
+    // creates a special output that is duplicated to the two outputs passed as arguments.
+    // The duplication is performed by a special mixer thread in the AudioFlinger.
+    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+                                                  audio_io_handle_t output2) = 0;
     // closes the output stream
     virtual status_t closeOutput(audio_io_handle_t output) = 0;
-    // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
-    // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
+    // suspends the output. When an output is suspended, the corresponding audio hardware output
+    // stream is placed in standby and the AudioTracks attached to the mixer thread are still
+    // processed but the output mix is discarded.
     virtual status_t suspendOutput(audio_io_handle_t output) = 0;
     // restores a suspended output.
     virtual status_t restoreOutput(audio_io_handle_t output) = 0;
@@ -437,16 +469,21 @@
     // misc control functions
     //
 
-    // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes
+    // set a stream volume for a particular output. For the same user setting, a given stream type
+    // can have different volumes
     // for each output (destination device) it is attached to.
-    virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0) = 0;
+    virtual status_t setStreamVolume(audio_stream_type_t stream, float volume,
+                                     audio_io_handle_t output, int delayMs = 0) = 0;
 
     // invalidate a stream type, causing a reroute to an unspecified new output
     virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
 
-    // function enabling to send proprietary informations directly from audio policy manager to audio hardware interface.
-    virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0) = 0;
-    // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
+    // function allowing the audio policy manager to send proprietary information directly to the
+    // audio hardware interface.
+    virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs,
+                               int delayMs = 0) = 0;
+    // function allowing the audio policy manager to receive proprietary information directly
+    // from the audio hardware interface.
     virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
 
     // set down link audio volume.
@@ -509,7 +546,8 @@
     // These are the signatures of createAudioPolicyManager/destroyAudioPolicyManager
     // methods respectively, expected by AudioPolicyService, needs to be exposed by
     // libaudiopolicymanagercustom.
-    using CreateAudioPolicyManagerInstance = AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
+    using CreateAudioPolicyManagerInstance =
+            AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
     using DestroyAudioPolicyManagerInstance = void (*)(AudioPolicyInterface*);
 
 } // namespace android
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 577f641..3d3e0cf 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -133,6 +133,7 @@
  * - AUDIO_SOURCE_FM_TUNER
  * - AUDIO_SOURCE_VOICE_RECOGNITION
  * - AUDIO_SOURCE_HOTWORD
+ * - AUDIO_SOURCE_ULTRASOUND
  *
  * @return the corresponding input source priority or 0 if priority is irrelevant for this source.
  *      This happens when the specified source cannot share a given input stream (e.g remote submix)
@@ -142,22 +143,24 @@
 {
     switch (inputSource) {
     case AUDIO_SOURCE_VOICE_COMMUNICATION:
-        return 9;
+        return 10;
     case AUDIO_SOURCE_CAMCORDER:
-        return 8;
+        return 9;
     case AUDIO_SOURCE_VOICE_PERFORMANCE:
-        return 7;
+        return 8;
     case AUDIO_SOURCE_UNPROCESSED:
-        return 6;
+        return 7;
     case AUDIO_SOURCE_MIC:
-        return 5;
+        return 6;
     case AUDIO_SOURCE_ECHO_REFERENCE:
-        return 4;
+        return 5;
     case AUDIO_SOURCE_FM_TUNER:
-        return 3;
+        return 4;
     case AUDIO_SOURCE_VOICE_RECOGNITION:
-        return 2;
+        return 3;
     case AUDIO_SOURCE_HOTWORD:
+        return 2;
+    case AUDIO_SOURCE_ULTRASOUND:
         return 1;
     default:
         break;
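A hypothetical sketch of how such a priority could drive arbitration on a shared input; 'inputSourcePriority' below stands for the helper containing the switch above (its exact name is outside this excerpt), and the function is not the actual AudioPolicyManager code:

    // Among clients sharing one input stream, route to the source with the highest non-zero
    // priority; a priority of 0 means the source cannot share a given input (e.g. remote submix).
    audio_source_t pickActiveSource(const std::vector<audio_source_t>& sharingClients) {
        audio_source_t best = AUDIO_SOURCE_DEFAULT;
        int32_t bestPriority = 0;
        for (const audio_source_t source : sharingClients) {
            const int32_t priority = inputSourcePriority(source);
            if (priority > bestPriority) {
                bestPriority = priority;
                best = source;
            }
        }
        return best;  // e.g. VOICE_COMMUNICATION (10) wins over ULTRASOUND (1)
    }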
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index 227c2d8..1f23ae3 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -30,6 +30,7 @@
     ],
     shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "libcutils",
         "libhidlbase",
         "liblog",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index a40f6aa..856ae66 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -54,7 +54,7 @@
     DeviceVector supportedDevices() const  {
         return mProfile != nullptr ? mProfile->getSupportedDevices() :  DeviceVector(); }
 
-    void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo) const override;
 
     audio_io_handle_t   mIoHandle = AUDIO_IO_HANDLE_NONE; // input handle
     wp<AudioPolicyMix>  mPolicyMix;                   // non NULL when used by a dynamic policy
@@ -93,8 +93,10 @@
     audio_patch_handle_t getPatchHandle() const override;
     void setPatchHandle(audio_patch_handle_t handle) override;
     bool isMmap() override {
-        if (getPolicyAudioPort() != nullptr) {
-            return getPolicyAudioPort()->isMmap();
+        if (const auto policyPort = getPolicyAudioPort(); policyPort != nullptr) {
+            if (const auto port = policyPort->asAudioPort(); port != nullptr) {
+                return port->isMmap();
+            }
         }
         return false;
     }
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 7c7f02d..69082ac 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -149,7 +149,7 @@
                           AudioPolicyClientInterface *clientInterface);
     virtual ~AudioOutputDescriptor() {}
 
-    void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
     void        log(const char* indent);
 
     virtual DeviceVector devices() const { return mDevices; }
@@ -270,8 +270,10 @@
     audio_patch_handle_t getPatchHandle() const override;
     void setPatchHandle(audio_patch_handle_t handle) override;
     bool isMmap() override {
-        if (getPolicyAudioPort() != nullptr) {
-            return getPolicyAudioPort()->isMmap();
+        if (const auto policyPort = getPolicyAudioPort(); policyPort != nullptr) {
+            if (const auto port = policyPort->asAudioPort(); port != nullptr) {
+                return port->isMmap();
+            }
         }
         return false;
     }
@@ -307,6 +309,8 @@
     DeviceVector mDevices; /**< current devices this output is routed to */
     wp<AudioPolicyMix> mPolicyMix;  // non NULL when used by a dynamic policy
 
+    virtual uint32_t getRecommendedMuteDurationMs() const { return 0; }
+
 protected:
     const sp<PolicyAudioPort> mPolicyAudioPort;
     AudioPolicyClientInterface * const mClientInterface;
@@ -332,7 +336,7 @@
                             AudioPolicyClientInterface *clientInterface);
     virtual ~SwAudioOutputDescriptor() {}
 
-            void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
     virtual DeviceVector devices() const;
     void setDevices(const DeviceVector &devices) { mDevices = devices; }
     bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
@@ -415,6 +419,8 @@
      */
     DeviceVector filterSupportedDevices(const DeviceVector &devices) const;
 
+    uint32_t getRecommendedMuteDurationMs() const override;
+
     const sp<IOProfile> mProfile;          // I/O profile this output derives from
     audio_io_handle_t mIoHandle;           // output handle
     uint32_t mLatency;                  //
@@ -435,7 +441,7 @@
                             AudioPolicyClientInterface *clientInterface);
     virtual ~HwAudioOutputDescriptor() {}
 
-            void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo) const override;
 
     virtual bool setVolume(float volumeDb,
                            VolumeSource volumeSource, const StreamTypeVector &streams,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
index a5de655..955b0cf 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
@@ -41,7 +41,7 @@
 
     void setUid(uid_t uid) { mUid = uid; }
 
-    void dump(String8 *dst, int spaces, int index) const;
+    void dump(String8 *dst, int spaces) const;
 
     struct audio_patch mPatch;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 74b3405..dc2403c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -16,19 +16,21 @@
 
 #pragma once
 
-#include <vector>
-#include <map>
-#include <unistd.h>
 #include <sys/types.h>
+#include <unistd.h>
 
-#include <system/audio.h>
+#include <map>
+#include <vector>
+
+#include <android-base/stringprintf.h>
 #include <audiomanager/AudioManager.h>
 #include <media/AudioProductStrategy.h>
+#include <policy.h>
+#include <system/audio.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
 #include <utils/String8.h>
-#include <policy.h>
 #include <Volume.h>
 #include "AudioPatch.h"
 #include "EffectDescriptor.h"
@@ -52,7 +54,7 @@
         mPreferredDeviceForExclusiveUse(isPreferredDeviceForExclusiveUse){}
     ~ClientDescriptor() override = default;
 
-    virtual void dump(String8 *dst, int spaces, int index) const;
+    virtual void dump(String8 *dst, int spaces) const;
     virtual std::string toShortString() const;
 
     audio_port_handle_t portId() const { return mPortId; }
@@ -100,7 +102,7 @@
     ~TrackClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
     std::string toShortString() const override;
 
     audio_output_flags_t flags() const { return mFlags; }
@@ -168,7 +170,7 @@
     ~RecordClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
 
     audio_unique_id_t riid() const { return mRIId; }
     audio_source_t source() const { return mSource; }
@@ -219,7 +221,7 @@
     void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
 
  private:
     audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -269,10 +271,13 @@
     size_t getClientCount() const {
         return mClients.size();
     }
-    virtual void dump(String8 *dst) const {
+    virtual void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const {
+        (void)extraInfo;
         size_t index = 0;
         for (const auto& client: getClientIterable()) {
-            client->dump(dst, 2, index++);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", ++index);
+            dst->appendFormat("%s", prefix.c_str());
+            client->dump(dst, prefix.size());
         }
     }
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 58d05c6..4adc920 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -43,7 +43,7 @@
     DeviceDescriptor(const AudioDeviceTypeAddr &deviceTypeAddr, const std::string &tagName = "",
             const FormatVector &encodedFormats = FormatVector{});
 
-    virtual ~DeviceDescriptor() {}
+    virtual ~DeviceDescriptor() = default;
 
     virtual void addAudioProfile(const sp<AudioProfile> &profile) {
         addAudioProfileAndSort(mProfiles, profile);
@@ -51,8 +51,6 @@
 
     virtual const std::string getTagName() const { return mTagName; }
 
-    const FormatVector& encodedFormats() const { return mEncodedFormats; }
-
     audio_format_t getEncodedFormat() { return mCurrentEncodedFormat; }
 
     void setEncodedFormat(audio_format_t format) {
@@ -63,8 +61,6 @@
 
     bool hasCurrentEncodedFormat() const;
 
-    bool supportsFormat(audio_format_t format);
-
     void setDynamic() { mIsDynamic = true; }
     bool isDynamic() const { return mIsDynamic; }
 
@@ -95,7 +91,7 @@
 
     void setEncapsulationInfoFromHal(AudioPolicyClientInterface *clientInterface);
 
-    void dump(String8 *dst, int spaces, int index, bool verbose = true) const;
+    void dump(String8 *dst, int spaces, bool verbose = true) const;
 
 private:
     template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
@@ -106,7 +102,6 @@
     }
 
     std::string mTagName; // Unique human readable identifier for a device port found in conf file.
-    FormatVector        mEncodedFormats;
     audio_format_t      mCurrentEncodedFormat;
     bool                mIsDynamic = false;
     const std::string   mDeclaredAddress; // Original device address
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 54b3408..436fcc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -115,7 +115,7 @@
                        const sp<PolicyAudioPort> &dstPort) const;
 
     // TODO remove from here (split serialization)
-    void dump(String8 *dst) const;
+    void dump(String8 *dst, int spaces) const;
 
 private:
     void refreshSupportedDevices();
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index a74cefa..90b812d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -37,9 +37,7 @@
 public:
     IOProfile(const std::string &name, audio_port_role_t role)
         : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
-          maxOpenCount(1),
           curOpenCount(0),
-          maxActiveCount(1),
           curActiveCount(0) {}
 
     virtual ~IOProfile() = default;
@@ -59,11 +57,12 @@
     // Once capture clients are tracked individually and not per session this can be removed
     // MMAP no IRQ input streams do not have the default limitation of one active client
     // max as they can be used in shared mode by the same application.
+    // NOTE: Please consider moving to AudioPort when addressing the FIXME
     // NOTE: this works for explicit values set in audio_policy_configuration.xml because
     // flags are parsed before maxActiveCount by the serializer.
     void setFlags(uint32_t flags) override
     {
-        PolicyAudioPort::setFlags(flags);
+        AudioPort::setFlags(flags);
         if (getRole() == AUDIO_PORT_ROLE_SINK && (flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
             maxActiveCount = 0;
         }
@@ -98,7 +97,7 @@
                              uint32_t flags,
                              bool exactMatchRequiredForInputFlags = false) const;
 
-    void dump(String8 *dst) const;
+    void dump(String8 *dst, int spaces) const;
     void log();
 
     bool hasSupportedDevices() const { return !mSupportedDevices.isEmpty(); }
@@ -194,16 +193,8 @@
         return false;
     }
 
-    // Maximum number of input or output streams that can be simultaneously opened for this profile.
-    // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for output
-    // profiles and 0 for input profiles
-    uint32_t     maxOpenCount;
     // Number of streams currently opened for this profile.
     uint32_t     curOpenCount;
-    // Maximum number of input or output streams that can be simultaneously active for this profile.
-    // By convention 0 means no limit. To respect legacy behavior, initialized to 0 for output
-    // profiles and 1 for input profiles
-    uint32_t     maxActiveCount;
     // Number of streams currently active for this profile. This is not the number of active clients
     // (AudioTrack or AudioRecord) but the number of active HAL streams.
     uint32_t     curActiveCount;
diff --git a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
index ab33b38..acf787b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
@@ -36,7 +36,7 @@
 class PolicyAudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
 {
 public:
-    PolicyAudioPort() : mFlags(AUDIO_OUTPUT_FLAG_NONE) {}
+    PolicyAudioPort() = default;
 
     virtual ~PolicyAudioPort() = default;
 
@@ -49,19 +49,6 @@
 
     virtual sp<AudioPort> asAudioPort() const = 0;
 
-    virtual void setFlags(uint32_t flags)
-    {
-        //force direct flag if offload flag is set: offloading implies a direct output stream
-        // and all common behaviors are driven by checking only the direct flag
-        // this should normally be set appropriately in the policy configuration file
-        if (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE &&
-                (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
-            flags |= AUDIO_OUTPUT_FLAG_DIRECT;
-        }
-        mFlags = flags;
-    }
-    uint32_t getFlags() const { return mFlags; }
-
     virtual void attach(const sp<HwModule>& module);
     virtual void detach();
     bool isAttached() { return mModule != 0; }
@@ -105,22 +92,6 @@
     const char *getModuleName() const;
     sp<HwModule> getModule() const { return mModule; }
 
-    inline bool isDirectOutput() const
-    {
-        return (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX) &&
-                (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE) &&
-                (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD));
-    }
-
-    inline bool isMmap() const
-    {
-        return (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX)
-                && (((asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE) &&
-                        ((mFlags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0))
-                    || ((asAudioPort()->getRole() == AUDIO_PORT_ROLE_SINK) &&
-                        ((mFlags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0)));
-    }
-
     void addRoute(const sp<AudioRoute> &route) { mRoutes.add(route); }
     const AudioRouteVector &getRoutes() const { return mRoutes; }
 
@@ -129,7 +100,6 @@
                          const ChannelMaskSet &channelMasks) const;
     void pickSamplingRate(uint32_t &rate, const SampleRateSet &samplingRates) const;
 
-    uint32_t mFlags; // attribute flags mask (e.g primary output, direct output...).
     sp<HwModule> mModule;     // audio HW module exposing this I/O stream
     AudioRouteVector mRoutes; // Routes involving this port
 };
@@ -141,27 +111,18 @@
 
     virtual sp<PolicyAudioPort> getPolicyAudioPort() const = 0;
 
-    status_t validationBeforeApplyConfig(const struct audio_port_config *config) const;
-
-    void applyPolicyAudioPortConfig(const struct audio_port_config *config) {
-        if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
-            mFlags = config->flags;
-        }
+    status_t validationBeforeApplyConfig(const struct audio_port_config *config) const {
+        sp<PolicyAudioPort> policyAudioPort = getPolicyAudioPort();
+        return policyAudioPort ? policyAudioPort->checkExactAudioProfile(config) : NO_INIT;
     }
 
-    void toPolicyAudioPortConfig(
-            struct audio_port_config *dstConfig,
-            const struct audio_port_config *srcConfig = NULL) const;
-
-
-    virtual bool hasSameHwModuleAs(const sp<PolicyAudioPortConfig>& other) const {
+    bool hasSameHwModuleAs(const sp<PolicyAudioPortConfig>& other) const {
         return (other.get() != nullptr) && (other->getPolicyAudioPort().get() != nullptr) &&
                 (getPolicyAudioPort().get() != nullptr) &&
                 (other->getPolicyAudioPort()->getModuleHandle() ==
                         getPolicyAudioPort()->getModuleHandle());
     }
 
-    union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
 };
 
 } // namespace android
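The flag bookkeeping deleted above (setFlags(), getFlags(), isDirectOutput(), isMmap(), mFlags) does not vanish from the policy manager: IOProfile::setFlags() now forwards to AudioPort::setFlags(), and PolicyAudioPort.cpp below switches to asAudioPort()->isDirectOutput(), so the behavior is assumed to live on AudioPort in libaudiofoundation after this change. For reference, a condensed sketch of what the removed members did (free-standing helper names are illustrative; the real AudioPort API may differ):

    #include <cstdint>
    #include <system/audio.h>

    // Offload implies direct for source (output) mix ports, as in the removed
    // PolicyAudioPort::setFlags().
    static uint32_t normalizeOutputFlags(audio_port_role_t role, uint32_t flags) {
        if (role == AUDIO_PORT_ROLE_SOURCE &&
                (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
            flags |= AUDIO_OUTPUT_FLAG_DIRECT;
        }
        return flags;
    }

    // "Direct output" means a source mix port carrying either the direct or the
    // compress-offload flag, as in the removed PolicyAudioPort::isDirectOutput().
    static bool isDirectOutputPort(audio_port_type_t type, audio_port_role_t role,
                                   uint32_t flags) {
        return type == AUDIO_PORT_TYPE_MIX && role == AUDIO_PORT_ROLE_SOURCE &&
                (flags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) != 0;
    }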
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index cd10010..580938e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioCollections"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include "AudioCollections.h"
 #include "AudioRoute.h"
 #include "HwModule.h"
@@ -40,10 +42,11 @@
     if (audioRouteVector.isEmpty()) {
         return;
     }
-    dst->appendFormat("\n%*sAudio Routes (%zu):\n", spaces, "", audioRouteVector.size());
+    dst->appendFormat("%*s- Audio Routes (%zu):\n", spaces - 2, "", audioRouteVector.size());
     for (size_t i = 0; i < audioRouteVector.size(); i++) {
-        dst->appendFormat("%*s- Route %zu:\n", spaces, "", i + 1);
-        audioRouteVector.itemAt(i)->dump(dst, 4);
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+        dst->append(prefix.c_str());
+        audioRouteVector.itemAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 7016a08..966b8cb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioInputDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include <audiomanager/AudioManager.h>
 #include <media/AudioPolicy.h>
 #include <policy.h>
@@ -62,7 +64,6 @@
     toAudioPortConfig(&localBackupConfig);
     if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
         AudioPortConfig::applyAudioPortConfig(config, backupConfig);
-        applyPolicyAudioPortConfig(config);
     }
 
     if (backupConfig != NULL) {
@@ -83,7 +84,6 @@
     }
 
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-    toPolicyAudioPortConfig(dstConfig, srcConfig);
 
     dstConfig->role = AUDIO_PORT_ROLE_SINK;
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
@@ -510,17 +510,20 @@
     }
 }
 
-void AudioInputDescriptor::dump(String8 *dst) const
+void AudioInputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" ID: %d\n", getId());
-    dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
-    dst->appendFormat(" Format: %d\n", mFormat);
-    dst->appendFormat(" Channels: %08x\n", mChannelMask);
-    dst->appendFormat(" Devices %s\n", mDevice->toString(true /*includeSensitiveInfo*/).c_str());
-    mEnabledEffects.dump(dst, 1 /*spaces*/, false /*verbose*/);
-    dst->append(" AudioRecord Clients:\n");
-    ClientMapHandler<RecordClientDescriptor>::dump(dst);
-    dst->append("\n");
+    dst->appendFormat("Port ID: %d%s%s\n",
+            getId(), extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mFormat), mSamplingRate, mChannelMask);
+    dst->appendFormat("%*sDevices: %s\n", spaces, "",
+            mDevice->toString(true /*includeSensitiveInfo*/).c_str());
+    mEnabledEffects.dump(dst, spaces /*spaces*/, false /*verbose*/);
+    if (getClientCount() != 0) {
+        dst->appendFormat("%*sAudioRecord Clients (%zu):\n", spaces, "", getClientCount());
+        ClientMapHandler<RecordClientDescriptor>::dump(dst, spaces);
+        dst->append("\n");
+    }
 }
 
 bool AudioInputCollection::isSourceActive(audio_source_t source) const
@@ -608,10 +611,12 @@
 
 void AudioInputCollection::dump(String8 *dst) const
 {
-    dst->append("\nInputs dump:\n");
+    dst->appendFormat("\n Inputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Input %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
 
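The collection dump above now threads per-entry details (the I/O handle) through the new extraInfo argument instead of printing a separate header line, and the descriptor folds it into its first line after the port ID. A small self-contained sketch of that hand-off, with std::string standing in for String8 and all names and values illustrative only:

    #include <cstdio>
    #include <string>

    // Mirrors AudioInputDescriptor::dump(): extraInfo, when present, is appended
    // to the first line right after the port ID.
    static void dumpDescriptor(std::string *dst, int portId, const char *extraInfo) {
        char buf[128];
        std::snprintf(buf, sizeof(buf), "Port ID: %d%s%s\n", portId,
                extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
        dst->append(buf);
    }

    int main() {
        std::string out;
        // Mirrors AudioInputCollection::dump(): only the collection knows the I/O handle.
        const int ioHandle = 42;  // illustrative value
        char extra[32];
        std::snprintf(extra, sizeof(extra), "I/O handle: %d", ioHandle);
        dumpDescriptor(&out, 10, extra);
        std::printf("%s", out.c_str());
        return 0;
    }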
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index f3d2326..235e4aa 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioOutputDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include <AudioPolicyInterface.h>
 #include "AudioOutputDescriptor.h"
 #include "AudioPolicyMix.h"
@@ -188,7 +190,6 @@
     toAudioPortConfig(&localBackupConfig);
     if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
         AudioPortConfig::applyAudioPortConfig(config, backupConfig);
-        applyPolicyAudioPortConfig(config);
     }
 
     if (backupConfig != NULL) {
@@ -207,7 +208,6 @@
         dstConfig->config_mask |= srcConfig->config_mask;
     }
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-    toPolicyAudioPortConfig(dstConfig, srcConfig);
 
     dstConfig->role = AUDIO_PORT_ROLE_SOURCE;
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
@@ -245,32 +245,45 @@
         return client->volumeSource() != volumeSourceToIgnore; }) != end(mActiveClients);
 }
 
-void AudioOutputDescriptor::dump(String8 *dst) const
+void AudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" ID: %d\n", mId);
-    dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
-    dst->appendFormat(" Format: %08x\n", mFormat);
-    dst->appendFormat(" Channels: %08x\n", mChannelMask);
-    dst->appendFormat(" Devices: %s\n", devices().toString(true /*includeSensitiveInfo*/).c_str());
-    dst->appendFormat(" Global active count: %u\n", mGlobalActiveCount);
-    for (const auto &iter : mRoutingActivities) {
-        dst->appendFormat(" Product Strategy id: %d", iter.first);
-        iter.second.dump(dst, 4);
+    dst->appendFormat("Port ID: %d%s%s\n",
+            mId, extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mFormat), mSamplingRate, mChannelMask);
+    dst->appendFormat("%*sDevices: %s\n", spaces, "",
+            devices().toString(true /*includeSensitiveInfo*/).c_str());
+    dst->appendFormat("%*sGlobal active count: %u\n", spaces, "", mGlobalActiveCount);
+    if (!mRoutingActivities.empty()) {
+        dst->appendFormat("%*s- Product Strategies (%zu):\n", spaces - 2, "",
+                mRoutingActivities.size());
+        for (const auto &iter : mRoutingActivities) {
+            dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
+            iter.second.dump(dst, 0);
+        }
     }
-    for (const auto &iter : mVolumeActivities) {
-        dst->appendFormat(" Volume Activities id: %d", iter.first);
-        iter.second.dump(dst, 4);
+    if (!mVolumeActivities.empty()) {
+        dst->appendFormat("%*s- Volume Activities (%zu):\n", spaces - 2, "",
+                mVolumeActivities.size());
+        for (const auto &iter : mVolumeActivities) {
+            dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
+            iter.second.dump(dst, 0);
+        }
     }
-    dst->append(" AudioTrack Clients:\n");
-    ClientMapHandler<TrackClientDescriptor>::dump(dst);
-    dst->append("\n");
+    if (getClientCount() != 0) {
+        dst->appendFormat("%*s- AudioTrack clients (%zu):\n", spaces - 2, "", getClientCount());
+        ClientMapHandler<TrackClientDescriptor>::dump(dst, spaces);
+    }
     if (!mActiveClients.empty()) {
-        dst->append(" AudioTrack active (stream) clients:\n");
+        dst->appendFormat("%*s- AudioTrack active (stream) clients (%zu):\n", spaces - 2, "",
+                mActiveClients.size());
         size_t index = 0;
         for (const auto& client : mActiveClients) {
-            client->dump(dst, 2, index++);
+            const std::string prefix = base::StringPrintf(
+                    "%*sid %zu: ", spaces + 1, "", ++index);
+            dst->appendFormat("%s", prefix.c_str());
+            client->dump(dst, prefix.size());
         }
-        dst->append(" \n");
     }
 }
 
@@ -294,11 +307,18 @@
     }
 }
 
-void SwAudioOutputDescriptor::dump(String8 *dst) const
+void SwAudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" Latency: %d\n", mLatency);
-    dst->appendFormat(" Flags %08x\n", mFlags);
-    AudioOutputDescriptor::dump(dst);
+    String8 allExtraInfo;
+    if (extraInfo != nullptr) {
+        allExtraInfo.appendFormat("%s; ", extraInfo);
+    }
+    std::string flagsLiteral = toString(mFlags);
+    allExtraInfo.appendFormat("Latency: %d; 0x%04x", mLatency, mFlags);
+    if (!flagsLiteral.empty()) {
+        allExtraInfo.appendFormat(" (%s)", flagsLiteral.c_str());
+    }
+    AudioOutputDescriptor::dump(dst, spaces, allExtraInfo.c_str());
 }
 
 DeviceVector SwAudioOutputDescriptor::devices() const
@@ -670,6 +690,15 @@
     return NO_ERROR;
 }
 
+uint32_t SwAudioOutputDescriptor::getRecommendedMuteDurationMs() const
+{
+    if (isDuplicated()) {
+        return std::max(mOutput1->getRecommendedMuteDurationMs(),
+                mOutput2->getRecommendedMuteDurationMs());
+    }
+    return mProfile->recommendedMuteDurationMs;
+}
+
 // HwAudioOutputDescriptor implementation
 HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
@@ -678,11 +707,11 @@
 {
 }
 
-void HwAudioOutputDescriptor::dump(String8 *dst) const
+void HwAudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    AudioOutputDescriptor::dump(dst);
-    dst->append("Source:\n");
-    mSource->dump(dst, 0, 0);
+    AudioOutputDescriptor::dump(dst, spaces, extraInfo);
+    dst->appendFormat("%*sSource:\n", spaces, "");
+    mSource->dump(dst, spaces);
 }
 
 void HwAudioOutputDescriptor::toAudioPortConfig(
@@ -855,10 +884,12 @@
 
 void SwAudioOutputCollection::dump(String8 *dst) const
 {
-    dst->append("\nOutputs dump:\n");
+    dst->appendFormat("\n Outputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Output %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
 
@@ -877,10 +908,12 @@
 
 void HwAudioOutputCollection::dump(String8 *dst) const
 {
-    dst->append("\nOutputs dump:\n");
+    dst->appendFormat("\n Outputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Output %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
 
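The new SwAudioOutputDescriptor::getRecommendedMuteDurationMs() surfaces the per-profile recommendedMuteDurationMs value (parsed by the Serializer change further down) and takes the larger of the two legs for duplicated outputs. A hedged sketch of how a caller might consume it; chooseMuteDurationMs and the "0 means not declared" fallback are assumptions for illustration, not part of the patch:

    #include "AudioOutputDescriptor.h"  // for android::SwAudioOutputDescriptor

    namespace android {

    // Hypothetical helper: prefer the profile's recommended mute window when one
    // is declared, otherwise fall back to a caller-supplied default (assuming a
    // value of 0 means the profile did not declare the attribute).
    static uint32_t chooseMuteDurationMs(const sp<SwAudioOutputDescriptor>& output,
                                         uint32_t defaultMuteMs) {
        const uint32_t recommended = output->getRecommendedMuteDurationMs();
        return recommended != 0 ? recommended : defaultMuteMs;
    }

    }  // namespace android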
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index d79110a..4f03db9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -20,7 +20,9 @@
 #include "AudioPatch.h"
 #include "TypeConverter.h"
 
+#include <android-base/stringprintf.h>
 #include <log/log.h>
+#include <media/AudioDeviceTypeAddr.h>
 #include <utils/String8.h>
 
 namespace android {
@@ -37,20 +39,21 @@
 {
     for (int i = 0; i < count; ++i) {
         const audio_port_config &cfg = cfgs[i];
-        dst->appendFormat("%*s  [%s %d] ", spaces, "", prefix, i + 1);
+        dst->appendFormat("%*s[%s %d] ", spaces, "", prefix, i + 1);
         if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
-            dst->appendFormat("Device ID %d %s", cfg.id, toString(cfg.ext.device.type).c_str());
+            AudioDeviceTypeAddr device(cfg.ext.device.type, cfg.ext.device.address);
+            dst->appendFormat("Device Port ID: %d; {%s}",
+                    cfg.id, device.toString(true /*includeSensitiveInfo*/).c_str());
         } else {
-            dst->appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
+            dst->appendFormat("Mix Port ID: %d; I/O handle: %d;", cfg.id, cfg.ext.mix.handle);
         }
         dst->append("\n");
     }
 }
 
-void AudioPatch::dump(String8 *dst, int spaces, int index) const
+void AudioPatch::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sPatch %d: owner uid %4d, handle %2d, af handle %2d\n",
-            spaces, "", index + 1, mUid, mHandle, mAfPatchHandle);
+    dst->appendFormat("owner uid %4d; handle %2d; af handle %2d\n", mUid, mHandle, mAfPatchHandle);
     dumpPatchEndpoints(dst, spaces, "src ", mPatch.num_sources, mPatch.sources);
     dumpPatchEndpoints(dst, spaces, "sink", mPatch.num_sinks, mPatch.sinks);
 }
@@ -135,9 +138,11 @@
 
 void AudioPatchCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio Patches:\n");
+    dst->appendFormat("\n Audio Patches (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->dump(dst, 2, i);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index b209a88..546f56b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -675,7 +675,7 @@
 
 void AudioPolicyMixCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio Policy Mix:\n");
+    dst->append("\n Audio Policy Mix:\n");
     for (size_t i = 0; i < size(); i++) {
         itemAt(i)->dump(dst, 2, i);
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 866417e..53cc473 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -25,15 +25,16 @@
 
 void AudioRoute::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*s- Type: %s\n", spaces, "", mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix");
-    dst->appendFormat("%*s- Sink: %s\n", spaces, "", mSink->getTagName().c_str());
+    dst->appendFormat("%s; Sink: \"%s\"\n",
+            mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix", mSink->getTagName().c_str());
     if (mSources.size() != 0) {
-        dst->appendFormat("%*s- Sources: \n", spaces, "");
+        dst->appendFormat("%*sSources: ", spaces, "");
         for (size_t i = 0; i < mSources.size(); i++) {
-            dst->appendFormat("%*s%s \n", spaces + 4, "", mSources[i]->getTagName().c_str());
+            dst->appendFormat("\"%s\"", mSources[i]->getTagName().c_str());
+            if (i + 1 < mSources.size()) dst->append(", ");
         }
+        dst->append("\n");
     }
-    dst->append("\n");
 }
 
 bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index afc4d01..035bef2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -18,9 +18,12 @@
 //#define LOG_NDEBUG 0
 
 #include <sstream>
+
+#include <android-base/stringprintf.h>
+#include <TypeConverter.h>
 #include <utils/Log.h>
 #include <utils/String8.h>
-#include <TypeConverter.h>
+
 #include "AudioOutputDescriptor.h"
 #include "AudioPatch.h"
 #include "AudioPolicyMix.h"
@@ -39,35 +42,36 @@
     return ss.str();
 }
 
-void ClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void ClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sClient %d:\n", spaces, "", index+1);
-    dst->appendFormat("%*s- Port Id: %d Session Id: %d UID: %d\n", spaces, "",
-             mPortId, mSessionId, mUid);
-    dst->appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
-             mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
-    dst->appendFormat("%*s- Attributes: %s\n", spaces, "", toString(mAttributes).c_str());
-    dst->appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
-    dst->appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
+    dst->appendFormat("Port ID: %d; Session ID: %d; uid %d; State: %s\n",
+            mPortId, mSessionId, mUid, mActive ? "Active" : "Inactive");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mConfig.format), mConfig.sample_rate, mConfig.channel_mask);
+    dst->appendFormat("%*sAttributes: %s\n", spaces, "", toString(mAttributes).c_str());
+    if (mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE) {
+        dst->appendFormat("%*sPreferred Device Port ID: %d;\n", spaces, "", mPreferredDeviceId);
+    }
 }
 
-void TrackClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void TrackClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    ClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
-    dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
-    dst->appendFormat("%*s- DAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
-    dst->appendFormat("%*s- DAP Secondary Outputs:\n", spaces, "");
-    for (auto desc : mSecondaryOutputs) {
-        dst->appendFormat("%*s  - %d\n", spaces, "",
-                desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+    ClientDescriptor::dump(dst, spaces);
+    dst->appendFormat("%*sStream: %d; Flags: %08x; Refcount: %d\n", spaces, "",
+            mStream, mFlags, mActivityCount);
+    dst->appendFormat("%*sDAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
+    if (!mSecondaryOutputs.empty()) {
+        dst->appendFormat("%*sDAP Secondary Outputs: ", spaces - 2, "");
+        for (auto desc : mSecondaryOutputs) {
+            dst->appendFormat("%d, ", desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+        }
+        dst->append("\n");
     }
 }
 
 std::string TrackClientDescriptor::toShortString() const
 {
     std::stringstream ss;
-
     ss << ClientDescriptor::toShortString() << " Stream: " << mStream;
     return ss.str();
 }
@@ -81,10 +85,10 @@
     }
 }
 
-void RecordClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void RecordClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    ClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+    ClientDescriptor::dump(dst, spaces);
+    dst->appendFormat("%*sSource: %d; Flags: %08x\n", spaces, "", mSource, mFlags);
     mEnabledEffects.dump(dst, spaces + 2 /*spaces*/, false /*verbose*/);
 }
 
@@ -109,18 +113,21 @@
     mHwOutput = hwOutput;
 }
 
-void SourceClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void SourceClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    TrackClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Device:\n", spaces, "");
-    mSrcDevice->dump(dst, 2, 0);
+    TrackClientDescriptor::dump(dst, spaces);
+    const std::string prefix = base::StringPrintf("%*sDevice: ", spaces, "");
+    dst->appendFormat("%s", prefix.c_str());
+    mSrcDevice->dump(dst, prefix.size());
 }
 
 void SourceClientCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio sources:\n");
+    dst->appendFormat("\n Audio sources (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->dump(dst, 2, i);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index c9c8ede..a909331 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -19,10 +19,11 @@
 
 #include <set>
 
-#include <AudioPolicyInterface.h>
+#include <android-base/stringprintf.h>
 #include <audio_utils/string.h>
 #include <media/AudioParameter.h>
 #include <media/TypeConverter.h>
+#include <AudioPolicyInterface.h>
 #include "DeviceDescriptor.h"
 #include "TypeConverter.h"
 #include "HwModule.h"
@@ -54,7 +55,7 @@
 DeviceDescriptor::DeviceDescriptor(const AudioDeviceTypeAddr &deviceTypeAddr,
                                    const std::string &tagName,
                                    const FormatVector &encodedFormats) :
-        DeviceDescriptorBase(deviceTypeAddr), mTagName(tagName), mEncodedFormats(encodedFormats),
+        DeviceDescriptorBase(deviceTypeAddr, encodedFormats), mTagName(tagName),
         mDeclaredAddress(DeviceDescriptorBase::address())
 {
     mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
@@ -109,20 +110,6 @@
     return (mCurrentEncodedFormat != AUDIO_FORMAT_DEFAULT);
 }
 
-bool DeviceDescriptor::supportsFormat(audio_format_t format)
-{
-    if (mEncodedFormats.empty()) {
-        return true;
-    }
-
-    for (const auto& devFormat : mEncodedFormats) {
-        if (devFormat == format) {
-            return true;
-        }
-    }
-    return false;
-}
-
 status_t DeviceDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
                                                 audio_port_config *backupConfig)
 {
@@ -132,7 +119,6 @@
     toAudioPortConfig(&localBackupConfig);
     if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
         AudioPortConfig::applyAudioPortConfig(config, backupConfig);
-        applyPolicyAudioPortConfig(config);
     }
 
     if (backupConfig != NULL) {
@@ -145,8 +131,6 @@
                                          const struct audio_port_config *srcConfig) const
 {
     DeviceDescriptorBase::toAudioPortConfig(dstConfig, srcConfig);
-    toPolicyAudioPortConfig(dstConfig, srcConfig);
-
     dstConfig->ext.device.hw_module = getModuleHandle();
 }
 
@@ -193,15 +177,15 @@
     }
 }
 
-void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
+void DeviceDescriptor::dump(String8 *dst, int spaces, bool verbose) const
 {
     String8 extraInfo;
     if (!mTagName.empty()) {
-        extraInfo.appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.c_str());
+        extraInfo.appendFormat("\"%s\"", mTagName.c_str());
     }
 
     std::string descBaseDumpStr;
-    DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, index, extraInfo.string(), verbose);
+    DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, extraInfo.string(), verbose);
     dst->append(descBaseDumpStr.c_str());
 }
 
@@ -464,9 +448,11 @@
     if (isEmpty()) {
         return;
     }
-    dst->appendFormat("%*s- %s devices:\n", spaces, "", tag.string());
+    dst->appendFormat("%*s%s devices (%zu):\n", spaces, "", tag.string(), size());
     for (size_t i = 0; i < size(); i++) {
-        itemAt(i)->dump(dst, spaces + 2, i, verbose);
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        itemAt(i)->dump(dst, prefix.size(), verbose);
     }
 }
 
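supportsFormat() and the local mEncodedFormats member are dropped here because the constructor hunk above now forwards encodedFormats to DeviceDescriptorBase, so the format list is assumed to be owned and queried through the base class in libaudiofoundation. The removed check itself was small; an equivalent standalone sketch for reference (an empty encoded-format list means every format is accepted):

    #include <algorithm>
    #include <vector>
    #include <system/audio.h>

    // Equivalent of the removed DeviceDescriptor::supportsFormat().
    static bool deviceSupportsFormat(const std::vector<audio_format_t>& encodedFormats,
                                     audio_format_t format) {
        return encodedFormats.empty() ||
                std::find(encodedFormats.begin(), encodedFormats.end(), format) !=
                        encodedFormats.end();
    }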
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 843f5da..3f9c8b0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "APM::EffectDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
 #include "EffectDescriptor.h"
 #include <utils/String8.h>
 
@@ -24,13 +25,11 @@
 
 void EffectDescriptor::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sID: %d\n", spaces, "", mId);
-    dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
-    dst->appendFormat("%*sMusic Effect: %s\n", spaces, "", isMusicEffect()? "yes" : "no");
-    dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
-    dst->appendFormat("%*sName: %s\n", spaces, "",  mDesc.name);
-    dst->appendFormat("%*s%s\n", spaces, "",  mEnabled ? "Enabled" : "Disabled");
-    dst->appendFormat("%*s%s\n", spaces, "",  mSuspended ? "Suspended" : "Active");
+    dst->appendFormat("Effect ID: %d; Attached to I/O handle: %d; Session: %d;\n",
+            mId, mIo, mSession);
+    dst->appendFormat("%*sMusic Effect? %s; \"%s\"; %s; %s\n", spaces, "",
+            isMusicEffect()? "yes" : "no", mDesc.name,
+            mEnabled ? "Enabled" : "Disabled", mSuspended ? "Suspended" : "Active");
 }
 
 EffectDescriptorCollection::EffectDescriptorCollection() :
@@ -237,10 +236,14 @@
             mTotalEffectsMemory,
             mTotalEffectsMemoryMaxUsed);
     }
-    dst->appendFormat("%*sEffects:\n", spaces, "");
-    for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("%*s- Effect %d:\n", spaces, "", keyAt(i));
-        valueAt(i)->dump(dst, spaces + 2);
+    if (size() > 0) {
+        if (spaces > 1) spaces -= 2;
+        dst->appendFormat("%*s- Effects (%zu):\n", spaces, "", size());
+        for (size_t i = 0; i < size(); i++) {
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+            dst->appendFormat("%s", prefix.c_str());
+            valueAt(i)->dump(dst, prefix.size());
+        }
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 3a143b0..418b7eb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -17,11 +17,13 @@
 #define LOG_TAG "APM::HwModule"
 //#define LOG_NDEBUG 0
 
-#include "HwModule.h"
-#include "IOProfile.h"
+#include <android-base/stringprintf.h>
 #include <policy.h>
 #include <system/audio.h>
 
+#include "HwModule.h"
+#include "IOProfile.h"
+
 namespace android {
 
 HwModule::HwModule(const char *name, uint32_t halVersionMajor, uint32_t halVersionMinor)
@@ -247,28 +249,28 @@
     return false;
 }
 
-void HwModule::dump(String8 *dst) const
+void HwModule::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("  - name: %s\n", getName());
-    dst->appendFormat("  - handle: %d\n", mHandle);
-    dst->appendFormat("  - version: %u.%u\n", getHalVersionMajor(), getHalVersionMinor());
+    dst->appendFormat("Handle: %d; \"%s\"\n", mHandle, getName());
     if (mOutputProfiles.size()) {
-        dst->append("  - outputs:\n");
+        dst->appendFormat("%*s- Output MixPorts (%zu):\n", spaces - 2, "", mOutputProfiles.size());
         for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-            dst->appendFormat("    output %zu:\n", i);
-            mOutputProfiles[i]->dump(dst);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+            dst->append(prefix.c_str());
+            mOutputProfiles[i]->dump(dst, prefix.size());
         }
     }
     if (mInputProfiles.size()) {
-        dst->append("  - inputs:\n");
+        dst->appendFormat("%*s- Input MixPorts (%zu):\n", spaces - 2, "", mInputProfiles.size());
         for (size_t i = 0; i < mInputProfiles.size(); i++) {
-            dst->appendFormat("    input %zu:\n", i);
-            mInputProfiles[i]->dump(dst);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+            dst->append(prefix.c_str());
+            mInputProfiles[i]->dump(dst, prefix.size());
         }
     }
-    mDeclaredDevices.dump(dst, String8("Declared"), 2, true);
-    mDynamicDevices.dump(dst, String8("Dynamic"),  2, true);
-    dumpAudioRouteVector(mRoutes, dst, 2);
+    mDeclaredDevices.dump(dst, String8("- Declared"), spaces - 2, true);
+    mDynamicDevices.dump(dst, String8("- Dynamic"),  spaces - 2, true);
+    dumpAudioRouteVector(mRoutes, dst, spaces);
 }
 
 sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
@@ -462,10 +464,11 @@
 
 void HwModuleCollection::dump(String8 *dst) const
 {
-    dst->append("\nHW Modules dump:\n");
+    dst->appendFormat("\n Hardware modules (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- HW Module %zu:\n", i + 1);
-        itemAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->append(prefix.c_str());
+        itemAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 09b614d..21f2018 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -116,27 +116,30 @@
                 return device == deviceDesc && deviceDesc->hasCurrentEncodedFormat(); }) == 1;
 }
 
-void IOProfile::dump(String8 *dst) const
+void IOProfile::dump(String8 *dst, int spaces) const
 {
-    std::string portStr;
-    AudioPort::dump(&portStr, 4);
-    dst->append(portStr.c_str());
-
-    dst->appendFormat("    - flags: 0x%04x", getFlags());
+    String8 extraInfo;
+    extraInfo.appendFormat("0x%04x", getFlags());
     std::string flagsLiteral =
             getRole() == AUDIO_PORT_ROLE_SINK ?
             toString(static_cast<audio_input_flags_t>(getFlags())) :
             getRole() == AUDIO_PORT_ROLE_SOURCE ?
             toString(static_cast<audio_output_flags_t>(getFlags())) : "";
     if (!flagsLiteral.empty()) {
-        dst->appendFormat(" (%s)", flagsLiteral.c_str());
+        extraInfo.appendFormat(" (%s)", flagsLiteral.c_str());
     }
-    dst->append("\n");
-    mSupportedDevices.dump(dst, String8("Supported"), 4, false);
-    dst->appendFormat("\n    - maxOpenCount: %u - curOpenCount: %u\n",
-             maxOpenCount, curOpenCount);
-    dst->appendFormat("    - maxActiveCount: %u - curActiveCount: %u\n",
-             maxActiveCount, curActiveCount);
+
+    std::string portStr;
+    AudioPort::dump(&portStr, spaces, extraInfo.c_str());
+    dst->append(portStr.c_str());
+
+    mSupportedDevices.dump(dst, String8("- Supported"), spaces - 2, false);
+    dst->appendFormat("%*s- maxOpenCount: %u; curOpenCount: %u\n",
+            spaces - 2, "", maxOpenCount, curOpenCount);
+    dst->appendFormat("%*s- maxActiveCount: %u; curActiveCount: %u\n",
+            spaces - 2, "", maxActiveCount, curActiveCount);
+    dst->appendFormat("%*s- recommendedMuteDurationMs: %u ms\n",
+            spaces - 2, "", recommendedMuteDurationMs);
 }
 
 void IOProfile::log()
diff --git a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
index 5986069..ce8178f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
@@ -87,7 +87,7 @@
     // For direct outputs, pick minimum sampling rate: this helps ensuring that the
     // channel count / sampling rate combination chosen will be supported by the connected
     // sink
-    if (isDirectOutput()) {
+    if (asAudioPort()->isDirectOutput()) {
         uint32_t samplingRate = UINT_MAX;
         for (const auto rate : samplingRates) {
             if ((rate < samplingRate) && (rate > 0)) {
@@ -122,7 +122,7 @@
     // For direct outputs, pick minimum channel count: this helps ensuring that the
     // channel count / sampling rate combination chosen will be supported by the connected
     // sink
-    if (isDirectOutput()) {
+    if (asAudioPort()->isDirectOutput()) {
         uint32_t channelCount = UINT_MAX;
         for (const auto channelMask : channelMasks) {
             uint32_t cnlCount;
@@ -236,7 +236,7 @@
     audio_format_t bestFormat = sPcmFormatCompareTable[ARRAY_SIZE(sPcmFormatCompareTable) - 1];
     // For mixed output and inputs, use best mixer output format.
     // Do not limit format otherwise
-    if ((asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) || isDirectOutput()) {
+    if ((asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) || asAudioPort()->isDirectOutput()) {
         bestFormat = AUDIO_FORMAT_INVALID;
     }
 
@@ -266,29 +266,4 @@
             asAudioPort()->getName().c_str(), samplingRate, channelMask, format);
 }
 
-// --- PolicyAudioPortConfig class implementation
-
-status_t PolicyAudioPortConfig::validationBeforeApplyConfig(
-        const struct audio_port_config *config) const
-{
-    sp<PolicyAudioPort> policyAudioPort = getPolicyAudioPort();
-    return policyAudioPort ? policyAudioPort->checkExactAudioProfile(config) : NO_INIT;
-}
-
-void PolicyAudioPortConfig::toPolicyAudioPortConfig(struct audio_port_config *dstConfig,
-                                                    const struct audio_port_config *srcConfig) const
-{
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
-        if ((srcConfig != nullptr) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS)) {
-            dstConfig->flags = srcConfig->flags;
-        } else {
-            dstConfig->flags = mFlags;
-        }
-    } else {
-        dstConfig->flags = { AUDIO_INPUT_FLAG_NONE };
-    }
-}
-
-
-
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 84ed656..4dfef73 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -123,6 +123,7 @@
         static constexpr const char *flags = "flags";
         static constexpr const char *maxOpenCount = "maxOpenCount";
         static constexpr const char *maxActiveCount = "maxActiveCount";
+        static constexpr const char *recommendedMuteDurationMs = "recommendedMuteDurationMs";
     };
 
     // Children: GainTraits
@@ -496,6 +497,13 @@
     if (!maxActiveCount.empty()) {
         convertTo(maxActiveCount, mixPort->maxActiveCount);
     }
+
+    std::string recommendedMuteDurationMsLiteral =
+            getXmlAttribute(child, Attributes::recommendedMuteDurationMs);
+    if (!recommendedMuteDurationMsLiteral.empty()) {
+        convertTo(recommendedMuteDurationMsLiteral, mixPort->recommendedMuteDurationMs);
+    }
+
     // Deserialize children
     AudioGainTraits::Collection gains;
     status = deserializeCollection<AudioGainTraits>(child, &gains, NULL);
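The new recommendedMuteDurationMs mixPort attribute is read with the same getXmlAttribute()/convertTo() pattern as maxOpenCount and maxActiveCount above. A minimal plain-C++ sketch of that conversion step; strtoul stands in for convertTo, and the sample attribute value in the usage comment is made up for illustration:

    #include <cstdint>
    #include <cstdlib>
    #include <string>

    // Stand-in for convertTo<uint32_t>(): returns false and leaves 'out' untouched
    // when the attribute string is empty or not a plain decimal number.
    static bool parseMuteDurationMs(const std::string& literal, uint32_t& out) {
        if (literal.empty()) return false;
        char* end = nullptr;
        const unsigned long value = std::strtoul(literal.c_str(), &end, 10);
        if (end == nullptr || *end != '\0') return false;
        out = static_cast<uint32_t>(value);
        return true;
    }

    // Usage, e.g. for <mixPort ... recommendedMuteDurationMs="40"> (value illustrative):
    //   uint32_t recommendedMuteDurationMs = 0;
    //   parseMuteDurationMs(getXmlAttribute(child, "recommendedMuteDurationMs"),
    //                       recommendedMuteDurationMs);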
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
index 22ff954..ce78eb0 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -10,29 +10,6 @@
                      samplingRates="24000,16000"
                      channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
         </mixPort>
-        <!-- Le Audio Audio Ports -->
-        <mixPort name="le audio output" role="source">
-            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
-        </mixPort>
-        <mixPort name="le audio input" role="sink">
-            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
-                     samplingRates="8000,16000,24000,32000,44100,48000"
-                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
-        </mixPort>
     </mixPorts>
     <devicePorts>
         <!-- A2DP Audio Ports -->
@@ -53,14 +30,6 @@
         </devicePort>
         <!-- Hearing AIDs Audio Ports -->
         <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
-        <!-- BLE Audio Ports -->
-        <!-- Note that these device types are not valid in HAL versions < 7. Any device
-             running pre-V7 HAL and using this file will not pass VTS. Need to use
-             bluetooth_audio_policy_configuration_7_0.xml instead.
-        -->
-        <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
-        <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
-        <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
     </devicePorts>
     <routes>
         <route type="mix" sink="BT A2DP Out"
@@ -71,11 +40,5 @@
                sources="a2dp output"/>
         <route type="mix" sink="BT Hearing Aid Out"
                sources="hearing aid output"/>
-        <route type="mix" sink="BLE Headset Out"
-               sources="le audio output"/>
-        <route type="mix" sink="le audio input"
-               sources="BLE Headset In"/>
-        <route type="mix" sink="BLE Speaker Out"
-               sources="le audio output"/>
     </routes>
 </module>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
index aad00d6..2dffe02 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
@@ -10,29 +10,6 @@
                      samplingRates="24000 16000"
                      channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
         </mixPort>
-        <!-- Le Audio Audio Ports -->
-        <mixPort name="le audio output" role="source">
-            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
-        </mixPort>
-        <mixPort name="le audio input" role="sink">
-            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
-            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
-                     samplingRates="8000 16000 24000 32000 44100 48000"
-                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
-        </mixPort>
     </mixPorts>
     <devicePorts>
         <!-- A2DP Audio Ports -->
@@ -53,10 +30,6 @@
         </devicePort>
         <!-- Hearing AIDs Audio Ports -->
         <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
-        <!-- BLE Audio Ports -->
-        <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
-        <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
-        <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
     </devicePorts>
     <routes>
         <route type="mix" sink="BT A2DP Out"
@@ -67,11 +40,5 @@
                sources="a2dp output"/>
         <route type="mix" sink="BT Hearing Aid Out"
                sources="hearing aid output"/>
-        <route type="mix" sink="BLE Headset Out"
-               sources="le audio output"/>
-        <route type="mix" sink="le audio input"
-               sources="BLE Headset In"/>
-        <route type="mix" sink="BLE Speaker Out"
-               sources="le audio output"/>
     </routes>
 </module>
diff --git a/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml
new file mode 100644
index 0000000..22ff954
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml
@@ -0,0 +1,81 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+    <mixPorts>
+        <!-- A2DP Audio Ports -->
+        <mixPort name="a2dp output" role="source"/>
+        <!-- Hearing AIDs Audio Ports -->
+        <mixPort name="hearing aid output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="24000,16000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <!-- Le Audio Audio Ports -->
+        <mixPort name="le audio output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="le audio input" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000,16000,24000,32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+        </mixPort>
+    </mixPorts>
+    <devicePorts>
+        <!-- A2DP Audio Ports -->
+        <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <!-- Hearing AIDs Audio Ports -->
+        <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+        <!-- BLE Audio Ports -->
+        <!-- Note that these device types are not valid in HAL versions < 7. Any device
+             running pre-V7 HAL and using this file will not pass VTS. Need to use
+             bluetooth_audio_policy_configuration_7_0.xml instead.
+        -->
+        <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
+        <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+        <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="BT A2DP Out"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Headphones"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Speaker"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT Hearing Aid Out"
+               sources="hearing aid output"/>
+        <route type="mix" sink="BLE Headset Out"
+               sources="le audio output"/>
+        <route type="mix" sink="le audio input"
+               sources="BLE Headset In"/>
+        <route type="mix" sink="BLE Speaker Out"
+               sources="le audio output"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..aad00d6
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+    <mixPorts>
+        <!-- A2DP Audio Ports -->
+        <mixPort name="a2dp output" role="source"/>
+        <!-- Hearing AIDs Audio Ports -->
+        <mixPort name="hearing aid output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="24000 16000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <!-- Le Audio Audio Ports -->
+        <mixPort name="le audio output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="le audio input" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+                     samplingRates="8000 16000 24000 32000 44100 48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+        </mixPort>
+    </mixPorts>
+    <devicePorts>
+        <!-- A2DP Audio Ports -->
+        <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100 48000 88200 96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100 48000 88200 96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100 48000 88200 96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <!-- Hearing AIDs Audio Ports -->
+        <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+        <!-- BLE Audio Ports -->
+        <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
+        <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+        <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="BT A2DP Out"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Headphones"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Speaker"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT Hearing Aid Out"
+               sources="hearing aid output"/>
+        <route type="mix" sink="BLE Headset Out"
+               sources="le audio output"/>
+        <route type="mix" sink="le audio input"
+               sources="BLE Headset In"/>
+        <route type="mix" sink="BLE Speaker Out"
+               sources="le audio output"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index 665c2dd..b036e12 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -122,8 +122,12 @@
     {"STRATEGY_TRANSMITTED_THROUGH_SPEAKER",
      {
          {"", AUDIO_STREAM_TTS, "AUDIO_STREAM_TTS",
-          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
-            AUDIO_FLAG_BEACON, ""}}
+          {
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+                AUDIO_FLAG_BEACON, ""},
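+              // AUDIO_CONTENT_TYPE_ULTRASOUND is routed through the same speaker-transmission
+              // strategy as the TTS beacon entry above.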
+              {AUDIO_CONTENT_TYPE_ULTRASOUND, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+                AUDIO_FLAG_NONE, ""}
+          }
          }
      },
     }
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index b3d144f..fbfcf72 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -150,12 +150,8 @@
 void ProductStrategy::dump(String8 *dst, int spaces) const
 {
     dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
-    std::string deviceLiteral;
-    if (!deviceTypesToString(mApplicableDevices, deviceLiteral)) {
-        ALOGE("%s: failed to convert device %s",
-              __FUNCTION__, dumpDeviceTypes(mApplicableDevices).c_str());
-    }
-    dst->appendFormat("%*sSelected Device: {type:%s, @:%s}\n", spaces + 2, "",
+    std::string deviceLiteral = deviceTypesToString(mApplicableDevices);
+    dst->appendFormat("%*sSelected Device: {%s, @:%s}\n", spaces + 2, "",
                        deviceLiteral.c_str(), mDeviceAddress.c_str());
 
     for (const auto &attr : mAttributesVector) {
@@ -333,4 +329,3 @@
     dst->appendFormat("\n");
 }
 }
-
diff --git a/services/audiopolicy/engine/config/include/EngineConfig.h b/services/audiopolicy/engine/config/include/EngineConfig.h
index c565926..2ebb7df 100644
--- a/services/audiopolicy/engine/config/include/EngineConfig.h
+++ b/services/audiopolicy/engine/config/include/EngineConfig.h
@@ -70,7 +70,7 @@
 
 using ProductStrategies = std::vector<ProductStrategy>;
 
-using ValuePair = std::pair<uint32_t, std::string>;
+using ValuePair = std::tuple<uint64_t, uint32_t, std::string>;
 using ValuePairs = std::vector<ValuePair>;
 
 struct CriterionType
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 81e803f..6f560d5 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -80,6 +80,7 @@
     struct Attributes {
         static constexpr const char *literal = "literal";
         static constexpr const char *numerical = "numerical";
+        static constexpr const char *androidType = "android_type";
     };
 
     static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
@@ -349,7 +350,16 @@
         ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
         return BAD_VALUE;
     }
-    uint32_t numerical = 0;
+    uint32_t androidType = 0;
+    std::string androidTypeliteral = getXmlAttribute(child, Attributes::androidType);
+    if (!androidTypeliteral.empty()) {
+        ALOGV("%s: androidType %s", __FUNCTION__, androidTypeliteral.c_str());
+        if (!convertTo(androidTypeliteral, androidType)) {
+            ALOGE("%s: Invalid android_type value (%s)", __FUNCTION__, androidTypeliteral.c_str());
+            return BAD_VALUE;
+        }
+    }
+    uint64_t numerical = 0;
     std::string numericalTag = getXmlAttribute(child, Attributes::numerical);
     if (numericalTag.empty()) {
         ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
@@ -359,7 +369,7 @@
         ALOGE("%s: : Invalid value(%s)", __FUNCTION__, numericalTag.c_str());
         return BAD_VALUE;
     }
-    values.push_back({numerical, literal});
+    values.push_back({numerical, androidType, literal});
     return NO_ERROR;
 }
 
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index a747822..dc8d9cf 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -41,8 +41,9 @@
         "libaudiopolicyengineconfigurable_pfwwrapper",
 
     ],
-    shared_libs: [
+    shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
index 1fc2264..9fd8b8e 100644
--- a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
+++ b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
@@ -77,12 +77,12 @@
      * Set the input device to be used by an input source.
      *
      * @param[in] inputSource: name of the input source for which the device to use has to be set
-     * @param[in] devices; mask of devices to be used for the given input source.
+     * @param[in] devices: mask of devices to be used for the given input source.
      *
      * @return true if the devices were set correclty for this input source, false otherwise.
      */
     virtual bool setDeviceForInputSource(const audio_source_t &inputSource,
-                                         audio_devices_t device) = 0;
+                                         uint64_t device) = 0;
 
     virtual void setDeviceAddressForProductStrategy(product_strategy_t strategy,
                                                     const std::string &address) = 0;
@@ -91,12 +91,12 @@
      * Set the device to be used by a product strategy.
      *
      * @param[in] strategy: name of the product strategy for which the device to use has to be set
-     * @param[in] devices; mask of devices to be used for the given strategy.
+     * @param[in] devices: mask of devices to be used for the given strategy.
      *
      * @return true if the devices were set correclty for this strategy, false otherwise.
      */
     virtual bool setDeviceTypesForProductStrategy(product_strategy_t strategy,
-                                                  audio_devices_t devices) = 0;
+                                                  uint64_t devices) = 0;
 
     virtual product_strategy_t getProductStrategyByName(const std::string &address) = 0;
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem-CommonTypes.xml.in b/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem-CommonTypes.xml.in
index 2e9f37e..2c4c7b5 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem-CommonTypes.xml.in
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem-CommonTypes.xml.in
@@ -10,8 +10,8 @@
      <!--#################### GLOBAL COMPONENTS END ####################-->
 
     <!-- Automatically filled from audio-base.h file -->
-    <ComponentType Name="OutputDevicesMask" Description="32th bit is not allowed as dedicated for input devices detection">
-        <BitParameterBlock Name="mask" Size="32">
+    <ComponentType Name="OutputDevicesMask" Description="64bit representation of devices">
+        <BitParameterBlock Name="mask" Size="64">
         </BitParameterBlock>
     </ComponentType>
 
@@ -19,8 +19,8 @@
     profile. It must match with the Input device enum parameter.
     -->
     <!-- Automatically filled from audio-base.h file -->
-    <ComponentType Name="InputDevicesMask">
-        <BitParameterBlock Name="mask" Size="32">
+    <ComponentType Name="InputDevicesMask" Description="64bit representation of devices">
+        <BitParameterBlock Name="mask" Size="64">
         </BitParameterBlock>
     </ComponentType>
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
index f8a6fc0..df4e3e9 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
@@ -45,7 +45,7 @@
 
 bool InputSource::sendToHW(string & /*error*/)
 {
-    audio_devices_t applicableInputDevice;
+    uint64_t applicableInputDevice;
     blackboardRead(&applicableInputDevice, sizeof(applicableInputDevice));
     return mPolicyPluginInterface->setDeviceForInputSource(mId, applicableInputDevice);
 }
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
index 6c8eb65..e65946e 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
@@ -32,7 +32,7 @@
 
     struct Device
     {
-        audio_devices_t applicableDevice; /**< applicable device for this strategy. */
+        uint64_t applicableDevice; /**< applicable device for this strategy. */
         char deviceAddress[mMaxStringSize]; /**< device address associated with this strategy. */
     } __attribute__((packed));
 
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 9a61a05..3d74920 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -36,6 +36,8 @@
 
 #include <media/TypeConverter.h>
 
+#include <cinttypes>
+
 using std::string;
 using std::map;
 
@@ -166,16 +168,13 @@
 status_t Engine::setDeviceConnectionState(const sp<DeviceDescriptor> device,
                                           audio_policy_dev_state_t state)
 {
-    mPolicyParameterMgr->setDeviceConnectionState(
-                device->type(), device->address().c_str(), state);
+    mPolicyParameterMgr->setDeviceConnectionState(device->type(), device->address(), state);
     if (audio_is_output_device(device->type())) {
-        // FIXME: Use DeviceTypeSet when the interface is ready
         return mPolicyParameterMgr->setAvailableOutputDevices(
-                    deviceTypesToBitMask(getApmObserver()->getAvailableOutputDevices().types()));
+                    getApmObserver()->getAvailableOutputDevices().types());
     } else if (audio_is_input_device(device->type())) {
-        // FIXME: Use DeviceTypeSet when the interface is ready
         return mPolicyParameterMgr->setAvailableInputDevices(
-                    deviceTypesToBitMask(getApmObserver()->getAvailableInputDevices().types()));
+                    getApmObserver()->getAvailableInputDevices().types());
     }
     return EngineBase::setDeviceConnectionState(device, state);
 }
@@ -374,17 +373,28 @@
     getProductStrategies().at(strategy)->setDeviceAddress(address);
 }
 
-bool Engine::setDeviceTypesForProductStrategy(product_strategy_t strategy, audio_devices_t devices)
+bool Engine::setDeviceTypesForProductStrategy(product_strategy_t strategy, uint64_t devices)
 {
     if (getProductStrategies().find(strategy) == getProductStrategies().end()) {
-        ALOGE("%s: set device %d on invalid strategy %d", __FUNCTION__, devices, strategy);
+        ALOGE("%s: set device %" PRIu64 " on invalid strategy %d", __FUNCTION__, devices, strategy);
         return false;
     }
-    // FIXME: stop using deviceTypesFromBitMask when the interface is ready
-    getProductStrategies().at(strategy)->setDeviceTypes(deviceTypesFromBitMask(devices));
+    // Here devices holds the criterion value; rebuild the Android device types from it.
+    DeviceTypeSet types =
+            mPolicyParameterMgr->convertDeviceCriterionValueToDeviceTypes(devices, true /*isOut*/);
+    getProductStrategies().at(strategy)->setDeviceTypes(types);
     return true;
 }
 
+bool Engine::setDeviceForInputSource(const audio_source_t &inputSource, uint64_t device)
+{
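+    // Translate the 64-bit criterion value back into a single Android input device type.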
+    DeviceTypeSet types = mPolicyParameterMgr->convertDeviceCriterionValueToDeviceTypes(
+                device, false /*isOut*/);
+    ALOG_ASSERT(types.size() <= 1, "one input device expected at most");
+    audio_devices_t deviceType = types.empty() ? AUDIO_DEVICE_IN_DEFAULT : *types.begin();
+    return setPropertyForKey<audio_devices_t, audio_source_t>(deviceType, inputSource);
+}
+
 template <>
 EngineInterface *Engine::queryInterface()
 {
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index f665da5..4b559f0 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -82,15 +82,12 @@
     bool setVolumeProfileForStream(const audio_stream_type_t &stream,
                                    const audio_stream_type_t &volumeProfile) override;
 
-    bool setDeviceForInputSource(const audio_source_t &inputSource, audio_devices_t device) override
-    {
-        return setPropertyForKey<audio_devices_t, audio_source_t>(device, inputSource);
-    }
+    bool setDeviceForInputSource(const audio_source_t &inputSource, uint64_t device) override;
+
     void setDeviceAddressForProductStrategy(product_strategy_t strategy,
                                                     const std::string &address) override;
 
-    bool setDeviceTypesForProductStrategy(product_strategy_t strategy,
-                                                  audio_devices_t devices) override;
+    bool setDeviceTypesForProductStrategy(product_strategy_t strategy, uint64_t devices) override;
 
     product_strategy_t getProductStrategyByName(const std::string &name) override
     {
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.cpp b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
index f4645e6..6fd2b70 100644
--- a/services/audiopolicy/engineconfigurable/src/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
@@ -26,7 +26,8 @@
 status_t Element<audio_source_t>::setIdentifier(audio_source_t identifier)
 {
     if (identifier > AUDIO_SOURCE_MAX && identifier != AUDIO_SOURCE_HOTWORD
-        && identifier != AUDIO_SOURCE_FM_TUNER && identifier != AUDIO_SOURCE_ECHO_REFERENCE) {
+        && identifier != AUDIO_SOURCE_FM_TUNER && identifier != AUDIO_SOURCE_ECHO_REFERENCE
+        && identifier != AUDIO_SOURCE_ULTRASOUND) {
         return BAD_VALUE;
     }
     mIdentifier = identifier;
@@ -46,12 +47,6 @@
 template <>
 status_t Element<audio_source_t>::set(audio_devices_t devices)
 {
-    if (devices == AUDIO_DEVICE_NONE) {
-        // Reset
-        mApplicableDevices = devices;
-        return NO_ERROR;
-    }
-    devices = static_cast<audio_devices_t>(devices | AUDIO_DEVICE_BIT_IN);
     if (!audio_is_input_device(devices)) {
         ALOGE("%s: trying to set an invalid device 0x%X for input source %s",
               __FUNCTION__, devices, getName().c_str());
diff --git a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
index 43b3dd2..86ac76f 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
@@ -55,7 +55,7 @@
     while i < decimal:
         i = i << 1
         pos = pos + 1
-        if pos == 32:
+        if pos == 64:
             return -1
 
     # TODO: b/168065706. This is just to fix the build. That the problem of devices with
@@ -132,6 +132,9 @@
 
     logging.info("Checking Android Header file {}".format(androidaudiobaseheaderFile))
 
+    multi_bit_output_device_shift = 32
+    multi_bit_input_device_shift = 32
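+    # Device values with more than one bit set cannot be expressed as a single bit position
+    # in the generated bitfield, so they are remapped to dedicated bits starting at bit 32.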
+
     for line_number, line in enumerate(androidaudiobaseheaderFile):
         match = criteria_pattern.match(line)
         if match:
@@ -143,16 +146,36 @@
 
             component_type_numerical_value = match.groupdict()['values']
 
-            # for AUDIO_DEVICE_IN: need to remove sign bit / rename default to stub
+            # for AUDIO_DEVICE_IN: rename default to stub
             if component_type_name == "InputDevicesMask":
-                component_type_numerical_value = str(int(component_type_numerical_value, 0) & ~2147483648)
+                component_type_numerical_value = str(int(component_type_numerical_value, 0))
                 if component_type_literal == "default":
                     component_type_literal = "stub"
 
+                string_int = int(component_type_numerical_value, 0)
+                num_bits = bin(string_int).count("1")
+                if num_bits > 1:
+                    logging.info("The value {} for criterion {} (binary {}) has {} bits set"
+                        .format(component_type_numerical_value, component_type_name, bin(string_int), num_bits))
+                    string_int = 2**multi_bit_input_device_shift
+                    logging.info("New value assigned: {} ({})".format(string_int, bin(string_int)))
+                    multi_bit_input_device_shift += 1
+                    component_type_numerical_value = str(string_int)
+
             if component_type_name == "OutputDevicesMask":
                 if component_type_literal == "default":
                     component_type_literal = "stub"
 
+                string_int = int(component_type_numerical_value, 0)
+                num_bits = bin(string_int).count("1")
+                if num_bits > 1:
+                    logging.info("The value {} for criterion {} (binary {}) has {} bits set"
+                        .format(component_type_numerical_value, component_type_name, bin(string_int), num_bits))
+                    string_int = 2**multi_bit_output_device_shift
+                    logging.info("New value assigned: {} ({})".format(string_int, bin(string_int)))
+                    multi_bit_output_device_shift += 1
+                    component_type_numerical_value = str(string_int)
+
             # Remove duplicated numerical values
             if int(component_type_numerical_value, 0) in all_component_types[component_type_name].values():
                 logging.info("The value {}:{} is duplicated for criterion {}, KEEPING LATEST".format(component_type_numerical_value, component_type_literal, component_type_name))
diff --git a/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py b/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
index 76c35c1..a15a6ba 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
@@ -85,6 +85,9 @@
     return argparser.parse_args()
 
 
+output_devices_type_value = {}
+input_devices_type_value = {}
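+# Map each criterion literal to its original Android device type value; it is written out
+# as the 'android_type' attribute of the generated criterion values.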
+
 def generateXmlCriterionTypesFile(criterionTypes, addressCriteria, criterionTypesFile, outputFile):
 
     logging.info("Importing criterionTypesFile {}".format(criterionTypesFile))
@@ -102,6 +105,11 @@
                     value_node.set('numerical', str(value))
                     value_node.set('literal', key)
 
+                    if criterion_type.get('name') == "OutputDevicesMaskType":
+                        value_node.set('android_type', output_devices_type_value[key])
+                    if criterion_type.get('name') == "InputDevicesMaskType":
+                        value_node.set('android_type', input_devices_type_value[key])
+
     if addressCriteria:
         for criterion_name, values_list in addressCriteria.items():
             for criterion_type in criterion_types_root.findall('criterion_type'):
@@ -200,10 +208,8 @@
     #
     ignored_values = ['CNT', 'MAX', 'ALL', 'NONE']
 
-    #
-    # Reaching 32 bit limit for inclusive criterion out devices: removing
-    #
-    ignored_output_device_values = ['BleSpeaker', 'BleHeadset']
+    multi_bit_outputdevice_shift = 32
+    multi_bit_inputdevice_shift = 32
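+    # Criterion values with more than one bit set are remapped to dedicated single bits
+    # starting at bit 32; the original Android type is preserved via 'android_type'.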
 
     criteria_pattern = re.compile(
         r"\s*V\((?P<type>(?:"+'|'.join(criterion_mapping_table.keys()) + "))_" \
@@ -223,28 +229,59 @@
                 ''.join((w.capitalize() for w in match.groupdict()['literal'].split('_')))
             criterion_numerical_value = match.groupdict()['values']
 
-            # for AUDIO_DEVICE_IN: need to remove sign bit / rename default to stub
+            # for AUDIO_DEVICE_IN: rename default to stub
             if criterion_name == "InputDevicesMaskType":
                 if criterion_literal == "Default":
                     criterion_numerical_value = str(int("0x40000000", 0))
+                    input_devices_type_value[criterion_literal] = "0xC0000000"
                 else:
                     try:
                         string_int = int(criterion_numerical_value, 0)
+                        # Append AUDIO_DEVICE_IN for android type tag
+                        input_devices_type_value[criterion_literal] = hex(string_int | 2147483648)
+
+                        num_bits = bin(string_int).count("1")
+                        if num_bits > 1:
+                            logging.info("The value {}:{} for criterion {} (binary {}) has {} bits set"
+                                .format(criterion_numerical_value, criterion_literal, criterion_name, bin(string_int), num_bits))
+                            string_int = 2**multi_bit_inputdevice_shift
+                            logging.info("New value assigned: {} ({})".format(string_int, bin(string_int)))
+                            multi_bit_inputdevice_shift += 1
+                            criterion_numerical_value = str(string_int)
+
                     except ValueError:
                         # Handle the exception
                         logging.info("value {}:{} for criterion {} is not a number, ignoring"
                             .format(criterion_numerical_value, criterion_literal, criterion_name))
                         continue
-                    criterion_numerical_value = str(int(criterion_numerical_value, 0) & ~2147483648)
 
             if criterion_name == "OutputDevicesMaskType":
                 if criterion_literal == "Default":
                     criterion_numerical_value = str(int("0x40000000", 0))
-                if criterion_literal in ignored_output_device_values:
-                    logging.info("OutputDevicesMaskType skipping {}".format(criterion_literal))
-                    continue
+                    output_devices_type_value[criterion_literal] = "0x40000000"
+                else:
+                    try:
+                        string_int = int(criterion_numerical_value, 0)
+                        output_devices_type_value[criterion_literal] = criterion_numerical_value
+
+                        num_bits = bin(string_int).count("1")
+                        if num_bits > 1:
+                            logging.info("The value {}:{} for criterion {} (binary {}) has {} bits set"
+                                .format(criterion_numerical_value, criterion_literal, criterion_name, bin(string_int), num_bits))
+                            string_int = 2**multi_bit_outputdevice_shift
+                            logging.info("New value assigned: {} ({})".format(string_int, bin(string_int)))
+                            multi_bit_outputdevice_shift += 1
+                            criterion_numerical_value = str(string_int)
+
+                    except ValueError:
+                        # Handle the exception
+                        logging.info("The value {}:{} for criterion {} is not a number, ignoring"
+                            .format(criterion_numerical_value, criterion_literal, criterion_name))
+                        continue
+
             try:
                 string_int = int(criterion_numerical_value, 0)
+
             except ValueError:
                 # Handle the exception
                 logging.info("The value {}:{} is for criterion {} is not a number, ignoring"
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.bp b/services/audiopolicy/engineconfigurable/wrapper/Android.bp
index 3e04b68..0ef0b82 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.bp
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.bp
@@ -19,6 +19,7 @@
     header_libs: [
         "libbase_headers",
         "libaudiopolicycommon",
+        "libaudiofoundation_headers",
     ],
     shared_libs: [
         "liblog",
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index 63990ac..099d55d 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -23,6 +23,7 @@
 #include <SelectionCriterionInterface.h>
 #include <media/convert.h>
 #include <algorithm>
+#include <cutils/bitops.h>
 #include <cutils/config_utils.h>
 #include <cutils/misc.h>
 #include <fstream>
@@ -31,6 +32,7 @@
 #include <string>
 #include <vector>
 #include <stdint.h>
+#include <cinttypes>
 #include <cmath>
 #include <utils/Log.h>
 
@@ -124,9 +126,22 @@
 
     for (auto pair : pairs) {
         std::string error;
-        ALOGV("%s: Adding pair %d,%s for criterionType %s", __FUNCTION__, pair.first,
-              pair.second.c_str(), name.c_str());
-        criterionType->addValuePair(pair.first, pair.second, error);
+        ALOGV("%s: Adding pair %" PRIu64", %s for criterionType %s", __func__, std::get<0>(pair),
+              std::get<2>(pair).c_str(), name.c_str());
+        criterionType->addValuePair(std::get<0>(pair), std::get<2>(pair), error);
+
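+        // Remember the mapping between the Android device type and its criterion value so
+        // device sets can later be converted in both directions.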
+        if (name == gOutputDeviceCriterionName) {
+            ALOGV("%s: Adding mOutputDeviceToCriterionTypeMap %d %" PRIu64" for criterionType %s",
+                  __func__, std::get<1>(pair), std::get<0>(pair), name.c_str());
+            audio_devices_t androidType = static_cast<audio_devices_t>(std::get<1>(pair));
+            mOutputDeviceToCriterionTypeMap[androidType] = std::get<0>(pair);
+        }
+        if (name == gInputDeviceCriterionName) {
+            ALOGV("%s: Adding mInputDeviceToCriterionTypeMap %d %" PRIu64" for criterionType %s",
+                  __func__, std::get<1>(pair), std::get<0>(pair), name.c_str());
+            audio_devices_t androidType = static_cast<audio_devices_t>(std::get<1>(pair));
+            mInputDeviceToCriterionTypeMap[androidType] = std::get<0>(pair);
+        }
     }
     ALOG_ASSERT(mPolicyCriteria.find(name) == mPolicyCriteria.end(),
                 "%s: Criterion %s already added", __FUNCTION__, name.c_str());
@@ -135,7 +150,7 @@
     mPolicyCriteria[name] = criterion;
 
     if (not defaultValue.empty()) {
-        int numericalValue = 0;
+        uint64_t numericalValue = 0;
         if (not criterionType->getNumericalValue(defaultValue.c_str(), numericalValue)) {
             ALOGE("%s; trying to apply invalid default literal value (%s)", __FUNCTION__,
                   defaultValue.c_str());
@@ -263,7 +278,7 @@
 }
 
 status_t ParameterManagerWrapper::setDeviceConnectionState(
-        audio_devices_t type, const std::string address, audio_policy_dev_state_t state)
+        audio_devices_t type, const std::string &address, audio_policy_dev_state_t state)
 {
     std::string criterionName = audio_is_output_device(type) ?
                 gOutputDeviceAddressCriterionName : gInputDeviceAddressCriterionName;
@@ -279,7 +294,7 @@
     }
 
     auto criterionType = criterion->getCriterionType();
-    int deviceAddressId;
+    uint64_t deviceAddressId;
     if (not criterionType->getNumericalValue(address.c_str(), deviceAddressId)) {
         ALOGW("%s: unknown device address reported (%s) for criterion %s", __FUNCTION__,
               address.c_str(), criterionName.c_str());
@@ -296,28 +311,28 @@
     return NO_ERROR;
 }
 
-status_t ParameterManagerWrapper::setAvailableInputDevices(audio_devices_t inputDevices)
+status_t ParameterManagerWrapper::setAvailableInputDevices(const DeviceTypeSet &types)
 {
     ISelectionCriterionInterface *criterion =
             getElement<ISelectionCriterionInterface>(gInputDeviceCriterionName, mPolicyCriteria);
     if (criterion == NULL) {
-        ALOGE("%s: no criterion found for %s", __FUNCTION__, gInputDeviceCriterionName);
+        ALOGE("%s: no criterion found for %s", __func__, gInputDeviceCriterionName);
         return DEAD_OBJECT;
     }
-    criterion->setCriterionState(inputDevices & ~AUDIO_DEVICE_BIT_IN);
+    criterion->setCriterionState(convertDeviceTypesToCriterionValue(types));
     applyPlatformConfiguration();
     return NO_ERROR;
 }
 
-status_t ParameterManagerWrapper::setAvailableOutputDevices(audio_devices_t outputDevices)
+status_t ParameterManagerWrapper::setAvailableOutputDevices(const DeviceTypeSet &types)
 {
     ISelectionCriterionInterface *criterion =
             getElement<ISelectionCriterionInterface>(gOutputDeviceCriterionName, mPolicyCriteria);
     if (criterion == NULL) {
-        ALOGE("%s: no criterion found for %s", __FUNCTION__, gOutputDeviceCriterionName);
+        ALOGE("%s: no criterion found for %s", __func__, gOutputDeviceCriterionName);
         return DEAD_OBJECT;
     }
-    criterion->setCriterionState(outputDevices);
+    criterion->setCriterionState(convertDeviceTypesToCriterionValue(types));
     applyPlatformConfiguration();
     return NO_ERROR;
 }
@@ -327,5 +342,45 @@
     mPfwConnector->applyConfigurations();
 }
 
+uint64_t ParameterManagerWrapper::convertDeviceTypeToCriterionValue(audio_devices_t type) const {
+    bool isOut = audio_is_output_devices(type);
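+    // Input types carry AUDIO_DEVICE_BIT_IN; strip it to match the criterion encoding.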
+    uint32_t typeMask = isOut ? type : (type & ~AUDIO_DEVICE_BIT_IN);
+
+    const auto &adapters = isOut ? mOutputDeviceToCriterionTypeMap : mInputDeviceToCriterionTypeMap;
+    // Only multibit devices need adaptation.
+    if (popcount(typeMask) > 1) {
+        const auto &adapter = adapters.find(type);
+        if (adapter != adapters.end()) {
+            ALOGV("%s: multibit device %d converted to criterion %" PRIu64, __func__, type,
+                  adapter->second);
+            return adapter->second;
+        }
+        ALOGE("%s: failed to find map for multibit device %d", __func__, type);
+        return 0;
+    }
+    return typeMask;
+}
+
+uint64_t ParameterManagerWrapper::convertDeviceTypesToCriterionValue(
+        const DeviceTypeSet &types) const {
+    uint64_t criterionValue = 0;
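+    // Each device type maps to a distinct criterion bit, so summing amounts to OR-ing the values.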
+    for (const auto &type : types) {
+        criterionValue += convertDeviceTypeToCriterionValue(type);
+    }
+    return criterionValue;
+}
+
+DeviceTypeSet ParameterManagerWrapper::convertDeviceCriterionValueToDeviceTypes(
+        uint64_t criterionValue, bool isOut) const {
+    DeviceTypeSet deviceTypes;
+    const auto &adapters = isOut ? mOutputDeviceToCriterionTypeMap : mInputDeviceToCriterionTypeMap;
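+    // A device type is selected when all of its criterion bits are present in criterionValue.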
+    for (const auto &adapter : adapters) {
+        if ((adapter.second & criterionValue) == adapter.second) {
+            deviceTypes.insert(adapter.first);
+        }
+    }
+    return deviceTypes;
+}
+
 } // namespace audio_policy
 } // namespace android
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index 62b129a..fa4ae1e 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -16,6 +16,7 @@
 
 #pragma once
 
+#include <media/AudioContainers.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
 #include <utils/Errors.h>
@@ -35,7 +36,8 @@
 namespace android {
 namespace audio_policy {
 
-using ValuePair = std::pair<uint32_t, std::string>;
+using ValuePair = std::tuple<uint64_t, uint32_t, std::string>;
+using DeviceToCriterionTypeAdapter = std::map<audio_devices_t, uint64_t>;
 using ValuePairs = std::vector<ValuePair>;
 
 class ParameterManagerWrapper
@@ -105,7 +107,7 @@
      *
      * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
      */
-    status_t setAvailableInputDevices(audio_devices_t inputDevices);
+    status_t setAvailableInputDevices(const DeviceTypeSet &inputDeviceTypes);
 
     /**
      * Set the available output devices i.e. set the associated policy parameter framework criterion
@@ -114,7 +116,7 @@
      *
      * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
      */
-    status_t setAvailableOutputDevices(audio_devices_t outputDevices);
+    status_t setAvailableOutputDevices(const DeviceTypeSet &outputDeviceTypes);
 
     /**
      * @brief setDeviceConnectionState propagates a state event on a given device(s)
@@ -124,7 +126,7 @@
      * @return NO_ERROR if new state corretly propagated to Engine Parameter-Framework, error
      * code otherwise.
      */
-    status_t setDeviceConnectionState(audio_devices_t type, const std::string address,
+    status_t setDeviceConnectionState(audio_devices_t type, const std::string &address,
                                       audio_policy_dev_state_t state);
 
     /**
@@ -138,6 +140,13 @@
     status_t addCriterion(const std::string &name, bool isInclusive, ValuePairs pairs,
                           const std::string &defaultValue);
 
+    uint64_t convertDeviceTypeToCriterionValue(audio_devices_t type) const;
+
+    uint64_t convertDeviceTypesToCriterionValue(const DeviceTypeSet &types) const;
+
+    DeviceTypeSet convertDeviceCriterionValueToDeviceTypes(
+            uint64_t criterionValue, bool isOut) const;
+
 private:
     /**
      * Apply the configuration of the platform on the policy parameter manager.
@@ -211,6 +220,9 @@
     template <typename T>
     struct parameterManagerElementSupported;
 
+    DeviceToCriterionTypeAdapter mOutputDeviceToCriterionTypeMap;
+    DeviceToCriterionTypeAdapter mInputDeviceToCriterionTypeMap;
+
     static const char *const mPolicyPfwDefaultConfFileName; /**< Default Policy PFW top file name.*/
     static const char *const mPolicyPfwVendorConfFileName; /**< Vendor Policy PFW top file name.*/
 };
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 7f9c0ac..4671fe9 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -31,6 +31,7 @@
     ],
     shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index c73c17d..88d267f 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -460,6 +460,7 @@
         case AUDIO_SOURCE_HOTWORD:
         case AUDIO_SOURCE_CAMCORDER:
         case AUDIO_SOURCE_VOICE_PERFORMANCE:
+        case AUDIO_SOURCE_ULTRASOUND:
             inputSource = AUDIO_SOURCE_VOICE_COMMUNICATION;
             break;
         default:
@@ -586,6 +587,10 @@
         device = availableDevices.getDevice(
                 AUDIO_DEVICE_IN_ECHO_REFERENCE, String8(""), AUDIO_FORMAT_DEFAULT);
         break;
+    case AUDIO_SOURCE_ULTRASOUND:
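+        // Ultrasound capture uses the first available built-in (or back) microphone.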
+        device = availableDevices.getFirstExistingDevice({
+                AUDIO_DEVICE_IN_BUILTIN_MIC, AUDIO_DEVICE_IN_BACK_MIC});
+        break;
     default:
         ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
         break;
diff --git a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
index 8584702..654e4bf 100644
--- a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
+++ b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
@@ -841,6 +841,8 @@
         : AudioPolicyManagerFuzzerWithConfigurationFile(fdp){};
     void process() override;
 
+    void fuzzGetDirectPlaybackSupport();
+
    protected:
     void setDeviceConnectionState();
     void explicitlyRoutingAfterConnection();
@@ -891,10 +893,27 @@
     }
 }
 
+void AudioPolicyManagerFuzzerDeviceConnection::fuzzGetDirectPlaybackSupport() {
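+    // Exercise getDirectPlaybackSupport() with a few randomized attribute/config combinations.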
+    const uint32_t numTestCases = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+    for (uint32_t i = 0; i < numTestCases; ++i) {
+        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
+        attr.content_type = getValueFromVector<audio_content_type_t>(mFdp, kAudioContentTypes);
+        attr.usage = getValueFromVector<audio_usage_t>(mFdp, kAudioUsages);
+        attr.source = getValueFromVector<audio_source_t>(mFdp, kAudioSources);
+        attr.flags = getValueFromVector<audio_flags_mask_t>(mFdp, kAudioFlagMasks);
+        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+        config.channel_mask = getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelOutMasks);
+        config.format = getValueFromVector<audio_format_t>(mFdp, kAudioFormats);
+        config.sample_rate = getValueFromVector<uint32_t>(mFdp, kSamplingRates);
+        mManager->getDirectPlaybackSupport(&attr, &config);
+    }
+}
+
 void AudioPolicyManagerFuzzerDeviceConnection::process() {
     if (initialize()) {
         setDeviceConnectionState();
         explicitlyRoutingAfterConnection();
+        fuzzGetDirectPlaybackSupport();
         fuzzPatchCreation();
     }
 }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index d0f605c..1929f31 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1386,6 +1386,11 @@
         ALOGV("Set VoIP and Direct output flags for PCM format");
     }
 
+    // Attach the Ultrasound flag for the AUDIO_CONTENT_TYPE_ULTRASOUND
+    if (attr->content_type == AUDIO_CONTENT_TYPE_ULTRASOUND) {
+        *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_ULTRASOUND);
+    }
+
     if (mSpatializerOutput != nullptr
             && canBeSpatialized(attr, config, devices.toTypeAddrVector())) {
         return mSpatializerOutput->mIoHandle;
@@ -1683,7 +1688,7 @@
     // other criteria
     static const audio_output_flags_t kFunctionalFlags = (audio_output_flags_t)
         (AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_INCALL_MUSIC |
-            AUDIO_OUTPUT_FLAG_TTS | AUDIO_OUTPUT_FLAG_DIRECT_PCM);
+            AUDIO_OUTPUT_FLAG_TTS | AUDIO_OUTPUT_FLAG_DIRECT_PCM | AUDIO_OUTPUT_FLAG_ULTRASOUND);
     // Flags expressing a performance request: have lower priority than serving
     // requested sampling rate or channel mask
     static const audio_output_flags_t kPerformanceFlags = (audio_output_flags_t)
@@ -1702,6 +1707,8 @@
     // The priority is as follows:
     // 1: the output supporting haptic playback when requesting haptic playback
     // 2: the output with the highest number of requested functional flags
+    //    with tiebreak preferring the minimum number of extra functional flags
+    //    (see b/200293124, the incorrect selection of AUDIO_OUTPUT_FLAG_VOIP_RX).
     // 3: the output supporting the exact channel mask
     // 4: the output with a higher channel count than requested
     // 5: the output with a higher sampling rate than requested
@@ -1743,7 +1750,12 @@
         }
 
         // functional flags match
-        currentMatchCriteria[1] = popcount(outputDesc->mFlags & functionalFlags);
+        const int matchingFunctionalFlags =
+                __builtin_popcount(outputDesc->mFlags & functionalFlags);
+        const int totalFunctionalFlags =
+                __builtin_popcount(outputDesc->mFlags & kFunctionalFlags);
+        // Prefer matching functional flags, but subtract unnecessary functional flags.
+        currentMatchCriteria[1] = 100 * (matchingFunctionalFlags + 1) - totalFunctionalFlags;
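+        // E.g. with no functional flags requested, an output with no functional flags scores
+        // 100 * 1 - 0 = 100 while one that also sets AUDIO_OUTPUT_FLAG_VOIP_RX scores
+        // 100 * 1 - 1 = 99, so the output without extra functional flags wins the tiebreak.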
 
         // channel mask and channel count match
         uint32_t outputChannelCount = audio_channel_count_from_out_mask(
@@ -2173,8 +2185,9 @@
                                              audio_port_handle_t *portId)
 {
     ALOGV("%s() source %d, sampling rate %d, format %#x, channel mask %#x, session %d, "
-          "flags %#x attributes=%s", __func__, attr->source, config->sample_rate,
-          config->format, config->channel_mask, session, flags, toString(*attr).c_str());
+          "flags %#x attributes=%s requested device ID %d",
+          __func__, attr->source, config->sample_rate, config->format, config->channel_mask,
+          session, flags, toString(*attr).c_str(), *selectedDeviceId);
 
     status_t status = NO_ERROR;
     audio_source_t halInputSource;
@@ -2363,6 +2376,10 @@
         flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
     }
 
+    if (attributes.source == AUDIO_SOURCE_ULTRASOUND) {
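+        // Ultrasound capture requires the dedicated ULTRASOUND input flag.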
+        flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_ULTRASOUND);
+    }
+
     // find a compatible input profile (not necessarily identical in parameters)
     sp<IOProfile> profile;
     // sampling rate and flags may be updated by getInputProfile
@@ -3582,7 +3599,7 @@
 void AudioPolicyManager::dump(String8 *dst) const
 {
     dst->appendFormat("\nAudioPolicyManager Dump: %p\n", this);
-    dst->appendFormat(" Primary Output: %d\n",
+    dst->appendFormat(" Primary Output I/O handle: %d\n",
              hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
     std::string stateLiteral;
     AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
@@ -3607,12 +3624,14 @@
     dst->appendFormat(" Communnication Strategy: %d\n", mCommunnicationStrategy);
     dst->appendFormat(" Config source: %s\n", mConfig.getSource().c_str()); // getConfig not const
 
-    mAvailableOutputDevices.dump(dst, String8("Available output"));
-    mAvailableInputDevices.dump(dst, String8("Available input"));
+    dst->append("\n");
+    mAvailableOutputDevices.dump(dst, String8("Available output"), 1);
+    dst->append("\n");
+    mAvailableInputDevices.dump(dst, String8("Available input"), 1);
     mHwModulesAll.dump(dst);
     mOutputs.dump(dst);
     mInputs.dump(dst);
-    mEffects.dump(dst);
+    mEffects.dump(dst, 1);
     mAudioPatches.dump(dst);
     mPolicyMixes.dump(dst);
     mAudioSources.dump(dst);
@@ -3652,53 +3671,7 @@
      offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us,
      offloadInfo.has_video);
 
-    if (mMasterMono) {
-        return AUDIO_OFFLOAD_NOT_SUPPORTED; // no offloading if mono is set.
-    }
-
-    // Check if offload has been disabled
-    if (property_get_bool("audio.offload.disable", false /* default_value */)) {
-        ALOGV("%s: offload disabled by audio.offload.disable", __func__);
-        return AUDIO_OFFLOAD_NOT_SUPPORTED;
-    }
-
-    // Check if stream type is music, then only allow offload as of now.
-    if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
-    {
-        ALOGV("%s: stream_type != MUSIC, returning false", __func__);
-        return AUDIO_OFFLOAD_NOT_SUPPORTED;
-    }
-
-    //TODO: enable audio offloading with video when ready
-    const bool allowOffloadWithVideo =
-            property_get_bool("audio.offload.video", false /* default_value */);
-    if (offloadInfo.has_video && !allowOffloadWithVideo) {
-        ALOGV("%s: has_video == true, returning false", __func__);
-        return AUDIO_OFFLOAD_NOT_SUPPORTED;
-    }
-
-    //If duration is less than minimum value defined in property, return false
-    const int min_duration_secs = property_get_int32(
-            "audio.offload.min.duration.secs", -1 /* default_value */);
-    if (min_duration_secs >= 0) {
-        if (offloadInfo.duration_us < min_duration_secs * 1000000LL) {
-            ALOGV("%s: Offload denied by duration < audio.offload.min.duration.secs(=%d)",
-                    __func__, min_duration_secs);
-            return AUDIO_OFFLOAD_NOT_SUPPORTED;
-        }
-    } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
-        ALOGV("%s: Offload denied by duration < default min(=%u)",
-                __func__, OFFLOAD_DEFAULT_MIN_DURATION_SECS);
-        return AUDIO_OFFLOAD_NOT_SUPPORTED;
-    }
-
-    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
-    // creating an offloaded track and tearing it down immediately after start when audioflinger
-    // detects there is an active non offloadable effect.
-    // FIXME: We should check the audio session here but we do not have it in this context.
-    // This may prevent offloading in rare situations where effects are left active by apps
-    // in the background.
-    if (mEffects.isNonOffloadableEffectEnabled()) {
+    if (!isOffloadPossible(offloadInfo)) {
         return AUDIO_OFFLOAD_NOT_SUPPORTED;
     }
 
@@ -3726,7 +3699,8 @@
                                                  const audio_attributes_t& attributes) {
     audio_output_flags_t output_flags = AUDIO_OUTPUT_FLAG_NONE;
     audio_flags_to_audio_output_flags(attributes.flags, &output_flags);
-    sp<IOProfile> profile = getProfileForOutput(DeviceVector() /*ignore device */,
+    DeviceVector outputDevices = mEngine->getOutputDevicesForAttributes(attributes);
+    sp<IOProfile> profile = getProfileForOutput(outputDevices,
                                             config.sample_rate,
                                             config.format,
                                             config.channel_mask,
@@ -3740,6 +3714,123 @@
     return (profile != 0);
 }
 
+bool AudioPolicyManager::isOffloadPossible(const audio_offload_info_t &offloadInfo,
+                                           bool durationIgnored) {
+    if (mMasterMono) {
+        return false; // no offloading if mono is set.
+    }
+
+    // Check if offload has been disabled
+    if (property_get_bool("audio.offload.disable", false /* default_value */)) {
+        ALOGV("%s: offload disabled by audio.offload.disable", __func__);
+        return false;
+    }
+
+    // Check if stream type is music, then only allow offload as of now.
+    if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
+    {
+        ALOGV("%s: stream_type != MUSIC, returning false", __func__);
+        return false;
+    }
+
+    //TODO: enable audio offloading with video when ready
+    const bool allowOffloadWithVideo =
+            property_get_bool("audio.offload.video", false /* default_value */);
+    if (offloadInfo.has_video && !allowOffloadWithVideo) {
+        ALOGV("%s: has_video == true, returning false", __func__);
+        return false;
+    }
+
+    //If duration is less than minimum value defined in property, return false
+    const int min_duration_secs = property_get_int32(
+            "audio.offload.min.duration.secs", -1 /* default_value */);
+    if (!durationIgnored) {
+        if (min_duration_secs >= 0) {
+            if (offloadInfo.duration_us < min_duration_secs * 1000000LL) {
+                ALOGV("%s: Offload denied by duration < audio.offload.min.duration.secs(=%d)",
+                      __func__, min_duration_secs);
+                return false;
+            }
+        } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
+            ALOGV("%s: Offload denied by duration < default min(=%u)",
+                  __func__, OFFLOAD_DEFAULT_MIN_DURATION_SECS);
+            return false;
+        }
+    }
+
+    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
+    // creating an offloaded track and tearing it down immediately after start when audioflinger
+    // detects there is an active non offloadable effect.
+    // FIXME: We should check the audio session here but we do not have it in this context.
+    // This may prevent offloading in rare situations where effects are left active by apps
+    // in the background.
+    if (mEffects.isNonOffloadableEffectEnabled()) {
+        return false;
+    }
+
+    return true;
+}
+
+audio_direct_mode_t AudioPolicyManager::getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                                                 const audio_config_t *config) {
+    audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+    offloadInfo.format = config->format;
+    offloadInfo.sample_rate = config->sample_rate;
+    offloadInfo.channel_mask = config->channel_mask;
+    offloadInfo.stream_type = mEngine->getStreamTypeForAttributes(*attr);
+    offloadInfo.has_video = false;
+    offloadInfo.is_streaming = false;
+    const bool offloadPossible = isOffloadPossible(offloadInfo, true /*durationIgnored*/);
+
+    audio_direct_mode_t directMode = AUDIO_DIRECT_NOT_SUPPORTED;
+    audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
+    audio_flags_to_audio_output_flags(attr->flags, &flags);
+    // only retain flags that will drive compressed offload or passthrough
+    uint32_t relevantFlags = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    if (offloadPossible) {
+        relevantFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+    }
+    flags = (audio_output_flags_t)((flags & relevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+
+    DeviceVector outputDevices = mEngine->getOutputDevicesForAttributes(*attr);
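+    // Look for an output profile compatible with the request and backed by an available device.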
+    for (const auto& hwModule : mHwModules) {
+        for (const auto& curProfile : hwModule->getOutputProfiles()) {
+            if (!curProfile->isCompatibleProfile(outputDevices,
+                    config->sample_rate, nullptr /*updatedSamplingRate*/,
+                    config->format, nullptr /*updatedFormat*/,
+                    config->channel_mask, nullptr /*updatedChannelMask*/,
+                    flags)) {
+                continue;
+            }
+            // reject profiles not corresponding to a device currently available
+            if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) {
+                continue;
+            }
+            if ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+                        != AUDIO_OUTPUT_FLAG_NONE) {
+                if ((directMode & AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED)
+                        != AUDIO_DIRECT_NOT_SUPPORTED) {
+                    // Already reports offload gapless supported. No need to report offload support.
+                    continue;
+                }
+                if ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD)
+                        != AUDIO_OUTPUT_FLAG_NONE) {
+                    // If offload gapless is reported, no need to report offload support.
+                    directMode = (audio_direct_mode_t) ((directMode &
+                            ~AUDIO_DIRECT_OFFLOAD_SUPPORTED) |
+                            AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED);
+                } else {
+                    directMode = (audio_direct_mode_t)(directMode | AUDIO_DIRECT_OFFLOAD_SUPPORTED);
+                }
+            } else {
+                directMode = (audio_direct_mode_t) (directMode |
+                                                    AUDIO_DIRECT_BITSTREAM_SUPPORTED);
+            }
+        }
+    }
+    return directMode;
+}
+
 status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role,
                                             audio_port_type_t type,
                                             unsigned int *num_ports,
@@ -4115,7 +4206,7 @@
                     }
                     if (outputDesc != nullptr) {
                         audio_port_config srcMixPortConfig = {};
-                        outputDesc->toAudioPortConfig(&srcMixPortConfig, &patch->sources[0]);
+                        outputDesc->toAudioPortConfig(&srcMixPortConfig, nullptr);
                         // for volume control, we may need a valid stream
                         srcMixPortConfig.ext.mix.usecase.stream = sourceDesc != nullptr ?
                                     sourceDesc->stream() : AUDIO_STREAM_PATCH;
@@ -5971,14 +6062,20 @@
                     client->getSecondaryOutputs().begin(),
                     client->getSecondaryOutputs().end(),
                     secondaryDescs.begin(), secondaryDescs.end())) {
-                std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
-                std::vector<audio_io_handle_t> secondaryOutputIds;
-                for (const auto& secondaryDesc : secondaryDescs) {
-                    secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
-                    weakSecondaryDescs.push_back(secondaryDesc);
+                if (!audio_is_linear_pcm(client->config().format)) {
+                    // If the format is not PCM, the tracks should be invalidated to get correct
+                    // behavior when the secondary output is changed.
+                    streamsToInvalidate.insert(client->stream());
+                } else {
+                    std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
+                    std::vector<audio_io_handle_t> secondaryOutputIds;
+                    for (const auto &secondaryDesc: secondaryDescs) {
+                        secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
+                        weakSecondaryDescs.push_back(secondaryDesc);
+                    }
+                    trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
+                    client->setSecondaryOutputs(std::move(weakSecondaryDescs));
                 }
-                trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
-                client->setSecondaryOutputs(std::move(weakSecondaryDescs));
             }
         }
     }
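A standalone sketch of the decision above, with a hypothetical Client record standing in for the real track client descriptors: clients playing compressed (non linear PCM) content are invalidated so they reopen on the new routing, while linear PCM clients simply have their secondary outputs updated in place.

#include <cstdio>
#include <set>
#include <utility>
#include <vector>

struct Client {
    int portId;
    int stream;
    bool isLinearPcm;                      // stands in for audio_is_linear_pcm(config().format)
    std::vector<int> newSecondaryOutputs;  // io handles of the new secondary outputs
};

void updateSecondaryOutputs(const std::vector<Client>& clients,
                            std::set<int>* streamsToInvalidate,
                            std::vector<std::pair<int, std::vector<int>>>* trackSecondaryOutputs) {
    for (const Client& c : clients) {
        if (!c.isLinearPcm) {
            // Compressed tracks cannot be re-routed in place: invalidate the stream so the
            // track gets recreated against the new outputs.
            streamsToInvalidate->insert(c.stream);
        } else {
            // PCM tracks can simply be told about their new secondary outputs.
            trackSecondaryOutputs->emplace_back(c.portId, c.newSecondaryOutputs);
        }
    }
}

int main() {
    std::set<int> invalidate;
    std::vector<std::pair<int, std::vector<int>>> reroute;
    updateSecondaryOutputs({{1, 3, false, {}}, {2, 3, true, {42}}}, &invalidate, &reroute);
    printf("invalidated streams: %zu, re-routed tracks: %zu\n", invalidate.size(), reroute.size());
    return 0;
}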
@@ -6147,11 +6244,11 @@
     uid_t uid;
     sp<RecordClientDescriptor> topClient = inputDesc->getHighestPriorityClient();
     if (topClient != nullptr) {
-      attributes = topClient->attributes();
-      uid = topClient->uid();
+        attributes = topClient->attributes();
+        uid = topClient->uid();
     } else {
-      attributes = { .source = AUDIO_SOURCE_DEFAULT };
-      uid = 0;
+        attributes = { .source = AUDIO_SOURCE_DEFAULT };
+        uid = 0;
     }
 
     if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
@@ -6169,13 +6266,13 @@
     return (stream1 == stream2);
 }
 
-audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
+DeviceTypeSet AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
     // By checking the stream range before calling getStrategy, we avoid triggering
     // getOutputDevicesForStream's behavior for invalid streams.
     // The engine's getOutputDevicesForStream would fall back on its default behavior (most
     // likely the device for the music stream), but we want to return an empty set instead.
     if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
-        return AUDIO_DEVICE_NONE;
+        return DeviceTypeSet{};
     }
     DeviceVector activeDevices;
     DeviceVector devices;
@@ -6206,8 +6303,7 @@
         devices.merge(mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER));
         devices.remove(speakerSafeDevices);
     }
-    // FIXME: use DeviceTypeSet when Java layer is ready for it.
-    return deviceTypesToBitMask(devices.types());
+    return devices.types();
 }
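The return type change above replaces the legacy device bitmask with a DeviceTypeSet. A minimal sketch of what this means for callers, assuming a simplified DeviceTypeSet (the real one is a std::set of audio_devices_t) and hypothetical device constants; legacy callers that still want a mask can fold the set themselves:

#include <cstdint>
#include <cstdio>
#include <set>

using DeviceType = uint32_t;
using DeviceTypeSet = std::set<DeviceType>;

// Hypothetical single-bit device type values for the sketch.
constexpr DeviceType kSpeaker = 0x2;
constexpr DeviceType kWiredHeadphone = 0x8;

// Fold a set of single-bit device types back into a legacy-style bitmask.
uint32_t deviceTypesToBitMask(const DeviceTypeSet& types) {
    uint32_t mask = 0;
    for (DeviceType t : types) mask |= t;
    return mask;
}

int main() {
    DeviceTypeSet devices{kSpeaker, kWiredHeadphone};
    printf("types=%zu mask=0x%x\n", devices.size(), (unsigned) deviceTypesToBitMask(devices));
    return 0;
}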
 
 status_t AudioPolicyManager::getDevicesForAttributes(
@@ -6371,11 +6467,18 @@
     // different per device volumes
     if (outputDesc->isActive() && (devices != prevDevices)) {
         uint32_t tempMuteWaitMs = outputDesc->latency() * 2;
-        // temporary mute duration is conservatively set to 4 times the reported latency
-        uint32_t tempMuteDurationMs = outputDesc->latency() * 4;
+
         if (muteWaitMs < tempMuteWaitMs) {
             muteWaitMs = tempMuteWaitMs;
         }
+
+        // If a recommended mute duration is defined, use it as the temporary mute duration to
+        // avoid truncated notifications at the beginning of playback; the required duration
+        // depends on how long the HAL takes to switch the audio path. Otherwise, the temporary
+        // mute duration is conservatively set to 4 times the reported latency.
+        uint32_t tempRecommendedMuteDuration = outputDesc->getRecommendedMuteDurationMs();
+        uint32_t tempMuteDurationMs = tempRecommendedMuteDuration > 0 ?
+                tempRecommendedMuteDuration : outputDesc->latency() * 4;
+
         for (const auto &activeVs : outputDesc->getActiveVolumeSources()) {
             // make sure that we do not start the temporary mute period too early in case of
             // delayed device change
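A minimal sketch of the duration selection introduced above: prefer the recommended mute duration when the output reports one, otherwise fall back to the conservative 4x latency value (zero stands for "no recommendation" in this sketch):

#include <cstdint>
#include <cstdio>

uint32_t chooseTempMuteDurationMs(uint32_t recommendedMuteDurationMs, uint32_t latencyMs) {
    // A zero recommendation means the output did not provide one.
    return recommendedMuteDurationMs > 0 ? recommendedMuteDurationMs : latencyMs * 4;
}

int main() {
    printf("%u\n", chooseTempMuteDurationMs(0, 40));    // 160: conservative fallback
    printf("%u\n", chooseTempMuteDurationMs(250, 40));  // 250: recommendation wins
    return 0;
}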
@@ -6945,8 +7048,8 @@
 {
     audio_mode_t mode = mEngine->getPhoneState();
     return (mode == AUDIO_MODE_IN_CALL)
-            || (mode == AUDIO_MODE_IN_COMMUNICATION)
-            || (mode == AUDIO_MODE_CALL_SCREEN);
+            || (mode == AUDIO_MODE_CALL_SCREEN)
+            || (mode == AUDIO_MODE_CALL_REDIRECT);
 }
 
 void AudioPolicyManager::cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc)
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 3758429..bdeba3d 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -193,7 +193,7 @@
         }
 
         // return the enabled output devices for the given stream type
-        virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+        virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
 
         virtual status_t getDevicesForAttributes(
                 const audio_attributes_t &attributes,
@@ -366,6 +366,9 @@
 
         virtual status_t releaseSpatializerOutput(audio_io_handle_t output);
 
+        virtual audio_direct_mode_t getDirectPlaybackSupport(const audio_attributes_t *attr,
+                                                             const audio_config_t *config);
+
         bool isCallScreenModeSupported() override;
 
         void onNewAudioModulesAvailable() override;
@@ -1059,6 +1062,9 @@
         sp<SwAudioOutputDescriptor> openOutputWithProfileAndDevice(const sp<IOProfile>& profile,
                                                                    const DeviceVector& devices);
 
+        bool isOffloadPossible(const audio_offload_info_t& offloadInfo,
+                               bool durationIgnored = false);
+
 };
 
 };
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index f3d4f2f..cdad9a6 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -49,6 +49,7 @@
         "libshmemcompat",
         "libutils",
         "libstagefright_foundation",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 79252d4..aaf6fba 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -56,17 +56,18 @@
     media::OpenOutputResponse response;
 
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
-    request.halConfig = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*halConfig));
-    request.mixerConfig =
-            VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_base_t_AudioConfigBase(*mixerConfig));
+    request.halConfig = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(*halConfig, false /*isInput*/));
+    request.mixerConfig = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_base_t_AudioConfigBase(*mixerConfig, false /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
 
     status_t status = af->openOutput(request, &response);
     if (status == OK) {
         *output = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.output));
-        *halConfig =
-                VALUE_OR_RETURN_STATUS(aidl2legacy_AudioConfig_audio_config_t(response.config));
+        *halConfig = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioConfig_audio_config_t(response.config, false /*isInput*/));
         *latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(response.latencyMs));
     }
     return status;
@@ -135,9 +136,10 @@
     media::OpenInputRequest request;
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
     request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
-    request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
+    request.config = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_config_t_AudioConfig(*config, true /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
-    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSource(source));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
 
     media::OpenInputResponse response;
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 3f01de9..858a3fd 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -345,7 +345,8 @@
             (source > AUDIO_SOURCE_MAX &&
              source != AUDIO_SOURCE_HOTWORD &&
              source != AUDIO_SOURCE_FM_TUNER &&
-             source != AUDIO_SOURCE_ECHO_REFERENCE)) {
+             source != AUDIO_SOURCE_ECHO_REFERENCE &&
+             source != AUDIO_SOURCE_ULTRASOUND)) {
         ALOGE("addSourceDefaultEffect(): Unsupported source type %d", source);
         return BAD_VALUE;
     }
@@ -544,6 +545,7 @@
     CAMCORDER_SRC_TAG,
     VOICE_REC_SRC_TAG,
     VOICE_COMM_SRC_TAG,
+    REMOTE_SUBMIX_SRC_TAG,
     UNPROCESSED_SRC_TAG,
     VOICE_PERFORMANCE_SRC_TAG
 };
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index ff1e674..a30768a 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -32,6 +32,9 @@
        if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
+#define RETURN_BINDER_STATUS_IF_ERROR(x) \
+    if (status_t _tmp = (x); _tmp != OK) return aidl_utils::binderStatusFromStatusT(_tmp);
+
 #define RETURN_IF_BINDER_ERROR(x)      \
     {                                  \
         binder::Status _tmp = (x);     \
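A self-contained sketch of how the new RETURN_BINDER_STATUS_IF_ERROR macro is meant to read at call sites: a non-OK status_t is converted into an early-returned binder Status, and the C++17 if-initializer keeps the temporary out of the enclosing scope. The Status/status_t types below are simplified stand-ins, not the libbinder definitions:

#include <cstdio>

using status_t = int;
constexpr status_t OK = 0;

struct Status {
    status_t code;
    static Status fromStatusT(status_t s) { return Status{s}; }
    static Status ok() { return Status{OK}; }
    bool isOk() const { return code == OK; }
};

#define RETURN_BINDER_STATUS_IF_ERROR(x) \
    if (status_t _tmp = (x); _tmp != OK) return Status::fromStatusT(_tmp);

status_t parseDevice(bool fail) { return fail ? -22 /* stand-in for BAD_VALUE */ : OK; }

Status someBinderMethod(bool fail) {
    RETURN_BINDER_STATUS_IF_ERROR(parseDevice(fail));  // early-returns on error
    return Status::ok();
}

int main() {
    printf("ok=%d fail_code=%d\n", someBinderMethod(false).isOk(), someBinderMethod(true).code);
    return 0;
}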
@@ -44,6 +47,19 @@
 using binder::Status;
 using aidl_utils::binderStatusFromStatusT;
 using content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::Int;
 
 const std::vector<audio_usage_t>& SYSTEM_USAGES = {
     AUDIO_USAGE_CALL_ASSISTANT,
@@ -63,15 +79,22 @@
         != std::end(mSupportedSystemUsages);
 }
 
-status_t AudioPolicyService::validateUsage(audio_usage_t usage) {
-     return validateUsage(usage, getCallingAttributionSource());
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr) {
+     return validateUsage(attr, getCallingAttributionSource());
 }
 
-status_t AudioPolicyService::validateUsage(audio_usage_t usage,
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr,
         const AttributionSourceState& attributionSource) {
-    if (isSystemUsage(usage)) {
-        if (isSupportedSystemUsage(usage)) {
-            if (!modifyAudioRoutingAllowed(attributionSource)) {
+    if (isSystemUsage(attr.usage)) {
+        if (isSupportedSystemUsage(attr.usage)) {
+            if (attr.usage == AUDIO_USAGE_CALL_ASSISTANT
+                    && ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)) {
+                if (!callAudioInterceptionAllowed(attributionSource)) {
+                    ALOGE(("permission denied: modify audio routing not allowed "
+                           "for attributionSource %s"), attributionSource.toString().c_str());
+                    return PERMISSION_DENIED;
+                }
+            } else if (!modifyAudioRoutingAllowed(attributionSource)) {
                 ALOGE(("permission denied: modify audio routing not allowed "
                        "for attributionSource %s"), attributionSource.toString().c_str());
                 return PERMISSION_DENIED;
@@ -96,16 +119,18 @@
 }
 
 Status AudioPolicyService::setDeviceConnectionState(
-        const media::AudioDevice& deviceAidl,
+        const AudioDevice& deviceAidl,
         media::AudioPolicyDeviceState stateAidl,
         const std::string& deviceNameAidl,
-        media::audio::common::AudioFormat encodedFormatAidl) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+        const AudioFormatDescription& encodedFormatAidl) {
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     audio_policy_dev_state_t state = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioPolicyDeviceState_audio_policy_dev_state_t(stateAidl));
     audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioFormat_audio_format_t(encodedFormatAidl));
+            aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
 
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
@@ -121,20 +146,20 @@
     ALOGV("setDeviceConnectionState()");
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    status_t status = mAudioPolicyManager->setDeviceConnectionState(device, state,
-                                                          deviceAidl.address.c_str(),
-                                                          deviceNameAidl.c_str(),
-                                                          encodedFormat);
+    status_t status = mAudioPolicyManager->setDeviceConnectionState(
+            device, state, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
     if (status == NO_ERROR) {
         onCheckSpatializer_l();
     }
     return binderStatusFromStatusT(status);
 }
 
-Status AudioPolicyService::getDeviceConnectionState(const media::AudioDevice& deviceAidl,
+Status AudioPolicyService::getDeviceConnectionState(const AudioDevice& deviceAidl,
                                                     media::AudioPolicyDeviceState* _aidl_return) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     if (mAudioPolicyManager == NULL) {
         *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
                 legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
@@ -144,19 +169,21 @@
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
-                    mAudioPolicyManager->getDeviceConnectionState(device,
-                                                                  deviceAidl.address.c_str())));
+                    mAudioPolicyManager->getDeviceConnectionState(
+                            device, address.c_str())));
     return Status::ok();
 }
 
 Status AudioPolicyService::handleDeviceConfigChange(
-        const media::AudioDevice& deviceAidl,
+        const AudioDevice& deviceAidl,
         const std::string& deviceNameAidl,
-        media::audio::common::AudioFormat encodedFormatAidl) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+        const AudioFormatDescription& encodedFormatAidl) {
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioFormat_audio_format_t(encodedFormatAidl));
+            aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
 
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
@@ -169,7 +196,7 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     status_t status =  mAudioPolicyManager->handleDeviceConfigChange(
-            device, deviceAidl.address.c_str(), deviceNameAidl.c_str(), encodedFormat);
+            device, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
 
     if (status == NO_ERROR) {
        onCheckSpatializer_l();
@@ -177,7 +204,7 @@
     return binderStatusFromStatusT(status);
 }
 
-Status AudioPolicyService::setPhoneState(media::AudioMode stateAidl, int32_t uidAidl)
+Status AudioPolicyService::setPhoneState(AudioMode stateAidl, int32_t uidAidl)
 {
     audio_mode_t state = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioMode_audio_mode_t(stateAidl));
@@ -199,7 +226,15 @@
     // can be interleaved).
     Mutex::Autolock _l(mLock);
     // TODO: check if it is more appropriate to do it in platform specific policy manager
-    AudioSystem::setMode(state);
+
+    // Audio HAL mode conversion for call redirect modes
+    audio_mode_t halMode = state;
+    if (state == AUDIO_MODE_CALL_REDIRECT) {
+        halMode = AUDIO_MODE_CALL_SCREEN;
+    } else if (state == AUDIO_MODE_COMMUNICATION_REDIRECT) {
+        halMode = AUDIO_MODE_NORMAL;
+    }
+    AudioSystem::setMode(halMode);
 
     AutoCallerClear acc;
     mAudioPolicyManager->setPhoneState(state);
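A sketch of the policy-to-HAL mode mapping added above, with simplified enum values: the HAL never sees the new redirect modes, it is handed the closest pre-existing mode instead while the policy manager keeps the real state:

#include <cstdio>

enum AudioMode {
    MODE_NORMAL,
    MODE_IN_CALL,
    MODE_IN_COMMUNICATION,
    MODE_CALL_SCREEN,
    MODE_CALL_REDIRECT,
    MODE_COMMUNICATION_REDIRECT,
};

AudioMode toHalMode(AudioMode policyMode) {
    switch (policyMode) {
        case MODE_CALL_REDIRECT:          return MODE_CALL_SCREEN;  // reuse the call screen path
        case MODE_COMMUNICATION_REDIRECT: return MODE_NORMAL;       // HAL stays in normal mode
        default:                          return policyMode;
    }
}

int main() {
    printf("%d %d %d\n", toHalMode(MODE_IN_CALL), toHalMode(MODE_CALL_REDIRECT),
           toHalMode(MODE_COMMUNICATION_REDIRECT));
    return 0;
}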
@@ -209,7 +244,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getPhoneState(media::AudioMode* _aidl_return) {
+Status AudioPolicyService::getPhoneState(AudioMode* _aidl_return) {
     Mutex::Autolock _l(mLock);
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_mode_t_AudioMode(mPhoneState));
     return Status::ok();
@@ -265,7 +300,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getOutput(media::AudioStreamType streamAidl, int32_t* _aidl_return)
+Status AudioPolicyService::getOutput(AudioStreamType streamAidl, int32_t* _aidl_return)
 {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -289,7 +324,7 @@
 Status AudioPolicyService::getOutputForAttr(const media::AudioAttributesInternal& attrAidl,
                                             int32_t sessionAidl,
                                             const AttributionSourceState& attributionSource,
-                                            const media::AudioConfig& configAidl,
+                                            const AudioConfig& configAidl,
                                             int32_t flagsAidl,
                                             int32_t selectedDeviceIdAidl,
                                             media::GetOutputForAttrResponse* _aidl_return)
@@ -300,7 +335,7 @@
             aidl2legacy_int32_t_audio_session_t(sessionAidl));
     audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
     audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioConfig_audio_config_t(configAidl));
+            aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
     audio_output_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_output_flags_t_mask(flagsAidl));
     audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
@@ -316,7 +351,7 @@
 
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(AudioValidator::validateAudioAttributes(attr, "68953950")));
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage, attributionSource)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
 
     ALOGV("%s()", __func__);
     Mutex::Autolock _l(mLock);
@@ -341,6 +376,15 @@
         attr.flags = static_cast<audio_flags_mask_t>(
                 attr.flags & ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE));
     }
+
+    if (attr.content_type == AUDIO_CONTENT_TYPE_ULTRASOUND) {
+        if (!accessUltrasoundAllowed(adjAttributionSource)) {
+            ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
+                    __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+            return binderStatusFromStatusT(PERMISSION_DENIED);
+        }
+    }
+
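The same ultrasound gate is applied on both the playback path (content type) and, further down, the capture path (source). A minimal sketch of the check, with the permission reduced to a boolean (the real check is accessUltrasoundAllowed() on the caller's AttributionSource):

#include <cstdio>

enum ContentType { CONTENT_MUSIC, CONTENT_ULTRASOUND };

constexpr int OK = 0;
constexpr int PERMISSION_DENIED = -1;

int checkUltrasoundAccess(ContentType content, bool accessUltrasoundAllowed) {
    if (content == CONTENT_ULTRASOUND && !accessUltrasoundAllowed) {
        return PERMISSION_DENIED;
    }
    return OK;
}

int main() {
    printf("%d %d\n", checkUltrasoundAccess(CONTENT_ULTRASOUND, false),
           checkUltrasoundAccess(CONTENT_ULTRASOUND, true));
    return 0;
}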
     AutoCallerClear acc;
     AudioPolicyInterface::output_type_t outputType;
     status_t result = mAudioPolicyManager->getOutputForAttr(&attr, &output, session,
@@ -358,7 +402,12 @@
         case AudioPolicyInterface::API_OUTPUT_LEGACY:
             break;
         case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
-            if (!modifyPhoneStateAllowed(adjAttributionSource)) {
+            if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
+                && !callAudioInterceptionAllowed(adjAttributionSource)) {
+                ALOGE("%s() permission denied: call redirection not allowed for uid %d",
+                    __func__, adjAttributionSource.uid);
+                result = PERMISSION_DENIED;
+            } else if (!modifyPhoneStateAllowed(adjAttributionSource)) {
                 ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
                     __func__, adjAttributionSource.uid);
                 result = PERMISSION_DENIED;
@@ -518,7 +567,7 @@
                                            int32_t riidAidl,
                                            int32_t sessionAidl,
                                            const AttributionSourceState& attributionSource,
-                                           const media::AudioConfigBase& configAidl,
+                                           const AudioConfigBase& configAidl,
                                            int32_t flagsAidl,
                                            int32_t selectedDeviceIdAidl,
                                            media::GetInputForAttrResponse* _aidl_return) {
@@ -531,7 +580,7 @@
     audio_session_t session = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_session_t(sessionAidl));
     audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
+            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, true /*isInput*/));
     audio_input_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_input_flags_t_mask(flagsAidl));
     audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
@@ -556,7 +605,8 @@
             || (inputSource >= AUDIO_SOURCE_CNT
                 && inputSource != AUDIO_SOURCE_HOTWORD
                 && inputSource != AUDIO_SOURCE_FM_TUNER
-                && inputSource != AUDIO_SOURCE_ECHO_REFERENCE)) {
+                && inputSource != AUDIO_SOURCE_ECHO_REFERENCE
+                && inputSource != AUDIO_SOURCE_ULTRASOUND)) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
 
@@ -585,32 +635,43 @@
         adjAttributionSource.pid = callingPid;
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage,
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr,
             adjAttributionSource)));
 
     // check calling permissions.
-    // Capturing from FM_TUNER source is controlled by captureTunerAudioInputAllowed() and
-    // captureAudioOutputAllowed() (deprecated) as this does not affect users privacy
-    // as does capturing from an actual microphone.
-    if (!(recordingAllowed(adjAttributionSource, attr.source)
-            || attr.source == AUDIO_SOURCE_FM_TUNER)) {
+    // Capturing from the following sources does not require the RECORD_AUDIO permission,
+    // as the captured audio does not come from a microphone:
+    // - FM_TUNER source is controlled by captureTunerAudioInputAllowed() or
+    //   captureAudioOutputAllowed() (deprecated).
+    // - REMOTE_SUBMIX source is controlled by captureAudioOutputAllowed() if the input
+    //   type is API_INPUT_MIX_EXT_POLICY_REROUTE, and by AudioService if a media projection
+    //   is used and the input type is API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK.
+    // - ECHO_REFERENCE source is controlled by captureAudioOutputAllowed().
+    if (!(recordingAllowed(adjAttributionSource, inputSource)
+            || inputSource == AUDIO_SOURCE_FM_TUNER
+            || inputSource == AUDIO_SOURCE_REMOTE_SUBMIX
+            || inputSource == AUDIO_SOURCE_ECHO_REFERENCE)) {
         ALOGE("%s permission denied: recording not allowed for %s",
                 __func__, adjAttributionSource.toString().c_str());
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
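A standalone sketch of the RECORD_AUDIO gating described in the comment above, with a simplified source enum and the permission reduced to a boolean; the exempt sources are still gated by other checks (capture output, tuner, media projection) as explained in the comment:

#include <cstdio>

enum Source { SRC_MIC, SRC_FM_TUNER, SRC_REMOTE_SUBMIX, SRC_ECHO_REFERENCE };

// RECORD_AUDIO is only required for sources that actually capture from a microphone.
bool needsRecordAudioPermission(Source source) {
    switch (source) {
        case SRC_FM_TUNER:
        case SRC_REMOTE_SUBMIX:
        case SRC_ECHO_REFERENCE:
            return false;
        default:
            return true;
    }
}

bool recordingAllowed(Source source, bool hasRecordAudio) {
    return !needsRecordAudioPermission(source) || hasRecordAudio;
}

int main() {
    printf("mic w/o perm: %d, submix w/o perm: %d\n",
           recordingAllowed(SRC_MIC, false), recordingAllowed(SRC_REMOTE_SUBMIX, false));
    return 0;
}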
 
     bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
-    if ((inputSource == AUDIO_SOURCE_VOICE_UPLINK ||
-        inputSource == AUDIO_SOURCE_VOICE_DOWNLINK ||
-        inputSource == AUDIO_SOURCE_VOICE_CALL ||
-        inputSource == AUDIO_SOURCE_ECHO_REFERENCE)
-        && !canCaptureOutput) {
+    bool canInterceptCallAudio = callAudioInterceptionAllowed(adjAttributionSource);
+    bool isCallAudioSource = inputSource == AUDIO_SOURCE_VOICE_UPLINK
+             || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
+             || inputSource == AUDIO_SOURCE_VOICE_CALL;
+
+    if (isCallAudioSource && !canInterceptCallAudio && !canCaptureOutput) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
-
+    if (inputSource == AUDIO_SOURCE_ECHO_REFERENCE
+            && !canCaptureOutput) {
+        return binderStatusFromStatusT(PERMISSION_DENIED);
+    }
     if (inputSource == AUDIO_SOURCE_FM_TUNER
-        && !captureTunerAudioInputAllowed(adjAttributionSource)
-        && !canCaptureOutput) {
+        && !canCaptureOutput
+        && !captureTunerAudioInputAllowed(adjAttributionSource)) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
@@ -626,6 +687,14 @@
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
+    if (attr.source == AUDIO_SOURCE_ULTRASOUND) {
+        if (!accessUltrasoundAllowed(adjAttributionSource)) {
+            ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
+                    __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+            return binderStatusFromStatusT(PERMISSION_DENIED);
+        }
+    }
+
     sp<AudioPolicyEffects>audioPolicyEffects;
     {
         status_t status;
@@ -652,23 +721,30 @@
             case AudioPolicyInterface::API_INPUT_LEGACY:
                 break;
             case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
+                if ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+                        && canInterceptCallAudio) {
+                    break;
+                }
                 // FIXME: use the same permission as for remote submix for now.
+                FALLTHROUGH_INTENDED;
             case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
                 if (!canCaptureOutput) {
-                    ALOGE("getInputForAttr() permission denied: capture not allowed");
+                    ALOGE("%s permission denied: capture not allowed", __func__);
                     status = PERMISSION_DENIED;
                 }
                 break;
             case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
-                if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
-                    ALOGE("getInputForAttr() permission denied: modify audio routing not allowed");
+                if (!(modifyAudioRoutingAllowed(adjAttributionSource)
+                        || ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+                            && canInterceptCallAudio))) {
+                    ALOGE("%s permission denied for remote submix capture", __func__);
                     status = PERMISSION_DENIED;
                 }
                 break;
             case AudioPolicyInterface::API_INPUT_INVALID:
             default:
-                LOG_ALWAYS_FATAL("getInputForAttr() encountered an invalid input type %d",
-                        (int)inputType);
+                LOG_ALWAYS_FATAL("%s encountered an invalid input type %d",
+                        __func__, (int)inputType);
             }
         }
 
@@ -738,8 +814,10 @@
 
     // check calling permissions
     if (!(startRecording(client->attributionSource, String16(msg.str().c_str()),
-        client->attributes.source)
-            || client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
+                         client->attributes.source)
+            || client->attributes.source == AUDIO_SOURCE_FM_TUNER
+            || client->attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX
+            || client->attributes.source == AUDIO_SOURCE_ECHO_REFERENCE)) {
         ALOGE("%s permission denied: recording not allowed for attribution source %s",
                 __func__, client->attributionSource.toString().c_str());
         return binderStatusFromStatusT(PERMISSION_DENIED);
@@ -909,7 +987,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::initStreamVolume(media::AudioStreamType streamAidl,
+Status AudioPolicyService::initStreamVolume(AudioStreamType streamAidl,
                                             int32_t indexMinAidl,
                                             int32_t indexMaxAidl) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
@@ -932,13 +1010,14 @@
     return binderStatusFromStatusT(NO_ERROR);
 }
 
-Status AudioPolicyService::setStreamVolumeIndex(media::AudioStreamType streamAidl,
-                                                int32_t deviceAidl, int32_t indexAidl) {
+Status AudioPolicyService::setStreamVolumeIndex(AudioStreamType streamAidl,
+                                                const AudioDeviceDescription& deviceAidl,
+                                                int32_t indexAidl) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
     int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
 
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
@@ -956,12 +1035,13 @@
                                                                              device));
 }
 
-Status AudioPolicyService::getStreamVolumeIndex(media::AudioStreamType streamAidl,
-                                                int32_t deviceAidl, int32_t* _aidl_return) {
+Status AudioPolicyService::getStreamVolumeIndex(AudioStreamType streamAidl,
+                                                const AudioDeviceDescription& deviceAidl,
+                                                int32_t* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
     int index;
 
     if (mAudioPolicyManager == NULL) {
@@ -979,12 +1059,13 @@
 }
 
 Status AudioPolicyService::setVolumeIndexForAttributes(
-        const media::AudioAttributesInternal& attrAidl, int32_t deviceAidl, int32_t indexAidl) {
+        const media::AudioAttributesInternal& attrAidl,
+        const AudioDeviceDescription& deviceAidl, int32_t indexAidl) {
     audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
     int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             AudioValidator::validateAudioAttributes(attributes, "169572641")));
 
@@ -1001,11 +1082,12 @@
 }
 
 Status AudioPolicyService::getVolumeIndexForAttributes(
-        const media::AudioAttributesInternal& attrAidl, int32_t deviceAidl, int32_t* _aidl_return) {
+        const media::AudioAttributesInternal& attrAidl,
+        const AudioDeviceDescription& deviceAidl, int32_t* _aidl_return) {
     audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
     int index;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             AudioValidator::validateAudioAttributes(attributes, "169572641")));
@@ -1059,7 +1141,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getStrategyForStream(media::AudioStreamType streamAidl,
+Status AudioPolicyService::getStrategyForStream(AudioStreamType streamAidl,
                                                 int32_t* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -1083,14 +1165,14 @@
 
 //audio policy: use audio_device_t appropriately
 
-Status AudioPolicyService::getDevicesForStream(media::AudioStreamType streamAidl,
-                                               int32_t* _aidl_return) {
+Status AudioPolicyService::getDevicesForStream(
+        AudioStreamType streamAidl,
+        std::vector<AudioDeviceDescription>* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
 
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-                legacy2aidl_audio_devices_t_int32_t(AUDIO_DEVICE_NONE));
+        *_aidl_return = std::vector<AudioDeviceDescription>{};
         return Status::ok();
     }
     if (mAudioPolicyManager == NULL) {
@@ -1099,12 +1181,14 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            legacy2aidl_audio_devices_t_int32_t(mAudioPolicyManager->getDevicesForStream(stream)));
+            convertContainer<std::vector<AudioDeviceDescription>>(
+                    mAudioPolicyManager->getDevicesForStream(stream),
+                    legacy2aidl_audio_devices_t_AudioDeviceDescription));
     return Status::ok();
 }
 
 Status AudioPolicyService::getDevicesForAttributes(const media::AudioAttributesEx& attrAidl,
-                                                   std::vector<media::AudioDevice>* _aidl_return)
+                                                   std::vector<AudioDevice>* _aidl_return)
 {
     AudioAttributes aa = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesEx_AudioAttributes(attrAidl));
@@ -1118,8 +1202,8 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForAttributes(aa.getAttributes(), &devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return Status::ok();
 }
 
@@ -1205,7 +1289,7 @@
     return binderStatusFromStatusT(mAudioPolicyManager->moveEffectsToIo(ids, io));
 }
 
-Status AudioPolicyService::isStreamActive(media::AudioStreamType streamAidl, int32_t inPastMsAidl,
+Status AudioPolicyService::isStreamActive(AudioStreamType streamAidl, int32_t inPastMsAidl,
                                           bool* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -1224,7 +1308,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::isStreamActiveRemotely(media::AudioStreamType streamAidl,
+Status AudioPolicyService::isStreamActiveRemotely(AudioStreamType streamAidl,
                                                   int32_t inPastMsAidl,
                                                   bool* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1244,9 +1328,9 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::isSourceActive(media::AudioSourceType sourceAidl, bool* _aidl_return) {
+Status AudioPolicyService::isSourceActive(AudioSource sourceAidl, bool* _aidl_return) {
     audio_source_t source = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(sourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(sourceAidl));
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1274,7 +1358,7 @@
 
 Status AudioPolicyService::queryDefaultPreProcessing(
         int32_t audioSessionAidl,
-        media::Int* countAidl,
+        Int* countAidl,
         std::vector<media::EffectDescriptor>* _aidl_return) {
     audio_session_t audioSession = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_session_t(audioSessionAidl));
@@ -1298,11 +1382,11 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::addSourceDefaultEffect(const media::AudioUuid& typeAidl,
+Status AudioPolicyService::addSourceDefaultEffect(const AudioUuid& typeAidl,
                                                   const std::string& opPackageNameAidl,
-                                                  const media::AudioUuid& uuidAidl,
+                                                  const AudioUuid& uuidAidl,
                                                   int32_t priority,
-                                                  media::AudioSourceType sourceAidl,
+                                                  AudioSource sourceAidl,
                                                   int32_t* _aidl_return) {
     effect_uuid_t type = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(typeAidl));
@@ -1311,7 +1395,7 @@
     effect_uuid_t uuid = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(uuidAidl));
     audio_source_t source = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(sourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(sourceAidl));
     audio_unique_id_t id;
 
     sp<AudioPolicyEffects>audioPolicyEffects;
@@ -1325,10 +1409,10 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::addStreamDefaultEffect(const media::AudioUuid& typeAidl,
+Status AudioPolicyService::addStreamDefaultEffect(const AudioUuid& typeAidl,
                                                   const std::string& opPackageNameAidl,
-                                                  const media::AudioUuid& uuidAidl,
-                                                  int32_t priority, media::AudioUsage usageAidl,
+                                                  const AudioUuid& uuidAidl,
+                                                  int32_t priority, AudioUsage usageAidl,
                                                   int32_t* _aidl_return) {
     effect_uuid_t type = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(typeAidl));
@@ -1376,7 +1460,7 @@
 }
 
 Status AudioPolicyService::setSupportedSystemUsages(
-        const std::vector<media::AudioUsage>& systemUsagesAidl) {
+        const std::vector<AudioUsage>& systemUsagesAidl) {
     size_t size = systemUsagesAidl.size();
     if (size > MAX_ITEMS_PER_LIST) {
         size = MAX_ITEMS_PER_LIST;
@@ -1415,7 +1499,7 @@
             mAudioPolicyManager->setAllowedCapturePolicy(uid, capturePolicy));
 }
 
-Status AudioPolicyService::getOffloadSupport(const media::AudioOffloadInfo& infoAidl,
+Status AudioPolicyService::getOffloadSupport(const AudioOffloadInfo& infoAidl,
                                              media::AudioOffloadMode* _aidl_return) {
     audio_offload_info_t info = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioOffloadInfo_audio_offload_info_t(infoAidl));
@@ -1431,11 +1515,11 @@
 }
 
 Status AudioPolicyService::isDirectOutputSupported(
-        const media::AudioConfigBase& configAidl,
+        const AudioConfigBase& configAidl,
         const media::AudioAttributesInternal& attributesAidl,
         bool* _aidl_return) {
     audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
+            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, false /*isInput*/));
     audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(attributesAidl));
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
@@ -1446,7 +1530,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
 
     Mutex::Autolock _l(mLock);
     *_aidl_return = mAudioPolicyManager->isDirectOutputSupported(config, attributes);
@@ -1455,7 +1539,7 @@
 
 
 Status AudioPolicyService::listAudioPorts(media::AudioPortRole roleAidl,
-                                          media::AudioPortType typeAidl, media::Int* count,
+                                          media::AudioPortType typeAidl, Int* count,
                                           std::vector<media::AudioPort>* portsAidl,
                                           int32_t* _aidl_return) {
     audio_port_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1543,7 +1627,7 @@
                                                    IPCThreadState::self()->getCallingUid()));
 }
 
-Status AudioPolicyService::listAudioPatches(media::Int* count,
+Status AudioPolicyService::listAudioPatches(Int* count,
                                             std::vector<media::AudioPatch>* patchesAidl,
                                             int32_t* _aidl_return) {
     unsigned int num_patches = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1610,7 +1694,7 @@
     _aidl_return->ioHandle = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
     _aidl_return->device = VALUE_OR_RETURN_BINDER_STATUS(
-            legacy2aidl_audio_devices_t_int32_t(device));
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     return Status::ok();
 }
 
@@ -1680,7 +1764,7 @@
 
 Status AudioPolicyService::setUidDeviceAffinities(
         int32_t uidAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -1713,7 +1797,7 @@
 
 Status AudioPolicyService::setUserIdDeviceAffinities(
         int32_t userIdAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     int userId = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(userIdAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -1762,7 +1846,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
 
     // startAudioSource should be created as the calling uid
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
@@ -1810,13 +1894,14 @@
 }
 
 
-Status AudioPolicyService::getStreamVolumeDB(media::AudioStreamType streamAidl, int32_t indexAidl,
-                                             int32_t deviceAidl, float* _aidl_return) {
+Status AudioPolicyService::getStreamVolumeDB(
+        AudioStreamType streamAidl, int32_t indexAidl,
+        const AudioDeviceDescription& deviceAidl, float* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
     int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
 
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
@@ -1827,8 +1912,8 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getSurroundFormats(media::Int* count,
-        std::vector<media::audio::common::AudioFormat>* formats,
+Status AudioPolicyService::getSurroundFormats(Int* count,
+        std::vector<AudioFormatDescription>* formats,
         std::vector<bool>* formatsEnabled) {
     unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
             convertIntegral<unsigned int>(count->value));
@@ -1850,7 +1935,8 @@
     numSurroundFormatsReq = std::min(numSurroundFormats, numSurroundFormatsReq);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             convertRange(surroundFormats.get(), surroundFormats.get() + numSurroundFormatsReq,
-                         std::back_inserter(*formats), legacy2aidl_audio_format_t_AudioFormat)));
+                         std::back_inserter(*formats),
+                         legacy2aidl_audio_format_t_AudioFormatDescription)));
     formatsEnabled->insert(
             formatsEnabled->begin(),
             surroundFormatsEnabled.get(),
@@ -1860,7 +1946,7 @@
 }
 
 Status AudioPolicyService::getReportedSurroundFormats(
-        media::Int* count, std::vector<media::audio::common::AudioFormat>* formats) {
+        Int* count, std::vector<AudioFormatDescription>* formats) {
     unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
             convertIntegral<unsigned int>(count->value));
     if (numSurroundFormats > MAX_ITEMS_PER_LIST) {
@@ -1880,13 +1966,15 @@
     numSurroundFormatsReq = std::min(numSurroundFormats, numSurroundFormatsReq);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             convertRange(surroundFormats.get(), surroundFormats.get() + numSurroundFormatsReq,
-                         std::back_inserter(*formats), legacy2aidl_audio_format_t_AudioFormat)));
+                         std::back_inserter(*formats),
+                         legacy2aidl_audio_format_t_AudioFormatDescription)));
     count->value = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<uint32_t>(numSurroundFormats));
     return Status::ok();
 }
 
 Status AudioPolicyService::getHwOffloadFormatsSupportedForBluetoothMedia(
-        int32_t deviceAidl, std::vector<media::audio::common::AudioFormat>* _aidl_return) {
+        const AudioDeviceDescription& deviceAidl,
+        std::vector<AudioFormatDescription>* _aidl_return) {
     std::vector<audio_format_t> formats;
 
     if (mAudioPolicyManager == NULL) {
@@ -1895,20 +1983,20 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getHwOffloadFormatsSupportedForBluetoothMedia(device, &formats)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::audio::common::AudioFormat>>(
+            convertContainer<std::vector<AudioFormatDescription>>(
                     formats,
-                    legacy2aidl_audio_format_t_AudioFormat));
+                    legacy2aidl_audio_format_t_AudioFormatDescription));
     return Status::ok();
 }
 
 Status AudioPolicyService::setSurroundFormatEnabled(
-        media::audio::common::AudioFormat audioFormatAidl, bool enabled) {
+        const AudioFormatDescription& audioFormatAidl, bool enabled) {
     audio_format_t audioFormat = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioFormat_audio_format_t(audioFormatAidl));
+            aidl2legacy_AudioFormatDescription_audio_format_t(audioFormatAidl));
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -2059,7 +2147,7 @@
 Status AudioPolicyService::setDevicesRoleForStrategy(
         int32_t strategyAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     product_strategy_t strategy = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_product_strategy_t(strategyAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2099,7 +2187,7 @@
 Status AudioPolicyService::getDevicesForRoleAndStrategy(
         int32_t strategyAidl,
         media::DeviceRole roleAidl,
-        std::vector<media::AudioDevice>* _aidl_return) {
+        std::vector<AudioDevice>* _aidl_return) {
     product_strategy_t strategy = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_product_strategy_t(strategyAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2113,8 +2201,8 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndStrategy(strategy, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return Status::ok();
 }
 
@@ -2125,11 +2213,11 @@
 }
 
 Status AudioPolicyService::setDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2145,11 +2233,11 @@
 }
 
 Status AudioPolicyService::addDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2165,11 +2253,11 @@
 }
 
 Status AudioPolicyService::removeDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2184,10 +2272,10 @@
             mAudioPolicyManager->removeDevicesRoleForCapturePreset(audioSource, role, devices));
 }
 
-Status AudioPolicyService::clearDevicesRoleForCapturePreset(media::AudioSourceType audioSourceAidl,
+Status AudioPolicyService::clearDevicesRoleForCapturePreset(AudioSource audioSourceAidl,
                                                             media::DeviceRole roleAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
 
@@ -2200,11 +2288,11 @@
 }
 
 Status AudioPolicyService::getDevicesForRoleAndCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        std::vector<media::AudioDevice>* _aidl_return) {
+        std::vector<AudioDevice>* _aidl_return) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices;
@@ -2216,8 +2304,8 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return Status::ok();
 }
 
@@ -2238,8 +2326,8 @@
 
 Status AudioPolicyService::canBeSpatialized(
         const std::optional<media::AudioAttributesInternal>& attrAidl,
-        const std::optional<media::AudioConfig>& configAidl,
-        const std::vector<media::AudioDevice>& devicesAidl,
+        const std::optional<AudioConfig>& configAidl,
+        const std::vector<AudioDevice>& devicesAidl,
         bool* _aidl_return) {
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
@@ -2252,7 +2340,8 @@
     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
     if (configAidl.has_value()) {
         config = VALUE_OR_RETURN_BINDER_STATUS(
-                                    aidl2legacy_AudioConfig_audio_config_t(configAidl.value()));
+                                    aidl2legacy_AudioConfig_audio_config_t(configAidl.value(),
+                                    false /*isInput*/));
     }
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -2263,4 +2352,24 @@
     return Status::ok();
 }
 
+Status AudioPolicyService::getDirectPlaybackSupport(const media::AudioAttributesInternal &attrAidl,
+                                                    const AudioConfig &configAidl,
+                                                    media::AudioDirectMode *_aidl_return) {
+    if (mAudioPolicyManager == nullptr) {
+        return binderStatusFromStatusT(NO_INIT);
+    }
+    if (_aidl_return == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    audio_attributes_t attr = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
+    audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
+            aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
+    Mutex::Autolock _l(mLock);
+    *_aidl_return = static_cast<media::AudioDirectMode>(
+            VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_direct_mode_t_int32_t_mask(
+                    mAudioPolicyManager->getDirectPlaybackSupport(&attr, &config))));
+    return Status::ok();
+}
+
 } // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index cd83900..ef7a83b 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -509,22 +509,24 @@
             int32_t eventAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(event));
             media::RecordClientInfo clientInfoAidl = VALUE_OR_RETURN_STATUS(
                     legacy2aidl_record_client_info_t_RecordClientInfo(*clientInfo));
-            media::AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_config_base_t_AudioConfigBase(*clientConfig));
+            AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
+                    legacy2aidl_audio_config_base_t_AudioConfigBase(
+                            *clientConfig, true /*isInput*/));
             std::vector<media::EffectDescriptor> clientEffectsAidl = VALUE_OR_RETURN_STATUS(
                     convertContainer<std::vector<media::EffectDescriptor>>(
                             clientEffects,
                             legacy2aidl_effect_descriptor_t_EffectDescriptor));
-            media::AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_config_base_t_AudioConfigBase(*deviceConfig));
+            AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
+                    legacy2aidl_audio_config_base_t_AudioConfigBase(
+                            *deviceConfig, true /*isInput*/));
             std::vector<media::EffectDescriptor> effectsAidl = VALUE_OR_RETURN_STATUS(
                     convertContainer<std::vector<media::EffectDescriptor>>(
                             effects,
                             legacy2aidl_effect_descriptor_t_EffectDescriptor));
             int32_t patchHandleAidl = VALUE_OR_RETURN_STATUS(
                     legacy2aidl_audio_patch_handle_t_int32_t(patchHandle));
-            media::AudioSourceType sourceAidl = VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_source_t_AudioSourceType(source));
+            media::audio::common::AudioSource sourceAidl = VALUE_OR_RETURN_STATUS(
+                    legacy2aidl_audio_source_t_AudioSource(source));
             return aidl_utils::statusTFromBinderStatus(
                     mAudioPolicyServiceClient->onRecordingConfigurationUpdate(eventAidl,
                                                                               clientInfoAidl,
@@ -727,7 +729,8 @@
         if (current->attributes.source != AUDIO_SOURCE_HOTWORD) {
             onlyHotwordActive = false;
         }
-        if (currentUid == mPhoneStateOwnerUid) {
+        if (currentUid == mPhoneStateOwnerUid &&
+                !isVirtualSource(current->attributes.source)) {
             isPhoneStateOwnerActive = true;
         }
     }
@@ -906,6 +909,7 @@
     switch (source) {
         case AUDIO_SOURCE_FM_TUNER:
         case AUDIO_SOURCE_ECHO_REFERENCE:
+        case AUDIO_SOURCE_REMOTE_SUBMIX:
             return false;
         default:
             break;
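The updateUidStates change above tightens when the phone-state owner counts as actively capturing: its UID must match and its source must not be a virtual source. A toy, self-contained sketch of that predicate follows; the "virtual" source set here is a placeholder, not the real list used by AudioPolicyService.

    #include <iostream>
    #include <set>

    int main() {
        const int phoneStateOwnerUid = 1000;
        const int currentUid = 1000;
        const int currentSource = 8;               // placeholder source id
        const std::set<int> virtualSources{8, 9};  // placeholder "virtual" sources

        const bool isVirtual = virtualSources.count(currentSource) != 0;
        const bool isPhoneStateOwnerActive = (currentUid == phoneStateOwnerUid) && !isVirtual;
        std::cout << std::boolalpha << isPhoneStateOwnerActive << '\n';  // prints false
        return 0;
    }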
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index d189224..84b1e50 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -49,6 +49,17 @@
 namespace android {
 
 using content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::Int;
 
 // ----------------------------------------------------------------------------
 
@@ -71,25 +82,25 @@
     //
     binder::Status onNewAudioModulesAvailable() override;
     binder::Status setDeviceConnectionState(
-            const media::AudioDevice& device,
+            const AudioDevice& device,
             media::AudioPolicyDeviceState state,
             const std::string& deviceName,
-            media::audio::common::AudioFormat encodedFormat) override;
-    binder::Status getDeviceConnectionState(const media::AudioDevice& device,
+            const AudioFormatDescription& encodedFormat) override;
+    binder::Status getDeviceConnectionState(const AudioDevice& device,
                                             media::AudioPolicyDeviceState* _aidl_return) override;
     binder::Status handleDeviceConfigChange(
-            const media::AudioDevice& device,
+            const AudioDevice& device,
             const std::string& deviceName,
-            media::audio::common::AudioFormat encodedFormat) override;
-    binder::Status setPhoneState(media::AudioMode state, int32_t uid) override;
+            const AudioFormatDescription& encodedFormat) override;
+    binder::Status setPhoneState(AudioMode state, int32_t uid) override;
     binder::Status setForceUse(media::AudioPolicyForceUse usage,
                                media::AudioPolicyForcedConfig config) override;
     binder::Status getForceUse(media::AudioPolicyForceUse usage,
                                media::AudioPolicyForcedConfig* _aidl_return) override;
-    binder::Status getOutput(media::AudioStreamType stream, int32_t* _aidl_return) override;
+    binder::Status getOutput(AudioStreamType stream, int32_t* _aidl_return) override;
     binder::Status getOutputForAttr(const media::AudioAttributesInternal& attr, int32_t session,
                                     const AttributionSourceState &attributionSource,
-                                    const media::AudioConfig& config,
+                                    const AudioConfig& config,
                                     int32_t flags, int32_t selectedDeviceId,
                                     media::GetOutputForAttrResponse* _aidl_return) override;
     binder::Status startOutput(int32_t portId) override;
@@ -98,32 +109,37 @@
     binder::Status getInputForAttr(const media::AudioAttributesInternal& attr, int32_t input,
                                    int32_t riid, int32_t session,
                                    const AttributionSourceState &attributionSource,
-                                   const media::AudioConfigBase& config, int32_t flags,
+                                   const AudioConfigBase& config, int32_t flags,
                                    int32_t selectedDeviceId,
                                    media::GetInputForAttrResponse* _aidl_return) override;
     binder::Status startInput(int32_t portId) override;
     binder::Status stopInput(int32_t portId) override;
     binder::Status releaseInput(int32_t portId) override;
-    binder::Status initStreamVolume(media::AudioStreamType stream, int32_t indexMin,
+    binder::Status initStreamVolume(AudioStreamType stream, int32_t indexMin,
                                     int32_t indexMax) override;
-    binder::Status setStreamVolumeIndex(media::AudioStreamType stream, int32_t device,
+    binder::Status setStreamVolumeIndex(AudioStreamType stream,
+                                        const AudioDeviceDescription& device,
                                         int32_t index) override;
-    binder::Status getStreamVolumeIndex(media::AudioStreamType stream, int32_t device,
+    binder::Status getStreamVolumeIndex(AudioStreamType stream,
+                                        const AudioDeviceDescription& device,
                                         int32_t* _aidl_return) override;
     binder::Status setVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
-                                               int32_t device, int32_t index) override;
+                                               const AudioDeviceDescription& device,
+                                               int32_t index) override;
     binder::Status getVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
-                                               int32_t device, int32_t* _aidl_return) override;
+                                               const AudioDeviceDescription& device,
+                                               int32_t* _aidl_return) override;
     binder::Status getMaxVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
                                                   int32_t* _aidl_return) override;
     binder::Status getMinVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
                                                   int32_t* _aidl_return) override;
-    binder::Status getStrategyForStream(media::AudioStreamType stream,
+    binder::Status getStrategyForStream(AudioStreamType stream,
                                         int32_t* _aidl_return) override;
-    binder::Status getDevicesForStream(media::AudioStreamType stream,
-                                       int32_t* _aidl_return) override;
+    binder::Status getDevicesForStream(
+            AudioStreamType stream,
+            std::vector<AudioDeviceDescription>* _aidl_return) override;
     binder::Status getDevicesForAttributes(const media::AudioAttributesEx& attr,
-                                           std::vector<media::AudioDevice>* _aidl_return) override;
+                                           std::vector<AudioDevice>* _aidl_return) override;
     binder::Status getOutputForEffect(const media::EffectDescriptor& desc,
                                       int32_t* _aidl_return) override;
     binder::Status registerEffect(const media::EffectDescriptor& desc, int32_t io, int32_t strategy,
@@ -131,42 +147,42 @@
     binder::Status unregisterEffect(int32_t id) override;
     binder::Status setEffectEnabled(int32_t id, bool enabled) override;
     binder::Status moveEffectsToIo(const std::vector<int32_t>& ids, int32_t io) override;
-    binder::Status isStreamActive(media::AudioStreamType stream, int32_t inPastMs,
+    binder::Status isStreamActive(AudioStreamType stream, int32_t inPastMs,
                                   bool* _aidl_return) override;
-    binder::Status isStreamActiveRemotely(media::AudioStreamType stream, int32_t inPastMs,
+    binder::Status isStreamActiveRemotely(AudioStreamType stream, int32_t inPastMs,
                                           bool* _aidl_return) override;
-    binder::Status isSourceActive(media::AudioSourceType source, bool* _aidl_return) override;
+    binder::Status isSourceActive(AudioSource source, bool* _aidl_return) override;
     binder::Status queryDefaultPreProcessing(
-            int32_t audioSession, media::Int* count,
+            int32_t audioSession, Int* count,
             std::vector<media::EffectDescriptor>* _aidl_return) override;
-    binder::Status addSourceDefaultEffect(const media::AudioUuid& type,
+    binder::Status addSourceDefaultEffect(const AudioUuid& type,
                                           const std::string& opPackageName,
-                                          const media::AudioUuid& uuid, int32_t priority,
-                                          media::AudioSourceType source,
+                                          const AudioUuid& uuid, int32_t priority,
+                                          AudioSource source,
                                           int32_t* _aidl_return) override;
-    binder::Status addStreamDefaultEffect(const media::AudioUuid& type,
+    binder::Status addStreamDefaultEffect(const AudioUuid& type,
                                           const std::string& opPackageName,
-                                          const media::AudioUuid& uuid, int32_t priority,
-                                          media::AudioUsage usage, int32_t* _aidl_return) override;
+                                          const AudioUuid& uuid, int32_t priority,
+                                          AudioUsage usage, int32_t* _aidl_return) override;
     binder::Status removeSourceDefaultEffect(int32_t id) override;
     binder::Status removeStreamDefaultEffect(int32_t id) override;
     binder::Status setSupportedSystemUsages(
-            const std::vector<media::AudioUsage>& systemUsages) override;
+            const std::vector<AudioUsage>& systemUsages) override;
     binder::Status setAllowedCapturePolicy(int32_t uid, int32_t capturePolicy) override;
-    binder::Status getOffloadSupport(const media::AudioOffloadInfo& info,
+    binder::Status getOffloadSupport(const media::audio::common::AudioOffloadInfo& info,
                                      media::AudioOffloadMode* _aidl_return) override;
-    binder::Status isDirectOutputSupported(const media::AudioConfigBase& config,
+    binder::Status isDirectOutputSupported(const AudioConfigBase& config,
                                            const media::AudioAttributesInternal& attributes,
                                            bool* _aidl_return) override;
     binder::Status listAudioPorts(media::AudioPortRole role, media::AudioPortType type,
-                                  media::Int* count, std::vector<media::AudioPort>* ports,
+                                  Int* count, std::vector<media::AudioPort>* ports,
                                   int32_t* _aidl_return) override;
     binder::Status getAudioPort(const media::AudioPort& port,
                                 media::AudioPort* _aidl_return) override;
     binder::Status createAudioPatch(const media::AudioPatch& patch, int32_t handle,
                                     int32_t* _aidl_return) override;
     binder::Status releaseAudioPatch(int32_t handle) override;
-    binder::Status listAudioPatches(media::Int* count, std::vector<media::AudioPatch>* patches,
+    binder::Status listAudioPatches(Int* count, std::vector<media::AudioPatch>* patches,
                                     int32_t* _aidl_return) override;
     binder::Status setAudioPortConfig(const media::AudioPortConfig& config) override;
     binder::Status registerClient(const sp<media::IAudioPolicyServiceClient>& client) override;
@@ -174,15 +190,15 @@
     binder::Status setAudioVolumeGroupCallbacksEnabled(bool enabled) override;
     binder::Status acquireSoundTriggerSession(media::SoundTriggerSession* _aidl_return) override;
     binder::Status releaseSoundTriggerSession(int32_t session) override;
-    binder::Status getPhoneState(media::AudioMode* _aidl_return) override;
+    binder::Status getPhoneState(AudioMode* _aidl_return) override;
     binder::Status registerPolicyMixes(const std::vector<media::AudioMix>& mixes,
                                        bool registration) override;
     binder::Status setUidDeviceAffinities(int32_t uid,
-                                          const std::vector<media::AudioDevice>& devices) override;
+                                          const std::vector<AudioDevice>& devices) override;
     binder::Status removeUidDeviceAffinities(int32_t uid) override;
     binder::Status setUserIdDeviceAffinities(
             int32_t userId,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeUserIdDeviceAffinities(int32_t userId) override;
     binder::Status startAudioSource(const media::AudioPortConfig& source,
                                     const media::AudioAttributesInternal& attributes,
@@ -190,16 +206,18 @@
     binder::Status stopAudioSource(int32_t portId) override;
     binder::Status setMasterMono(bool mono) override;
     binder::Status getMasterMono(bool* _aidl_return) override;
-    binder::Status getStreamVolumeDB(media::AudioStreamType stream, int32_t index, int32_t device,
+    binder::Status getStreamVolumeDB(AudioStreamType stream, int32_t index,
+                                     const AudioDeviceDescription& device,
                                      float* _aidl_return) override;
-    binder::Status getSurroundFormats(media::Int* count,
-                                      std::vector<media::audio::common::AudioFormat>* formats,
+    binder::Status getSurroundFormats(Int* count,
+                                      std::vector<AudioFormatDescription>* formats,
                                       std::vector<bool>* formatsEnabled) override;
     binder::Status getReportedSurroundFormats(
-            media::Int* count, std::vector<media::audio::common::AudioFormat>* formats) override;
+            Int* count, std::vector<AudioFormatDescription>* formats) override;
     binder::Status getHwOffloadFormatsSupportedForBluetoothMedia(
-            int32_t device, std::vector<media::audio::common::AudioFormat>* _aidl_return) override;
-    binder::Status setSurroundFormatEnabled(media::audio::common::AudioFormat audioFormat,
+            const AudioDeviceDescription& device,
+            std::vector<AudioFormatDescription>* _aidl_return) override;
+    binder::Status setSurroundFormatEnabled(const AudioFormatDescription& audioFormat,
                                             bool enabled) override;
     binder::Status setAssistantUid(int32_t uid) override;
     binder::Status setHotwordDetectionServiceUid(int32_t uid) override;
@@ -220,29 +238,29 @@
     binder::Status isCallScreenModeSupported(bool* _aidl_return) override;
     binder::Status setDevicesRoleForStrategy(
             int32_t strategy, media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeDevicesRoleForStrategy(int32_t strategy, media::DeviceRole role) override;
     binder::Status getDevicesForRoleAndStrategy(
             int32_t strategy, media::DeviceRole role,
-            std::vector<media::AudioDevice>* _aidl_return) override;
+            std::vector<AudioDevice>* _aidl_return) override;
     binder::Status setDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status addDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
-    binder::Status clearDevicesRoleForCapturePreset(media::AudioSourceType audioSource,
+            const std::vector<AudioDevice>& devices) override;
+    binder::Status clearDevicesRoleForCapturePreset(AudioSource audioSource,
                                                     media::DeviceRole role) override;
     binder::Status getDevicesForRoleAndCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            std::vector<media::AudioDevice>* _aidl_return) override;
+            std::vector<AudioDevice>* _aidl_return) override;
     binder::Status registerSoundTriggerCaptureStateListener(
             const sp<media::ICaptureStateListener>& listener, bool* _aidl_return) override;
 
@@ -250,10 +268,14 @@
             media::GetSpatializerResponse* _aidl_return) override;
     binder::Status canBeSpatialized(
             const std::optional<media::AudioAttributesInternal>& attr,
-            const std::optional<media::AudioConfig>& config,
-            const std::vector<media::AudioDevice>& devices,
+            const std::optional<AudioConfig>& config,
+            const std::vector<AudioDevice>& devices,
             bool* _aidl_return) override;
 
+    binder::Status getDirectPlaybackSupport(const media::AudioAttributesInternal& attr,
+                                            const AudioConfig& config,
+                                            media::AudioDirectMode* _aidl_return) override;
+
     status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
 
     // IBinder::DeathRecipient
@@ -367,8 +389,9 @@
     app_state_t apmStatFromAmState(int amState);
 
     bool isSupportedSystemUsage(audio_usage_t usage);
-    status_t validateUsage(audio_usage_t usage);
-    status_t validateUsage(audio_usage_t usage, const AttributionSourceState& attributionSource);
+    status_t validateUsage(const audio_attributes_t& attr);
+    status_t validateUsage(const audio_attributes_t& attr,
+                           const AttributionSourceState& attributionSource);
 
     void updateUidStates();
     void updateUidStates_l() REQUIRES(mLock);
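One visible API shift in this header is getDevicesForStream() returning std::vector<AudioDeviceDescription> instead of a packed int32_t device mask. A self-contained sketch of what that means for callers, using illustrative types rather than the real AIDL ones:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    // Illustrative stand-in for media::audio::common::AudioDeviceDescription.
    struct FakeDeviceDescription {
        std::string type;        // e.g. "speaker"
        std::string connection;  // e.g. "" or "bt-a2dp"
        bool operator==(const FakeDeviceDescription& other) const {
            return type == other.type && connection == other.connection;
        }
    };

    int main() {
        // Before: test a bit in an int32_t mask. After: search the returned list.
        std::vector<FakeDeviceDescription> devices{{"speaker", ""}, {"bt_a2dp", "bt-a2dp"}};
        FakeDeviceDescription speaker{"speaker", ""};
        bool routedToSpeaker =
                std::find(devices.begin(), devices.end(), speaker) != devices.end();
        std::cout << std::boolalpha << routedToSpeaker << '\n';  // prints true
        return 0;
    }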
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index ffedf63..440a7ff 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -35,24 +35,24 @@
 namespace {
 
 // This is how fast, in m/s, we allow position to shift during rate-limiting.
-constexpr auto kMaxTranslationalVelocity = 2;
+constexpr float kMaxTranslationalVelocity = 2;
 
 // This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
-constexpr auto kMaxRotationalVelocity = 4 * M_PI;
+constexpr float kMaxRotationalVelocity = 8;
 
 // This should be set to the typical time scale that the translation sensors used drift in. This
 // means, loosely, for how long we can trust the reading to be "accurate enough". This would
 // determine the time constants used for high-pass filtering those readings. If the value is set
 // too high, we may experience drift. If it is set too low, we may experience poses tending toward
 // identity too fast.
-constexpr auto kTranslationalDriftTimeConstant = 20s;
+constexpr auto kTranslationalDriftTimeConstant = 40s;
 
 // This should be set to the typical time scale that the rotation sensors used drift in. This
 // means, loosely, for how long we can trust the reading to be "accurate enough". This would
 // determine the time constants used for high-pass filtering those readings. If the value is set
 // too high, we may experience drift. If it is set too low, we may experience poses tending toward
 // identity too fast.
-constexpr auto kRotationalDriftTimeConstant = 20s;
+constexpr auto kRotationalDriftTimeConstant = 60s;
 
 // This is how far into the future we predict the head pose, using linear extrapolation based on
 // twist (velocity). It should be set to a value that matches the characteristic durations of moving
@@ -64,6 +64,25 @@
 // stale;
 constexpr auto kMaxLostSamples = 4;
 
+// Auto-recenter kicks in after the head has been still for this long.
+constexpr auto kAutoRecenterWindowDuration = 6s;
+
+// Auto-recenter considers head not still if translated by this much (in meters, approx).
+constexpr float kAutoRecenterTranslationThreshold = 0.1f;
+
+// Auto-recenter considers head not still if rotated by this much (in radians, approx).
+constexpr float kAutoRecenterRotationThreshold = 7.0f / 180 * M_PI;
+
+// Screen is considered to be unstable (not still) if it has moved significantly within the last
+// time window of this duration.
+constexpr auto kScreenStillnessWindowDuration = 3s;
+
+// Screen is considered to have moved significantly if translated by this much (in meters, approx).
+constexpr float kScreenStillnessTranslationThreshold = 0.1f;
+
+// Screen is considered to have moved significantly if rotated by this much (in radians, approx).
+constexpr float kScreenStillnessRotationThreshold = 7.0f / 180 * M_PI;
+
 // Time units for system clock ticks. This is what the Sensor Framework timestamps represent and
 // what we use for pose filtering.
 using Ticks = std::chrono::nanoseconds;
@@ -81,10 +100,17 @@
       mProcessor(createHeadTrackingProcessor(HeadTrackingProcessor::Options{
               .maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
               .maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
-              .translationalDriftTimeConstant = Ticks(kTranslationalDriftTimeConstant).count(),
-              .rotationalDriftTimeConstant = Ticks(kRotationalDriftTimeConstant).count(),
+              .translationalDriftTimeConstant =
+                      double(Ticks(kTranslationalDriftTimeConstant).count()),
+              .rotationalDriftTimeConstant = double(Ticks(kRotationalDriftTimeConstant).count()),
               .freshnessTimeout = Ticks(sensorPeriod * kMaxLostSamples).count(),
               .predictionDuration = Ticks(kPredictionDuration).count(),
+              .autoRecenterWindowDuration = Ticks(kAutoRecenterWindowDuration).count(),
+              .autoRecenterTranslationalThreshold = kAutoRecenterTranslationThreshold,
+              .autoRecenterRotationalThreshold = kAutoRecenterRotationThreshold,
+              .screenStillnessWindowDuration = Ticks(kScreenStillnessWindowDuration).count(),
+              .screenStillnessTranslationalThreshold = kScreenStillnessTranslationThreshold,
+              .screenStillnessRotationalThreshold = kScreenStillnessRotationThreshold,
       })),
       mPoseProvider(SensorPoseProvider::create("headtracker", this)),
       mThread([this, maxUpdatePeriod] {
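As a quick, standalone sanity check of the magnitudes involved (not part of the patch): the 7-degree thresholds come out to roughly 0.122 rad, and the std::chrono durations convert to nanosecond tick counts exactly as the Options initializer above does.

    #include <chrono>
    #include <cstdio>

    int main() {
        using Ticks = std::chrono::nanoseconds;  // same tick unit as the pose controller
        const double kPi = 3.14159265358979323846;
        const double rotationThresholdRad = 7.0 / 180 * kPi;        // ~0.1222 rad
        const long long recenterWindowNs =
                Ticks(std::chrono::seconds(6)).count();             // 6'000'000'000 ns
        std::printf("threshold = %.4f rad, window = %lld ns\n",
                    rotationThresholdRad, recenterWindowNs);
        return 0;
    }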
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index b296fb0..8fbe8b2 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -25,7 +25,7 @@
         "libmedia_helper",
         "libutils",
         "libxml2",
-        "libpermission",
+        "framework-permission-aidl-cpp",
         "libbinder",
     ],
 
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
index a5ad9b1..7343b9b 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
@@ -28,19 +28,26 @@
 class AudioPolicyManagerTestClientForHdmi : public AudioPolicyManagerTestClient {
 public:
     String8 getParameters(audio_io_handle_t /* ioHandle */, const String8&  /* keys*/ ) override {
+        AudioParameter mAudioParameters;
+        std::string formats;
+        for (const auto& f : mSupportedFormats) {
+            if (!formats.empty()) formats += AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
+            formats += audio_format_to_string(f);
+        }
+        mAudioParameters.add(
+                String8(AudioParameter::keyStreamSupportedFormats),
+                String8(formats.c_str()));
+        mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
+        mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
         return mAudioParameters.toString();
     }
 
     void addSupportedFormat(audio_format_t format) override {
-        mAudioParameters.add(
-                String8(AudioParameter::keyStreamSupportedFormats),
-                String8(audio_format_to_string(format)));
-        mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
-        mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
+        mSupportedFormats.insert(format);
     }
 
 private:
-    AudioParameter mAudioParameters;
+    std::set<audio_format_t> mSupportedFormats;
 };
 
-} // namespace android
\ No newline at end of file
+} // namespace android
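The test client above now joins its supported formats lazily in getParameters() rather than overwriting a single AudioParameter entry on each addSupportedFormat() call. A self-contained sketch of that join, using std::string in place of String8/AudioParameter and assuming the value-list separator is '|':

    #include <iostream>
    #include <set>
    #include <string>

    int main() {
        const std::set<std::string> supportedFormats{"AUDIO_FORMAT_AC3", "AUDIO_FORMAT_E_AC3"};
        std::string joined;
        for (const auto& format : supportedFormats) {
            if (!joined.empty()) joined += '|';  // assumed list separator
            joined += format;
        }
        std::cout << joined << '\n';  // AUDIO_FORMAT_AC3|AUDIO_FORMAT_E_AC3
        return 0;
    }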
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index 7f67940..9d0d558 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -33,6 +33,7 @@
     using AudioPolicyManager::releaseMsdOutputPatches;
     using AudioPolicyManager::setMsdOutputPatches;
     using AudioPolicyManager::getAudioPatches;
+    using AudioPolicyManager::getDirectPlaybackSupport;
     uint32_t getAudioPortGeneration() const { return mAudioPortGeneration; }
 };
 
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 9d2d2b3..9c1adc6 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -151,7 +151,7 @@
 void AudioPolicyManagerTest::SetUp() {
     mClient.reset(getClient());
     mManager.reset(new AudioPolicyTestManager(mClient.get()));
-    SetUpManagerConfig();  // Subclasses may want to customize the config.
+    ASSERT_NO_FATAL_FAILURE(SetUpManagerConfig());  // Subclasses may want to customize the config.
     ASSERT_EQ(NO_ERROR, mManager->initialize());
     ASSERT_EQ(NO_ERROR, mManager->initCheck());
 }
@@ -401,7 +401,7 @@
 
 void AudioPolicyManagerTestMsd::SetUpManagerConfig() {
     // TODO: Consider using Serializer to load part of the config from a string.
-    AudioPolicyManagerTest::SetUpManagerConfig();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTest::SetUpManagerConfig());
     AudioPolicyConfig& config = mManager->getConfig();
     mMsdOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_BUS);
     sp<AudioProfile> pcmOutputProfile = new AudioProfile(
@@ -660,6 +660,7 @@
 void AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig() {
     status_t status = deserializeAudioPolicyFile(getConfigFile().c_str(), &mManager->getConfig());
     ASSERT_EQ(NO_ERROR, status);
+    mManager->getConfig().setSource(getConfigFile());
 }
 
 TEST_F(AudioPolicyManagerTestWithConfigurationFile, InitSuccess) {
@@ -803,7 +804,8 @@
 }
 
 class AudioPolicyManagerTestForHdmi
-        : public AudioPolicyManagerTestWithConfigurationFile {
+        : public AudioPolicyManagerTestWithConfigurationFile,
+          public testing::WithParamInterface<audio_format_t> {
 protected:
     void SetUp() override;
     std::string getConfigFile() override { return sTvConfig; }
@@ -824,7 +826,8 @@
         "test_settop_box_surround_configuration.xml";
 
 void AudioPolicyManagerTestForHdmi::SetUp() {
-    AudioPolicyManagerTest::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTest::SetUp());
+    mClient->addSupportedFormat(AUDIO_FORMAT_AC3);
     mClient->addSupportedFormat(AUDIO_FORMAT_E_AC3);
     mManager->setDeviceConnectionState(
             AUDIO_DEVICE_OUT_HDMI, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
@@ -914,76 +917,90 @@
     return formats;
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
+TEST_P(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
     auto surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetSurroundFormatsReturnsManipulatedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
     status_t ret =
-            mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+            mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     auto surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_TRUE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_TRUE(surroundFormats[GetParam()]);
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         ListAudioPortsReturnManipulatedHdmiFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
-    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/));
+    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/));
     auto formats = getFormatsFromPorts();
-    ASSERT_EQ(0, formats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(0, formats.count(GetParam()));
 
-    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/));
+    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/));
     formats = getFormatsFromPorts();
-    ASSERT_EQ(1, formats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, formats.count(GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetReportedSurroundFormatsReturnsHdmiReportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
     auto surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetReportedSurroundFormatsReturnsNonManipulatedHdmiReportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
-    status_t ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    status_t ret = mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     auto surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 }
 
+TEST_P(AudioPolicyManagerTestForHdmi, GetSurroundFormatsIgnoresSupportedFormats) {
+    mManager->setForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER);
+    auto surroundFormats = getSurroundFormatsHelper();
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
+}
+
+INSTANTIATE_TEST_SUITE_P(SurroundFormatSupport, AudioPolicyManagerTestForHdmi,
+        testing::Values(AUDIO_FORMAT_AC3, AUDIO_FORMAT_E_AC3),
+        [](const ::testing::TestParamInfo<AudioPolicyManagerTestForHdmi::ParamType>& info) {
+            return audio_format_to_string(info.param);
+        });
+
 class AudioPolicyManagerTestDPNoRemoteSubmixModule : public AudioPolicyManagerTestDynamicPolicy {
 protected:
     std::string getConfigFile() override { return sPrimaryOnlyConfig; }
@@ -1035,7 +1052,7 @@
 };
 
 void AudioPolicyManagerTestDPPlaybackReRouting::SetUp() {
-    AudioPolicyManagerTestDynamicPolicy::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestDynamicPolicy::SetUp());
 
     mTracker.reset(new RecordingActivityTracker());
 
@@ -1221,7 +1238,7 @@
 };
 
 void AudioPolicyManagerTestDPMixRecordInjection::SetUp() {
-    AudioPolicyManagerTestDynamicPolicy::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestDynamicPolicy::SetUp());
 
     mTracker.reset(new RecordingActivityTracker());
 
@@ -1520,7 +1537,7 @@
 };
 
 void AudioPolicyManagerDynamicHwModulesTest::SetUpManagerConfig() {
-    AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig());
     // Only allow successful opening of "primary" hw module during APM initialization.
     mClient->swapAllowedModuleNames({"primary"});
 }
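The HDMI tests above move from TEST_F with a hard-coded AUDIO_FORMAT_E_AC3 to TEST_P parameterized over formats. A minimal, self-contained illustration of that GoogleTest pattern, with a generic fixture and values rather than the real AudioPolicyManager ones:

    #include <gtest/gtest.h>

    class FormatNameTest : public ::testing::TestWithParam<const char*> {};

    TEST_P(FormatNameTest, NameIsNotEmpty) {
        EXPECT_STRNE(GetParam(), "");
    }

    INSTANTIATE_TEST_SUITE_P(SurroundFormats, FormatNameTest,
            ::testing::Values("AUDIO_FORMAT_AC3", "AUDIO_FORMAT_E_AC3"));

    int main(int argc, char** argv) {
        ::testing::InitGoogleTest(&argc, argv);
        return RUN_ALL_TESTS();
    }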
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 26562e0..1b54e75 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -47,7 +47,6 @@
         "common/CameraDeviceBase.cpp",
         "common/CameraOfflineSessionBase.cpp",
         "common/CameraProviderManager.cpp",
-        "common/DepthPhotoProcessor.cpp",
         "common/FrameProcessorBase.cpp",
         "api1/Camera2Client.cpp",
         "api1/client2/Parameters.cpp",
@@ -76,14 +75,11 @@
         "device3/StatusTracker.cpp",
         "device3/Camera3BufferManager.cpp",
         "device3/Camera3StreamSplitter.cpp",
-        "device3/CoordinateMapper.cpp",
-        "device3/DistortionMapper.cpp",
-        "device3/ZoomRatioMapper.cpp",
-        "device3/RotateAndCropMapper.cpp",
         "device3/Camera3OutputStreamInterface.cpp",
         "device3/Camera3OutputUtils.cpp",
         "device3/Camera3DeviceInjectionMethods.cpp",
         "device3/UHRCropAndMeteringRegionMapper.cpp",
+        "device3/PreviewFrameScheduler.cpp",
         "gui/RingBufferConsumer.cpp",
         "hidl/AidlCameraDeviceCallbacks.cpp",
         "hidl/AidlCameraServiceListener.cpp",
@@ -94,7 +90,6 @@
         "utils/CameraThreadState.cpp",
         "utils/CameraTraces.cpp",
         "utils/AutoConditionLock.cpp",
-        "utils/ExifUtils.cpp",
         "utils/SessionConfigurationUtils.cpp",
         "utils/SessionStatsBuilder.cpp",
         "utils/TagMonitor.cpp",
@@ -107,6 +102,7 @@
     ],
 
     shared_libs: [
+        "libandroid",
         "libbase",
         "libdl",
         "libexif",
@@ -154,6 +150,7 @@
         "android.hardware.camera.device@3.5",
         "android.hardware.camera.device@3.6",
         "android.hardware.camera.device@3.7",
+        "android.hardware.camera.device@3.8",
         "media_permission-aidl-cpp",
     ],
 
@@ -161,6 +158,7 @@
         "libprocessinfoservice_aidl",
         "libbinderthreadstateutils",
         "media_permission-aidl-cpp",
+        "libcameraservice_device_independent",
     ],
 
     export_shared_lib_headers: [
@@ -190,3 +188,49 @@
     ],
 
 }
+
+cc_library_static {
+    name: "libcameraservice_device_independent",
+    host_supported: true,
+
+    // Camera service source
+
+    srcs: [
+        "common/DepthPhotoProcessor.cpp",
+        "device3/CoordinateMapper.cpp",
+        "device3/DistortionMapper.cpp",
+        "device3/RotateAndCropMapper.cpp",
+        "device3/ZoomRatioMapper.cpp",
+        "utils/ExifUtils.cpp",
+        "utils/SessionConfigurationUtilsHost.cpp",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libbinder",
+        "libcamera_metadata",
+        "libdynamic_depth",
+        "libexif",
+        "libjpeg",
+        "liblog",
+        "libutils",
+        "libxml2",
+    ],
+
+    include_dirs: [
+        "external/dynamic_depth/includes",
+        "external/dynamic_depth/internal",
+        "frameworks/av/camera/include",
+        "frameworks/av/camera/include/camera",
+    ],
+
+    export_include_dirs: ["."],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+        "-Wno-ignored-qualifiers",
+    ],
+
+}
\ No newline at end of file
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
deleted file mode 100644
index 4cfecfd..0000000
--- a/services/camera/libcameraservice/Android.mk
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2010 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-# Build tests
-
-include $(LOCAL_PATH)/tests/Android.mk
-
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index ccdd9e5..015ae2f 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -119,6 +119,59 @@
     return res;
 }
 
+status_t CameraFlashlight::turnOnTorchWithStrengthLevel(const String8& cameraId,
+            int32_t torchStrength) {
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.",
+               __FUNCTION__);
+        return NO_INIT;
+    }
+
+    ALOGV("%s: set torch strength of camera %s to %d", __FUNCTION__,
+            cameraId.string(), torchStrength);
+    status_t res = OK;
+    Mutex::Autolock l(mLock);
+
+    if (mOpenedCameraIds.indexOf(cameraId) != NAME_NOT_FOUND) {
+        ALOGE("%s: Camera device %s is in use, cannot be turned ON.",
+                __FUNCTION__, cameraId.string());
+        return -EBUSY;
+    }
+
+    if (mFlashControl == NULL) {
+        res = createFlashlightControl(cameraId);
+        if (res) {
+            return res;
+        }
+    }
+
+    res = mFlashControl->turnOnTorchWithStrengthLevel(cameraId, torchStrength);
+    return res;
+}
+
+
+status_t CameraFlashlight::getTorchStrengthLevel(const String8& cameraId,
+            int32_t* torchStrength) {
+    status_t res = OK;
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.",
+            __FUNCTION__);
+        return NO_INIT;
+    }
+
+    Mutex::Autolock l(mLock);
+
+    if (mFlashControl == NULL) {
+        res = createFlashlightControl(cameraId);
+        if (res) {
+            return res;
+        }
+    }
+
+    res = mFlashControl->getTorchStrengthLevel(cameraId, torchStrength);
+    return res;
+}
+
 status_t CameraFlashlight::findFlashUnits() {
     Mutex::Autolock l(mLock);
     status_t res;
@@ -306,6 +359,22 @@
 
     return mProviderManager->setTorchMode(cameraId.string(), enabled);
 }
+
+status_t ProviderFlashControl::turnOnTorchWithStrengthLevel(const String8& cameraId,
+            int32_t torchStrength) {
+    ALOGV("%s: change torch strength level of camera %s to %d", __FUNCTION__,
+            cameraId.string(), torchStrength);
+
+    return mProviderManager->turnOnTorchWithStrengthLevel(cameraId.string(), torchStrength);
+}
+
+status_t ProviderFlashControl::getTorchStrengthLevel(const String8& cameraId,
+            int32_t* torchStrength) {
+    ALOGV("%s: get torch strength level of camera %s", __FUNCTION__,
+            cameraId.string());
+
+    return mProviderManager->getTorchStrengthLevel(cameraId.string(), torchStrength);
+}
 // ProviderFlashControl implementation ends
 
 }
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index b97fa5f..1703ddc 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -44,6 +44,14 @@
         // set the torch mode to on or off.
         virtual status_t setTorchMode(const String8& cameraId,
                     bool enabled) = 0;
+
+        // Change the brightness level of the torch. If the torch is OFF and
+        // torchStrength >= 1, then the torch will also be turned ON.
+        virtual status_t turnOnTorchWithStrengthLevel(const String8& cameraId,
+                    int32_t torchStrength) = 0;
+
+        // Returns the torch strength level.
+        virtual status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength) = 0;
 };
 
 /**
@@ -67,6 +75,12 @@
         // set the torch mode to on or off.
         status_t setTorchMode(const String8& cameraId, bool enabled);
 
+        // Change the torch strength level of the flash unit in torch mode.
+        status_t turnOnTorchWithStrengthLevel(const String8& cameraId, int32_t torchStrength);
+
+        // Get the torch strength level
+        status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength);
+
         // Notify CameraFlashlight that camera service is going to open a camera
         // device. CameraFlashlight will free the resources that may cause the
         // camera open to fail. Camera service must call this function before
@@ -115,6 +129,8 @@
         // FlashControlBase
         status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
         status_t setTorchMode(const String8& cameraId, bool enabled);
+        status_t turnOnTorchWithStrengthLevel(const String8& cameraId, int32_t torchStrength);
+        status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength);
 
     private:
         sp<CameraProviderManager> mProviderManager;
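The FlashControlBase comments above document one behavioral contract worth calling out: requesting a strength level of at least 1 while the torch is off also turns the torch on. A toy, self-contained sketch of an implementation honoring that contract; the class name and return values are illustrative only.

    #include <cstdint>
    #include <iostream>

    class FakeFlashControl {
    public:
        int32_t turnOnTorchWithStrengthLevel(int32_t torchStrength) {
            if (torchStrength >= 1 && !mTorchOn) {
                mTorchOn = true;  // implicit turn-on, per the interface comment
            }
            mStrength = torchStrength;
            return 0;  // OK
        }
        int32_t getTorchStrengthLevel(int32_t* torchStrength) const {
            *torchStrength = mStrength;
            return 0;  // OK
        }
    private:
        bool mTorchOn = false;
        int32_t mStrength = 0;
    };

    int main() {
        FakeFlashControl control;
        control.turnOnTorchWithStrengthLevel(3);
        int32_t level = 0;
        control.getTorchStrengthLevel(&level);
        std::cout << "torch strength = " << level << '\n';  // torch strength = 3
        return 0;
    }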
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 229964c..97ec5d1 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -28,6 +28,7 @@
 #include <sys/types.h>
 #include <inttypes.h>
 #include <pthread.h>
+#include <poll.h>
 
 #include <android/hardware/ICamera.h>
 #include <android/hardware/ICameraClient.h>
@@ -85,7 +86,7 @@
 
 using base::StringPrintf;
 using binder::Status;
-using camera3::SessionConfigurationUtils;
+using namespace camera3;
 using frameworks::cameraservice::service::V2_0::implementation::HidlCameraService;
 using hardware::ICamera;
 using hardware::ICameraClient;
@@ -137,6 +138,7 @@
 static constexpr int32_t kVendorClientState = ActivityManager::PROCESS_STATE_PERSISTENT_UI;
 
 const String8 CameraService::kOfflineDevice("offline-");
+const String16 CameraService::kWatchAllClientsFlag("all");
 
 // Set to keep track of logged service error events.
 static std::set<String8> sServiceErrorEventSet;
@@ -569,6 +571,15 @@
     onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
 }
 
+void CameraService::broadcastTorchStrengthLevel(const String8& cameraId,
+        int32_t newStrengthLevel) {
+    Mutex::Autolock lock(mStatusListenerLock);
+    for (auto& i : mListenerList) {
+        i->getListener()->onTorchStrengthLevelChanged(String16{cameraId},
+                newStrengthLevel);
+    }
+}
+
 void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
         TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
     ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
@@ -622,8 +633,10 @@
     broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
 }
 
-static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
-    return checkPermission(sSystemCameraPermission, callingPid, callingUid) &&
+static bool hasPermissionsForSystemCamera(int callingPid, int callingUid,
+        bool logPermissionFailure = false) {
+    return checkPermission(sSystemCameraPermission, callingPid, callingUid,
+            logPermissionFailure) &&
             checkPermission(sCameraPermission, callingPid, callingUid);
 }
 
@@ -702,8 +715,8 @@
     const std::vector<std::string> *deviceIds = &mNormalDeviceIdsWithoutSystemCamera;
     auto callingPid = CameraThreadState::getCallingPid();
     auto callingUid = CameraThreadState::getCallingUid();
-    if (checkPermission(sSystemCameraPermission, callingPid, callingUid) ||
-            getpid() == callingPid) {
+    if (checkPermission(sSystemCameraPermission, callingPid, callingUid,
+            /*logPermissionFailure*/false) || getpid() == callingPid) {
         deviceIds = &mNormalDeviceIds;
     }
     if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(deviceIds->size())) {
@@ -802,6 +815,31 @@
     return ret;
 }
 
+Status CameraService::getTorchStrengthLevel(const String16& cameraId,
+        int32_t* torchStrength) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mServiceLock);
+    if (!mInitialized) {
+        ALOGE("%s: Camera HAL couldn't be initialized.", __FUNCTION__);
+        return STATUS_ERROR(ERROR_DISCONNECTED, "Camera HAL couldn't be initialized.");
+    }
+
+    if (torchStrength == NULL) {
+        ALOGE("%s: strength level must not be null.", __FUNCTION__);
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Strength level should not be null.");
+    }
+
+    status_t res = mCameraProviderManager->getTorchStrengthLevel(String8(cameraId).string(),
+        torchStrength);
+    if (res != OK) {
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve torch "
+            "strength level for device %s: %s (%d)", String8(cameraId).string(),
+            strerror(-res), res);
+    }
+    ALOGI("%s: Torch strength level is: %d", __FUNCTION__, *torchStrength);
+    return Status::ok();
+}
+
 String8 CameraService::getFormattedCurrentTime() {
     time_t now = time(nullptr);
     char formattedTime[64];
@@ -909,6 +947,7 @@
         case CAMERA_DEVICE_API_VERSION_3_5:
         case CAMERA_DEVICE_API_VERSION_3_6:
         case CAMERA_DEVICE_API_VERSION_3_7:
+        case CAMERA_DEVICE_API_VERSION_3_8:
             if (effectiveApiLevel == API_1) { // Camera1 API route
                 sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
                 *client = new Camera2Client(cameraService, tmp, packageName, featureId,
@@ -1343,7 +1382,7 @@
                 auto clientSp = current->getValue();
                 if (clientSp.get() != nullptr) { // should never be needed
                     if (!clientSp->canCastToApiClient(effectiveApiLevel)) {
-                        ALOGW("CameraService connect called from same client, but with a different"
+                        ALOGW("CameraService connect called with a different"
                                 " API level, evicting prior client...");
                     } else if (clientSp->getRemote() == remoteCallback) {
                         ALOGI("CameraService::connect X (PID %d) (second call from same"
@@ -1606,7 +1645,7 @@
     //     same behavior for system camera devices.
     if (getCurrentServingCall() != BinderCallType::HWBINDER &&
             systemCameraKind == SystemCameraKind::SYSTEM_ONLY_CAMERA &&
-            !hasPermissionsForSystemCamera(cPid, cUid)) {
+            !hasPermissionsForSystemCamera(cPid, cUid, /*logPermissionFailure*/true)) {
         ALOGW("Rejecting access to system only camera %s, inadequete permissions",
                 cameraId.c_str());
         return true;
@@ -1795,7 +1834,8 @@
         LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
                 __FUNCTION__);
 
-        err = client->initialize(mCameraProviderManager, mMonitorTags);
+        String8 monitorTags = isClientWatched(client.get()) ? mMonitorTags : String8("");
+        err = client->initialize(mCameraProviderManager, monitorTags);
         if (err != OK) {
             ALOGE("%s: Could not initialize client from HAL.", __FUNCTION__);
             // Errors could be from the HAL module open call or from AppOpsManager
@@ -1899,6 +1939,33 @@
     CameraServiceProxyWrapper::logOpen(cameraId, facing, clientPackageName,
             effectiveApiLevel, isNdk, openLatencyMs);
 
+    {
+        Mutex::Autolock lock(mInjectionParametersLock);
+        if (cameraId == mInjectionInternalCamId && mInjectionInitPending) {
+            mInjectionInitPending = false;
+            status_t res = NO_ERROR;
+            auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
+            if (clientDescriptor != nullptr) {
+                sp<BasicClient> clientSp = clientDescriptor->getValue();
+                res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+                if (res != OK) {
+                    return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                            "No camera device with ID \"%s\" currently available",
+                            mInjectionExternalCamId.string());
+                }
+                res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+                if (res != OK) {
+                    mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+                }
+            } else {
+                ALOGE("%s: Internal camera ID = %s 's client does not exist!",
+                        __FUNCTION__, mInjectionInternalCamId.string());
+                res = NO_INIT;
+                mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+            }
+        }
+    }
+
     return ret;
 }
 
@@ -1942,7 +2009,8 @@
             return BAD_VALUE;
         }
 
-        auto err = offlineClient->initialize(mCameraProviderManager, mMonitorTags);
+        String8 monitorTags = isClientWatched(offlineClient.get()) ? mMonitorTags : String8("");
+        auto err = offlineClient->initialize(mCameraProviderManager, monitorTags);
         if (err != OK) {
             ALOGE("%s: Could not initialize offline client.", __FUNCTION__);
             return err;
@@ -1974,6 +2042,132 @@
     return OK;
 }
 
+Status CameraService::turnOnTorchWithStrengthLevel(const String16& cameraId, int32_t torchStrength,
+        const sp<IBinder>& clientBinder) {
+    Mutex::Autolock lock(mServiceLock);
+
+    ATRACE_CALL();
+    if (clientBinder == nullptr) {
+        ALOGE("%s: torch client binder is NULL", __FUNCTION__);
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+                "Torch client binder in null.");
+    }
+
+    String8 id = String8(cameraId.string());
+    int uid = CameraThreadState::getCallingUid();
+
+    if (shouldRejectSystemCameraConnection(id)) {
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Unable to change the strength level"
+                "for system only device %s: ", id.string());
+    }
+
+    // verify id is valid
+    auto state = getCameraState(id);
+    if (state == nullptr) {
+        ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+            "Camera ID \"%s\" is a not valid camera ID", id.string());
+    }
+
+    StatusInternal cameraStatus = state->getStatus();
+    if (cameraStatus != StatusInternal::NOT_AVAILABLE &&
+            cameraStatus != StatusInternal::PRESENT) {
+        ALOGE("%s: camera id is invalid %s, status %d", __FUNCTION__, id.string(),
+            (int)cameraStatus);
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                "Camera ID \"%s\" is a not valid camera ID", id.string());
+    }
+
+    {
+        Mutex::Autolock al(mTorchStatusMutex);
+        TorchModeStatus status;
+        status_t err = getTorchStatusLocked(id, &status);
+        if (err != OK) {
+            if (err == NAME_NOT_FOUND) {
+                return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                        "Camera \"%s\" does not have a flash unit", id.string());
+            }
+            ALOGE("%s: getting current torch status failed for camera %s",
+                    __FUNCTION__, id.string());
+            return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                    "Error changing torch strength level for camera \"%s\": %s (%d)",
+                    id.string(), strerror(-err), err);
+        }
+
+        if (status == TorchModeStatus::NOT_AVAILABLE) {
+            if (cameraStatus == StatusInternal::NOT_AVAILABLE) {
+                ALOGE("%s: torch mode of camera %s is not available because "
+                        "camera is in use.", __FUNCTION__, id.string());
+                return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+                        "Torch for camera \"%s\" is not available due to an existing camera user",
+                        id.string());
+            } else {
+                ALOGE("%s: torch mode of camera %s is not available due to "
+                       "insufficient resources", __FUNCTION__, id.string());
+                return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                        "Torch for camera \"%s\" is not available due to insufficient resources",
+                        id.string());
+            }
+        }
+    }
+
+    {
+        Mutex::Autolock al(mTorchUidMapMutex);
+        updateTorchUidMapLocked(cameraId, uid);
+    }
+    // Check if the current torch strength level is the same as the new one.
+    bool shouldSkipTorchStrengthUpdates = mCameraProviderManager->shouldSkipTorchStrengthUpdate(
+            id.string(), torchStrength);
+
+    status_t err = mFlashlight->turnOnTorchWithStrengthLevel(id, torchStrength);
+
+    if (err != OK) {
+        int32_t errorCode;
+        String8 msg;
+        switch (err) {
+            case -ENOSYS:
+                msg = String8::format("Camera \"%s\" has no flashlight.",
+                    id.string());
+                errorCode = ERROR_ILLEGAL_ARGUMENT;
+                break;
+            case -EBUSY:
+                msg = String8::format("Camera \"%s\" is in use",
+                    id.string());
+                errorCode = ERROR_CAMERA_IN_USE;
+                break;
+            default:
+                msg = String8::format("Changing torch strength level failed.");
+                errorCode = ERROR_INVALID_OPERATION;
+
+        }
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(errorCode, msg.string());
+    }
+
+    {
+        // update the link to client's death
+        // Store the last client that turns on each camera's torch mode.
+        Mutex::Autolock al(mTorchClientMapMutex);
+        ssize_t index = mTorchClientMap.indexOfKey(id);
+        if (index == NAME_NOT_FOUND) {
+            mTorchClientMap.add(id, clientBinder);
+        } else {
+            mTorchClientMap.valueAt(index)->unlinkToDeath(this);
+            mTorchClientMap.replaceValueAt(index, clientBinder);
+        }
+        clientBinder->linkToDeath(this);
+    }
+
+    int clientPid = CameraThreadState::getCallingPid();
+    const char *id_cstr = id.c_str();
+    ALOGI("%s: Torch strength for camera id %s changed to %d for client PID %d",
+            __FUNCTION__, id_cstr, torchStrength, clientPid);
+    if (!shouldSkipTorchStrengthUpdates) {
+        broadcastTorchStrengthLevel(id, torchStrength);
+    }
+    return Status::ok();
+}
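
For context, a standalone client-side call into this new entry point might look roughly like the sketch below. It is not part of the patch: the generated header path, the "media.camera" service name, and the use of a plain BBinder as the death-link token are assumptions for illustration only.

#include <binder/Binder.h>
#include <binder/IServiceManager.h>
#include <android/hardware/ICameraService.h>

using namespace android;

int main() {
    // Look up the camera service (service name assumed to be "media.camera").
    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.camera"));
    if (binder == nullptr) return 1;
    sp<hardware::ICameraService> cs = interface_cast<hardware::ICameraService>(binder);

    // The service links to this token so it can reset the torch if the client dies.
    sp<IBinder> clientToken = new BBinder();
    binder::Status status = cs->turnOnTorchWithStrengthLevel(
            String16("0"), /*strengthLevel*/ 2, clientToken);
    return status.isOk() ? 0 : 1;
}
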
+
 Status CameraService::setTorchMode(const String16& cameraId, bool enabled,
         const sp<IBinder>& clientBinder) {
     Mutex::Autolock lock(mServiceLock);
@@ -2045,13 +2239,7 @@
         // Update UID map - this is used in the torch status changed callbacks, so must be done
         // before setTorchMode
         Mutex::Autolock al(mTorchUidMapMutex);
-        if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
-            mTorchUidMap[id].first = uid;
-            mTorchUidMap[id].second = uid;
-        } else {
-            // Set the pending UID
-            mTorchUidMap[id].first = uid;
-        }
+        updateTorchUidMapLocked(cameraId, uid);
     }
 
     status_t err = mFlashlight->setTorchMode(id, enabled);
@@ -2106,6 +2294,17 @@
     return Status::ok();
 }
 
+void CameraService::updateTorchUidMapLocked(const String16& cameraId, int uid) {
+    String8 id = String8(cameraId.string());
+    if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
+        mTorchUidMap[id].first = uid;
+        mTorchUidMap[id].second = uid;
+    } else {
+        // Set the pending UID
+        mTorchUidMap[id].first = uid;
+    }
+}
+
 Status CameraService::notifySystemEvent(int32_t eventId,
         const std::vector<int32_t>& args) {
     const int pid = CameraThreadState::getCallingPid();
@@ -2136,6 +2335,13 @@
             doUserSwitch(/*newUserIds*/ args);
             break;
         }
+        case ICameraService::EVENT_USB_DEVICE_ATTACHED:
+        case ICameraService::EVENT_USB_DEVICE_DETACHED: {
+            // Notify CameraProviderManager for lazy HALs
+            mCameraProviderManager->notifyUsbDeviceEvent(eventId,
+                                                        std::to_string(args[0]));
+            break;
+        }
         case ICameraService::EVENT_NONE:
         default: {
             ALOGW("%s: Received invalid system event from system_server: %d", __FUNCTION__,
@@ -2349,7 +2555,7 @@
     auto clientUid = CameraThreadState::getCallingUid();
     auto clientPid = CameraThreadState::getCallingPid();
     bool openCloseCallbackAllowed = checkPermission(sCameraOpenCloseListenerPermission,
-            clientPid, clientUid);
+            clientPid, clientUid, /*logPermissionFailure*/false);
 
     Mutex::Autolock lock(mServiceLock);
 
@@ -2386,7 +2592,8 @@
         Mutex::Autolock lock(mCameraStatesLock);
         for (auto& i : mCameraStates) {
             cameraStatuses->emplace_back(i.first,
-                    mapToInterface(i.second->getStatus()), i.second->getUnavailablePhysicalIds());
+                    mapToInterface(i.second->getStatus()), i.second->getUnavailablePhysicalIds(),
+                    openCloseCallbackAllowed ? i.second->getClientPackage() : String8::empty());
         }
     }
     // Remove the camera statuses that should be hidden from the client, we do
@@ -2524,6 +2731,7 @@
         case CAMERA_DEVICE_API_VERSION_3_5:
         case CAMERA_DEVICE_API_VERSION_3_6:
         case CAMERA_DEVICE_API_VERSION_3_7:
+        case CAMERA_DEVICE_API_VERSION_3_8:
             ALOGV("%s: Camera id %s uses HAL3.2 or newer, supports api1/api2 directly",
                     __FUNCTION__, id.string());
             *isSupported = true;
@@ -2561,7 +2769,7 @@
         const String16& externalCamId,
         const sp<ICameraInjectionCallback>& callback,
         /*out*/
-        sp<hardware::camera2::ICameraInjectionSession>* cameraInjectionSession) {
+        sp<ICameraInjectionSession>* cameraInjectionSession) {
     ATRACE_CALL();
 
     if (!checkCallingPermission(sCameraInjectExternalCameraPermission)) {
@@ -2578,18 +2786,36 @@
         __FUNCTION__, String8(packageName).string(),
         String8(internalCamId).string(), String8(externalCamId).string());
 
-    binder::Status ret = binder::Status::ok();
-    // TODO: Implement the injection camera function.
-    // ret = internalInjectCamera(...);
-    // if(!ret.isOk()) {
-    //     mInjectionStatusListener->notifyInjectionError(...);
-    //     return ret;
-    // }
+    {
+        Mutex::Autolock lock(mInjectionParametersLock);
+        mInjectionInternalCamId = String8(internalCamId);
+        mInjectionExternalCamId = String8(externalCamId);
+        mInjectionStatusListener->addListener(callback);
+        *cameraInjectionSession = new CameraInjectionSession(this);
+        status_t res = NO_ERROR;
+        auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
+        // If the client already exists, we can directly connect to the camera device through the
+        // client's injectCamera(), otherwise we need to wait until the client is established
+        // (execute connectHelper()) before injecting the camera to the camera device.
+        if (clientDescriptor != nullptr) {
+            mInjectionInitPending = false;
+            sp<BasicClient> clientSp = clientDescriptor->getValue();
+            res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+            if (res != OK) {
+                return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                        "No camera device with ID \"%s\" currently available",
+                        mInjectionExternalCamId.string());
+            }
+            res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+            if (res != OK) {
+                mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+            }
+        } else {
+            mInjectionInitPending = true;
+        }
+    }
 
-    mInjectionStatusListener->addListener(callback);
-    *cameraInjectionSession = new CameraInjectionSession(this);
-
-    return ret;
+    return binder::Status::ok();
 }
 
 void CameraService::removeByClient(const BasicClient* client) {
@@ -2597,6 +2823,7 @@
     for (auto& i : mActiveClientManager.getAll()) {
         auto clientSp = i->getValue();
         if (clientSp.get() == client) {
+            cacheClientTagDumpIfNeeded(client->mCameraIdStr, clientSp.get());
             mActiveClientManager.remove(i);
         }
     }
@@ -2673,7 +2900,11 @@
         return sp<BasicClient>{nullptr};
     }
 
-    return clientDescriptorPtr->getValue();
+    sp<BasicClient> client = clientDescriptorPtr->getValue();
+    if (client.get() != nullptr) {
+        cacheClientTagDumpIfNeeded(clientDescriptorPtr->getKey(), client.get());
+    }
+    return client;
 }
 
 void CameraService::doUserSwitch(const std::vector<int32_t>& newUserIds) {
@@ -3092,6 +3323,21 @@
     return OK;
 }
 
+status_t CameraService::BasicClient::startWatchingTags(const String8&, int) {
+    // Can't watch tags directly, must go through CameraService::startWatchingTags
+    return OK;
+}
+
+status_t CameraService::BasicClient::stopWatchingTags(int) {
+    // Can't watch tags directly, must go through CameraService::stopWatchingTags
+    return OK;
+}
+
+status_t CameraService::BasicClient::dumpWatchedEventsToVector(std::vector<std::string> &) {
+    // Can't watch tags directly, must go through CameraService::dumpWatchedEventsToVector
+    return OK;
+}
+
 String16 CameraService::BasicClient::getPackageName() const {
     return mClientPackageName;
 }
@@ -3750,6 +3996,16 @@
     return count > 0;
 }
 
+void CameraService::CameraState::setClientPackage(const String8& clientPackage) {
+    Mutex::Autolock lock(mStatusLock);
+    mClientPackage = clientPackage;
+}
+
+String8 CameraService::CameraState::getClientPackage() const {
+    Mutex::Autolock lock(mStatusLock);
+    return mClientPackage;
+}
+
 // ----------------------------------------------------------------------------
 //                  ClientEventListener
 // ----------------------------------------------------------------------------
@@ -3884,22 +4140,62 @@
 }
 
 void CameraService::InjectionStatusListener::notifyInjectionError(
-        int errorCode) {
-    Mutex::Autolock lock(mListenerLock);
+        String8 injectedCamId, status_t err) {
     if (mCameraInjectionCallback == nullptr) {
         ALOGW("InjectionStatusListener: mCameraInjectionCallback == nullptr");
         return;
     }
-    mCameraInjectionCallback->onInjectionError(errorCode);
+
+    switch (err) {
+        case -ENODEV:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+            ALOGE("No camera device with ID \"%s\" currently available!",
+                    injectedCamId.string());
+            break;
+        case -EBUSY:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+            ALOGE("Higher-priority client using camera, ID \"%s\" currently unavailable!",
+                    injectedCamId.string());
+            break;
+        case DEAD_OBJECT:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+            ALOGE("Camera ID \"%s\" object is dead!",
+                    injectedCamId.string());
+            break;
+        case INVALID_OPERATION:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+            ALOGE("Camera ID \"%s\" encountered an operating or internal error!",
+                    injectedCamId.string());
+            break;
+        case UNKNOWN_TRANSACTION:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_UNSUPPORTED);
+            ALOGE("Camera ID \"%s\" method doesn't support!",
+                    injectedCamId.string());
+            break;
+        default:
+            mCameraInjectionCallback->onInjectionError(
+                    ICameraInjectionCallback::ERROR_INJECTION_INVALID_ERROR);
+            ALOGE("Unexpected error %s (%d) opening camera \"%s\"!",
+                    strerror(-err), err, injectedCamId.string());
+    }
 }
 
 void CameraService::InjectionStatusListener::binderDied(
         const wp<IBinder>& /*who*/) {
-    Mutex::Autolock lock(mListenerLock);
     ALOGV("InjectionStatusListener: ICameraInjectionCallback has died");
     auto parent = mParent.promote();
     if (parent != nullptr) {
-        parent->stopInjectionImpl();
+        auto clientDescriptor = parent->mActiveClientManager.get(parent->mInjectionInternalCamId);
+        if (clientDescriptor != nullptr) {
+            BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+            baseClientPtr->stopInjection();
+        }
+        parent->clearInjectionParameters();
     }
 }
 
@@ -3915,7 +4211,20 @@
         return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SERVICE,
                 "Camera service encountered error");
     }
-    parent->stopInjectionImpl();
+
+    status_t res = NO_ERROR;
+    auto clientDescriptor = parent->mActiveClientManager.get(parent->mInjectionInternalCamId);
+    if (clientDescriptor != nullptr) {
+        BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+        res = baseClientPtr->stopInjection();
+        if (res != OK) {
+            ALOGE("CameraInjectionSession: Failed to stop the injection camera!"
+                " ret != NO_ERROR: %d", res);
+            return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SESSION,
+                "Camera session encountered error");
+        }
+    }
+    parent->clearInjectionParameters();
     return binder::Status::ok();
 }
 
@@ -4056,7 +4365,7 @@
 
     // Dump camera traces if there were any
     dprintf(fd, "\n");
-    camera3::CameraTraces::dump(fd, args);
+    camera3::CameraTraces::dump(fd);
 
     // Process dump arguments, if any
     int n = args.size();
@@ -4150,6 +4459,45 @@
     dprintf(fd, "\n");
 }
 
+void CameraService::cacheClientTagDumpIfNeeded(const char *cameraId, BasicClient* client) {
+    Mutex::Autolock lock(mLogLock);
+    if (!isClientWatchedLocked(client)) { return; }
+
+    std::vector<std::string> dumpVector;
+    client->dumpWatchedEventsToVector(dumpVector);
+
+    if (dumpVector.empty()) { return; }
+
+    std::string dumpString;
+
+    String8 currentTime = getFormattedCurrentTime();
+    dumpString += "Cached @ ";
+    dumpString += currentTime.string();
+    dumpString += "\n"; // First line is the timestamp of when client is cached.
+
+    const String16 &packageName = client->getPackageName();
+
+    String8 packageName8 = String8(packageName);
+    const char *printablePackageName = packageName8.lockBuffer(packageName.size());
+
+    size_t i = dumpVector.size();
+
+    // Store the string in reverse order (latest last)
+    while (i > 0) {
+        i--;
+        dumpString += cameraId;
+        dumpString += ":";
+        dumpString += printablePackageName;
+        dumpString += "  ";
+        dumpString += dumpVector[i]; // implicitly ends with '\n'
+    }
+
+    packageName8.unlockBuffer();
+    mWatchedClientsDumpCache[packageName] = dumpString;
+}
+
 void CameraService::handleTorchClientBinderDied(const wp<IBinder> &who) {
     Mutex::Autolock al(mTorchClientMapMutex);
     for (size_t i = 0; i < mTorchClientMap.size(); i++) {
@@ -4260,6 +4608,18 @@
 
 void CameraService::updateOpenCloseStatus(const String8& cameraId, bool open,
         const String16& clientPackageName) {
+    auto state = getCameraState(cameraId);
+    if (state == nullptr) {
+        ALOGW("%s: Could not update the status for %s, no such device exists", __FUNCTION__,
+                cameraId.string());
+        return;
+    }
+    if (open) {
+        state->setClientPackage(String8(clientPackageName));
+    } else {
+        state->setClientPackage(String8::empty());
+    }
+
     Mutex::Autolock lock(mStatusListenerLock);
 
     for (const auto& it : mListenerList) {
@@ -4433,9 +4793,11 @@
         return handleGetImageDumpMask(out);
     } else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
         return handleSetCameraMute(args);
+    } else if (args.size() >= 2 && args[0] == String16("watch")) {
+        return handleWatchCommand(args, in, out);
     } else if (args.size() == 1 && args[0] == String16("help")) {
         printHelp(out);
-        return NO_ERROR;
+        return OK;
     }
     printHelp(err);
     return BAD_VALUE;
@@ -4579,6 +4941,348 @@
     return OK;
 }
 
+status_t CameraService::handleWatchCommand(const Vector<String16>& args, int inFd, int outFd) {
+    if (args.size() >= 3 && args[1] == String16("start")) {
+        return startWatchingTags(args, outFd);
+    } else if (args.size() == 2 && args[1] == String16("stop")) {
+        return stopWatchingTags(outFd);
+    } else if (args.size() == 2 && args[1] == String16("dump")) {
+        return printWatchedTags(outFd);
+    } else if (args.size() >= 2 && args[1] == String16("live")) {
+        return printWatchedTagsUntilInterrupt(args, inFd, outFd);
+    } else if (args.size() == 2 && args[1] == String16("clear")) {
+        return clearCachedMonitoredTagDumps(outFd);
+    }
+    dprintf(outFd, "Camera service watch commands:\n"
+                 "  start -m <comma_separated_tag_list> [-c <comma_separated_client_list>]\n"
+                 "        starts watching the provided tags for clients with provided package\n"
+                 "        recognizes tag shorthands like '3a'\n"
+                 "        watches all clients if no client is passed, or if 'all' is listed\n"
+                 "  dump dumps the monitoring information and exits\n"
+                 "  stop stops watching all tags\n"
+                 "  live [-n <refresh_interval_ms>]\n"
+                 "        prints the monitored information in real time\n"
+                 "        Hit return to exit\n"
+                 "  clear clears all buffers storing information for watch command");
+  return BAD_VALUE;
+}
+
+status_t CameraService::startWatchingTags(const Vector<String16> &args, int outFd) {
+    Mutex::Autolock lock(mLogLock);
+    size_t tagsIdx; // index of '-m'
+    String16 tags("");
+    for (tagsIdx = 2; tagsIdx < args.size() && args[tagsIdx] != String16("-m"); tagsIdx++);
+    if (tagsIdx < args.size() - 1) {
+        tags = args[tagsIdx + 1];
+    } else {
+        dprintf(outFd, "No tags provided.\n");
+        return BAD_VALUE;
+    }
+
+    size_t clientsIdx; // index of '-c'
+    String16 clients = kWatchAllClientsFlag; // watch all clients if no clients are provided
+    for (clientsIdx = 2; clientsIdx < args.size() && args[clientsIdx] != String16("-c");
+         clientsIdx++);
+    if (clientsIdx < args.size() - 1) {
+        clients = args[clientsIdx + 1];
+    }
+    parseClientsToWatchLocked(String8(clients));
+
+    // track tags to initialize future clients with the monitoring information
+    mMonitorTags = String8(tags);
+
+    bool serviceLock = tryLock(mServiceLock);
+    int numWatchedClients = 0;
+    auto cameraClients = mActiveClientManager.getAll();
+    for (const auto &clientDescriptor: cameraClients) {
+        if (clientDescriptor == nullptr) { continue; }
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+
+        if (isClientWatchedLocked(client.get())) {
+            client->startWatchingTags(mMonitorTags, outFd);
+            numWatchedClients++;
+        }
+    }
+    dprintf(outFd, "Started watching %d active clients\n", numWatchedClients);
+
+    if (serviceLock) { mServiceLock.unlock(); }
+    return OK;
+}
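
The '-m'/'-c' handling above scans the argument vector for the flag and then takes the token that follows it as the value. A minimal standalone sketch of the same idea, using std::vector<std::string> instead of the Android Vector<String16> type; the helper name findFlagValue is invented for this example:

#include <iostream>
#include <string>
#include <vector>

// Return the token following `flag`, or `fallback` if the flag is absent or has no value.
static std::string findFlagValue(const std::vector<std::string>& args,
                                 const std::string& flag,
                                 const std::string& fallback) {
    for (size_t i = 0; i + 1 < args.size(); i++) {
        if (args[i] == flag) return args[i + 1];
    }
    return fallback;
}

int main() {
    std::vector<std::string> args = {"watch", "start", "-m", "3a", "-c", "com.example.app"};
    std::cout << findFlagValue(args, "-m", "") << "\n";    // "3a"
    std::cout << findFlagValue(args, "-c", "all") << "\n"; // "com.example.app"
    return 0;
}
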
+
+status_t CameraService::stopWatchingTags(int outFd) {
+    // clear mMonitorTags to prevent new clients from monitoring tags at initialization
+    Mutex::Autolock lock(mLogLock);
+    mMonitorTags = String8::empty();
+
+    mWatchedClientPackages.clear();
+    mWatchedClientsDumpCache.clear();
+
+    bool serviceLock = tryLock(mServiceLock);
+    auto cameraClients = mActiveClientManager.getAll();
+    for (const auto &clientDescriptor : cameraClients) {
+        if (clientDescriptor == nullptr) { continue; }
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+        client->stopWatchingTags(outFd);
+    }
+    dprintf(outFd, "Stopped watching all clients.\n");
+    if (serviceLock) { mServiceLock.unlock(); }
+    return OK;
+}
+
+status_t CameraService::clearCachedMonitoredTagDumps(int outFd) {
+    Mutex::Autolock lock(mLogLock);
+    size_t clearedSize = mWatchedClientsDumpCache.size();
+    mWatchedClientsDumpCache.clear();
+    dprintf(outFd, "Cleared tag information of %zu cached clients.\n", clearedSize);
+    return OK;
+}
+
+status_t CameraService::printWatchedTags(int outFd) {
+    Mutex::Autolock logLock(mLogLock);
+    std::set<String16> connectedMonitoredClients;
+
+    bool printedSomething = false; // tracks if any monitoring information was printed
+                                   // (from either cached or active clients)
+
+    bool serviceLock = tryLock(mServiceLock);
+    // get all watched clients that are currently connected
+    for (const auto &clientDescriptor: mActiveClientManager.getAll()) {
+        if (clientDescriptor == nullptr) { continue; }
+
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+        if (!isClientWatchedLocked(client.get())) { continue; }
+
+        std::vector<std::string> dumpVector;
+        client->dumpWatchedEventsToVector(dumpVector);
+
+        size_t printIdx = dumpVector.size();
+        if (printIdx == 0) {
+            continue;
+        }
+
+        // Print tag dumps for active client
+        const String8 &cameraId = clientDescriptor->getKey();
+        String8 packageName8 = String8(client->getPackageName());
+        const char *printablePackageName = packageName8.lockBuffer(packageName8.size());
+        dprintf(outFd, "Client: %s (active)\n", printablePackageName);
+        while (printIdx > 0) {
+            printIdx--;
+            dprintf(outFd, "%s:%s  %s", cameraId.string(), printablePackageName,
+                    dumpVector[printIdx].c_str());
+        }
+        dprintf(outFd, "\n");
+        packageName8.unlockBuffer();
+        printedSomething = true;
+
+        connectedMonitoredClients.emplace(client->getPackageName());
+    }
+    if (serviceLock) { mServiceLock.unlock(); }
+
+    // Print entries in mWatchedClientsDumpCache for clients that are not connected
+    for (const auto &kv: mWatchedClientsDumpCache) {
+        const String16 &package = kv.first;
+        if (connectedMonitoredClients.find(package) != connectedMonitoredClients.end()) {
+            continue;
+        }
+
+        dprintf(outFd, "Client: %s (cached)\n", String8(package).string());
+        dprintf(outFd, "%s\n", kv.second.c_str());
+        printedSomething = true;
+    }
+
+    if (!printedSomething) {
+        dprintf(outFd, "No monitoring information to print.\n");
+    }
+
+    return OK;
+}
+
+// Print all events in vector `events' that came after lastPrintedEvent
+void printNewWatchedEvents(int outFd,
+                           const char *cameraId,
+                           const String16 &packageName,
+                           const std::vector<std::string> &events,
+                           const std::string &lastPrintedEvent) {
+    if (events.empty()) { return; }
+
+    // index of lastPrintedEvent in events.
+    // lastPrintedIdx = events.size() if lastPrintedEvent is not in events
+    size_t lastPrintedIdx;
+    for (lastPrintedIdx = 0;
+         lastPrintedIdx < events.size() && lastPrintedEvent != events[lastPrintedIdx];
+         lastPrintedIdx++);
+
+    if (lastPrintedIdx == 0) { return; } // early exit if no new event in `events`
+
+    String8 packageName8(packageName);
+    const char *printablePackageName = packageName8.lockBuffer(packageName8.size());
+
+    // print events in chronological order (latest event last)
+    size_t idxToPrint = lastPrintedIdx;
+    do {
+        idxToPrint--;
+        dprintf(outFd, "%s:%s  %s", cameraId, printablePackageName, events[idxToPrint].c_str());
+    } while (idxToPrint != 0);
+
+    packageName8.unlockBuffer();
+}
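
The ordering convention here (index 0 holds the newest event; only entries newer than lastPrintedEvent are printed, oldest first) is easier to see in isolation. A standalone sketch with an invented helper name printNewEvents, not part of the patch:

#include <iostream>
#include <string>
#include <vector>

// events[0] is the newest entry; print everything newer than lastPrinted, oldest first.
static void printNewEvents(const std::vector<std::string>& events, const std::string& lastPrinted) {
    size_t stop = 0;
    while (stop < events.size() && events[stop] != lastPrinted) stop++;
    for (size_t i = stop; i > 0; i--) {
        std::cout << events[i - 1] << "\n";
    }
}

int main() {
    std::vector<std::string> events = {"E4", "E3", "E2", "E1"}; // newest first
    printNewEvents(events, "E2"); // prints E3 then E4
    return 0;
}
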
+
+// Returns true if adb shell cmd watch should be interrupted based on data in inFd. The watch
+// command should be interrupted if the user presses the return key, or if the user loses any way
+// to signal an interrupt.
+// If timeoutMs == 0, this function will always return false.
+bool shouldInterruptWatchCommand(int inFd, int outFd, long timeoutMs) {
+    struct timeval startTime;
+    int startTimeError = gettimeofday(&startTime, nullptr);
+    if (startTimeError) {
+        dprintf(outFd, "Failed waiting for interrupt, aborting.\n");
+        return true;
+    }
+
+    const nfds_t numFds = 1;
+    struct pollfd pollFd = { .fd = inFd, .events = POLLIN, .revents = 0 };
+
+    struct timeval currTime;
+    char buffer[2];
+    while (true) {
+        int currTimeError = gettimeofday(&currTime, nullptr);
+        if (currTimeError) {
+            dprintf(outFd, "Failed waiting for interrupt, aborting.\n");
+            return true;
+        }
+
+        long elapsedTimeMs = ((currTime.tv_sec - startTime.tv_sec) * 1000L)
+                + ((currTime.tv_usec - startTime.tv_usec) / 1000L);
+        int remainingTimeMs = (int) (timeoutMs - elapsedTimeMs);
+
+        if (remainingTimeMs <= 0) {
+            // No user interrupt within timeoutMs, don't interrupt watch command
+            return false;
+        }
+
+        int numFdsUpdated = poll(&pollFd, numFds, remainingTimeMs);
+        if (numFdsUpdated < 0) {
+            dprintf(outFd, "Failed while waiting for user input. Exiting.\n");
+            return true;
+        }
+
+        if (numFdsUpdated == 0) {
+            // No user input within timeoutMs, don't interrupt watch command
+            return false;
+        }
+
+        if (!(pollFd.revents & POLLIN)) {
+            dprintf(outFd, "Failed while waiting for user input. Exiting.\n");
+            return true;
+        }
+
+        ssize_t sizeRead = read(inFd, buffer, sizeof(buffer) - 1);
+        if (sizeRead < 0) {
+            dprintf(outFd, "Error reading user input. Exiting.\n");
+            return true;
+        }
+
+        if (sizeRead == 0) {
+            // Reached end of input fd (can happen if input is piped)
+            // User has no way to signal an interrupt, so interrupt here
+            return true;
+        }
+
+        if (buffer[0] == '\n') {
+            // User pressed return, interrupt watch command.
+            return true;
+        }
+    }
+}
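
Stripped of the elapsed-time bookkeeping, the core of the check above is a single poll() on the input descriptor followed by a one-byte read. A simplified standalone sketch (single poll, no re-arm on stray input; the helper name waitForReturnKey is invented):

#include <cstdio>
#include <poll.h>
#include <unistd.h>

// Wait up to timeoutMs for a newline on fd; return true if the caller should stop.
static bool waitForReturnKey(int fd, int timeoutMs) {
    struct pollfd pfd = { fd, POLLIN, 0 };
    int n = poll(&pfd, 1, timeoutMs);
    if (n < 0) return true;   // poll error: give up waiting
    if (n == 0) return false; // timeout: keep refreshing
    char c;
    ssize_t r = read(fd, &c, 1);
    if (r <= 0) return true;  // EOF or read error: no way to signal an interrupt later
    return c == '\n';
}

int main() {
    printf("Press return within 3 seconds to stop...\n");
    printf("%s\n", waitForReturnKey(STDIN_FILENO, 3000) ? "stopped" : "timed out");
    return 0;
}
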
+
+status_t CameraService::printWatchedTagsUntilInterrupt(const Vector<String16> &args,
+                                                       int inFd, int outFd) {
+    // Figure out refresh interval, if present in args
+    long refreshTimeoutMs = 1000L; // refresh every 1s by default
+    if (args.size() > 2) {
+        size_t intervalIdx; // index of '-n'
+        for (intervalIdx = 2; intervalIdx < args.size() && String16("-n") != args[intervalIdx];
+             intervalIdx++);
+
+        size_t intervalValIdx = intervalIdx + 1;
+        if (intervalValIdx < args.size()) {
+            errno = 0; // clear any stale value before strtol() so the check below is meaningful
+            refreshTimeoutMs = strtol(String8(args[intervalValIdx].string()), nullptr, 10);
+            if (errno) { return BAD_VALUE; }
+        }
+    }
+
+    // Set min timeout of 10ms. This prevents edge cases in polling when timeout of 0 is passed.
+    refreshTimeoutMs = refreshTimeoutMs < 10 ? 10 : refreshTimeoutMs;
+
+    dprintf(outFd, "Press return to exit...\n\n");
+    std::map<String16, std::string> packageNameToLastEvent;
+
+    while (true) {
+        bool serviceLock = tryLock(mServiceLock);
+        auto cameraClients = mActiveClientManager.getAll();
+        if (serviceLock) { mServiceLock.unlock(); }
+
+        for (const auto& clientDescriptor : cameraClients) {
+            Mutex::Autolock lock(mLogLock);
+            if (clientDescriptor == nullptr) { continue; }
+
+            sp<BasicClient> client = clientDescriptor->getValue();
+            if (client.get() == nullptr) { continue; }
+            if (!isClientWatchedLocked(client.get())) { continue; }
+
+            const String16 &packageName = client->getPackageName();
+            // This also initializes the map entries with an empty string
+            const std::string& lastPrintedEvent = packageNameToLastEvent[packageName];
+
+            std::vector<std::string> latestEvents;
+            client->dumpWatchedEventsToVector(latestEvents);
+
+            if (!latestEvents.empty()) {
+                String8 cameraId = clientDescriptor->getKey();
+                const char *printableCameraId = cameraId.lockBuffer(cameraId.size());
+                printNewWatchedEvents(outFd,
+                                      printableCameraId,
+                                      packageName,
+                                      latestEvents,
+                                      lastPrintedEvent);
+                packageNameToLastEvent[packageName] = latestEvents[0];
+                cameraId.unlockBuffer();
+            }
+        }
+        if (shouldInterruptWatchCommand(inFd, outFd, refreshTimeoutMs)) {
+            break;
+        }
+    }
+    return OK;
+}
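
The '-n' value above is parsed with strtol plus an errno check. For reference, a more defensive standalone form, which also validates the end pointer and rejects non-positive values, is sketched below; parseIntervalMs is an invented helper and goes beyond what the patch itself does:

#include <cerrno>
#include <cstdio>
#include <cstdlib>

// Parse a millisecond interval, falling back to `fallback` on malformed or non-positive input.
static long parseIntervalMs(const char* arg, long fallback) {
    errno = 0;
    char* end = nullptr;
    long value = std::strtol(arg, &end, 10);
    if (errno != 0 || end == arg || *end != '\0' || value <= 0) {
        return fallback;
    }
    return value;
}

int main() {
    std::printf("%ld\n", parseIntervalMs("250", 1000)); // 250
    std::printf("%ld\n", parseIntervalMs("abc", 1000)); // 1000
    return 0;
}
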
+
+void CameraService::parseClientsToWatchLocked(String8 clients) {
+    mWatchedClientPackages.clear();
+
+    // Keep the String8 alive for as long as the C string pointer is used.
+    const String8 allSentinelStr(kWatchAllClientsFlag);
+    const char *allSentinel = allSentinelStr.string();
+
+    char *tokenized = clients.lockBuffer(clients.size());
+    char *savePtr;
+    char *nextClient = strtok_r(tokenized, ",", &savePtr);
+
+    while (nextClient != nullptr) {
+        if (strcmp(nextClient, allSentinel) == 0) {
+            // Don't need to track any other package if 'all' is present
+            mWatchedClientPackages.clear();
+            mWatchedClientPackages.emplace(kWatchAllClientsFlag);
+            break;
+        }
+
+        // track package names
+        mWatchedClientPackages.emplace(nextClient);
+        nextClient = strtok_r(nullptr, ",", &savePtr);
+    }
+    clients.unlockBuffer();
+}
+
 status_t CameraService::printHelp(int out) {
     return dprintf(out, "Camera service commands:\n"
         "  get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
@@ -4591,9 +5295,20 @@
         "      Valid values 0=OFF, 1=ON for JPEG\n"
         "  get-image-dump-mask returns the current image-dump-mask value\n"
         "  set-camera-mute <0/1> enable or disable camera muting\n"
+        "  watch <start|stop|dump|print|clear> manages tag monitoring in connected clients\n"
         "  help print this message\n");
 }
 
+bool CameraService::isClientWatched(const BasicClient *client) {
+    Mutex::Autolock lock(mLogLock);
+    return isClientWatchedLocked(client);
+}
+
+bool CameraService::isClientWatchedLocked(const BasicClient *client) {
+    return mWatchedClientPackages.find(kWatchAllClientsFlag) != mWatchedClientPackages.end() ||
+           mWatchedClientPackages.find(client->getPackageName()) != mWatchedClientPackages.end();
+}
+
 int32_t CameraService::updateAudioRestriction() {
     Mutex::Autolock lock(mServiceLock);
     return updateAudioRestrictionLocked();
@@ -4615,10 +5330,43 @@
     return mode;
 }
 
-void CameraService::stopInjectionImpl() {
-    mInjectionStatusListener->removeListener();
+status_t CameraService::checkIfInjectionCameraIsPresent(const String8& externalCamId,
+        sp<BasicClient> clientSp) {
+    std::unique_ptr<AutoConditionLock> lock =
+            AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
+    status_t res = NO_ERROR;
+    if ((res = checkIfDeviceIsUsable(externalCamId)) != NO_ERROR) {
+        ALOGW("Device %s is not usable!", externalCamId.string());
+        mInjectionStatusListener->notifyInjectionError(
+                externalCamId, UNKNOWN_TRANSACTION);
+        clientSp->notifyError(
+                hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                CaptureResultExtras());
 
-    // TODO: Implement the stop injection function.
+        // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+        // other clients from connecting in mServiceLockWrapper if held
+        mServiceLock.unlock();
+
+        // Clear caller identity temporarily so client disconnect PID checks work correctly
+        int64_t token = CameraThreadState::clearCallingIdentity();
+        clientSp->disconnect();
+        CameraThreadState::restoreCallingIdentity(token);
+
+        // Reacquire mServiceLock
+        mServiceLock.lock();
+    }
+
+    return res;
+}
+
+void CameraService::clearInjectionParameters() {
+    {
+        Mutex::Autolock lock(mInjectionParametersLock);
+        mInjectionInitPending = false;
+        mInjectionInternalCamId = "";
+    }
+    mInjectionExternalCamId = "";
+    mInjectionStatusListener->removeListener();
 }
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index d5feeeb..701d6b7 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -172,6 +172,12 @@
     virtual binder::Status    setTorchMode(const String16& cameraId, bool enabled,
             const sp<IBinder>& clientBinder);
 
+    virtual binder::Status    turnOnTorchWithStrengthLevel(const String16& cameraId,
+            int32_t torchStrength, const sp<IBinder>& clientBinder);
+
+    virtual binder::Status    getTorchStrengthLevel(const String16& cameraId,
+            int32_t* torchStrength);
+
     virtual binder::Status    notifySystemEvent(int32_t eventId,
             const std::vector<int32_t>& args);
 
@@ -280,6 +286,10 @@
         // Internal dump method to be called by CameraService
         virtual status_t dumpClient(int fd, const Vector<String16>& args) = 0;
 
+        virtual status_t startWatchingTags(const String8 &tags, int outFd);
+        virtual status_t stopWatchingTags(int outFd);
+        virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out);
+
         // Return the package name for this client
         virtual String16 getPackageName() const;
 
@@ -328,6 +338,14 @@
         // Set/reset camera mute
         virtual status_t setCameraMute(bool enabled) = 0;
 
+        // The injection camera session to replace the internal camera
+        // session.
+        virtual status_t injectCamera(const String8& injectedCamId,
+                sp<CameraProviderManager> manager) = 0;
+
+        // Stop the injection camera and restore to internal camera session.
+        virtual status_t stopInjection() = 0;
+
     protected:
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
@@ -641,6 +659,12 @@
         bool removeUnavailablePhysicalId(const String8& physicalId);
 
         /**
+         * Set and get client package name.
+         */
+        void setClientPackage(const String8& clientPackage);
+        String8 getClientPackage() const;
+
+        /**
          * Return the unavailable physical ids for this device.
          *
          * This method acquires mStatusLock.
@@ -652,6 +676,7 @@
         const int mCost;
         std::set<String8> mConflicting;
         std::set<String8> mUnavailablePhysicalIds;
+        String8 mClientPackage;
         mutable Mutex mStatusLock;
         CameraParameters mShimParams;
         const SystemCameraKind mSystemCameraKind;
@@ -825,6 +850,14 @@
     RingBuffer<String8> mEventLog;
     Mutex mLogLock;
 
+    // Set of client package names to watch. If this set contains 'all', then all clients will
+    // be watched. Access should be guarded by mLogLock.
+    std::set<String16> mWatchedClientPackages;
+    // Cache of the last monitored tags dump, taken immediately before the client disconnects. If
+    // a client re-connects, its entry is not updated until it disconnects again. Access should be
+    // guarded by mLogLock.
+    std::map<String16, std::string> mWatchedClientsDumpCache;
+
     // The last monitored tags set by client
     String8 mMonitorTags;
 
@@ -957,6 +990,8 @@
      */
     void dumpEventLog(int fd);
 
+    void cacheClientTagDumpIfNeeded(const char *cameraId, BasicClient *client);
+
     /**
      * This method will acquire mServiceLock
      */
@@ -1149,9 +1184,43 @@
     // Set the camera mute state
     status_t handleSetCameraMute(const Vector<String16>& args);
 
+    // Handle 'watch' command as passed through 'cmd'
+    status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
+
+    // Enable tag monitoring of the given tags in provided clients
+    status_t startWatchingTags(const Vector<String16> &args, int outFd);
+
+    // Disable tag monitoring
+    status_t stopWatchingTags(int outFd);
+
+    // Clears mWatchedClientsDumpCache
+    status_t clearCachedMonitoredTagDumps(int outFd);
+
+    // Print events of monitored tags in all cached and attached clients
+    status_t printWatchedTags(int outFd);
+
+    // Print events of monitored tags in all attached clients as they are captured. New events are
+    // fetched at the refresh interval passed via '-n' (default 1000 ms).
+    // NOTE: This function does not terminate until the user passes '\n' to inFd or inFd is closed.
+    status_t printWatchedTagsUntilInterrupt(const Vector<String16> &args, int inFd, int outFd);
+
+    // Parses comma separated clients list and adds them to mWatchedClientPackages.
+    // Does not acquire mLogLock before modifying mWatchedClientPackages. It is the caller's
+    // responsibility to acquire mLogLock before calling this function.
+    void parseClientsToWatchLocked(String8 clients);
+
     // Prints the shell command help
     status_t printHelp(int out);
 
+    // Returns true if client should monitor tags based on the contents of mWatchedClientPackages.
+    // Acquires mLogLock before querying mWatchedClientPackages.
+    bool isClientWatched(const BasicClient *client);
+
+    // Returns true if client should monitor tags based on the contents of mWatchedClientPackages.
+    // Does not acquire mLogLock before querying mWatchedClientPackages. It is the caller's
+    // responsibility to acquire mLogLock before calling this function.
+    bool isClientWatchedLocked(const BasicClient *client);
+
     /**
      * Get the current system time as a formatted string.
      */
@@ -1176,12 +1245,18 @@
             hardware::camera::common::V1_0::TorchModeStatus status,
             SystemCameraKind systemCameraKind);
 
+    void broadcastTorchStrengthLevel(const String8& cameraId, int32_t newTorchStrengthLevel);
+
     void disconnectClient(const String8& id, sp<BasicClient> clientToDisconnect);
 
     // Regular online and offline devices must not be in conflict at camera service layer.
     // Use separate keys for offline devices.
     static const String8 kOfflineDevice;
 
+    // Sentinel value to be stored in `mWatchedClientPackages` to indicate that all clients should
+    // be watched.
+    static const String16 kWatchAllClientsFlag;
+
     // TODO: right now each BasicClient holds one AppOpsManager instance.
     // We can refactor the code so all of clients share this instance
     AppOpsManager mAppOps;
@@ -1209,7 +1284,7 @@
 
             void addListener(const sp<hardware::camera2::ICameraInjectionCallback>& callback);
             void removeListener();
-            void notifyInjectionError(int errorCode);
+            void notifyInjectionError(String8 injectedCamId, status_t err);
 
             // IBinder::DeathRecipient implementation
             virtual void binderDied(const wp<IBinder>& who);
@@ -1236,7 +1311,22 @@
             wp<CameraService> mParent;
     };
 
-    void stopInjectionImpl();
+    // When injecting a camera, check whether the camera being injected is currently usable.
+    // If it is not, the client is disconnected to prevent camera access on the device.
+    status_t checkIfInjectionCameraIsPresent(const String8& externalCamId,
+            sp<BasicClient> clientSp);
+
+    void clearInjectionParameters();
+
+    // This is the existing camera id being replaced.
+    String8 mInjectionInternalCamId;
+    // This is the external camera id replacing the internal id.
+    String8 mInjectionExternalCamId;
+    bool mInjectionInitPending = false;
+    // Guard mInjectionInternalCamId and mInjectionInitPending.
+    Mutex mInjectionParametersLock;
+
+    void updateTorchUidMapLocked(const String16& cameraId, int uid);
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 80508e4..a406e62 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -864,7 +864,6 @@
 
     if (fabs(maxDigitalZoom.data.f[0] - 1.f) > 0.00001f) {
         params.set(CameraParameters::KEY_ZOOM, zoom);
-        params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
 
         {
             String8 zoomRatios;
@@ -872,18 +871,34 @@
             float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
                     (NUM_ZOOM_STEPS-1);
             bool addComma = false;
-            for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
+            int previousZoom = -1;
+            size_t zoomSteps = 0;
+            for (size_t i = 0; i < NUM_ZOOM_STEPS; i++) {
+                int currentZoom = static_cast<int>(zoom * 100);
+                if (previousZoom == currentZoom) {
+                    zoom += zoomIncrement;
+                    continue;
+                }
                 if (addComma) zoomRatios += ",";
                 addComma = true;
-                zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
+                zoomRatios += String8::format("%d", currentZoom);
                 zoom += zoomIncrement;
+                previousZoom = currentZoom;
+                zoomSteps++;
             }
-            params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+
+            if (zoomSteps > 0) {
+                params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+                params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                        CameraParameters::TRUE);
+                params.set(CameraParameters::KEY_MAX_ZOOM, zoomSteps - 1);
+                zoomAvailable = true;
+            } else {
+                params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                        CameraParameters::FALSE);
+            }
         }
 
-        params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
-                CameraParameters::TRUE);
-        zoomAvailable = true;
     } else {
         params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
                 CameraParameters::FALSE);
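
The intent of the reworked loop above: when the linear zoom ramp rounds to the same integer percentage twice, the duplicate entry is dropped, and KEY_MAX_ZOOM is derived from the number of surviving entries rather than the fixed step count. A standalone sketch of that reading (buildZoomRatios is an invented name, not part of the patch):

#include <cstdio>
#include <vector>

// Build the zoom-ratio list (in percent) for a linear ramp from 1.0x to maxZoom,
// dropping consecutive duplicates produced when the increment rounds to the same integer.
static std::vector<int> buildZoomRatios(float maxZoom, size_t numSteps) {
    std::vector<int> ratios;
    if (numSteps < 2) {
        ratios.push_back(100);
        return ratios;
    }
    float zoom = 1.0f;
    float increment = (maxZoom - zoom) / (numSteps - 1);
    int previous = -1;
    for (size_t i = 0; i < numSteps; i++, zoom += increment) {
        int current = static_cast<int>(zoom * 100);
        if (current == previous) continue;
        ratios.push_back(current);
        previous = current;
    }
    return ratios;
}

int main() {
    // With a small max zoom, consecutive steps round to the same percentage and are dropped.
    std::vector<int> ratios = buildZoomRatios(1.02f, 30);
    std::printf("entries=%zu maxZoomIndex=%zu\n", ratios.size(), ratios.size() - 1);
    return 0;
}
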
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 83b8e95..a7ebcf4 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -51,8 +51,8 @@
 
 namespace android {
 using namespace camera2;
+using namespace camera3;
 using camera3::camera_stream_rotation_t::CAMERA_STREAM_ROTATION_0;
-using camera3::SessionConfigurationUtils;
 
 CameraDeviceClientBase::CameraDeviceClientBase(
         const sp<CameraService>& cameraService,
@@ -140,6 +140,40 @@
                 physicalKeysEntry.data.i32 + physicalKeysEntry.count);
     }
 
+    auto entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    mDynamicProfileMap.emplace(
+            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
+    if (entry.count > 0) {
+        const auto it = std::find(entry.data.i32, entry.data.i32 + entry.count,
+                ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT);
+        if (it != entry.data.i32 + entry.count) {
+            entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP);
+            if ((entry.count > 0) && ((entry.count % 2) == 0)) {
+                int standardBitmap = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+                for (size_t i = 0; i < entry.count; i += 2) {
+                    if (entry.data.i32[i] !=
+                            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+                        mDynamicProfileMap.emplace(entry.data.i32[i], entry.data.i32[i+1]);
+                        if ((entry.data.i32[i+1] == 0) || (entry.data.i32[i+1] &
+                                ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD)) {
+                            standardBitmap |= entry.data.i32[i];
+                        }
+                    } else {
+                        ALOGE("%s: Device %s includes unexpected profile entry: 0x%x!",
+                                __FUNCTION__, mCameraIdStr.c_str(), entry.data.i32[i]);
+                    }
+                }
+                mDynamicProfileMap.emplace(
+                        ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+                        standardBitmap);
+            } else {
+                ALOGE("%s: Device %s supports 10-bit output but doesn't include a dynamic range"
+                        " profile map!", __FUNCTION__, mCameraIdStr.c_str());
+            }
+        }
+    }
+
     mProviderManager = providerPtr;
     // Cache physical camera ids corresponding to this device and also the high
     // resolution sensors in this device + physical camera ids
@@ -297,6 +331,7 @@
         SurfaceMap surfaceMap;
         Vector<int32_t> outputStreamIds;
         std::vector<std::string> requestedPhysicalIds;
+        int dynamicProfileBitmap = 0;
         if (request.mSurfaceList.size() > 0) {
             for (const sp<Surface>& surface : request.mSurfaceList) {
                 if (surface == 0) continue;
@@ -313,6 +348,8 @@
                     String8 requestedPhysicalId(
                             mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
                     requestedPhysicalIds.push_back(requestedPhysicalId.string());
+                    dynamicProfileBitmap |=
+                            mConfiguredOutputs.valueAt(index).getDynamicRangeProfile();
                 } else {
                     ALOGW("%s: Output stream Id not found among configured outputs!", __FUNCTION__);
                 }
@@ -348,6 +385,41 @@
                 String8 requestedPhysicalId(
                         mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
                 requestedPhysicalIds.push_back(requestedPhysicalId.string());
+                dynamicProfileBitmap |=
+                        mConfiguredOutputs.valueAt(index).getDynamicRangeProfile();
+            }
+        }
+
+        if (dynamicProfileBitmap !=
+                    ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+            for (int i = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+                    i < ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_MAX; i <<= 1) {
+                if ((dynamicProfileBitmap & i) == 0) {
+                    continue;
+                }
+
+                const auto& it = mDynamicProfileMap.find(i);
+                if (it != mDynamicProfileMap.end()) {
+                    if ((it->second == 0) ||
+                            ((it->second & dynamicProfileBitmap) == dynamicProfileBitmap)) {
+                        continue;
+                    } else {
+                        ALOGE("%s: Camera %s: Tried to submit a request with a surfaces that"
+                                " reference an unsupported dynamic range profile combination"
+                                " 0x%x!", __FUNCTION__, mCameraIdStr.string(),
+                                dynamicProfileBitmap);
+                        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                                "Request targets an unsupported dynamic range profile"
+                                " combination");
+                    }
+                } else {
+                    ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
+                            " references unsupported dynamic range profile 0x%x!",
+                            __FUNCTION__, mCameraIdStr.string(), i);
+                    return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                            "Request targets 10-bit Surface with unsupported dynamic range"
+                            " profile");
+                }
             }
         }
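
The per-request validation above treats mDynamicProfileMap as a map from a profile bit to the bitmap of profiles it may be combined with in the same request (0 meaning unconstrained). A standalone sketch of that check, using invented names and hypothetical profile bit values rather than the real ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_* constants:

#include <cstdio>
#include <unordered_map>

// profileMap: profile bit -> bitmap of profiles it may share a request with (0 = unconstrained).
// Returns true when every profile bit set in requestedBitmap allows the full combination.
static bool isCombinationSupported(int requestedBitmap,
                                   const std::unordered_map<int, int>& profileMap) {
    for (int bit = 1; bit != 0 && bit <= requestedBitmap; bit <<= 1) {
        if ((requestedBitmap & bit) == 0) continue;
        auto it = profileMap.find(bit);
        if (it == profileMap.end()) return false; // profile not supported at all
        if (it->second != 0 && (it->second & requestedBitmap) != requestedBitmap) {
            return false;                         // constraint excludes another requested profile
        }
    }
    return true;
}

int main() {
    // Hypothetical profile bits: STANDARD = 0x1, HLG10 = 0x2, HDR10 = 0x4.
    std::unordered_map<int, int> profileMap = {
        {0x1, 0},          // STANDARD: unconstrained
        {0x2, 0x2 | 0x1},  // HLG10: may combine with STANDARD
        {0x4, 0x4},        // HDR10: may only appear alone
    };
    std::printf("%d\n", isCombinationSupported(0x1 | 0x2, profileMap)); // 1
    std::printf("%d\n", isCombinationSupported(0x2 | 0x4, profileMap)); // 0
    return 0;
}
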
 
@@ -638,7 +710,7 @@
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
-    hardware::camera::device::V3_7::StreamConfiguration streamConfiguration;
+    hardware::camera::device::V3_8::StreamConfiguration streamConfiguration;
     bool earlyExit = false;
     camera3::metadataGetter getMetadata = [this](const String8 &id, bool /*overrideForPerfClass*/) {
           return mDevice->infoPhysical(id);};
@@ -801,6 +873,7 @@
     String8 physicalCameraId = String8(outputConfiguration.getPhysicalCameraId());
     bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
     bool isMultiResolution = outputConfiguration.isMultiResolution();
+    int dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
 
     res = SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
             outputConfiguration.getSurfaceType());
@@ -844,7 +917,7 @@
         sp<Surface> surface;
         res = SessionConfigurationUtils::createSurfaceFromGbp(streamInfo,
                 isStreamInfoValid, surface, bufferProducer, mCameraIdStr,
-                mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed);
+                mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed, dynamicRangeProfile);
 
         if (!res.isOk())
             return res;
@@ -888,7 +961,8 @@
                 streamInfo.height, streamInfo.format, streamInfo.dataSpace,
                 static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
                 &streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
-                outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution);
+                outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution,
+                streamInfo.dynamicRangeProfile);
     }
 
     if (err != OK) {
@@ -982,7 +1056,8 @@
             overriddenSensorPixelModesUsed,
             &surfaceIds,
             outputConfiguration.getSurfaceSetID(), isShared,
-            outputConfiguration.isMultiResolution(), consumerUsage);
+            outputConfiguration.isMultiResolution(), consumerUsage,
+            outputConfiguration.getDynamicRangeProfile());
 
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -995,7 +1070,8 @@
         mDeferredStreams.push_back(streamId);
         mStreamInfoMap.emplace(std::piecewise_construct, std::forward_as_tuple(streamId),
                 std::forward_as_tuple(width, height, format, dataSpace, consumerUsage,
-                        overriddenSensorPixelModesUsed));
+                        overriddenSensorPixelModesUsed,
+                        outputConfiguration.getDynamicRangeProfile()));
 
         ALOGV("%s: Camera %s: Successfully created a new stream ID %d for a deferred surface"
                 " (%d x %d) stream with format 0x%x.",
@@ -1184,12 +1260,14 @@
     const std::vector<int32_t> &sensorPixelModesUsed =
             outputConfiguration.getSensorPixelModesUsed();
 
+    int dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
+
     for (size_t i = 0; i < newOutputsMap.size(); i++) {
         OutputStreamInfo outInfo;
         sp<Surface> surface;
         res = SessionConfigurationUtils::createSurfaceFromGbp(outInfo,
                 /*isStreamInfoValid*/ false, surface, newOutputsMap.valueAt(i), mCameraIdStr,
-                mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed);
+                mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed, dynamicRangeProfile);
         if (!res.isOk())
             return res;
 
@@ -1546,6 +1624,7 @@
     std::vector<sp<Surface>> consumerSurfaces;
     const std::vector<int32_t> &sensorPixelModesUsed =
             outputConfiguration.getSensorPixelModesUsed();
+    int dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
     for (auto& bufferProducer : bufferProducers) {
         // Don't create multiple streams for the same target surface
         ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
@@ -1558,7 +1637,7 @@
         sp<Surface> surface;
         res = SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
                 true /*isStreamInfoValid*/, surface, bufferProducer, mCameraIdStr,
-                mDevice->infoPhysical(physicalId), sensorPixelModesUsed);
+                mDevice->infoPhysical(physicalId), sensorPixelModesUsed, dynamicRangeProfile);
 
         if (!res.isOk())
             return res;
@@ -1804,6 +1883,35 @@
     return dumpDevice(fd, args);
 }
 
+status_t CameraDeviceClient::startWatchingTags(const String8 &tags, int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached.");
+        return OK;
+    }
+    device->startWatchingTags(tags);
+    return OK;
+}
+
+status_t CameraDeviceClient::stopWatchingTags(int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached.");
+        return OK;
+    }
+    device->stopWatchingTags();
+    return OK;
+}
+
+status_t CameraDeviceClient::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        return OK;
+    }
+    device->dumpWatchedEventsToVector(out);
+    return OK;
+}
+
 void CameraDeviceClient::notifyError(int32_t errorCode,
                                      const CaptureResultExtras& resultExtras) {
     // Thread safe. Don't bother locking.
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 76b3f53..77cdf9c 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -22,6 +22,7 @@
 #include <camera/camera2/OutputConfiguration.h>
 #include <camera/camera2/SessionConfiguration.h>
 #include <camera/camera2/SubmitInfo.h>
+#include <unordered_map>
 
 #include "CameraOfflineSessionClient.h"
 #include "CameraService.h"
@@ -199,6 +200,10 @@
 
     virtual status_t      dumpClient(int fd, const Vector<String16>& args);
 
+    virtual status_t      startWatchingTags(const String8 &tags, int out);
+    virtual status_t      stopWatchingTags(int out);
+    virtual status_t      dumpWatchedEventsToVector(std::vector<std::string> &out);
+
     /**
      * Device listener interface
      */
@@ -299,6 +304,10 @@
     // Stream ID -> OutputConfiguration. Used for looking up Surface by stream/surface index
     KeyedVector<int32_t, hardware::camera2::params::OutputConfiguration> mConfiguredOutputs;
 
+    // Dynamic range profile id -> Supported dynamic profiles bitmap within a single capture
+    // request
+    std::unordered_map<int, int> mDynamicProfileMap;
+
     struct InputStreamConfiguration {
         bool configured;
         int32_t width;
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index ef15f2d..10fa33f 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -110,6 +110,18 @@
     return OK;
 }
 
+status_t CameraOfflineSessionClient::startWatchingTags(const String8 &tags, int outFd) {
+    return BasicClient::startWatchingTags(tags, outFd);
+}
+
+status_t CameraOfflineSessionClient::stopWatchingTags(int outFd) {
+    return BasicClient::stopWatchingTags(outFd);
+}
+
+status_t CameraOfflineSessionClient::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    return BasicClient::dumpWatchedEventsToVector(out);
+}
+
 binder::Status CameraOfflineSessionClient::disconnect() {
     Mutex::Autolock icl(mBinderSerializationLock);
 
@@ -330,5 +342,19 @@
                 CaptureResultExtras());
 }
 
+status_t CameraOfflineSessionClient::injectCamera(const String8& injectedCamId,
+            sp<CameraProviderManager> manager) {
+    ALOGV("%s: This client doesn't support the injection camera. injectedCamId: %s providerPtr: %p",
+            __FUNCTION__, injectedCamId.string(), manager.get());
+
+    return OK;
+}
+
+status_t CameraOfflineSessionClient::stopInjection() {
+    ALOGV("%s: This client doesn't support the injection camera.", __FUNCTION__);
+
+    return OK;
+}
+
 // ----------------------------------------------------------------------------
 }; // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index b219a4c..920a176 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -71,6 +71,10 @@
 
     status_t dumpClient(int /*fd*/, const Vector<String16>& /*args*/) override;
 
+    status_t startWatchingTags(const String8 &tags, int outFd) override;
+    status_t stopWatchingTags(int outFd) override;
+    status_t dumpWatchedEventsToVector(std::vector<std::string> &out) override;
+
     status_t initialize(sp<CameraProviderManager> /*manager*/,
             const String8& /*monitorTags*/) override;
 
@@ -98,6 +102,9 @@
     void notifyPrepared(int streamId) override;
     void notifyRequestQueueEmpty() override;
     void notifyRepeatingRequestError(long lastFrameNumber) override;
+    status_t injectCamera(const String8& injectedCamId,
+            sp<CameraProviderManager> manager) override;
+    status_t stopInjection() override;
 
 private:
     mutable Mutex mBinderSerializationLock;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 5e086c0..5d17c11 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -156,6 +156,38 @@
 }
 
 template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::startWatchingTags(const String8 &tags, int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached");
+        return OK;
+    }
+
+    return device->startWatchingTags(tags);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::stopWatchingTags(int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached");
+        return OK;
+    }
+
+    return device->stopWatchingTags();
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        // Nothing to dump if the device is detached
+        return OK;
+    }
+    return device->dumpWatchedEventsToVector(out);
+}
+
+template <typename TClientBase>
 status_t Camera2ClientBase<TClientBase>::dumpDevice(
                                                 int fd,
                                                 const Vector<String16>& args) {
@@ -414,6 +446,17 @@
     mRemoteCallback.clear();
 }
 
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::injectCamera(const String8& injectedCamId,
+        sp<CameraProviderManager> manager) {
+    return mDevice->injectCamera(injectedCamId, manager);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::stopInjection() {
+    return mDevice->stopInjection();
+}
+
 template class Camera2ClientBase<CameraService::Client>;
 template class Camera2ClientBase<CameraDeviceClientBase>;
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index c49ea2c..4688502 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -62,6 +62,9 @@
 
     virtual status_t      initialize(sp<CameraProviderManager> manager, const String8& monitorTags);
     virtual status_t      dumpClient(int fd, const Vector<String16>& args);
+    virtual status_t      startWatchingTags(const String8 &tags, int out);
+    virtual status_t      stopWatchingTags(int out);
+    virtual status_t      dumpWatchedEventsToVector(std::vector<std::string> &out);
 
     /**
      * NotificationListener implementation
@@ -115,6 +118,10 @@
         mutable Mutex mRemoteCallbackLock;
     } mSharedCameraCallbacks;
 
+    status_t      injectCamera(const String8& injectedCamId,
+                               sp<CameraProviderManager> manager) override;
+    status_t      stopInjection() override;
+
 protected:
 
     // The PID provided in the constructor call
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index b42f3f6..e936cb6 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -97,6 +97,9 @@
     virtual status_t disconnect() = 0;
 
     virtual status_t dump(int fd, const Vector<String16> &args) = 0;
+    virtual status_t startWatchingTags(const String8 &tags) = 0;
+    virtual status_t stopWatchingTags() = 0;
+    virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out) = 0;
 
     /**
      * The physical camera device's static characteristics metadata buffer, or
@@ -179,7 +182,8 @@
             std::vector<int> *surfaceIds = nullptr,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             bool isShared = false, bool isMultiResolution = false,
-            uint64_t consumerUsage = 0) = 0;
+            uint64_t consumerUsage = 0,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) = 0;
 
     /**
      * Create an output stream of the requested size, format, rotation and
@@ -196,7 +200,8 @@
             std::vector<int> *surfaceIds = nullptr,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             bool isShared = false, bool isMultiResolution = false,
-            uint64_t consumerUsage = 0) = 0;
+            uint64_t consumerUsage = 0,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) = 0;
 
     /**
      * Create an input stream of width, height, and format.
@@ -217,10 +222,12 @@
         android_dataspace dataSpace;
         bool dataSpaceOverridden;
         android_dataspace originalDataSpace;
+        uint32_t dynamicRangeProfile;
 
         StreamInfo() : width(0), height(0), format(0), formatOverridden(false), originalFormat(0),
                 dataSpace(HAL_DATASPACE_UNKNOWN), dataSpaceOverridden(false),
-                originalDataSpace(HAL_DATASPACE_UNKNOWN) {}
+                originalDataSpace(HAL_DATASPACE_UNKNOWN),
+                dynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {}
         /**
          * Check whether the format matches the current or the original one in case
          * it got overridden.
@@ -439,6 +446,18 @@
      */
     void setImageDumpMask(int mask) { mImageDumpMask = mask; }
 
+    /**
+     * The injection camera session to replace the internal camera
+     * session.
+     */
+    virtual status_t injectCamera(const String8& injectedCamId,
+            sp<CameraProviderManager> manager) = 0;
+
+    /**
+     * Stop the injection camera and restore to internal camera session.
+     */
+    virtual status_t stopInjection() = 0;
+
 protected:
     bool mImageDumpMask = 0;
 };
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 0cce2ca..9831328 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -20,7 +20,7 @@
 
 #include "CameraProviderManager.h"
 
-#include <android/hardware/camera/device/3.7/ICameraDevice.h>
+#include <android/hardware/camera/device/3.8/ICameraDevice.h>
 
 #include <algorithm>
 #include <chrono>
@@ -46,13 +46,14 @@
 
 using namespace ::android::hardware::camera;
 using namespace ::android::hardware::camera::common::V1_0;
-using camera3::SessionConfigurationUtils;
+using namespace ::android::camera3;
 using std::literals::chrono_literals::operator""s;
 using hardware::camera2::utils::CameraIdAndSessionConfiguration;
 using hardware::camera::provider::V2_7::CameraIdAndStreamCombination;
 
 namespace {
 const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
+const std::string kExternalProviderName = "external/0";
 } // anonymous namespace
 
 const float CameraProviderManager::kDepthARTolerance = .1f;
@@ -267,7 +268,7 @@
 }
 
 status_t CameraProviderManager::isSessionConfigurationSupported(const std::string& id,
-        const hardware::camera::device::V3_7::StreamConfiguration &configuration,
+        const hardware::camera::device::V3_8::StreamConfiguration &configuration,
         bool *status /*out*/) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     auto deviceInfo = findDeviceInfoLocked(id);
@@ -307,6 +308,50 @@
     return OK;
 }
 
+status_t CameraProviderManager::getTorchStrengthLevel(const std::string &id,
+        int32_t* torchStrength /*out*/) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->getTorchStrengthLevel(torchStrength);
+}
+
+status_t CameraProviderManager::turnOnTorchWithStrengthLevel(const std::string &id,
+        int32_t torchStrength) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->turnOnTorchWithStrengthLevel(torchStrength);
+}
+
+bool CameraProviderManager::shouldSkipTorchStrengthUpdate(const std::string &id,
+        int32_t torchStrength) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    if (deviceInfo->mTorchStrengthLevel == torchStrength) {
+        ALOGV("%s: Skipping torch strength level updates prev_level: %d, new_level: %d",
+                __FUNCTION__, deviceInfo->mTorchStrengthLevel, torchStrength);
+        return true;
+    }
+    return false;
+}
+
+int32_t CameraProviderManager::getTorchDefaultStrengthLevel(const std::string &id) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->mTorchDefaultStrengthLevel;
+}
+
 bool CameraProviderManager::supportSetTorchMode(const std::string &id) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     for (auto& provider : mProviders) {
@@ -351,6 +396,73 @@
     return OK;
 }
 
+sp<CameraProviderManager::ProviderInfo> CameraProviderManager::startExternalLazyProvider() const {
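+    // Find the external camera provider and bring up its (lazy) HAL interface.
+    // Returns nullptr if no external provider exists or it failed to start.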
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    for (const auto& providerInfo : mProviders) {
+        if (providerInfo->isExternalLazyHAL()) {
+            const sp<provider::V2_4::ICameraProvider>
+                  interface = providerInfo->startProviderInterface();
+            if (interface == nullptr) {
+                return nullptr;
+            } else {
+                return providerInfo;
+            }
+        }
+    }
+    return nullptr;
+}
+
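+// Handle USB device attach/detach notifications; these are only acted upon when lazy
+// HALs are enabled.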
+status_t CameraProviderManager::notifyUsbDeviceEvent(int32_t eventId,
+                                                     const std::string& usbDeviceId) {
+    if (!kEnableLazyHal) {
+        return OK;
+    }
+
+    ALOGV("notifySystemEvent: %d usbDeviceId : %s", eventId, usbDeviceId.c_str());
+
+    if (eventId == android::hardware::ICameraService::EVENT_USB_DEVICE_ATTACHED) {
+        sp<ProviderInfo> externalProvider = startExternalLazyProvider();
+        if (externalProvider != nullptr) {
+            auto usbDevices = mExternalUsbDevicesForProvider.first;
+            usbDevices.push_back(usbDeviceId);
+            mExternalUsbDevicesForProvider = {usbDevices, externalProvider};
+        }
+    } else if (eventId
+          == android::hardware::ICameraService::EVENT_USB_DEVICE_DETACHED) {
+        usbDeviceDetached(usbDeviceId);
+    }
+
+    return OK;
+}
+
+status_t CameraProviderManager::usbDeviceDetached(const std::string &usbDeviceId) {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
+    std::lock_guard<std::mutex> interfaceLock(mInterfaceMutex);
+
+    auto usbDevices = mExternalUsbDevicesForProvider.first;
+    auto foundId = std::find(usbDevices.begin(), usbDevices.end(), usbDeviceId);
+    if (foundId != usbDevices.end()) {
+        sp<ProviderInfo> providerInfo = mExternalUsbDevicesForProvider.second;
+        if (providerInfo == nullptr) {
+              ALOGE("%s No valid external provider for USB device: %s",
+                    __FUNCTION__,
+                    usbDeviceId.c_str());
+              mExternalUsbDevicesForProvider = {std::vector<std::string>(), nullptr};
+              return DEAD_OBJECT;
+        } else {
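+            // removeAllDevices() notifies the status listener, which may call back
+            // into the provider manager, so drop mInterfaceMutex around the call.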
+            mInterfaceMutex.unlock();
+            providerInfo->removeAllDevices();
+            mInterfaceMutex.lock();
+            mExternalUsbDevicesForProvider = {std::vector<std::string>(), nullptr};
+        }
+    } else {
+        return DEAD_OBJECT;
+    }
+    return OK;
+}
+
 status_t CameraProviderManager::notifyDeviceStateChange(
         hardware::hidl_bitfield<provider::V2_5::DeviceState> newState) {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -1270,9 +1382,10 @@
         if (providerInfo->mProviderName == newProvider) {
             ALOGW("%s: Camera provider HAL with name '%s' already registered",
                     __FUNCTION__, newProvider.c_str());
-            if (preexisting) {
+            // Do not add new instances for lazy HAL external provider
+            if (preexisting || providerInfo->isExternalLazyHAL()) {
                 return ALREADY_EXISTS;
-            } else{
+            } else {
                 ALOGW("%s: The new provider instance will get initialized immediately after the"
                         " currently present instance is removed!", __FUNCTION__);
                 providerPresent = true;
@@ -1562,36 +1675,56 @@
 
     auto interface = mActiveInterface.promote();
     if (interface == nullptr) {
-        ALOGI("Camera HAL provider needs restart, calling getService(%s)", mProviderName.c_str());
-        interface = mManager->mServiceProxy->getService(mProviderName);
-        interface->setCallback(this);
-        hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
-        if (!linked.isOk()) {
-            ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
-                    __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
-            mManager->removeProvider(mProviderName);
-            return nullptr;
-        } else if (!linked) {
-            ALOGW("%s: Unable to link to provider '%s' death notifications",
+        // Try to get service without starting
+        interface = mManager->mServiceProxy->tryGetService(mProviderName);
+        if (interface == nullptr) {
+            ALOGV("Camera provider actually needs restart, calling getService(%s)",
+                  mProviderName.c_str());
+            interface = mManager->mServiceProxy->getService(mProviderName);
+
+            // Set all devices as ENUMERATING, provider should update status
+            // to PRESENT after initializing.
+            // This avoids failing getCameraDeviceInterface_V3_x before devices
+            // are ready.
+            for (auto& device : mDevices) {
+              device->mIsDeviceAvailable = false;
+            }
+
+            interface->setCallback(this);
+            hardware::Return<bool>
+                linked = interface->linkToDeath(this, /*cookie*/ mId);
+            if (!linked.isOk()) {
+              ALOGE(
+                  "%s: Transaction error in linking to camera provider '%s' death: %s",
+                  __FUNCTION__,
+                  mProviderName.c_str(),
+                  linked.description().c_str());
+              mManager->removeProvider(mProviderName);
+              return nullptr;
+            } else if (!linked) {
+              ALOGW("%s: Unable to link to provider '%s' death notifications",
                     __FUNCTION__, mProviderName.c_str());
-        }
-        // Send current device state
-        if (mMinorVersion >= 5) {
-            auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
-            if (castResult.isOk()) {
+            }
+            // Send current device state
+            if (mMinorVersion >= 5) {
+              auto castResult =
+                  provider::V2_5::ICameraProvider::castFrom(interface);
+              if (castResult.isOk()) {
                 sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
                 if (interface_2_5 != nullptr) {
-                    ALOGV("%s: Initial device state for %s: 0x %" PRIx64,
-                            __FUNCTION__, mProviderName.c_str(), mDeviceState);
-                    interface_2_5->notifyDeviceStateChange(mDeviceState);
+                  ALOGV("%s: Initial device state for %s: 0x %" PRIx64,
+                        __FUNCTION__, mProviderName.c_str(), mDeviceState);
+                  interface_2_5->notifyDeviceStateChange(mDeviceState);
                 }
+              }
             }
         }
-
         mActiveInterface = interface;
     } else {
-        ALOGV("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
+        ALOGV("Camera provider (%s) already in use. Re-using instance.",
+              mProviderName.c_str());
     }
+
     return interface;
 }
 
@@ -1666,15 +1799,50 @@
             mUniqueCameraIds.erase(id);
             if ((*it)->isAPI1Compatible()) {
                 mUniqueAPI1CompatibleCameraIds.erase(std::remove(
-                        mUniqueAPI1CompatibleCameraIds.begin(),
-                        mUniqueAPI1CompatibleCameraIds.end(), id));
+                    mUniqueAPI1CompatibleCameraIds.begin(),
+                    mUniqueAPI1CompatibleCameraIds.end(), id));
             }
+
+            // Remove reference to camera provider to avoid pointer leak when
+            // unplugging external camera while in use with lazy HALs
+            mManager->removeRef(DeviceMode::CAMERA, id);
+            mManager->removeRef(DeviceMode::TORCH, id);
+
             mDevices.erase(it);
             break;
         }
     }
 }
 
+void CameraProviderManager::ProviderInfo::removeAllDevices() {
+    std::lock_guard<std::mutex> lock(mLock);
+
+    auto itDevices = mDevices.begin();
+    while (itDevices != mDevices.end()) {
+        std::string id = (*itDevices)->mId;
+        std::string deviceName = (*itDevices)->mName;
+        removeDevice(id);
+        // device was removed, reset iterator
+        itDevices = mDevices.begin();
+
+        // Notify CameraService of the status change
+        sp<StatusListener> listener = mManager->getStatusListener();
+        if (listener != nullptr) {
+            mLock.unlock();
+            ALOGV("%s: notify device not_present: %s",
+                  __FUNCTION__,
+                  deviceName.c_str());
+            listener->onDeviceStatusChanged(String8(id.c_str()),
+                                            CameraDeviceStatus::NOT_PRESENT);
+            mLock.lock();
+        }
+    }
+}
+
+bool CameraProviderManager::ProviderInfo::isExternalLazyHAL() const {
+    return kEnableLazyHal && (mProviderName == kExternalProviderName);
+}
+
 status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
     dprintf(fd, "== Camera Provider HAL %s (v2.%d, %s) static info: %zu devices: ==\n",
             mProviderInstance.c_str(),
@@ -1854,12 +2022,16 @@
     std::string cameraId;
     for (auto& deviceInfo : mDevices) {
         if (deviceInfo->mName == cameraDeviceName) {
+            Mutex::Autolock l(deviceInfo->mDeviceAvailableLock);
             ALOGI("Camera device %s status is now %s, was %s", cameraDeviceName.c_str(),
                     deviceStatusToString(newStatus), deviceStatusToString(deviceInfo->mStatus));
             deviceInfo->mStatus = newStatus;
             // TODO: Handle device removal (NOT_PRESENT)
             cameraId = deviceInfo->mId;
             known = true;
+            deviceInfo->mIsDeviceAvailable =
+                (newStatus == CameraDeviceStatus::PRESENT);
+            deviceInfo->mDeviceAvailableSignal.signal();
             break;
         }
     }
@@ -1873,6 +2045,11 @@
         addDevice(cameraDeviceName, newStatus, &cameraId);
     } else if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
         removeDevice(cameraId);
+    } else if (isExternalLazyHAL()) {
+        // Do not notify CameraService about PRESENT -> PRESENT transitions (lazy HAL
+        // restarts): NOT_AVAILABLE is set on CameraService::connect and a PRESENT
+        // notification would overwrite it.
+        return BAD_VALUE;
     }
     if (reCacheConcurrentStreamingCameraIdsLocked() != OK) {
         ALOGE("%s: CameraProvider %s could not re-cache concurrent streaming camera id list ",
@@ -2253,11 +2430,27 @@
 
 template<class InterfaceT>
 sp<InterfaceT> CameraProviderManager::ProviderInfo::DeviceInfo::startDeviceInterface() {
+    Mutex::Autolock l(mDeviceAvailableLock);
     sp<InterfaceT> device;
     ATRACE_CALL();
     if (mSavedInterface == nullptr) {
         sp<ProviderInfo> parentProvider = mParentProvider.promote();
         if (parentProvider != nullptr) {
+            // Wait for lazy HALs to confirm device availability
+            if (parentProvider->isExternalLazyHAL() && !mIsDeviceAvailable) {
+                ALOGV("%s: Wait for external device to become available %s",
+                      __FUNCTION__,
+                      mId.c_str());
+
+                auto res = mDeviceAvailableSignal.waitRelative(mDeviceAvailableLock,
+                                                         kDeviceAvailableTimeout);
+                if (res != OK) {
+                    ALOGE("%s: Failed waiting for device to become available",
+                          __FUNCTION__);
+                    return nullptr;
+                }
+            }
+
             device = parentProvider->startDeviceInterface<InterfaceT>(mName);
         }
     } else {
@@ -2385,6 +2578,22 @@
         mHasFlashUnit = false;
     }
 
+    camera_metadata_entry entry =
+            mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_DEFAULT_LEVEL);
+    if (entry.count == 1) {
+        mTorchDefaultStrengthLevel = entry.data.i32[0];
+    } else {
+        mTorchDefaultStrengthLevel = 0;
+    }
+
+    entry = mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL);
+    if (entry.count == 1) {
+        mTorchMaximumStrengthLevel = entry.data.i32[0];
+    } else {
+        mTorchMaximumStrengthLevel = 0;
+    }
+
+    mTorchStrengthLevel = 0;
     queryPhysicalCameraIds();
 
     // Get physical camera characteristics if applicable
@@ -2468,6 +2677,80 @@
     return setTorchModeForDevice<InterfaceT>(enabled);
 }
 
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::turnOnTorchWithStrengthLevel(
+        int32_t torchStrength) {
+    const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
+        startDeviceInterface<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
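+    // Torch strength control was added in ICameraDevice@3.8; report INVALID_OPERATION
+    // on older HALs.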
+    sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
+    auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+    if (castResult_3_8.isOk()) {
+        interface_3_8 = castResult_3_8;
+    }
+
+    if (interface_3_8 == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    Status s = interface_3_8->turnOnTorchWithStrengthLevel(torchStrength);
+    if (s == Status::OK) {
+        mTorchStrengthLevel = torchStrength;
+    }
+    return mapToStatusT(s);
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getTorchStrengthLevel(
+        int32_t *torchStrength) {
+    if (torchStrength == nullptr) {
+        return BAD_VALUE;
+    }
+    const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
+        startDeviceInterface<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+    sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
+    if (castResult_3_8.isOk()) {
+        interface_3_8 = castResult_3_8;
+    }
+
+    if (interface_3_8 == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    Status callStatus;
+    status_t res;
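+    // The current strength level is returned through a synchronous HIDL callback.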
+    hardware::Return<void> ret = interface_3_8->getTorchStrengthLevel([&callStatus, &torchStrength]
+        (Status status, const int32_t& torchStrengthLevel) {
+        callStatus = status;
+        if (status == Status::OK) {
+             *torchStrength = torchStrengthLevel;
+        } });
+
+    if (ret.isOk()) {
+        switch (callStatus) {
+            case Status::OK:
+                // Expected case, do nothing.
+                res = OK;
+                break;
+            case Status::METHOD_NOT_SUPPORTED:
+                res = INVALID_OPERATION;
+                break;
+            default:
+                ALOGE("%s: Get torch strength level failed: %d", __FUNCTION__, callStatus);
+                res = UNKNOWN_ERROR;
+        }
+    } else {
+        ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
+        res = UNKNOWN_ERROR;
+    }
+
+    return res;
+}
+
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraInfo(
         hardware::CameraInfo *info) const {
     if (info == nullptr) return BAD_VALUE;
@@ -2564,7 +2847,7 @@
 }
 
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::isSessionConfigurationSupported(
-        const hardware::camera::device::V3_7::StreamConfiguration &configuration,
+        const hardware::camera::device::V3_8::StreamConfiguration &configuration,
         bool *status /*out*/) {
 
     const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
@@ -2576,6 +2859,8 @@
     sp<hardware::camera::device::V3_5::ICameraDevice> interface_3_5 = castResult_3_5;
     auto castResult_3_7 = device::V3_7::ICameraDevice::castFrom(interface);
     sp<hardware::camera::device::V3_7::ICameraDevice> interface_3_7 = castResult_3_7;
+    auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+    sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = castResult_3_8;
 
     status_t res;
     Status callStatus;
@@ -2585,12 +2870,28 @@
                 callStatus = s;
                 *status = combStatus;
             };
-    if (interface_3_7 != nullptr) {
-        ret = interface_3_7->isStreamCombinationSupported_3_7(configuration, halCb);
+    if (interface_3_8 != nullptr) {
+        ret = interface_3_8->isStreamCombinationSupported_3_8(configuration, halCb);
+    } else if (interface_3_7 != nullptr) {
+        hardware::camera::device::V3_7::StreamConfiguration configuration_3_7;
+        bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
+                configuration_3_7, configuration);
+        if (!success) {
+            *status = false;
+            return OK;
+        }
+        ret = interface_3_7->isStreamCombinationSupported_3_7(configuration_3_7, halCb);
     } else if (interface_3_5 != nullptr) {
+        hardware::camera::device::V3_7::StreamConfiguration configuration_3_7;
+        bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
+                configuration_3_7, configuration);
+        if (!success) {
+            *status = false;
+            return OK;
+        }
         hardware::camera::device::V3_4::StreamConfiguration configuration_3_4;
-        bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
-                configuration_3_4, configuration);
+        success = SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
+                configuration_3_4, configuration_3_7);
         if (!success) {
             *status = false;
             return OK;
@@ -3067,7 +3368,7 @@
     status_t res = OK;
     for (auto &cameraIdAndSessionConfig : cameraIdsAndSessionConfigs) {
         const std::string& cameraId = cameraIdAndSessionConfig.mCameraId;
-        hardware::camera::device::V3_7::StreamConfiguration streamConfiguration;
+        hardware::camera::device::V3_8::StreamConfiguration streamConfiguration;
         CameraMetadata deviceInfo;
         bool overrideForPerfClass =
                 SessionConfigurationUtils::targetPerfClassPrimaryCamera(
@@ -3101,7 +3402,8 @@
         }
         CameraIdAndStreamCombination halCameraIdAndStream;
         halCameraIdAndStream.cameraId = cameraId;
-        halCameraIdAndStream.streamConfiguration = streamConfiguration;
+        SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
+                halCameraIdAndStream.streamConfiguration, streamConfiguration);
         halCameraIdsAndStreamsV.push_back(halCameraIdAndStream);
     }
     *halCameraIdsAndStreamCombinations = halCameraIdsAndStreamsV;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index fdb2673..ac710bf 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -29,13 +29,16 @@
 #include <camera/CameraParameters2.h>
 #include <camera/CameraMetadata.h>
 #include <camera/CameraBase.h>
+#include <utils/Condition.h>
 #include <utils/Errors.h>
+#include <android/hardware/ICameraService.h>
 #include <android/hardware/camera/common/1.0/types.h>
 #include <android/hardware/camera/provider/2.5/ICameraProvider.h>
 #include <android/hardware/camera/provider/2.6/ICameraProviderCallback.h>
 #include <android/hardware/camera/provider/2.6/ICameraProvider.h>
 #include <android/hardware/camera/provider/2.7/ICameraProvider.h>
 #include <android/hardware/camera/device/3.7/types.h>
+#include <android/hardware/camera/device/3.8/types.h>
 #include <android/hidl/manager/1.0/IServiceNotification.h>
 #include <camera/VendorTagDescriptor.h>
 
@@ -88,6 +91,7 @@
 #define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5)
 #define CAMERA_DEVICE_API_VERSION_3_6 HARDWARE_DEVICE_API_VERSION(3, 6)
 #define CAMERA_DEVICE_API_VERSION_3_7 HARDWARE_DEVICE_API_VERSION(3, 7)
+#define CAMERA_DEVICE_API_VERSION_3_8 HARDWARE_DEVICE_API_VERSION(3, 8)
 
 /**
  * A manager for all camera providers available on an Android device.
@@ -242,7 +246,7 @@
      * Check for device support of specific stream combination.
      */
     status_t isSessionConfigurationSupported(const std::string& id,
-            const hardware::camera::device::V3_7::StreamConfiguration &configuration,
+            const hardware::camera::device::V3_8::StreamConfiguration &configuration,
             bool *status /*out*/) const;
 
     /**
@@ -257,6 +261,17 @@
     bool supportSetTorchMode(const std::string &id) const;
 
     /**
+     * Check if torch strength update should be skipped or not.
+     */
+    bool shouldSkipTorchStrengthUpdate(const std::string &id, int32_t torchStrength) const;
+
+    /**
+     * Return the default torch strength level if the torch strength control
+     * feature is supported.
+     */
+    int32_t getTorchDefaultStrengthLevel(const std::string &id) const;
+
+    /**
      * Turn on or off the flashlight on a given camera device.
      * May fail if the device does not support this API, is in active use, or if the device
      * doesn't exist, etc.
@@ -264,6 +279,24 @@
     status_t setTorchMode(const std::string &id, bool enabled);
 
     /**
+     * Change the brightness level of the flash unit associated with the cameraId and
+     * set it to the value in torchStrength.
+     * If the torch is OFF and torchStrength > 0, the torch will be turned ON with the
+     * specified strength level. If the torch is ON, only the brightness level will be
+     * changed.
+     *
+     * This operation fails if the device does not have a flash unit, has a flash
+     * unit but does not support this API, if torchStrength is invalid, or if the
+     * device does not exist.
+     */
+    status_t turnOnTorchWithStrengthLevel(const std::string &id, int32_t torchStrength);
+
+    /**
+     * Return the torch strength level of this camera device.
+     */
+    status_t getTorchStrengthLevel(const std::string &id, int32_t* torchStrength);
+
+    /**
      * Setup vendor tags for all registered providers
      */
     status_t setUpVendorTags();
@@ -327,6 +360,8 @@
 
     status_t filterSmallJpegSizes(const std::string& cameraId);
 
+    status_t notifyUsbDeviceEvent(int32_t eventId, const std::string &usbDeviceId);
+
     static const float kDepthARTolerance;
 private:
     // All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
@@ -457,6 +492,17 @@
                                 &halCameraIdsAndStreamCombinations,
                 bool *isSupported);
 
+        /**
+         * Remove all devices associated with this provider and notify listeners
+         * with NOT_PRESENT state.
+         */
+        void removeAllDevices();
+
+        /**
+         * Whether this provider is the external camera provider running as a lazy HAL.
+         */
+        bool isExternalLazyHAL() const;
+
         // Basic device information, common to all camera devices
         struct DeviceInfo {
             const std::string mName;  // Full instance name
@@ -474,10 +520,23 @@
             hardware::camera::common::V1_0::CameraDeviceStatus mStatus;
 
             wp<ProviderInfo> mParentProvider;
+            // Current, default, and maximum torch strength levels, populated when the
+            // torch strength control feature is supported.
+            int32_t mTorchStrengthLevel;
+            int32_t mTorchMaximumStrengthLevel;
+            int32_t mTorchDefaultStrengthLevel;
+
+            // Wait for lazy HALs to confirm device availability
+            static const nsecs_t kDeviceAvailableTimeout = 2000e6; // 2000 ms
+            Mutex     mDeviceAvailableLock;
+            Condition mDeviceAvailableSignal;
+            bool mIsDeviceAvailable = true;
 
             bool hasFlashUnit() const { return mHasFlashUnit; }
             bool supportNativeZoomRatio() const { return mSupportNativeZoomRatio; }
             virtual status_t setTorchMode(bool enabled) = 0;
+            virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) = 0;
+            virtual status_t getTorchStrengthLevel(int32_t *torchStrength) = 0;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
             virtual bool isAPI1Compatible() const = 0;
             virtual status_t dumpState(int fd) = 0;
@@ -495,7 +554,7 @@
             }
 
             virtual status_t isSessionConfigurationSupported(
-                    const hardware::camera::device::V3_7::StreamConfiguration &/*configuration*/,
+                    const hardware::camera::device::V3_8::StreamConfiguration &/*configuration*/,
                     bool * /*status*/) {
                 return INVALID_OPERATION;
             }
@@ -515,8 +574,10 @@
                     mName(name), mId(id), mVersion(version), mProviderTagid(tagId),
                     mIsLogicalCamera(false), mResourceCost(resourceCost),
                     mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
-                    mParentProvider(parentProvider), mHasFlashUnit(false),
-                    mSupportNativeZoomRatio(false), mPublicCameraIds(publicCameraIds) {}
+                    mParentProvider(parentProvider), mTorchStrengthLevel(0),
+                    mTorchMaximumStrengthLevel(0), mTorchDefaultStrengthLevel(0),
+                    mHasFlashUnit(false), mSupportNativeZoomRatio(false),
+                    mPublicCameraIds(publicCameraIds) {}
             virtual ~DeviceInfo();
         protected:
             bool mHasFlashUnit; // const after constructor
@@ -550,6 +611,9 @@
             typedef hardware::camera::device::V3_2::ICameraDevice InterfaceT;
 
             virtual status_t setTorchMode(bool enabled) override;
+            virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) override;
+            virtual status_t getTorchStrengthLevel(int32_t *torchStrength) override;
+
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
             virtual bool isAPI1Compatible() const override;
             virtual status_t dumpState(int fd) override;
@@ -559,7 +623,7 @@
             virtual status_t getPhysicalCameraCharacteristics(const std::string& physicalCameraId,
                     CameraMetadata *characteristics) const override;
             virtual status_t isSessionConfigurationSupported(
-                    const hardware::camera::device::V3_7::StreamConfiguration &configuration,
+                    const hardware::camera::device::V3_8::StreamConfiguration &configuration,
                     bool *status /*out*/)
                     override;
             virtual status_t filterSmallJpegSizes() override;
@@ -692,6 +756,12 @@
             hardware::hidl_version minVersion = hardware::hidl_version{0,0},
             hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
 
+    // Map external providers to USB devices in order to handle USB hotplug
+    // events for lazy HALs
+    std::pair<std::vector<std::string>, sp<ProviderInfo>>
+        mExternalUsbDevicesForProvider;
+    sp<ProviderInfo> startExternalLazyProvider() const;
+
     status_t addProviderLocked(const std::string& newProvider, bool preexisting = false);
 
     status_t tryToInitializeProviderLocked(const std::string& providerName,
@@ -739,6 +809,8 @@
               hardware::hidl_vec<hardware::camera::provider::V2_7::CameraIdAndStreamCombination>
                       *halCameraIdsAndStreamCombinations,
               bool *earlyExit);
+
+    status_t usbDeviceDetached(const std::string &usbDeviceId);
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index c995670..719ff2c 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -42,6 +42,10 @@
 #include <xmpmeta/xmp_data.h>
 #include <xmpmeta/xmp_writer.h>
 
+#ifndef __unused
+#define __unused __attribute__((__unused__))
+#endif
+
 using dynamic_depth::Camera;
 using dynamic_depth::Cameras;
 using dynamic_depth::CameraParams;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index de418da..4c1e7f0 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -531,6 +531,12 @@
     return measured;
 }
 
+CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap
+Camera3Device::mapToHidlDynamicProfile(int dynamicRangeProfile) {
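+    // The framework dynamic range profile constants mirror the HIDL metadata enum
+    // values, so a direct cast is sufficient.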
+    return static_cast<CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap>(
+            dynamicRangeProfile);
+}
+
 hardware::graphics::common::V1_0::PixelFormat Camera3Device::mapToPixelFormat(
         int frameworkFormat) {
     return (hardware::graphics::common::V1_0::PixelFormat) frameworkFormat;
@@ -850,6 +856,21 @@
     return OK;
 }
 
+status_t Camera3Device::startWatchingTags(const String8 &tags) {
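+    // Tag monitoring is delegated to the TagMonitor owned by this device.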
+    mTagMonitor.parseTagsToMonitor(tags);
+    return OK;
+}
+
+status_t Camera3Device::stopWatchingTags() {
+    mTagMonitor.disableMonitoring();
+    return OK;
+}
+
+status_t Camera3Device::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    mTagMonitor.getLatestMonitoredTagEvents(out);
+    return OK;
+}
+
 const CameraMetadata& Camera3Device::infoPhysical(const String8& physicalId) const {
     ALOGVV("%s: E", __FUNCTION__);
     if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
@@ -1162,6 +1183,16 @@
 
 hardware::Return<void> Camera3Device::notify(
         const hardware::hidl_vec<hardware::camera::device::V3_2::NotifyMsg>& msgs) {
+    return notifyHelper<hardware::camera::device::V3_2::NotifyMsg>(msgs);
+}
+
+hardware::Return<void> Camera3Device::notify_3_8(
+        const hardware::hidl_vec<hardware::camera::device::V3_8::NotifyMsg>& msgs) {
+    return notifyHelper<hardware::camera::device::V3_8::NotifyMsg>(msgs);
+}
+
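+// Shared body for the V3_2 notify() and V3_8 notify_3_8() HIDL callbacks.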
+template<typename NotifyMsgType>
+hardware::Return<void> Camera3Device::notifyHelper(const hardware::hidl_vec<NotifyMsgType>& msgs) {
     // Ideally we should grab mLock, but that can lead to deadlock, and
     // it's not super important to get up to date value of mStatus for this
     // warning print, hence skipping the lock here
@@ -1359,7 +1390,7 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
-            uint64_t consumerUsage) {
+            uint64_t consumerUsage, int dynamicRangeProfile) {
     ATRACE_CALL();
 
     if (consumer == nullptr) {
@@ -1372,7 +1403,7 @@
 
     return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
             format, dataSpace, rotation, id, physicalCameraId, sensorPixelModesUsed, surfaceIds,
-            streamSetId, isShared, isMultiResolution, consumerUsage);
+            streamSetId, isShared, isMultiResolution, consumerUsage, dynamicRangeProfile);
 }
 
 static bool isRawFormat(int format) {
@@ -1392,7 +1423,7 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
         const String8& physicalCameraId, const std::unordered_set<int32_t> &sensorPixelModesUsed,
         std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
-        uint64_t consumerUsage) {
+        uint64_t consumerUsage, int dynamicRangeProfile) {
     ATRACE_CALL();
 
     Mutex::Autolock il(mInterfaceLock);
@@ -1470,7 +1501,7 @@
         newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, blobBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                isMultiResolution);
+                isMultiResolution, dynamicRangeProfile);
     } else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
         bool maxResolution =
                 sensorPixelModesUsed.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
@@ -1484,22 +1515,22 @@
         newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                isMultiResolution);
+                isMultiResolution, dynamicRangeProfile);
     } else if (isShared) {
         newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                mUseHalBufManager);
+                mUseHalBufManager, dynamicRangeProfile);
     } else if (consumers.size() == 0 && hasDeferredConsumer) {
         newStream = new Camera3OutputStream(mNextStreamId,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                isMultiResolution);
+                isMultiResolution, dynamicRangeProfile);
     } else {
         newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, format, dataSpace, rotation,
                 mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
-                isMultiResolution);
+                isMultiResolution, dynamicRangeProfile);
     }
 
     size_t consumerCount = consumers.size();
@@ -1586,6 +1617,7 @@
     streamInfo->originalFormat = stream->getOriginalFormat();
     streamInfo->dataSpaceOverridden = stream->isDataSpaceOverridden();
     streamInfo->originalDataSpace = stream->getOriginalDataSpace();
+    streamInfo->dynamicRangeProfile = stream->getDynamicRangeProfile();
     return OK;
 }
 
@@ -2198,7 +2230,8 @@
                 streamStats.emplace_back(stream->getWidth(), stream->getHeight(),
                     stream->getFormat(), stream->getDataSpace(), usage,
                     stream->getMaxHalBuffers(),
-                    stream->getMaxTotalBuffers() - stream->getMaxHalBuffers());
+                    stream->getMaxTotalBuffers() - stream->getMaxHalBuffers(),
+                    stream->getDynamicRangeProfile());
             }
         }
     }
@@ -2849,17 +2882,28 @@
         mRequestBufferSM.onStreamsConfigured();
     }
 
+    // Case: injectCamera() was called before configureStreamsLocked() runs.
     // Since the streams configuration of the injection camera is based on the internal camera, we
-    // must wait until the internal camera configure streams before calling injectCamera() to
+    // must wait until the internal camera configures its streams before running the injection job to
     // configure the injection streams.
     if (mInjectionMethods->isInjecting()) {
-        ALOGV("%s: Injection camera %s: Start to configure streams.",
+        ALOGD("%s: Injection camera %s: Start to configure streams.",
               __FUNCTION__, mInjectionMethods->getInjectedCamId().string());
         res = mInjectionMethods->injectCamera(config, bufferSizes);
         if (res != OK) {
             ALOGE("Can't finish inject camera process!");
             return res;
         }
+    } else {
+        // Case: configureStreamsLocked() ran before injectCamera() was called.
+        // The stream configuration is complete and the camera device is active, but the
+        // injection camera has not been injected yet. Store the internal camera's stream
+        // configuration (the injection camera's configuration is derived from it) so that
+        // it can be reused when the injection happens later.
+        ALOGV("%s: The stream configuration is complete and the camera device is active, but the"
+              " injection camera has not been injected yet.", __FUNCTION__);
+        mInjectionMethods->storeInjectionConfig(config, bufferSizes);
     }
 
     return OK;
@@ -3085,10 +3129,12 @@
 
 void Camera3Device::monitorMetadata(TagMonitor::eventSource source,
         int64_t frameNumber, nsecs_t timestamp, const CameraMetadata& metadata,
-        const std::unordered_map<std::string, CameraMetadata>& physicalMetadata) {
+        const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+        const camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+        int32_t inputStreamId) {
 
     mTagMonitor.monitorMetadata(source, frameNumber, timestamp, metadata,
-            physicalMetadata);
+            physicalMetadata, outputBuffers, numOutputBuffers, inputStreamId);
 }
 
 /**
@@ -3106,6 +3152,10 @@
         mSupportOfflineProcessing(supportOfflineProcessing) {
     // Check with hardware service manager if we can downcast these interfaces
     // Somewhat expensive, so cache the results at startup
+    auto castResult_3_8 = device::V3_8::ICameraDeviceSession::castFrom(mHidlSession);
+    if (castResult_3_8.isOk()) {
+        mHidlSession_3_8 = castResult_3_8;
+    }
     auto castResult_3_7 = device::V3_7::ICameraDeviceSession::castFrom(mHidlSession);
     if (castResult_3_7.isOk()) {
         mHidlSession_3_7 = castResult_3_7;
@@ -3143,6 +3193,7 @@
 }
 
 void Camera3Device::HalInterface::clear() {
+    mHidlSession_3_8.clear();
     mHidlSession_3_7.clear();
     mHidlSession_3_6.clear();
     mHidlSession_3_5.clear();
@@ -3280,13 +3331,16 @@
     device::V3_2::StreamConfiguration requestedConfiguration3_2;
     device::V3_4::StreamConfiguration requestedConfiguration3_4;
     device::V3_7::StreamConfiguration requestedConfiguration3_7;
+    device::V3_8::StreamConfiguration requestedConfiguration3_8;
     requestedConfiguration3_2.streams.resize(config->num_streams);
     requestedConfiguration3_4.streams.resize(config->num_streams);
     requestedConfiguration3_7.streams.resize(config->num_streams);
+    requestedConfiguration3_8.streams.resize(config->num_streams);
     for (size_t i = 0; i < config->num_streams; i++) {
         device::V3_2::Stream &dst3_2 = requestedConfiguration3_2.streams[i];
         device::V3_4::Stream &dst3_4 = requestedConfiguration3_4.streams[i];
         device::V3_7::Stream &dst3_7 = requestedConfiguration3_7.streams[i];
+        device::V3_8::Stream &dst3_8 = requestedConfiguration3_8.streams[i];
         camera3::camera_stream_t *src = config->streams[i];
 
         Camera3Stream* cam3stream = Camera3Stream::cast(src);
@@ -3335,6 +3389,15 @@
             dst3_7.sensorPixelModesUsed[j++] =
                     static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
         }
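+        // Dynamic range profiles other than STANDARD are only expressible through the
+        // @3.8 stream definition, so reject them when no @3.8 session is available.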
+        if ((src->dynamic_range_profile !=
+                    ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) &&
+                (mHidlSession_3_8 == nullptr)) {
+            ALOGE("%s: Camera device doesn't support non-standard dynamic range profiles: %d",
+                    __FUNCTION__, src->dynamic_range_profile);
+            return BAD_VALUE;
+        }
+        dst3_8.v3_7 = dst3_7;
+        dst3_8.dynamicRangeProfile = mapToHidlDynamicProfile(src->dynamic_range_profile);
         activeStreams.insert(streamId);
         // Create Buffer ID map if necessary
         mBufferRecords.tryCreateBufferCache(streamId);
@@ -3360,6 +3423,10 @@
     requestedConfiguration3_7.sessionParams.setToExternal(
             reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
             sessionParamSize);
+    requestedConfiguration3_8.operationMode = operationMode;
+    requestedConfiguration3_8.sessionParams.setToExternal(
+            reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
+            sessionParamSize);
 
     // Invoke configureStreams
     device::V3_3::HalStreamConfiguration finalConfiguration;
@@ -3406,7 +3473,17 @@
             };
 
     // See which version of HAL we have
-    if (mHidlSession_3_7 != nullptr) {
+    if (mHidlSession_3_8 != nullptr) {
+        ALOGV("%s: v3.8 device found", __FUNCTION__);
+        requestedConfiguration3_8.streamConfigCounter = mNextStreamConfigCounter++;
+        requestedConfiguration3_8.multiResolutionInputImage = config->input_is_multi_resolution;
+        auto err = mHidlSession_3_8->configureStreams_3_8(requestedConfiguration3_8,
+                configStream36Cb);
+        res = postprocConfigStream36(err);
+        if (res != OK) {
+            return res;
+        }
+    } else if (mHidlSession_3_7 != nullptr) {
         ALOGV("%s: v3.7 device found", __FUNCTION__);
         requestedConfiguration3_7.streamConfigCounter = mNextStreamConfigCounter++;
         requestedConfiguration3_7.multiResolutionInputImage = config->input_is_multi_resolution;
@@ -4014,6 +4091,18 @@
     return OK;
 }
 
+status_t Camera3Device::HalInterface::repeatingRequestEnd(uint32_t frameNumber,
+        hardware::hidl_vec<int32_t> streamIds) {
+    ATRACE_NAME("CameraHal::repeatingRequestEnd");
+    if (!valid()) return INVALID_OPERATION;
+
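+    // repeatingRequestEnd() is only available on ICameraDeviceSession@3.8; older
+    // sessions are silently skipped.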
+    if (mHidlSession_3_8.get() != nullptr) {
+        mHidlSession_3_8->repeatingRequestEnd(frameNumber, streamIds);
+    }
+
+    return OK;
+}
+
 status_t Camera3Device::HalInterface::close() {
     ATRACE_NAME("CameraHal::close()");
     if (!valid()) return INVALID_OPERATION;
@@ -4348,10 +4437,20 @@
 }
 
 status_t Camera3Device::RequestThread::clearRepeatingRequestsLocked(/*out*/int64_t *lastFrameNumber) {
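+    // Collect the output stream ids of the current repeating requests so the HAL can
+    // be told which streams the repeating burst ended on.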
+    std::vector<int32_t> streamIds;
+    for (const auto& request : mRepeatingRequests) {
+        for (const auto& stream : request->mOutputStreams) {
+            streamIds.push_back(stream->getId());
+        }
+    }
+
     mRepeatingRequests.clear();
     if (lastFrameNumber != NULL) {
         *lastFrameNumber = mRepeatingLastFrameNumber;
     }
+
+    mInterface->repeatingRequestEnd(mRepeatingLastFrameNumber, streamIds);
+
     mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
     return OK;
 }
@@ -4597,9 +4696,15 @@
 
         sp<Camera3Device> parent = mParent.promote();
         if (parent != NULL) {
+            int32_t inputStreamId = -1;
+            if (halRequest.input_buffer != nullptr) {
+                inputStreamId = Camera3Stream::cast(halRequest.input_buffer->stream)->getId();
+            }
+
             parent->monitorMetadata(TagMonitor::REQUEST,
                     halRequest.frame_number,
-                    0, mLatestRequest, mLatestPhysicalRequest);
+                    0, mLatestRequest, mLatestPhysicalRequest, halRequest.output_buffers,
+                    halRequest.num_output_buffers, inputStreamId);
         }
     }
 
@@ -5438,7 +5543,8 @@
                     outputBuffers->editItemAt(i).acquire_fence = -1;
                 }
                 outputBuffers->editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
-                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0,
+                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i],
+                        /*timestamp*/0, /*readoutTimestamp*/0,
                         /*timestampIncreasing*/true, std::vector<size_t> (),
                         captureRequest->mResultExtras.frameNumber);
             }
@@ -6649,6 +6755,13 @@
     ALOGI("%s Injection camera: injectedCamId = %s", __FUNCTION__, injectedCamId.string());
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
+    // When the camera device is active, injectCamera() and stopInjection() call
+    // internalPauseAndWaitLocked() and internalResumeLocked(), which in turn call
+    // waitUntilStateThenRelock() and its mStatusChanged.waitRelative(mLock, timeout).
+    // waitRelative() requires mLock to already be held, so acquire it here with
+    // Mutex::Autolock before making those calls.
+    Mutex::Autolock l(mLock);
 
     status_t res = NO_ERROR;
     if (mInjectionMethods->isInjecting()) {
@@ -6671,16 +6784,25 @@
         return res;
     }
 
-    camera3::camera_stream_configuration injectionConfig;
-    std::vector<uint32_t> injectionBufferSizes;
-    mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
     // When Android's second display is cast to the remote device and the opened camera is
     // also cast to that display, the camera has already configured its streams, so we can call
     // injectCamera() directly to replace the internal camera with the injection camera.
-    if (mOperatingMode >= 0 && injectionConfig.num_streams > 0
-                && injectionBufferSizes.size() > 0) {
-        ALOGV("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+    if (mInjectionMethods->isStreamConfigCompleteButNotInjected()) {
+        ALOGD("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+
+        camera3::camera_stream_configuration injectionConfig;
+        std::vector<uint32_t> injectionBufferSizes;
+        mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
+        if (mOperatingMode < 0 || injectionConfig.num_streams <= 0
+                    || injectionBufferSizes.size() <= 0) {
+            ALOGE("Failed to inject camera due to abandoned configuration! "
+                    "mOperatingMode: %d injectionConfig.num_streams: %d "
+                    "injectionBufferSizes.size(): %zu", mOperatingMode,
+                    injectionConfig.num_streams, injectionBufferSizes.size());
+            return DEAD_OBJECT;
+        }
+
         res = mInjectionMethods->injectCamera(
                 injectionConfig, injectionBufferSizes);
         if (res != OK) {
@@ -6695,6 +6817,7 @@
 status_t Camera3Device::stopInjection() {
     ALOGI("%s: Injection camera: stopInjection", __FUNCTION__);
     Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
     return mInjectionMethods->stopInjection();
 }
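
The repeatingRequestEnd() addition above follows the framework's usual pattern for optional, version-gated HAL calls: the @3.8 session handle is only set when the HAL implements that version, and the call silently becomes a no-op otherwise. A minimal, self-contained sketch of that pattern (the types below are illustrative stand-ins, not the real HIDL interfaces):

    #include <cstdint>
    #include <memory>
    #include <vector>

    // Illustrative stand-in for an @3.8-only session method.
    struct Session38Sketch {
        void repeatingRequestEnd(uint32_t /*frameNumber*/, const std::vector<int32_t>& /*streamIds*/) {
            // A real HAL would stop delivering buffers for the repeating burst here.
        }
    };

    struct HalInterfaceSketch {
        std::shared_ptr<Session38Sketch> session38;  // null when the HAL is older than @3.8

        // Mirror of the version-gated call: always succeed, but only forward the
        // notification when the newer session interface is actually present.
        int repeatingRequestEnd(uint32_t frameNumber, const std::vector<int32_t>& streamIds) {
            if (session38 != nullptr) {
                session38->repeatingRequestEnd(frameNumber, streamIds);
            }
            return 0;  // OK
        }
    };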
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e8a6a08..3ce17f9 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -36,13 +36,16 @@
 #include <android/hardware/camera/device/3.5/ICameraDeviceSession.h>
 #include <android/hardware/camera/device/3.6/ICameraDeviceSession.h>
 #include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
+#include <android/hardware/camera/device/3.8/ICameraDeviceSession.h>
 #include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
 #include <android/hardware/camera/device/3.4/ICameraDeviceCallback.h>
 #include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
 #include <fmq/MessageQueue.h>
 
 #include <camera/CaptureResult.h>
 
+#include "android/hardware/camera/metadata/3.8/types.h"
 #include "common/CameraDeviceBase.h"
 #include "device3/BufferUtils.h"
 #include "device3/StatusTracker.h"
@@ -67,6 +70,7 @@
 using android::camera3::camera_stream_configuration_mode_t;
 using android::camera3::CAMERA_TEMPLATE_COUNT;
 using android::camera3::OutputStreamInfo;
+using android::hardware::camera::metadata::V3_8::CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap;
 
 namespace android {
 
@@ -83,7 +87,7 @@
  */
 class Camera3Device :
             public CameraDeviceBase,
-            virtual public hardware::camera::device::V3_5::ICameraDeviceCallback,
+            virtual public hardware::camera::device::V3_8::ICameraDeviceCallback,
             public camera3::SetErrorInterface,
             public camera3::InflightRequestUpdateInterface,
             public camera3::RequestBufferInterface,
@@ -106,6 +110,9 @@
     status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags) override;
     status_t disconnect() override;
     status_t dump(int fd, const Vector<String16> &args) override;
+    status_t startWatchingTags(const String8 &tags) override;
+    status_t stopWatchingTags() override;
+    status_t dumpWatchedEventsToVector(std::vector<std::string> &out) override;
     const CameraMetadata& info() const override;
     const CameraMetadata& infoPhysical(const String8& physicalId) const override;
 
@@ -137,7 +144,9 @@
             std::vector<int> *surfaceIds = nullptr,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             bool isShared = false, bool isMultiResolution = false,
-            uint64_t consumerUsage = 0) override;
+            uint64_t consumerUsage = 0,
+            int dynamicRangeProfile =
+            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) override;
 
     status_t createStream(const std::vector<sp<Surface>>& consumers,
             bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
@@ -147,7 +156,9 @@
             std::vector<int> *surfaceIds = nullptr,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             bool isShared = false, bool isMultiResolution = false,
-            uint64_t consumerUsage = 0) override;
+            uint64_t consumerUsage = 0,
+            int dynamicRangeProfile =
+            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) override;
 
     status_t createInputStream(
             uint32_t width, uint32_t height, int format, bool isMultiResolution,
@@ -283,6 +294,8 @@
     static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
     static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
             android_dataspace dataSpace);
+    static CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap mapToHidlDynamicProfile(
+            int dynamicRangeProfile);
     static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint64_t usage);
     static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
             camera_stream_rotation_t rotation);
@@ -412,6 +425,8 @@
                 /*out*/sp<hardware::camera::device::V3_6::ICameraOfflineSession>* offlineSession,
                 /*out*/camera3::BufferRecords* bufferRecords);
 
+        status_t repeatingRequestEnd(uint32_t frameNumber, hardware::hidl_vec<int32_t> streamIds);
+
         /////////////////////////////////////////////////////////////////////
         // Implements BufferRecordsInterface
 
@@ -454,6 +469,8 @@
         sp<hardware::camera::device::V3_6::ICameraDeviceSession> mHidlSession_3_6;
         // Valid if ICameraDeviceSession is @3.7 or newer
         sp<hardware::camera::device::V3_7::ICameraDeviceSession> mHidlSession_3_7;
+        // Valid if ICameraDeviceSession is @3.8 or newer
+        sp<hardware::camera::device::V3_8::ICameraDeviceSession> mHidlSession_3_8;
 
         std::shared_ptr<RequestMetadataQueue> mRequestMetadataQueue;
 
@@ -641,6 +658,14 @@
             const hardware::hidl_vec<
                     hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
 
+    hardware::Return<void> notify_3_8(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_8::NotifyMsg>& msgs) override;
+
+    template<typename NotifyMsgType>
+    hardware::Return<void> notifyHelper(
+            const hardware::hidl_vec<NotifyMsgType>& msgs);
+
     // Handle one notify message
     void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
 
@@ -1254,7 +1279,9 @@
 
     void monitorMetadata(TagMonitor::eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const CameraMetadata& metadata,
-            const std::unordered_map<std::string, CameraMetadata>& physicalMetadata);
+            const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+            const camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+            int32_t inputStreamId);
 
     metadata_vendor_id_t mVendorTagId;
 
@@ -1378,25 +1405,34 @@
         // when device is IDLE and request thread is paused.
         status_t injectCamera(
                 camera3::camera_stream_configuration& injectionConfig,
-                std::vector<uint32_t>& injectionBufferSizes);
+                const std::vector<uint32_t>& injectionBufferSizes);
 
         // Stop the injection camera and switch back to backup hal interface.
         status_t stopInjection();
 
         bool isInjecting();
 
+        bool isStreamConfigCompleteButNotInjected();
+
         const String8& getInjectedCamId() const;
 
         void getInjectionConfig(/*out*/ camera3::camera_stream_configuration* injectionConfig,
                 /*out*/ std::vector<uint32_t>* injectionBufferSizes);
 
+        // When the stream configuration has completed and the camera device is active, but the
+        // injection camera has not yet been injected, store the internal camera's stream
+        // configuration first so it can be reused once the injection camera is injected.
+        void storeInjectionConfig(
+                const camera3::camera_stream_configuration& injectionConfig,
+                const std::vector<uint32_t>& injectionBufferSizes);
+
       private:
         // Configure the streams of the injection camera. This needs to wait until the
         // output streams have been created and configured for the original camera before
         // proceeding.
         status_t injectionConfigureStreams(
                 camera3::camera_stream_configuration& injectionConfig,
-                std::vector<uint32_t>& injectionBufferSizes);
+                const std::vector<uint32_t>& injectionBufferSizes);
 
         // Disconnect the injection camera and delete the hal interface.
         void injectionDisconnectImpl();
@@ -1414,9 +1450,23 @@
         // Generated injection camera hal interface.
         sp<HalInterface> mInjectedCamHalInterface;
 
+        // Backup of the original camera hal result FMQ.
+        std::unique_ptr<ResultMetadataQueue> mBackupResultMetadataQueue;
+
+        // FMQ used to write capture results for the injection camera. Must be guarded by
+        // mProcessCaptureResultLock.
+        std::unique_ptr<ResultMetadataQueue> mInjectionResultMetadataQueue;
+
+        // Indicates that the stream configuration is complete and the camera device is active,
+        // but the injection camera has not yet been injected.
+        bool mIsStreamConfigCompleteButNotInjected = false;
+
         // Copy the configuration of the internal camera.
         camera3::camera_stream_configuration mInjectionConfig;
 
+        // Copy the streams of the internal camera.
+        Vector<camera3::camera_stream_t*> mInjectionStreams;
+
         // Copy the bufferSizes of the output streams of the internal camera.
         std::vector<uint32_t> mInjectionBufferSizes;
 
diff --git a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
index f145dac..4744a6d 100644
--- a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
@@ -86,7 +86,7 @@
         return DEAD_OBJECT;
     }
 
-    std::unique_ptr<ResultMetadataQueue>& resQueue = parent->mResultMetadataQueue;
+    std::unique_ptr<ResultMetadataQueue>& resQueue = mInjectionResultMetadataQueue;
     auto resultQueueRet = session->getCaptureResultMetadataQueue(
         [&resQueue](const auto& descriptor) {
             resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
@@ -127,10 +127,8 @@
 
 status_t Camera3Device::Camera3DeviceInjectionMethods::injectCamera(
         camera3::camera_stream_configuration& injectionConfig,
-        std::vector<uint32_t>& injectionBufferSizes) {
+        const std::vector<uint32_t>& injectionBufferSizes) {
     status_t res = NO_ERROR;
-    mInjectionConfig = injectionConfig;
-    mInjectionBufferSizes = injectionBufferSizes;
 
     if (mInjectedCamHalInterface == nullptr) {
         ALOGE("%s: mInjectedCamHalInterface does not exist!", __FUNCTION__);
@@ -148,7 +146,6 @@
     if (parent->mStatus == STATUS_ACTIVE) {
         ALOGV("%s: Let the device be IDLE and the request thread is paused",
                 __FUNCTION__);
-        parent->mPauseStateNotify = true;
         res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
         if (res != OK) {
             ALOGE("%s: Can't pause captures to inject camera!", __FUNCTION__);
@@ -188,7 +185,7 @@
         ALOGV("%s: Restarting activity to inject camera", __FUNCTION__);
         // Reuse current operating mode and session parameters for new stream
         // config.
-        parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+        parent->internalResumeLocked();
     }
 
     return OK;
@@ -208,7 +205,6 @@
     if (parent->mStatus == STATUS_ACTIVE) {
         ALOGV("%s: Let the device be IDLE and the request thread is paused",
                 __FUNCTION__);
-        parent->mPauseStateNotify = true;
         res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
         if (res != OK) {
             ALOGE("%s: Can't pause captures to stop injection!", __FUNCTION__);
@@ -229,7 +225,7 @@
         ALOGV("%s: Restarting activity to stop injection", __FUNCTION__);
         // Reuse current operating mode and session parameters for new stream
         // config.
-        parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+        parent->internalResumeLocked();
     }
 
     return OK;
@@ -243,6 +239,10 @@
     }
 }
 
+bool Camera3Device::Camera3DeviceInjectionMethods::isStreamConfigCompleteButNotInjected() {
+    return mIsStreamConfigCompleteButNotInjected;
+}
+
 const String8& Camera3Device::Camera3DeviceInjectionMethods::getInjectedCamId()
         const {
     return mInjectedCamId;
@@ -260,10 +260,22 @@
     *injectionBufferSizes = mInjectionBufferSizes;
 }
 
+void Camera3Device::Camera3DeviceInjectionMethods::storeInjectionConfig(
+        const camera3::camera_stream_configuration& injectionConfig,
+        const std::vector<uint32_t>& injectionBufferSizes) {
+    mIsStreamConfigCompleteButNotInjected = true;
+    mInjectionConfig = injectionConfig;
+    mInjectionStreams.clear();
+    for (size_t i = 0; i < injectionConfig.num_streams; i++) {
+        mInjectionStreams.push_back(injectionConfig.streams[i]);
+    }
+    mInjectionConfig.streams = mInjectionStreams.editArray();
+    mInjectionBufferSizes = injectionBufferSizes;
+}
 
 status_t Camera3Device::Camera3DeviceInjectionMethods::injectionConfigureStreams(
         camera3::camera_stream_configuration& injectionConfig,
-        std::vector<uint32_t>& injectionBufferSizes) {
+        const std::vector<uint32_t>& injectionBufferSizes) {
     ATRACE_CALL();
     status_t res = NO_ERROR;
 
@@ -326,7 +338,6 @@
             mInjectedCamId.string());
 
     auto rc = parent->mPreparerThread->resume();
-
     if (rc != OK) {
         ALOGE("%s: Injection camera %s: Preparer thread failed to resume!",
                  __FUNCTION__, mInjectedCamId.string());
@@ -339,6 +350,9 @@
 void Camera3Device::Camera3DeviceInjectionMethods::injectionDisconnectImpl() {
     ATRACE_CALL();
     ALOGI("%s: Injection camera disconnect", __FUNCTION__);
+    mIsStreamConfigCompleteButNotInjected = false;
+    mInjectionStreams.clear();
+    mInjectionConfig.streams = nullptr;
 
     mBackupHalInterface = nullptr;
     HalInterface* interface = nullptr;
@@ -380,10 +394,18 @@
         return INVALID_OPERATION;
     }
 
-    if (keepBackup && mBackupHalInterface == nullptr) {
-        mBackupHalInterface = parent->mInterface;
-    } else if (!keepBackup) {
+    if (keepBackup) {
+        if (mBackupHalInterface == nullptr) {
+            mBackupHalInterface = parent->mInterface;
+        }
+        if (mBackupResultMetadataQueue == nullptr) {
+            mBackupResultMetadataQueue = std::move(parent->mResultMetadataQueue);
+            parent->mResultMetadataQueue = std::move(mInjectionResultMetadataQueue);
+        }
+    } else {
         mBackupHalInterface = nullptr;
+        parent->mResultMetadataQueue = std::move(mBackupResultMetadataQueue);
+        mBackupResultMetadataQueue = nullptr;
     }
     parent->mInterface = newHalInterface;
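
storeInjectionConfig() above keeps its own copy of the stream pointer array (mInjectionStreams) and re-points mInjectionConfig.streams at it, so the stored configuration does not dangle once the caller's array goes away. A simplified sketch of that copy pattern, with illustrative stand-in types rather than the real camera3 structs:

    #include <vector>

    struct StreamSketch { int id; };

    // Stand-in for camera3::camera_stream_configuration: a count plus a non-owning
    // array of stream pointers supplied by the caller.
    struct StreamConfigSketch {
        unsigned num_streams = 0;
        StreamSketch** streams = nullptr;
    };

    class InjectionConfigStoreSketch {
      public:
        void store(const StreamConfigSketch& config) {
            mConfig = config;
            // Copy the pointer array itself; the pointed-to streams are owned
            // elsewhere and outlive this object.
            mStreams.assign(config.streams, config.streams + config.num_streams);
            mConfig.streams = mStreams.data();
        }

      private:
        StreamConfigSketch mConfig;
        std::vector<StreamSketch*> mStreams;
    };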
 
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
index b121e5d..61e43cb 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
@@ -48,7 +48,7 @@
 
 status_t Camera3FakeStream::returnBufferLocked(
         const camera_stream_buffer &,
-        nsecs_t, int32_t, const std::vector<size_t>&) {
+        nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
     ATRACE_CALL();
     ALOGE("%s: Stream %d: Fake stream cannot return buffers!", __FUNCTION__, mId);
     return INVALID_OPERATION;
@@ -56,7 +56,7 @@
 
 status_t Camera3FakeStream::returnBufferCheckedLocked(
             const camera_stream_buffer &,
-            nsecs_t,
+            nsecs_t, nsecs_t,
             bool,
             int32_t,
             const std::vector<size_t>&,
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index c11a3e4..df19c3d 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -108,6 +108,7 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
@@ -135,7 +136,8 @@
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
     virtual status_t returnBufferLocked(
             const camera_stream_buffer &buffer,
-            nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids);
+            nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
+            const std::vector<size_t>& surface_ids);
 
     virtual status_t configureQueueLocked();
 
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 5f7e4cf..ba97367 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -34,10 +34,11 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
         Camera3Stream(id, type,
                 width, height, maxSize, format, dataSpace, rotation,
-                physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
+                physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
+                dynamicRangeProfile),
         mTotalBufferCount(0),
         mHandoutTotalBufferCount(0),
         mHandoutOutputBufferCount(0),
@@ -82,11 +83,12 @@
             camera_stream::width, camera_stream::height,
             camera_stream::format, camera_stream::data_space);
     lines.appendFormat("      Max size: %zu\n", mMaxSize);
-    lines.appendFormat("      Combined usage: %" PRIu64 ", max HAL buffers: %d\n",
+    lines.appendFormat("      Combined usage: 0x%" PRIx64 ", max HAL buffers: %d\n",
             mUsage | consumerUsage, camera_stream::max_buffers);
     if (strlen(camera_stream::physical_camera_id) > 0) {
         lines.appendFormat("      Physical camera id: %s\n", camera_stream::physical_camera_id);
     }
+    lines.appendFormat("      Dynamic Range Profile: 0x%x", camera_stream::dynamic_range_profile);
     lines.appendFormat("      Frames produced: %d, last timestamp: %" PRId64 " ns\n",
             mFrameCount, mLastTimestamp);
     lines.appendFormat("      Total buffers: %zu, currently dequeued: %zu\n",
@@ -224,6 +226,7 @@
 status_t Camera3IOStreamBase::returnAnyBufferLocked(
         const camera_stream_buffer &buffer,
         nsecs_t timestamp,
+        nsecs_t readoutTimestamp,
         bool output,
         int32_t transform,
         const std::vector<size_t>& surface_ids) {
@@ -242,7 +245,8 @@
     }
 
     sp<Fence> releaseFence;
-    res = returnBufferCheckedLocked(buffer, timestamp, output, transform, surface_ids,
+    res = returnBufferCheckedLocked(buffer, timestamp, readoutTimestamp,
+                                    output, transform, surface_ids,
                                     &releaseFence);
     // Res may be an error, but we still want to decrement our owned count
     // to enable clean shutdown. So we'll just return the error but otherwise
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 6135b7e..518ee42 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -37,7 +37,8 @@
             android_dataspace dataSpace, camera_stream_rotation_t rotation,
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
 
   public:
 
@@ -67,6 +68,7 @@
     status_t         returnAnyBufferLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
@@ -74,6 +76,7 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 6eb798e..9a3f7ed 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -105,6 +105,7 @@
 status_t Camera3InputStream::returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t /*transform*/,
             const std::vector<size_t>&,
@@ -112,6 +113,7 @@
             sp<Fence> *releaseFenceOut) {
 
     (void)timestamp;
+    (void)readoutTimestamp;
     (void)output;
     ALOG_ASSERT(!output, "Expected output to be false");
 
@@ -176,7 +178,8 @@
         const camera_stream_buffer &buffer) {
     ATRACE_CALL();
 
-    return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false, /*transform*/ -1);
+    return returnAnyBufferLocked(buffer, /*timestamp*/0, /*readoutTimestamp*/0,
+                                 /*output*/false, /*transform*/ -1);
 }
 
 status_t Camera3InputStream::getInputBufferProducerLocked(
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 6f66bca..5e0587b 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -61,6 +61,7 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 3738d01..69723b6 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -22,6 +22,7 @@
 #include <fstream>
 
 #include <android-base/unique_fd.h>
+#include <cutils/properties.h>
 #include <ui/GraphicBuffer.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -45,10 +46,11 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                             /*maxSize*/0, format, dataSpace, rotation,
-                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
+                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
+                            dynamicRangeProfile),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -73,10 +75,10 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
-                            setId, isMultiResolution),
+                            setId, isMultiResolution, dynamicRangeProfile),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -108,10 +110,11 @@
         camera_stream_rotation_t rotation, nsecs_t timestampOffset,
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
         Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                             /*maxSize*/0, format, dataSpace, rotation,
-                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
+                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
+                            dynamicRangeProfile),
         mConsumer(nullptr),
         mTransform(0),
         mTraceFirstBuffer(true),
@@ -148,11 +151,13 @@
                                          const String8& physicalCameraId,
                                         const std::unordered_set<int32_t> &sensorPixelModesUsed,
                                          uint64_t consumerUsage, nsecs_t timestampOffset,
-                                         int setId, bool isMultiResolution) :
+                                         int setId, bool isMultiResolution,
+                                         int dynamicRangeProfile) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
                             format, dataSpace, rotation,
-                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
+                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
+                            dynamicRangeProfile),
         mTransform(0),
         mTraceFirstBuffer(true),
         mUseMonoTimestamp(false),
@@ -263,14 +268,16 @@
 
 status_t Camera3OutputStream::returnBufferLocked(
         const camera_stream_buffer &buffer,
-        nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids) {
+        nsecs_t timestamp, nsecs_t readoutTimestamp,
+        int32_t transform, const std::vector<size_t>& surface_ids) {
     ATRACE_HFR_CALL();
 
     if (mHandoutTotalBufferCount == 1) {
         returnPrefetchedBuffersLocked();
     }
 
-    status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true, transform, surface_ids);
+    status_t res = returnAnyBufferLocked(buffer, timestamp, readoutTimestamp,
+                                         /*output*/true, transform, surface_ids);
 
     if (res != OK) {
         return res;
@@ -285,6 +292,7 @@
 status_t Camera3OutputStream::returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
@@ -347,20 +355,6 @@
             mTraceFirstBuffer = false;
         }
 
-        if (transform != -1) {
-            setTransformLocked(transform);
-        }
-
-        /* Certain consumers (such as AudioSource or HardwareComposer) use
-         * MONOTONIC time, causing time misalignment if camera timestamp is
-         * in BOOTTIME. Do the conversion if necessary. */
-        res = native_window_set_buffers_timestamp(mConsumer.get(),
-                mUseMonoTimestamp ? timestamp - mTimestampOffset : timestamp);
-        if (res != OK) {
-            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
-                  __FUNCTION__, mId, strerror(-res), res);
-            return res;
-        }
         // If this is a JPEG output, and image dump mask is set, save image to
         // disk.
         if (getFormat() == HAL_PIXEL_FORMAT_BLOB && getDataSpace() == HAL_DATASPACE_V0_JFIF &&
@@ -368,10 +362,35 @@
             dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
         }
 
-        res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
-        if (shouldLogError(res, state)) {
-            ALOGE("%s: Stream %d: Error queueing buffer to native window:"
-                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+        /* Certain consumers (such as AudioSource or HardwareComposer) use
+         * MONOTONIC time, causing time misalignment if camera timestamp is
+         * in BOOTTIME. Do the conversion if necessary. */
+        nsecs_t t = mPreviewFrameScheduler != nullptr ? readoutTimestamp : timestamp;
+        nsecs_t adjustedTs = mUseMonoTimestamp ? t - mTimestampOffset : t;
+        if (mPreviewFrameScheduler != nullptr) {
+            res = mPreviewFrameScheduler->queuePreviewBuffer(adjustedTs, transform,
+                    anwBuffer, anwReleaseFence);
+            if (res != OK) {
+                ALOGE("%s: Stream %d: Error queuing buffer to preview buffer scheduler: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+                return res;
+            }
+        } else {
+            setTransform(transform);
+            res = native_window_set_buffers_timestamp(mConsumer.get(), adjustedTs);
+            if (res != OK) {
+                ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+                      __FUNCTION__, mId, strerror(-res), res);
+                return res;
+            }
+
+            queueHDRMetadata(anwBuffer->handle, currentConsumer, dynamic_range_profile);
+
+            res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
+            if (shouldLogError(res, state)) {
+                ALOGE("%s: Stream %d: Error queueing buffer to native window:"
+                      " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+            }
         }
     }
     mLock.lock();
@@ -412,6 +431,9 @@
 
 status_t Camera3OutputStream::setTransformLocked(int transform) {
     status_t res = OK;
+
+    if (transform == -1) return res;
+
     if (mState == STATE_ERROR) {
         ALOGE("%s: Stream in error state", __FUNCTION__);
         return INVALID_OPERATION;
@@ -437,7 +459,7 @@
         return res;
     }
 
-    if ((res = configureConsumerQueueLocked()) != OK) {
+    if ((res = configureConsumerQueueLocked(true /*allowPreviewScheduler*/)) != OK) {
         return res;
     }
 
@@ -461,7 +483,7 @@
     return OK;
 }
 
-status_t Camera3OutputStream::configureConsumerQueueLocked() {
+status_t Camera3OutputStream::configureConsumerQueueLocked(bool allowPreviewScheduler) {
     status_t res;
 
     mTraceFirstBuffer = true;
@@ -547,6 +569,15 @@
     }
 
     mTotalBufferCount = maxConsumerBuffers + camera_stream::max_buffers;
+    if (allowPreviewScheduler && isConsumedByHWComposer()) {
+        // We cannot distinguish between a SurfaceView and an ImageReader of
+        // preview buffer format. The PreviewFrameScheduler needs to handle both.
+        if (!property_get_bool("camera.disable_preview_scheduler", false)) {
+            mPreviewFrameScheduler = std::make_unique<PreviewFrameScheduler>(*this, mConsumer);
+            mTotalBufferCount += PreviewFrameScheduler::kQueueDepthWatermark;
+        }
+    }
+
     mHandoutTotalBufferCount = 0;
     mFrameCount = 0;
     mLastTimestamp = 0;
@@ -1185,6 +1216,11 @@
     }
 }
 
+bool Camera3OutputStream::shouldLogError(status_t res) {
+    Mutex::Autolock l(mLock);
+    return shouldLogError(res, mState);
+}
+
 }; // namespace camera3
 
 }; // namespace android
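
The timestamp handling in returnBufferCheckedLocked() above can be summarized as: the preview scheduler path re-times frames by the readout timestamp, the direct path keeps the start-of-exposure timestamp, and both subtract the BOOTTIME-to-MONOTONIC offset when the consumer expects MONOTONIC time. A condensed restatement (this helper does not exist in the source; it only mirrors the inline logic):

    #include <cstdint>

    using nsecs_t = int64_t;

    // Condensed restatement of the timestamp choice made before queueing a buffer.
    nsecs_t pickQueueTimestamp(bool usePreviewScheduler, bool useMonoTimestamp,
                               nsecs_t timestamp, nsecs_t readoutTimestamp,
                               nsecs_t timestampOffset) {
        nsecs_t t = usePreviewScheduler ? readoutTimestamp : timestamp;
        return useMonoTimestamp ? t - timestampOffset : t;
    }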
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 0872687..d9bf62a 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -27,6 +27,7 @@
 #include "Camera3IOStreamBase.h"
 #include "Camera3OutputStreamInterface.h"
 #include "Camera3BufferManager.h"
+#include "PreviewFrameScheduler.h"
 
 namespace android {
 
@@ -88,7 +89,8 @@
             android_dataspace dataSpace, camera_stream_rotation_t rotation,
             nsecs_t timestampOffset, const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
     /**
      * Set up a stream for formats that have a variable buffer size for the same
      * dimensions, such as compressed JPEG.
@@ -100,7 +102,8 @@
             android_dataspace dataSpace, camera_stream_rotation_t rotation,
             nsecs_t timestampOffset, const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
     /**
      * Set up a stream with deferred consumer for formats that have 2 dimensions, such as
      * RAW and YUV. The consumer must be set before using this stream for output. A valid
@@ -111,7 +114,8 @@
             camera_stream_rotation_t rotation, nsecs_t timestampOffset,
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
 
     virtual ~Camera3OutputStream();
 
@@ -229,6 +233,7 @@
     static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
 
     void setImageDumpMask(int mask) { mImageDumpMask = mask; }
+    bool shouldLogError(status_t res);
 
   protected:
     Camera3OutputStream(int id, camera_stream_type_t type,
@@ -237,7 +242,8 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             uint64_t consumerUsage = 0, nsecs_t timestampOffset = 0,
-            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
+            int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
 
     /**
      * Note that we release the lock briefly in this function
@@ -245,6 +251,7 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
@@ -255,7 +262,7 @@
 
     status_t getEndpointUsageForSurface(uint64_t *usage,
             const sp<Surface>& surface) const;
-    status_t configureConsumerQueueLocked();
+    status_t configureConsumerQueueLocked(bool allowPreviewScheduler);
 
     // Consumer as the output of camera HAL
     sp<Surface> mConsumer;
@@ -333,7 +340,8 @@
 
     virtual status_t returnBufferLocked(
             const camera_stream_buffer &buffer,
-            nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids);
+            nsecs_t timestamp, nsecs_t readoutTimestamp,
+            int32_t transform, const std::vector<size_t>& surface_ids);
 
     virtual status_t queueBufferToConsumer(sp<ANativeWindow>& consumer,
             ANativeWindowBuffer* buffer, int anwReleaseFence,
@@ -370,6 +378,8 @@
 
     int mImageDumpMask = 0;
 
+    // The preview stream scheduler for re-timing frames
+    std::unique_ptr<PreviewFrameScheduler> mPreviewFrameScheduler;
 }; // class Camera3OutputStream
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 5c54dc7..24f81f3 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -463,7 +463,7 @@
         returnOutputBuffers(
             states.useHalBufManager, states.listener,
             request.pendingOutputBuffers.array(),
-            request.pendingOutputBuffers.size(), 0,
+            request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
             /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
             /*timestampIncreasing*/true,
             request.outputSurfaces, request.resultExtras,
@@ -870,9 +870,9 @@
         bool useHalBufManager,
         sp<NotificationListener> listener,
         const camera_stream_buffer_t *outputBuffers, size_t numBuffers,
-        nsecs_t timestamp, bool requested, nsecs_t requestTimeNs,
-        SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing,
-        const SurfaceMap& outputSurfaces,
+        nsecs_t timestamp, nsecs_t readoutTimestamp, bool requested,
+        nsecs_t requestTimeNs, SessionStatsBuilder& sessionStatsBuilder,
+        bool timestampIncreasing, const SurfaceMap& outputSurfaces,
         const CaptureResultExtras &inResultExtras,
         ERROR_BUF_STRATEGY errorBufStrategy, int32_t transform) {
 
@@ -916,11 +916,11 @@
                 errorBufStrategy != ERROR_BUF_CACHE) {
             if (it != outputSurfaces.end()) {
                 res = stream->returnBuffer(
-                        outputBuffers[i], timestamp, timestampIncreasing, it->second,
-                        inResultExtras.frameNumber, transform);
+                        outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
+                        it->second, inResultExtras.frameNumber, transform);
             } else {
                 res = stream->returnBuffer(
-                        outputBuffers[i], timestamp, timestampIncreasing,
+                        outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
                         std::vector<size_t> (), inResultExtras.frameNumber, transform);
             }
         }
@@ -951,7 +951,7 @@
             // cancel the buffer
             camera_stream_buffer_t sb = outputBuffers[i];
             sb.status = CAMERA_BUFFER_STATUS_ERROR;
-            stream->returnBuffer(sb, /*timestamp*/0,
+            stream->returnBuffer(sb, /*timestamp*/0, /*readoutTimestamp*/0,
                     timestampIncreasing, std::vector<size_t> (),
                     inResultExtras.frameNumber, transform);
 
@@ -974,8 +974,8 @@
     returnOutputBuffers(useHalBufManager, listener,
             request.pendingOutputBuffers.array(),
             request.pendingOutputBuffers.size(),
-            request.shutterTimestamp, /*requested*/true,
-            request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
+            request.shutterTimestamp, request.shutterReadoutTimestamp,
+            /*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
             request.outputSurfaces, request.resultExtras,
             request.errorBufStrategy, request.transform);
 
@@ -1036,6 +1036,7 @@
             }
 
             r.shutterTimestamp = msg.timestamp;
+            r.shutterReadoutTimestamp = msg.readout_timestamp;
             if (r.hasCallback) {
                 ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
                     states.cameraId.string(), __FUNCTION__,
@@ -1194,7 +1195,30 @@
 }
 
 void notify(CaptureOutputStates& states,
-        const hardware::camera::device::V3_2::NotifyMsg& msg) {
+        const hardware::camera::device::V3_8::NotifyMsg& msg) {
+    using android::hardware::camera::device::V3_2::MsgType;
+
+    hardware::camera::device::V3_2::NotifyMsg msg_3_2;
+    msg_3_2.type = msg.type;
+    bool hasReadoutTime = false;
+    uint64_t readoutTime = 0;
+    switch (msg.type) {
+        case MsgType::ERROR:
+            msg_3_2.msg.error = msg.msg.error;
+            break;
+        case MsgType::SHUTTER:
+            msg_3_2.msg.shutter = msg.msg.shutter.v3_2;
+            hasReadoutTime = true;
+            readoutTime = msg.msg.shutter.readoutTimestamp;
+            break;
+    }
+    notify(states, msg_3_2, hasReadoutTime, readoutTime);
+}
+
+void notify(CaptureOutputStates& states,
+        const hardware::camera::device::V3_2::NotifyMsg& msg,
+        bool hasReadoutTime, uint64_t readoutTime) {
+
     using android::hardware::camera::device::V3_2::MsgType;
     using android::hardware::camera::device::V3_2::ErrorCode;
 
@@ -1235,11 +1259,21 @@
             m.type = CAMERA_MSG_SHUTTER;
             m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
             m.message.shutter.timestamp = msg.msg.shutter.timestamp;
+            m.message.shutter.readout_timestamp = hasReadoutTime ?
+                    readoutTime : m.message.shutter.timestamp;
             break;
     }
     notify(states, &m);
 }
 
+// The buffers requested through this call are not tied to any particular
+// CaptureRequest. The HAL may use them for a specific frame's output buffers or
+// for its own internal purposes. If the HAL does use a buffer from this list for
+// a particular frame's output, that buffer is returned with the
+// processCaptureResult call corresponding to the frame. The remaining buffers
+// are returned through returnStreamBuffers; those buffers carry no valid
+// timestamp (0) and will be dropped by the BufferQueue.
 void requestStreamBuffers(RequestBufferStates& states,
         const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
         hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb _hidl_cb) {
@@ -1417,7 +1451,8 @@
                 sb.status = CAMERA_BUFFER_STATUS_ERROR;
             }
             returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
-                    streamBuffers.data(), numAllocatedBuffers, 0, /*requested*/false,
+                    streamBuffers.data(), numAllocatedBuffers, /*timestamp*/0,
+                    /*readoutTimestamp*/0, /*requested*/false,
                     /*requestTimeNs*/0, states.sessionStatsBuilder);
             for (auto buf : newBuffers) {
                 states.bufferRecordsIntf.removeOneBufferCache(streamId, buf);
@@ -1478,8 +1513,8 @@
         }
         streamBuffer.stream = stream->asHalStream();
         returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
-                &streamBuffer, /*size*/1, /*timestamp*/ 0, /*requested*/false,
-                /*requestTimeNs*/0, states.sessionStatsBuilder);
+                &streamBuffer, /*size*/1, /*timestamp*/ 0, /*readoutTimestamp*/0,
+                /*requested*/false, /*requestTimeNs*/0, states.sessionStatsBuilder);
     }
 }
 
@@ -1492,9 +1527,10 @@
             returnOutputBuffers(
                 states.useHalBufManager, states.listener,
                 request.pendingOutputBuffers.array(),
-                request.pendingOutputBuffers.size(), 0, /*requested*/true,
-                request.requestTimeNs, states.sessionStatsBuilder, /*timestampIncreasing*/true,
-                request.outputSurfaces, request.resultExtras, request.errorBufStrategy);
+                request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
+                /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
+                /*timestampIncreasing*/true, request.outputSurfaces, request.resultExtras,
+                request.errorBufStrategy);
             ALOGW("%s: Frame %d |  Timestamp: %" PRId64 ", metadata"
                     " arrived: %s, buffers left: %d.\n", __FUNCTION__,
                     states.inflightMap.keyAt(idx), request.shutterTimestamp,
@@ -1566,7 +1602,7 @@
                 switch (halStream->stream_type) {
                     case CAMERA_STREAM_OUTPUT:
                         res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0,
-                                /*timestampIncreasing*/true,
+                                /*readoutTimestamp*/0, /*timestampIncreasing*/true,
                                 std::vector<size_t> (), frameNumber);
                         if (res != OK) {
                             ALOGE("%s: Can't return output buffer for frame %d to"
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 06b7ab4..51899ee 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -26,6 +26,8 @@
 
 #include <common/CameraDeviceBase.h>
 
+#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
+
 #include "device3/BufferUtils.h"
 #include "device3/DistortionMapper.h"
 #include "device3/ZoomRatioMapper.h"
@@ -42,66 +44,6 @@
 
 namespace camera3 {
 
-    typedef struct camera_stream_configuration {
-        uint32_t num_streams;
-        camera_stream_t **streams;
-        uint32_t operation_mode;
-        bool input_is_multi_resolution;
-    } camera_stream_configuration_t;
-
-    typedef struct camera_capture_request {
-        uint32_t frame_number;
-        const camera_metadata_t *settings;
-        camera_stream_buffer_t *input_buffer;
-        uint32_t num_output_buffers;
-        const camera_stream_buffer_t *output_buffers;
-        uint32_t num_physcam_settings;
-        const char **physcam_id;
-        const camera_metadata_t **physcam_settings;
-        int32_t input_width;
-        int32_t input_height;
-    } camera_capture_request_t;
-
-    typedef struct camera_capture_result {
-        uint32_t frame_number;
-        const camera_metadata_t *result;
-        uint32_t num_output_buffers;
-        const camera_stream_buffer_t *output_buffers;
-        const camera_stream_buffer_t *input_buffer;
-        uint32_t partial_result;
-        uint32_t num_physcam_metadata;
-        const char **physcam_ids;
-        const camera_metadata_t **physcam_metadata;
-    } camera_capture_result_t;
-
-    typedef struct camera_shutter_msg {
-        uint32_t frame_number;
-        uint64_t timestamp;
-    } camera_shutter_msg_t;
-
-    typedef struct camera_error_msg {
-        uint32_t frame_number;
-        camera_stream_t *error_stream;
-        int error_code;
-    } camera_error_msg_t;
-
-    typedef enum camera_error_msg_code {
-        CAMERA_MSG_ERROR_DEVICE = 1,
-        CAMERA_MSG_ERROR_REQUEST = 2,
-        CAMERA_MSG_ERROR_RESULT = 3,
-        CAMERA_MSG_ERROR_BUFFER = 4,
-        CAMERA_MSG_NUM_ERRORS
-    } camera_error_msg_code_t;
-
-    typedef struct camera_notify_msg {
-        int type;
-
-        union {
-            camera_error_msg_t error;
-            camera_shutter_msg_t shutter;
-        } message;
-    } camera_notify_msg_t;
-
     /**
      * Helper methods shared between Camera3Device/Camera3OfflineSession for HAL callbacks
      */
@@ -112,7 +54,8 @@
             bool useHalBufManager,
             sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
             const camera_stream_buffer_t *outputBuffers,
-            size_t numBuffers, nsecs_t timestamp, bool requested, nsecs_t requestTimeNs,
+            size_t numBuffers, nsecs_t timestamp,
+            nsecs_t readoutTimestamp, bool requested, nsecs_t requestTimeNs,
             SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing = true,
             // The following arguments are only meant for surface sharing use case
             const SurfaceMap& outputSurfaces = SurfaceMap{},
@@ -179,7 +122,10 @@
 
     // Handle one notify message
     void notify(CaptureOutputStates& states,
-            const hardware::camera::device::V3_2::NotifyMsg& msg);
+            const hardware::camera::device::V3_2::NotifyMsg& msg,
+            bool hasReadoutTime = false, uint64_t readoutTime = 0LL);
+    void notify(CaptureOutputStates& states,
+            const hardware::camera::device::V3_8::NotifyMsg& msg);
 
     struct RequestBufferStates {
         const String8& cameraId;
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 15cf7f4..0e2671a 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -33,10 +33,10 @@
         camera_stream_rotation_t rotation,
         nsecs_t timestampOffset, const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool useHalBufManager) :
+        int setId, bool useHalBufManager, int dynamicProfile) :
         Camera3OutputStream(id, CAMERA_STREAM_OUTPUT, width, height,
                             format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
-                            consumerUsage, timestampOffset, setId),
+                            consumerUsage, timestampOffset, setId, dynamicProfile),
         mUseHalBufManager(useHalBufManager) {
     size_t consumerCount = std::min(surfaces.size(), kMaxOutputs);
     if (surfaces.size() > consumerCount) {
@@ -67,7 +67,7 @@
     }
 
     res = mStreamSplitter->connect(initialSurfaces, usage, mUsage, camera_stream::max_buffers,
-            getWidth(), getHeight(), getFormat(), &mConsumer);
+            getWidth(), getHeight(), getFormat(), &mConsumer, camera_stream::dynamic_range_profile);
     if (res != OK) {
         ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
                 __FUNCTION__, strerror(-res), res);
@@ -247,7 +247,7 @@
         return res;
     }
 
-    res = configureConsumerQueueLocked();
+    res = configureConsumerQueueLocked(false/*allowPreviewScheduler*/);
     if (res != OK) {
         ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
         return res;
@@ -388,13 +388,15 @@
         bool sizeMismatch = ((static_cast<uint32_t>(infoIt.width) != getWidth()) ||
                                 (static_cast<uint32_t> (infoIt.height) != getHeight())) ?
                                 true : false;
-        if ((imgReaderUsage && sizeMismatch) ||
+        bool dynamicRangeMismatch = dynamic_range_profile != infoIt.dynamicRangeProfile;
+        if ((imgReaderUsage && sizeMismatch) || dynamicRangeMismatch ||
                 (infoIt.format != getOriginalFormat() && infoIt.format != getFormat()) ||
                 (infoIt.dataSpace != getDataSpace() &&
                  infoIt.dataSpace != getOriginalDataSpace())) {
-            ALOGE("%s: Shared surface parameters format: 0x%x dataSpace: 0x%x "
-                    " don't match source stream format: 0x%x  dataSpace: 0x%x", __FUNCTION__,
-                    infoIt.format, infoIt.dataSpace, getFormat(), getDataSpace());
+            ALOGE("%s: Shared surface parameters format: 0x%x dataSpace: 0x%x dynamic range 0x%x "
+                    " don't match source stream format: 0x%x  dataSpace: 0x%x dynamic range 0x%x"
+                    , __FUNCTION__, infoIt.format, infoIt.dataSpace, infoIt.dynamicRangeProfile,
+                    getFormat(), getDataSpace(), dynamic_range_profile);
             return BAD_VALUE;
         }
     }
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index 4b6341b..fafa26f 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -40,7 +40,8 @@
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
             int setId = CAMERA3_STREAM_SET_ID_INVALID,
-            bool useHalBufManager = false);
+            bool useHalBufManager = false,
+            int dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
 
     virtual ~Camera3SharedOutputStream();
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index afcfd2a..83f9a98 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <vector>
+#include "system/window.h"
 #define LOG_TAG "Camera3-Stream"
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
@@ -23,6 +25,7 @@
 #include "device3/Camera3Stream.h"
 #include "device3/StatusTracker.h"
 #include "utils/TraceHFR.h"
+#include "ui/GraphicBufferMapper.h"
 
 #include <cutils/properties.h>
 
@@ -51,7 +54,7 @@
         android_dataspace dataSpace, camera_stream_rotation_t rotation,
         const String8& physicalCameraId,
         const std::unordered_set<int32_t> &sensorPixelModesUsed,
-        int setId, bool isMultiResolution) :
+        int setId, bool isMultiResolution, int dynamicRangeProfile) :
     camera_stream(),
     mId(id),
     mSetId(setId),
@@ -87,6 +90,7 @@
     camera_stream::max_buffers = 0;
     camera_stream::physical_camera_id = mPhysicalCameraId.string();
     camera_stream::sensor_pixel_modes_used = sensorPixelModesUsed;
+    camera_stream::dynamic_range_profile = dynamicRangeProfile;
 
     if ((format == HAL_PIXEL_FORMAT_BLOB || format == HAL_PIXEL_FORMAT_RAW_OPAQUE) &&
             maxSize == 0) {
@@ -147,6 +151,10 @@
     return mOriginalFormat;
 }
 
+int Camera3Stream::getDynamicRangeProfile() const {
+    return camera_stream::dynamic_range_profile;
+}
+
 void Camera3Stream::setDataSpaceOverride(bool dataSpaceOverridden) {
     mDataSpaceOverridden = dataSpaceOverridden;
 }
@@ -557,7 +565,8 @@
     for (size_t i = 0; i < mPreparedBufferIdx; i++) {
         mPreparedBuffers.editItemAt(i).release_fence = -1;
         mPreparedBuffers.editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
-        returnBufferLocked(mPreparedBuffers[i], 0, /*transform*/ -1);
+        returnBufferLocked(mPreparedBuffers[i], /*timestamp*/0, /*readoutTimestamp*/0,
+                /*transform*/ -1);
     }
     mPreparedBuffers.clear();
     mPreparedBufferIdx = 0;
@@ -713,7 +722,7 @@
 }
 
 status_t Camera3Stream::returnBuffer(const camera_stream_buffer &buffer,
-        nsecs_t timestamp, bool timestampIncreasing,
+        nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing,
          const std::vector<size_t>& surface_ids, uint64_t frameNumber, int32_t transform) {
     ATRACE_HFR_CALL();
     Mutex::Autolock l(mLock);
@@ -743,7 +752,7 @@
      *
      * Do this for getBuffer as well.
      */
-    status_t res = returnBufferLocked(b, timestamp, transform, surface_ids);
+    status_t res = returnBufferLocked(b, timestamp, readoutTimestamp, transform, surface_ids);
     if (res == OK) {
         fireBufferListenersLocked(b, /*acquired*/false, /*output*/true, timestamp, frameNumber);
     }
@@ -931,7 +940,7 @@
 }
 
 status_t Camera3Stream::returnBufferLocked(const camera_stream_buffer &,
-                                           nsecs_t, int32_t, const std::vector<size_t>&) {
+                                           nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
     ALOGE("%s: This type of stream does not support output", __FUNCTION__);
     return INVALID_OPERATION;
 }
@@ -1077,6 +1086,52 @@
     return res;
 }
 
+void Camera3Stream::queueHDRMetadata(buffer_handle_t buffer, sp<ANativeWindow>& anw,
+        int dynamicRangeProfile) {
+    auto& mapper = GraphicBufferMapper::get();
+    switch (dynamicRangeProfile) {
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10: {
+            std::optional<ui::Smpte2086> smpte2086;
+            auto res = mapper.getSmpte2086(buffer, &smpte2086);
+            if ((res == OK) && smpte2086.has_value()) {
+                const auto& metaValue = smpte2086.value();
+                android_smpte2086_metadata meta = {
+                    .displayPrimaryRed.x = metaValue.primaryRed.x,
+                    .displayPrimaryRed.y = metaValue.primaryRed.y,
+                    .displayPrimaryGreen.x = metaValue.primaryGreen.x,
+                    .displayPrimaryGreen.y = metaValue.primaryGreen.y,
+                    .displayPrimaryBlue.x = metaValue.primaryBlue.x,
+                    .displayPrimaryBlue.y = metaValue.primaryBlue.y,
+                    .whitePoint.x = metaValue.whitePoint.x,
+                    .whitePoint.y = metaValue.whitePoint.y,
+                    .maxLuminance = metaValue.maxLuminance,
+                    .minLuminance = metaValue.minLuminance};
+                native_window_set_buffers_smpte2086_metadata(anw.get(), &meta);
+            } else {
+                ALOGE("%s Couldn't retrieve Smpte2086 metadata %s (%d)",
+                        __FUNCTION__, strerror(-res), res);
+            }
+            break;
+        }
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS: {
+            std::optional<std::vector<uint8_t>> smpte2094_40;
+            auto res = mapper.getSmpte2094_40(buffer, &smpte2094_40);
+            if ((res == OK) && smpte2094_40.has_value()) {
+                native_window_set_buffers_hdr10_plus_metadata(anw.get(),
+                        smpte2094_40.value().size(), smpte2094_40.value().data());
+            } else {
+                ALOGE("%s Couldn't retrieve Smpte2094_40 metadata %s (%d)",
+                        __FUNCTION__, strerror(-res), res);
+            }
+            break;
+        }
+        default:
+            // No-op
+            break;
+    }
+}
+
+
 }; // namespace camera3
 
 }; // namespace android
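
The queueHDRMetadata() helper added above keys the per-buffer HDR metadata path off the stream's dynamic range profile: HDR10 forwards static SMPTE 2086 mastering metadata, HDR10+ forwards dynamic SMPTE 2094-40 metadata, and other profiles queue nothing. The standalone sketch below shows only that dispatch shape; the enum and printouts are illustrative stand-ins, not the gralloc/ANativeWindow calls used in the patch.

#include <cstdio>

enum class Profile { Standard, Hlg10, Hdr10, Hdr10Plus };

void queueHdrMetadataSketch(Profile profile) {
    switch (profile) {
        case Profile::Hdr10:
            // Real code: GraphicBufferMapper::getSmpte2086() ->
            // native_window_set_buffers_smpte2086_metadata()
            std::printf("forward static SMPTE 2086 metadata\n");
            break;
        case Profile::Hdr10Plus:
            // Real code: GraphicBufferMapper::getSmpte2094_40() ->
            // native_window_set_buffers_hdr10_plus_metadata()
            std::printf("forward dynamic SMPTE 2094-40 metadata\n");
            break;
        default:
            // Standard/HLG10: nothing to queue per buffer.
            break;
    }
}

int main() {
    queueHdrMetadataSketch(Profile::Hdr10);
    queueHdrMetadataSketch(Profile::Hdr10Plus);
    queueHdrMetadataSketch(Profile::Standard);
    return 0;
}
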
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index fc75f79..bbbea8d 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -138,6 +138,10 @@
     static Camera3Stream*       cast(camera_stream *stream);
     static const Camera3Stream* cast(const camera_stream *stream);
 
+    // Queue corresponding HDR metadata to given native window.
+    static void queueHDRMetadata(buffer_handle_t buffer, sp<ANativeWindow>& anw,
+            int dynamicRangeProfile);
+
     /**
      * Get the stream's ID
      */
@@ -168,6 +172,7 @@
     void              setFormatOverride(bool formatOverriden);
     bool              isFormatOverridden() const;
     int               getOriginalFormat() const;
+    int               getDynamicRangeProfile() const;
     void              setDataSpaceOverride(bool dataSpaceOverriden);
     bool              isDataSpaceOverridden() const;
     android_dataspace getOriginalDataSpace() const;
@@ -352,7 +357,7 @@
      * For bidirectional streams, this method applies to the output-side buffers
      */
     status_t         returnBuffer(const camera_stream_buffer &buffer,
-            nsecs_t timestamp, bool timestampIncreasing,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing,
             const std::vector<size_t>& surface_ids = std::vector<size_t>(),
             uint64_t frameNumber = 0, int32_t transform = -1);
 
@@ -500,7 +505,7 @@
             android_dataspace dataSpace, camera_stream_rotation_t rotation,
             const String8& physicalCameraId,
             const std::unordered_set<int32_t> &sensorPixelModesUsed,
-            int setId, bool isMultiResolution);
+            int setId, bool isMultiResolution, int dynamicRangeProfile);
 
     wp<Camera3StreamBufferFreedListener> mBufferFreedListener;
 
@@ -517,7 +522,7 @@
     virtual status_t getBufferLocked(camera_stream_buffer *buffer,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
     virtual status_t returnBufferLocked(const camera_stream_buffer &buffer,
-            nsecs_t timestamp, int32_t transform,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
 
     virtual status_t getBuffersLocked(std::vector<OutstandingBuffer>*);
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 3aa5a3c..ef10f0d 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -64,6 +64,7 @@
     const char* physical_camera_id;
 
     std::unordered_set<int32_t> sensor_pixel_modes_used;
+    int dynamic_range_profile;
 } camera_stream_t;
 
 typedef struct camera_stream_buffer {
@@ -107,14 +108,17 @@
         bool finalized = false;
         bool supportsOffline = false;
         std::unordered_set<int32_t> sensorPixelModesUsed;
+        int dynamicRangeProfile;
         OutputStreamInfo() :
             width(-1), height(-1), format(-1), dataSpace(HAL_DATASPACE_UNKNOWN),
-            consumerUsage(0) {}
+            consumerUsage(0),
+            dynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {}
         OutputStreamInfo(int _width, int _height, int _format, android_dataspace _dataSpace,
-                uint64_t _consumerUsage, const std::unordered_set<int32_t>& _sensorPixelModesUsed) :
+                uint64_t _consumerUsage, const std::unordered_set<int32_t>& _sensorPixelModesUsed,
+                int _dynamicRangeProfile) :
             width(_width), height(_height), format(_format),
             dataSpace(_dataSpace), consumerUsage(_consumerUsage),
-            sensorPixelModesUsed(_sensorPixelModesUsed) {}
+            sensorPixelModesUsed(_sensorPixelModesUsed), dynamicRangeProfile(_dynamicRangeProfile){}
 };
 
 /**
@@ -154,6 +158,7 @@
     virtual uint32_t getWidth() const = 0;
     virtual uint32_t getHeight() const = 0;
     virtual int      getFormat() const = 0;
+    virtual int      getDynamicRangeProfile() const = 0;
     virtual android_dataspace getDataSpace() const = 0;
     virtual void setFormatOverride(bool formatOverriden) = 0;
     virtual bool isFormatOverridden() const = 0;
@@ -357,7 +362,7 @@
      * For bidirectional streams, this method applies to the output-side buffers
      */
     virtual status_t returnBuffer(const camera_stream_buffer &buffer,
-            nsecs_t timestamp, bool timestampIncreasing = true,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing = true,
             const std::vector<size_t>& surface_ids = std::vector<size_t>(),
             uint64_t frameNumber = 0, int32_t transform = -1) = 0;
 
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
index 5c6c518..1149d13 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -34,13 +34,16 @@
 
 #include <cutils/atomic.h>
 
+#include "Camera3Stream.h"
+
 #include "Camera3StreamSplitter.h"
 
 namespace android {
 
 status_t Camera3StreamSplitter::connect(const std::unordered_map<size_t, sp<Surface>> &surfaces,
         uint64_t consumerUsage, uint64_t producerUsage, size_t halMaxBuffers, uint32_t width,
-        uint32_t height, android::PixelFormat format, sp<Surface>* consumer) {
+        uint32_t height, android::PixelFormat format, sp<Surface>* consumer,
+        int dynamicRangeProfile) {
     ATRACE_CALL();
     if (consumer == nullptr) {
         SP_LOGE("%s: consumer pointer is NULL", __FUNCTION__);
@@ -61,6 +64,7 @@
 
     mMaxHalBuffers = halMaxBuffers;
     mConsumerName = getUniqueConsumerName();
+    mDynamicRangeProfile = dynamicRangeProfile;
     // Add output surfaces. This has to be before creating internal buffer queue
     // in order to get max consumer side buffers.
     for (auto &it : surfaces) {
@@ -136,6 +140,7 @@
         }
     }
     mOutputs.clear();
+    mOutputSurfaces.clear();
     mOutputSlots.clear();
     mConsumerBufferCount.clear();
 
@@ -258,6 +263,7 @@
 
     // Add new entry into mOutputs
     mOutputs[surfaceId] = gbp;
+    mOutputSurfaces[surfaceId] = outputQueue;
     mConsumerBufferCount[surfaceId] = maxConsumerBuffers;
     if (mConsumerBufferCount[surfaceId] > mMaxHalBuffers) {
         SP_LOGW("%s: Consumer buffer count %zu larger than max. Hal buffers: %zu", __FUNCTION__,
@@ -316,6 +322,7 @@
         }
     }
     mOutputs[surfaceId] = nullptr;
+    mOutputSurfaces[surfaceId] = nullptr;
     mOutputSlots[gbp] = nullptr;
     for (const auto &id : pendingBufferIds) {
         decrementBufRefCountLocked(id, surfaceId);
@@ -356,6 +363,14 @@
     const BufferTracker& tracker = *(mBuffers[bufferId]);
     int slot = getSlotForOutputLocked(output, tracker.getBuffer());
 
+    if (mOutputSurfaces[surfaceId] != nullptr) {
+        sp<ANativeWindow> anw = mOutputSurfaces[surfaceId];
+        camera3::Camera3Stream::queueHDRMetadata(
+                bufferItem.mGraphicBuffer->getNativeBuffer()->handle, anw, mDynamicRangeProfile);
+    } else {
+        SP_LOGE("%s: Invalid surface id: %zu!", __FUNCTION__, surfaceId);
+    }
+
     // In case the output BufferQueue has its own lock, if we hold splitter lock while calling
     // queueBuffer (which will try to acquire the output lock), the output could be holding its
     // own lock calling releaseBuffer (which  will try to acquire the splitter lock), running into
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
index 4eb455a..827865c 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -19,6 +19,8 @@
 
 #include <unordered_set>
 
+#include <camera/CameraMetadata.h>
+
 #include <gui/IConsumerListener.h>
 #include <gui/IProducerListener.h>
 #include <gui/BufferItemConsumer.h>
@@ -55,7 +57,8 @@
     // with output surfaces.
     status_t connect(const std::unordered_map<size_t, sp<Surface>> &surfaces,
             uint64_t consumerUsage, uint64_t producerUsage, size_t halMaxBuffers, uint32_t width,
-            uint32_t height, android::PixelFormat format, sp<Surface>* consumer);
+            uint32_t height, android::PixelFormat format, sp<Surface>* consumer,
+            int dynamicRangeProfile);
 
     // addOutput adds an output BufferQueue to the splitter. The splitter
     // connects to outputQueue as a CPU producer, and any buffers queued
@@ -232,6 +235,7 @@
     uint32_t mHeight = 0;
     android::PixelFormat mFormat = android::PIXEL_FORMAT_NONE;
     uint64_t mProducerUsage = 0;
+    int mDynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
 
     // The attachBuffer call will happen on different thread according to mUseHalBufManager and have
     // different timing constraint.
@@ -251,6 +255,9 @@
     //Map surface ids -> gbp outputs
     std::unordered_map<int, sp<IGraphicBufferProducer> > mOutputs;
 
+    //Map surface ids -> output surfaces
+    std::unordered_map<int, sp<Surface>> mOutputSurfaces;
+
     //Map surface ids -> consumer buffer count
     std::unordered_map<int, size_t > mConsumerBufferCount;
 
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
index 89dd115..15807bf 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.cpp
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -22,7 +22,7 @@
 #include <cmath>
 
 #include "device3/DistortionMapper.h"
-#include "utils/SessionConfigurationUtils.h"
+#include "utils/SessionConfigurationUtilsHost.h"
 
 namespace android {
 
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 42fa8db..0c97f3e 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -30,6 +30,67 @@
 
 namespace camera3 {
 
+typedef struct camera_stream_configuration {
+    uint32_t num_streams;
+    camera_stream_t **streams;
+    uint32_t operation_mode;
+    bool input_is_multi_resolution;
+} camera_stream_configuration_t;
+
+typedef struct camera_capture_request {
+    uint32_t frame_number;
+    const camera_metadata_t *settings;
+    camera_stream_buffer_t *input_buffer;
+    uint32_t num_output_buffers;
+    const camera_stream_buffer_t *output_buffers;
+    uint32_t num_physcam_settings;
+    const char **physcam_id;
+    const camera_metadata_t **physcam_settings;
+    int32_t input_width;
+    int32_t input_height;
+} camera_capture_request_t;
+
+typedef struct camera_capture_result {
+    uint32_t frame_number;
+    const camera_metadata_t *result;
+    uint32_t num_output_buffers;
+    const camera_stream_buffer_t *output_buffers;
+    const camera_stream_buffer_t *input_buffer;
+    uint32_t partial_result;
+    uint32_t num_physcam_metadata;
+    const char **physcam_ids;
+    const camera_metadata_t **physcam_metadata;
+} camera_capture_result_t;
+
+typedef struct camera_shutter_msg {
+    uint32_t frame_number;
+    uint64_t timestamp;
+    uint64_t readout_timestamp;
+} camera_shutter_msg_t;
+
+typedef struct camera_error_msg {
+    uint32_t frame_number;
+    camera_stream_t *error_stream;
+    int error_code;
+} camera_error_msg_t;
+
+typedef enum camera_error_msg_code {
+    CAMERA_MSG_ERROR_DEVICE = 1,
+    CAMERA_MSG_ERROR_REQUEST = 2,
+    CAMERA_MSG_ERROR_RESULT = 3,
+    CAMERA_MSG_ERROR_BUFFER = 4,
+    CAMERA_MSG_NUM_ERRORS
+} camera_error_msg_code_t;
+
+typedef struct camera_notify_msg {
+    int type;
+
+    union {
+        camera_error_msg_t error;
+        camera_shutter_msg_t shutter;
+    } message;
+} camera_notify_msg_t;
+
 typedef enum {
     // Cache the buffers with STATUS_ERROR within InFlightRequest
     ERROR_BUF_CACHE,
@@ -41,9 +102,10 @@
 } ERROR_BUF_STRATEGY;
 
 struct InFlightRequest {
-
     // Set by notify() SHUTTER call.
     nsecs_t shutterTimestamp;
+    // Set by notify() SHUTTER call with readout time.
+    nsecs_t shutterReadoutTimestamp;
     // Set by process_capture_result().
     nsecs_t sensorTimestamp;
     int     requestStatus;
diff --git a/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp b/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp
new file mode 100644
index 0000000..6135f9e
--- /dev/null
+++ b/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-PreviewFrameScheduler"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <android/looper.h>
+#include "PreviewFrameScheduler.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Internal Choreographer thread implementation for polling and handling callbacks
+ */
+
+// Callback function for Choreographer
+static void frameCallback(const AChoreographerFrameCallbackData* callbackData, void* data) {
+    PreviewFrameScheduler* parent = static_cast<PreviewFrameScheduler*>(data);
+    if (parent == nullptr) {
+        ALOGE("%s: Invalid data for Choreographer callback!", __FUNCTION__);
+        return;
+    }
+
+    size_t length = AChoreographerFrameCallbackData_getFrameTimelinesLength(callbackData);
+    std::vector<nsecs_t> timeline(length);
+    for (size_t i = 0; i < length; i++) {
+        nsecs_t timestamp = AChoreographerFrameCallbackData_getFrameTimelineExpectedPresentTimeNanos(
+                callbackData, i);
+        timeline[i] = timestamp;
+    }
+
+    parent->onNewPresentationTime(timeline);
+
+    AChoreographer_postExtendedFrameCallback(AChoreographer_getInstance(), frameCallback, data);
+}
+
+struct ChoreographerThread : public Thread {
+    ChoreographerThread();
+    status_t start(PreviewFrameScheduler* parent);
+    virtual status_t readyToRun() override;
+    virtual bool threadLoop() override;
+
+protected:
+    virtual ~ChoreographerThread() {}
+
+private:
+    ChoreographerThread &operator=(const ChoreographerThread &);
+
+    // This only impacts the shutdown time. It won't impact the choreographer
+    // callback frequency.
+    static constexpr nsecs_t kPollingTimeoutMs = 5;
+    PreviewFrameScheduler* mParent = nullptr;
+};
+
+ChoreographerThread::ChoreographerThread() : Thread(false /*canCallJava*/) {
+}
+
+status_t ChoreographerThread::start(PreviewFrameScheduler* parent) {
+    mParent = parent;
+    return run("PreviewChoreographer");
+}
+
+status_t ChoreographerThread::readyToRun() {
+    ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+    if (AChoreographer_getInstance() == NULL) {
+        return NO_INIT;
+    }
+
+    AChoreographer_postExtendedFrameCallback(
+            AChoreographer_getInstance(), frameCallback, mParent);
+    return OK;
+}
+
+bool ChoreographerThread::threadLoop() {
+    if (exitPending()) {
+        return false;
+    }
+    ALooper_pollOnce(kPollingTimeoutMs, nullptr, nullptr, nullptr);
+    return true;
+}
+
+/**
+ * PreviewFrameScheduler implementation
+ */
+
+PreviewFrameScheduler::PreviewFrameScheduler(Camera3OutputStream& parent, sp<Surface> consumer) :
+        mParent(parent),
+        mConsumer(consumer),
+        mChoreographerThread(new ChoreographerThread()) {
+}
+
+PreviewFrameScheduler::~PreviewFrameScheduler() {
+    {
+        Mutex::Autolock l(mLock);
+        mChoreographerThread->requestExit();
+    }
+    mChoreographerThread->join();
+}
+
+status_t PreviewFrameScheduler::queuePreviewBuffer(nsecs_t timestamp, int32_t transform,
+        ANativeWindowBuffer* anwBuffer, int releaseFence) {
+    // Start choreographer thread if it's not already running.
+    if (!mChoreographerThread->isRunning()) {
+        status_t res = mChoreographerThread->start(this);
+        if (res != OK) {
+            ALOGE("%s: Failed to init choreographer thread!", __FUNCTION__);
+            return res;
+        }
+    }
+
+    {
+        Mutex::Autolock l(mLock);
+        mPendingBuffers.emplace(timestamp, transform, anwBuffer, releaseFence);
+
+        // Queue buffer to client right away if pending buffers are more than
+        // the queue depth watermark.
+        if (mPendingBuffers.size() > kQueueDepthWatermark) {
+            auto oldBuffer = mPendingBuffers.front();
+            mPendingBuffers.pop();
+
+            status_t res = queueBufferToClientLocked(oldBuffer, oldBuffer.timestamp);
+            if (res != OK) {
+                return res;
+            }
+
+            // Reset the last capture and presentation time
+            mLastCameraCaptureTime = 0;
+            mLastCameraPresentTime = 0;
+        } else {
+            ATRACE_INT(kPendingBufferTraceName, mPendingBuffers.size());
+        }
+    }
+    return OK;
+}
+
+void PreviewFrameScheduler::onNewPresentationTime(const std::vector<nsecs_t>& timeline) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    if (mPendingBuffers.size() > 0) {
+        auto nextBuffer = mPendingBuffers.front();
+        mPendingBuffers.pop();
+
+        // Find the best presentation time by finding the element in the
+        // choreographer timeline that's closest to the ideal presentation time.
+        // The ideal presentation time is the last presentation time + frame
+        // interval.
+        nsecs_t cameraInterval = nextBuffer.timestamp - mLastCameraCaptureTime;
+        nsecs_t idealPresentTime = (cameraInterval < kSpacingResetIntervalNs) ?
+                (mLastCameraPresentTime + cameraInterval) : nextBuffer.timestamp;
+        nsecs_t presentTime = *std::min_element(timeline.begin(), timeline.end(),
+                [idealPresentTime](nsecs_t p1, nsecs_t p2) {
+                        return std::abs(p1 - idealPresentTime) < std::abs(p2 - idealPresentTime);
+                });
+
+        status_t res = queueBufferToClientLocked(nextBuffer, presentTime);
+        ATRACE_INT(kPendingBufferTraceName, mPendingBuffers.size());
+
+        if (mParent.shouldLogError(res)) {
+            ALOGE("%s: Preview Stream: Error queueing buffer to native window:"
+                    " %s (%d)", __FUNCTION__, strerror(-res), res);
+        }
+
+        mLastCameraCaptureTime = nextBuffer.timestamp;
+        mLastCameraPresentTime = presentTime;
+    }
+}
+
+status_t PreviewFrameScheduler::queueBufferToClientLocked(
+        const BufferHolder& bufferHolder, nsecs_t timestamp) {
+    mParent.setTransform(bufferHolder.transform);
+
+    status_t res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
+    if (res != OK) {
+        ALOGE("%s: Preview Stream: Error setting timestamp: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    Camera3Stream::queueHDRMetadata(bufferHolder.anwBuffer.get()->handle, mConsumer,
+            mParent.getDynamicRangeProfile());
+
+    res = mConsumer->queueBuffer(mConsumer.get(), bufferHolder.anwBuffer.get(),
+            bufferHolder.releaseFence);
+    if (res != OK) {
+        close(bufferHolder.releaseFence);
+    }
+
+    return res;
+}
+
+}; // namespace camera3
+
+}; // namespace android
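
For reference, a standalone sketch (with made-up timestamps) of the presentation-time selection performed in onNewPresentationTime() above: the ideal present time is the previous present time plus the camera capture interval, and the chosen time is the choreographer timeline entry closest to that ideal. The real code additionally resets the spacing when captures are more than a second apart, which this sketch omits.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

using nsecs = int64_t;

nsecs choosePresentTime(const std::vector<nsecs>& timeline,
        nsecs lastCaptureNs, nsecs lastPresentNs, nsecs captureNs) {
    const nsecs interval = captureNs - lastCaptureNs;  // camera frame spacing
    const nsecs ideal = lastPresentNs + interval;      // keep the capture cadence
    return *std::min_element(timeline.begin(), timeline.end(),
            [ideal](nsecs a, nsecs b) {
                return std::llabs(a - ideal) < std::llabs(b - ideal);
            });
}

int main() {
    // Three vsync-aligned candidates, ~16.7 ms apart.
    const std::vector<nsecs> timeline = {100000000, 116666667, 133333333};
    const nsecs chosen = choosePresentTime(timeline,
            /*lastCaptureNs*/ 50000000, /*lastPresentNs*/ 99000000,
            /*captureNs*/ 83333333);  // captured 33.3 ms later -> ideal ~132.3 ms
    std::printf("chosen present time: %lld ns\n", static_cast<long long>(chosen));
    return 0;
}
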
diff --git a/services/camera/libcameraservice/device3/PreviewFrameScheduler.h b/services/camera/libcameraservice/device3/PreviewFrameScheduler.h
new file mode 100644
index 0000000..c0574fd
--- /dev/null
+++ b/services/camera/libcameraservice/device3/PreviewFrameScheduler.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_PREVIEWFRAMESCHEDULER_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_PREVIEWFRAMESCHEDULER_H
+
+#include <queue>
+
+#include <android/choreographer.h>
+#include <gui/Surface.h>
+#include <gui/ISurfaceComposer.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/Looper.h>
+#include <utils/Thread.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3OutputStream;
+struct ChoreographerThread;
+
+/***
+ * Preview stream scheduler for better preview display synchronization
+ *
+ * The ideal viewfinder user experience is that frames are presented to the
+ * user in the same cadence as output by the camera sensor. However, the
+ * processing latency between frames can vary due to factors such
+ * as CPU load, differences in request settings, etc. This frame processing
+ * latency results in variation in presentation of frames to the user.
+ *
+ * The PreviewFrameScheduler improves the viewfinder user experience by:
+ * 1. Cache preview buffers in the scheduler
+ * 2. For each choreographer callback, queue the oldest cached buffer with
+ *    the best matching presentation timestamp. Frame N's presentation timestamp
+ *    is the choreographer timeline timestamp closest to (Frame N-1's
+ *    presentation time + camera capture interval between frame N-1 and frame N).
+ * 3. Maintain at most 2 queue-able buffers. If the 3rd preview buffer becomes
+ *    available, queue the oldest cached buffer to the buffer queue.
+ */
+class PreviewFrameScheduler {
+  public:
+    explicit PreviewFrameScheduler(Camera3OutputStream& parent, sp<Surface> consumer);
+    virtual ~PreviewFrameScheduler();
+
+    // Queue preview buffer locally
+    status_t queuePreviewBuffer(nsecs_t timestamp, int32_t transform,
+            ANativeWindowBuffer* anwBuffer, int releaseFence);
+
+    // Callback function with a new presentation timeline from choreographer. This
+    // will trigger a locally queued buffer to be sent to the buffer queue.
+    void onNewPresentationTime(const std::vector<nsecs_t>& presentationTimeline);
+
+    // Maintain at most 2 queue-able buffers
+    static constexpr int32_t kQueueDepthWatermark = 2;
+
+  private:
+    // structure holding cached preview buffer info
+    struct BufferHolder {
+        nsecs_t timestamp;
+        int32_t transform;
+        sp<ANativeWindowBuffer> anwBuffer;
+        int releaseFence;
+
+        BufferHolder(nsecs_t t, int32_t tr, ANativeWindowBuffer* anwb, int rf) :
+                timestamp(t), transform(tr), anwBuffer(anwb), releaseFence(rf) {}
+    };
+
+    status_t queueBufferToClientLocked(const BufferHolder& bufferHolder,
+            nsecs_t presentTime);
+
+    static constexpr char kPendingBufferTraceName[] = "pending_preview_buffers";
+
+    // Camera capture interval for resetting frame spacing between preview sessions
+    static constexpr nsecs_t kSpacingResetIntervalNs = 1000000000L; // 1 second
+
+    Camera3OutputStream& mParent;
+    sp<ANativeWindow> mConsumer;
+    mutable Mutex mLock;
+
+    std::queue<BufferHolder> mPendingBuffers;
+    nsecs_t mLastCameraCaptureTime = 0;
+    nsecs_t mLastCameraPresentTime = 0;
+
+    // Choreographer related
+    sp<Looper> mLooper;
+    sp<ChoreographerThread> mChoreographerThread;
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
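
A standalone sketch of the kQueueDepthWatermark policy documented above, using plain integers as stand-ins for cached preview buffers: at most two buffers wait for a choreographer callback, and a third arrival pushes the oldest one to the consumer immediately.

#include <cstdio>
#include <queue>

int main() {
    constexpr std::size_t kQueueDepthWatermark = 2;
    std::queue<int> pending;
    for (int frame = 0; frame < 5; ++frame) {
        pending.push(frame);
        if (pending.size() > kQueueDepthWatermark) {
            std::printf("queue frame %d to the consumer immediately\n", pending.front());
            pending.pop();
        } else {
            std::printf("hold frame %d for the next choreographer callback\n", frame);
        }
    }
    return 0;
}
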
diff --git a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
index 7ec0956..27b00c9 100644
--- a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
+++ b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
@@ -20,7 +20,7 @@
 #include <algorithm>
 
 #include "device3/ZoomRatioMapper.h"
-#include "utils/SessionConfigurationUtils.h"
+#include "utils/SessionConfigurationUtilsHost.h"
 
 namespace android {
 
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
index 8e619e1..cca3f2e 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
@@ -70,6 +70,11 @@
   return binder::Status::ok();
 }
 
+::android::binder::Status H2BCameraServiceListener::onTorchStrengthLevelChanged(
+    const ::android::String16&, int32_t) {
+  return binder::Status::ok();
+}
+
 } // implementation
 } // V2_0
 } // common
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
index 7148035..7ef413f 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
@@ -54,6 +54,8 @@
 
     virtual ::android::binder::Status onTorchStatusChanged(
             int32_t status, const ::android::String16& cameraId) override;
+    virtual ::android::binder::Status onTorchStrengthLevelChanged(
+            const ::android::String16& cameraId, int32_t newStrengthLevel) override;
     virtual binder::Status onCameraAccessPrioritiesChanged() {
         // TODO: no implementation yet.
         return binder::Status::ok();
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 7d1b3cf..a812587 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -279,6 +279,9 @@
         size_t numSections = sectionNames->size();
         std::vector<std::vector<HVendorTag>> tagsBySection(numSections);
         int tagCount = desc->getTagCount();
+        if (tagCount <= 0) {
+            continue;
+        }
         std::vector<uint32_t> tags(tagCount);
         desc->getTagArray(tags.data());
         for (int i = 0; i < tagCount; i++) {
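
The tagCount guard added above protects the vector allocation that follows it. Below is a standalone sketch of the failure mode it avoids, using a hypothetical error value of -1: a negative count converted to size_t becomes enormous and the allocation throws or aborts, so the section is skipped instead.

#include <cstdio>
#include <vector>

int main() {
    int tagCount = -1;                       // hypothetical error return
    if (tagCount <= 0) {
        std::printf("skip section with no readable tags (count=%d)\n", tagCount);
        return 0;
    }
    std::vector<unsigned> tags(tagCount);    // only reached for positive counts
    return 0;
}
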
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
index 3d74f0b..ca73e4c 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
@@ -59,11 +59,17 @@
         "android.hardware.camera.device@3.5",
         "android.hardware.camera.device@3.6",
         "android.hardware.camera.device@3.7",
+        "android.hardware.camera.device@3.8",
     ],
     fuzz_config: {
         cc: [
             "android-media-fuzzing-reports@google.com",
         ],
         componentid: 155276,
+        libfuzzer_options: [
+            // Based on b/187360866
+            "timeout=770",
+        ],
+
     },
 }
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
index e46bf74..97d7bf4 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
@@ -466,6 +466,12 @@
         // No op
         return binder::Status::ok();
     }
+
+    virtual binder::Status onTorchStrengthLevelChanged(const String16& /*cameraId*/,
+            int32_t /*torchStrength*/) {
+        // No op
+        return binder::Status::ok();
+    }
 };
 
 class TestCameraDeviceCallbacks : public hardware::camera2::BnCameraDeviceCallbacks {
diff --git a/services/camera/libcameraservice/tests/Android.bp b/services/camera/libcameraservice/tests/Android.bp
new file mode 100644
index 0000000..c3f0620
--- /dev/null
+++ b/services/camera/libcameraservice/tests/Android.bp
@@ -0,0 +1,125 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    default_applicable_licenses: [
+        "frameworks_av_services_camera_libcameraservice_license",
+    ],
+}
+
+cc_test {
+    name: "cameraservice_test",
+
+    include_dirs: [
+        "system/media/private/camera/include",
+        "external/dynamic_depth/includes",
+        "external/dynamic_depth/internal",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libcutils",
+        "libcameraservice",
+        "libhidlbase",
+        "liblog",
+        "libcamera_client",
+        "libcamera_metadata",
+        "libui",
+        "libutils",
+        "libjpeg",
+        "libexif",
+        "android.hardware.camera.common@1.0",
+        "android.hardware.camera.provider@2.4",
+        "android.hardware.camera.provider@2.5",
+        "android.hardware.camera.provider@2.6",
+        "android.hardware.camera.provider@2.7",
+        "android.hardware.camera.device@1.0",
+        "android.hardware.camera.device@3.2",
+        "android.hardware.camera.device@3.4",
+        "android.hardware.camera.device@3.7",
+        "android.hardware.camera.device@3.8",
+        "android.hidl.token@1.0-utils",
+    ],
+
+    static_libs: [
+        "libgmock",
+    ],
+
+    srcs: [
+        "CameraProviderManagerTest.cpp",
+        "ClientManagerTest.cpp",
+        "DepthProcessorTest.cpp",
+        "DistortionMapperTest.cpp",
+        "ExifUtilsTest.cpp",
+        "NV12Compressor.cpp",
+        "RotateAndCropMapperTest.cpp",
+        "ZoomRatioTest.cpp",
+    ],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+
+    test_suites: ["device-tests"],
+
+}
+
+cc_test_host {
+    name: "cameraservice_test_host",
+
+    include_dirs: [
+        "frameworks/av/camera/include",
+        "frameworks/av/camera/include/camera",
+        "frameworks/native/libs/binder/include_activitymanager"
+    ],
+
+    shared_libs: [
+        "libactivity_manager_procstate_aidl-cpp",
+        "libbase",
+        "libbinder",
+        "libcamera_metadata",
+        "libdynamic_depth",
+        "libexif",
+        "libjpeg",
+        "liblog",
+        "libutils",
+    ],
+
+    static_libs: [
+        "libcamera_client_host",
+        "libcameraservice_device_independent",
+        "libgmock",
+    ],
+
+    srcs: [
+        "ClientManagerTest.cpp",
+        "DepthProcessorTest.cpp",
+        "DistortionMapperTest.cpp",
+        "ExifUtilsTest.cpp",
+        "NV12Compressor.cpp",
+        "RotateAndCropMapperTest.cpp",
+        "ZoomRatioTest.cpp",
+    ],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+
+    test_suites: ["device-tests"],
+
+}
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
deleted file mode 100644
index 0b5ad79..0000000
--- a/services/camera/libcameraservice/tests/Android.mk
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= $(call all-cpp-files-under, .)
-
-LOCAL_SHARED_LIBRARIES := \
-    libbase \
-    libcutils \
-    libcameraservice \
-    libhidlbase \
-    liblog \
-    libcamera_client \
-    libcamera_metadata \
-    libui \
-    libutils \
-    libjpeg \
-    libexif \
-    android.hardware.camera.common@1.0 \
-    android.hardware.camera.provider@2.4 \
-    android.hardware.camera.provider@2.5 \
-    android.hardware.camera.provider@2.6 \
-    android.hardware.camera.provider@2.7 \
-    android.hardware.camera.device@1.0 \
-    android.hardware.camera.device@3.2 \
-    android.hardware.camera.device@3.4 \
-    android.hardware.camera.device@3.7 \
-    android.hidl.token@1.0-utils
-
-LOCAL_STATIC_LIBRARIES := \
-    libgmock
-
-LOCAL_C_INCLUDES += \
-    system/media/private/camera/include \
-    external/dynamic_depth/includes \
-    external/dynamic_depth/internal \
-
-LOCAL_CFLAGS += -Wall -Wextra -Werror
-
-LOCAL_SANITIZE := address
-
-LOCAL_MODULE:= cameraservice_test
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/../NOTICE
-LOCAL_COMPATIBILITY_SUITE := device-tests
-LOCAL_MODULE_TAGS := tests
-
-include $(BUILD_NATIVE_TEST)
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.h b/services/camera/libcameraservice/tests/NV12Compressor.h
index ee22d5e..a959871 100644
--- a/services/camera/libcameraservice/tests/NV12Compressor.h
+++ b/services/camera/libcameraservice/tests/NV12Compressor.h
@@ -19,6 +19,7 @@
 
 #include <setjmp.h>
 #include <stdlib.h>
+#include <stdio.h>
 extern "C" {
 #include <jpeglib.h>
 #include <jerror.h>
diff --git a/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp b/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp
new file mode 100644
index 0000000..025521a
--- /dev/null
+++ b/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PreviewSchedulerTest"
+
+#include <chrono>
+#include <thread>
+#include <utility>
+
+#include <gtest/gtest.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+
+#include <gui/BufferItemConsumer.h>
+#include <gui/BufferQueue.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/Surface.h>
+
+#include "../device3/Camera3OutputStream.h"
+#include "../device3/PreviewFrameScheduler.h"
+
+using namespace android;
+using namespace android::camera3;
+
+// Consumer buffer available listener
+class SimpleListener : public BufferItemConsumer::FrameAvailableListener {
+public:
+    SimpleListener(size_t frameCount): mFrameCount(frameCount) {}
+
+    void waitForFrames() {
+        Mutex::Autolock lock(mMutex);
+        while (mFrameCount > 0) {
+            mCondition.wait(mMutex);
+        }
+    }
+
+    void onFrameAvailable(const BufferItem& /*item*/) override {
+        Mutex::Autolock lock(mMutex);
+        if (mFrameCount > 0) {
+            mFrameCount--;
+            mCondition.signal();
+        }
+    }
+
+    void reset(size_t frameCount) {
+        Mutex::Autolock lock(mMutex);
+        mFrameCount = frameCount;
+    }
+private:
+    size_t mFrameCount;
+    Mutex mMutex;
+    Condition mCondition;
+};
+
+// Test the PreviewFrameScheduler functionality of re-timing buffers
+TEST(PreviewSchedulerTest, BasicPreviewSchedulerTest) {
+    const int ID = 0;
+    const int FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    const uint32_t WIDTH = 640;
+    const uint32_t HEIGHT = 480;
+    const int32_t TRANSFORM = 0;
+    const nsecs_t T_OFFSET = 0;
+    const android_dataspace DATASPACE = HAL_DATASPACE_UNKNOWN;
+    const camera_stream_rotation_t ROTATION = CAMERA_STREAM_ROTATION_0;
+    const String8 PHY_ID;
+    const std::unordered_set<int32_t> PIX_MODES;
+    const int BUFFER_COUNT = 4;
+    const int TOTAL_BUFFER_COUNT = BUFFER_COUNT * 2;
+
+    // Create buffer queue
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    ASSERT_NE(producer, nullptr);
+    ASSERT_NE(consumer, nullptr);
+    ASSERT_EQ(NO_ERROR, consumer->setDefaultBufferSize(WIDTH, HEIGHT));
+
+    // Set up consumer
+    sp<BufferItemConsumer> bufferConsumer = new BufferItemConsumer(consumer,
+            GRALLOC_USAGE_HW_COMPOSER, BUFFER_COUNT);
+    ASSERT_NE(bufferConsumer, nullptr);
+    sp<SimpleListener> consumerListener = new SimpleListener(BUFFER_COUNT);
+    bufferConsumer->setFrameAvailableListener(consumerListener);
+
+    // Set up producer
+    sp<Surface> surface = new Surface(producer);
+    sp<StubProducerListener> listener = new StubProducerListener();
+    ASSERT_EQ(NO_ERROR, surface->connect(NATIVE_WINDOW_API_CPU, listener));
+    sp<ANativeWindow> anw(surface);
+    ASSERT_EQ(NO_ERROR, native_window_set_buffer_count(anw.get(), TOTAL_BUFFER_COUNT));
+
+    // Create Camera3OutputStream and PreviewFrameScheduler
+    sp<Camera3OutputStream> stream = new Camera3OutputStream(ID, surface, WIDTH, HEIGHT,
+            FORMAT, DATASPACE, ROTATION, T_OFFSET, PHY_ID, PIX_MODES);
+    ASSERT_NE(stream, nullptr);
+    std::unique_ptr<PreviewFrameScheduler> scheduler =
+            std::make_unique<PreviewFrameScheduler>(*stream, surface);
+    ASSERT_NE(scheduler, nullptr);
+
+    // The pair of nsecs_t: camera timestamp delta (negative means in the past) and frame interval
+    const std::pair<nsecs_t, nsecs_t> inputTimestamps[][BUFFER_COUNT] = {
+        // 30fps, 33ms interval
+        {{-100000000LL, 33333333LL}, {-66666667LL, 33333333LL},
+          {-33333333LL, 33333333LL}, {0, 0}},
+        // 30fps, variable interval
+        {{-100000000LL, 16666667LL}, {-66666667LL, 33333333LL},
+          {-33333333LL, 50000000LL}, {0, 0}},
+        // 60fps, 16.7ms interval
+        {{-50000000LL, 16666667LL}, {-33333333LL, 16666667LL},
+          {-16666667LL, 16666667LL}, {0, 0}},
+        // 60fps, variable interval
+        {{-50000000LL, 8666667LL}, {-33333333LL, 19666667LL},
+          {-16666667LL, 20666667LL}, {0, 0}},
+    };
+
+    // Go through different use cases, and check the buffer timestamp
+    size_t iterations = sizeof(inputTimestamps)/sizeof(inputTimestamps[0]);
+    for (size_t i = 0; i < iterations; i++) {
+        // Space out different test sets to reset the frame scheduler
+        nsecs_t timeBase = systemTime() - s2ns(1) * (iterations - i);
+        nsecs_t lastQueueTime = 0;
+        nsecs_t duration = 0;
+        for (size_t j = 0; j < BUFFER_COUNT; j++) {
+            ANativeWindowBuffer* buffer = nullptr;
+            int fenceFd;
+            ASSERT_EQ(NO_ERROR, anw->dequeueBuffer(anw.get(), &buffer, &fenceFd));
+
+            // Sleep to space out queuePreviewBuffer
+            nsecs_t currentTime = systemTime();
+            if (duration > 0 && duration > currentTime - lastQueueTime) {
+                std::this_thread::sleep_for(
+                        std::chrono::nanoseconds(duration + lastQueueTime - currentTime));
+            }
+            nsecs_t timestamp = timeBase + inputTimestamps[i][j].first;
+            ASSERT_EQ(NO_ERROR,
+                    scheduler->queuePreviewBuffer(timestamp, TRANSFORM, buffer, fenceFd));
+
+            lastQueueTime = systemTime();
+            duration = inputTimestamps[i][j].second;
+        }
+
+        // Collect output timestamps, making sure they are set either by the
+        // producer or by the scheduler.
+        consumerListener->waitForFrames();
+        nsecs_t outputTimestamps[BUFFER_COUNT];
+        for (size_t j = 0; j < BUFFER_COUNT; j++) {
+            BufferItem bufferItem;
+            ASSERT_EQ(NO_ERROR, bufferConsumer->acquireBuffer(&bufferItem, 0/*presentWhen*/));
+
+            outputTimestamps[j] = bufferItem.mTimestamp;
+            ALOGV("%s: [%zu][%zu]: input: %" PRId64 ", output: %" PRId64, __FUNCTION__,
+                  i, j, timeBase + inputTimestamps[i][j].first, bufferItem.mTimestamp);
+            ASSERT_GT(bufferItem.mTimestamp, inputTimestamps[i][j].first);
+
+            ASSERT_EQ(NO_ERROR, bufferConsumer->releaseBuffer(bufferItem));
+        }
+
+        // Check the output timestamp intervals are aligned with input intervals
+        const nsecs_t SHIFT_THRESHOLD = ms2ns(2);
+        for (size_t j = 0; j < BUFFER_COUNT - 1; j ++) {
+            nsecs_t interval_shift = outputTimestamps[j+1] - outputTimestamps[j] -
+                    (inputTimestamps[i][j+1].first - inputTimestamps[i][j].first);
+            ASSERT_LE(std::abs(interval_shift), SHIFT_THRESHOLD);
+        }
+
+        consumerListener->reset(BUFFER_COUNT);
+    }
+
+    // Disconnect the surface
+    ASSERT_EQ(NO_ERROR, surface->disconnect(NATIVE_WINDOW_API_CPU));
+}
diff --git a/services/camera/libcameraservice/tests/how_to_run.txt b/services/camera/libcameraservice/tests/how_to_run.txt
new file mode 100644
index 0000000..93239e3
--- /dev/null
+++ b/services/camera/libcameraservice/tests/how_to_run.txt
@@ -0,0 +1,5 @@
+adb root &&
+m cameraservice_test &&
+adb push $ANDROID_PRODUCT_OUT/data/nativetest/cameraservice_test/cameraservice_test \
+    /data/nativetest/cameraservice_test/arm64/cameraservice_test &&
+adb shell /data/nativetest/cameraservice_test/arm64/cameraservice_test
\ No newline at end of file
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
index 0198690..0cd4f5d 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.cpp
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -64,7 +64,7 @@
     ATRACE_END();
 }
 
-status_t CameraTraces::dump(int fd, const Vector<String16> &args __attribute__((unused))) {
+status_t CameraTraces::dump(int fd) {
     ALOGV("%s: fd = %d", __FUNCTION__, fd);
     Mutex::Autolock al(sImpl.tracesLock);
     List<ProcessCallStack>& pcsList = sImpl.pcsList;
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
index 13ca16d..71fa334 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.h
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -42,7 +42,7 @@
      *
      * <p>Each line is indented by DUMP_INDENT spaces.</p>
      */
-    static status_t dump(int fd, const Vector<String16>& args);
+    static status_t dump(int fd);
 
 private:
     enum {
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index a239c81..a35e6f3 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -13,71 +13,28 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include <cutils/properties.h>
 
 #include "SessionConfigurationUtils.h"
 #include "../api2/DepthCompositeStream.h"
 #include "../api2/HeicCompositeStream.h"
+#include "android/hardware/camera/metadata/3.8/types.h"
 #include "common/CameraDeviceBase.h"
 #include "../CameraService.h"
 #include "device3/Camera3Device.h"
 #include "device3/Camera3OutputStream.h"
+#include "system/graphics-base-v1.1.h"
 
 using android::camera3::OutputStreamInfo;
 using android::camera3::OutputStreamInfo;
 using android::hardware::camera2::ICameraDeviceUser;
 using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
+using android::hardware::camera::metadata::V3_8::CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap;
 
 namespace android {
 namespace camera3 {
 
-int32_t SessionConfigurationUtils::PERF_CLASS_LEVEL =
-        property_get_int32("ro.odm.build.media_performance_class", 0);
-
-bool SessionConfigurationUtils::IS_PERF_CLASS = (PERF_CLASS_LEVEL == SDK_VERSION_S);
-
-camera3::Size SessionConfigurationUtils::getMaxJpegResolution(const CameraMetadata &metadata,
-        bool ultraHighResolution) {
-    int32_t maxJpegWidth = 0, maxJpegHeight = 0;
-    const int STREAM_CONFIGURATION_SIZE = 4;
-    const int STREAM_FORMAT_OFFSET = 0;
-    const int STREAM_WIDTH_OFFSET = 1;
-    const int STREAM_HEIGHT_OFFSET = 2;
-    const int STREAM_IS_INPUT_OFFSET = 3;
-
-    int32_t scalerSizesTag = ultraHighResolution ?
-            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION :
-                    ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS;
-    camera_metadata_ro_entry_t availableStreamConfigs =
-            metadata.find(scalerSizesTag);
-    if (availableStreamConfigs.count == 0 ||
-            availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) {
-        return camera3::Size(0, 0);
-    }
-
-    // Get max jpeg size (area-wise).
-    for (size_t i= 0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
-        int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
-        int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
-        int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
-        int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
-        if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT
-                && format == HAL_PIXEL_FORMAT_BLOB &&
-                (width * height > maxJpegWidth * maxJpegHeight)) {
-            maxJpegWidth = width;
-            maxJpegHeight = height;
-        }
-    }
-
-    return camera3::Size(maxJpegWidth, maxJpegHeight);
-}
-
-size_t SessionConfigurationUtils::getUHRMaxJpegBufferSize(camera3::Size uhrMaxJpegSize,
-        camera3::Size defaultMaxJpegSize, size_t defaultMaxJpegBufferSize) {
-    return (uhrMaxJpegSize.width * uhrMaxJpegSize.height) /
-            (defaultMaxJpegSize.width * defaultMaxJpegSize.height) * defaultMaxJpegBufferSize;
-}
-
 void StreamConfiguration::getStreamConfigurations(
         const CameraMetadata &staticInfo, int configuration,
         std::unordered_map<int, std::vector<StreamConfiguration>> *scm) {
@@ -126,65 +83,57 @@
     getStreamConfigurations(staticInfo, heicKey, scm);
 }
 
-int32_t SessionConfigurationUtils::getAppropriateModeTag(int32_t defaultTag, bool maxResolution) {
-    if (!maxResolution) {
-        return defaultTag;
+namespace SessionConfigurationUtils {
+
+int32_t PERF_CLASS_LEVEL =
+        property_get_int32("ro.odm.build.media_performance_class", 0);
+
+bool IS_PERF_CLASS = (PERF_CLASS_LEVEL == SDK_VERSION_S);
+
+camera3::Size getMaxJpegResolution(const CameraMetadata &metadata,
+        bool ultraHighResolution) {
+    int32_t maxJpegWidth = 0, maxJpegHeight = 0;
+    const int STREAM_CONFIGURATION_SIZE = 4;
+    const int STREAM_FORMAT_OFFSET = 0;
+    const int STREAM_WIDTH_OFFSET = 1;
+    const int STREAM_HEIGHT_OFFSET = 2;
+    const int STREAM_IS_INPUT_OFFSET = 3;
+
+    int32_t scalerSizesTag = ultraHighResolution ?
+            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION :
+                    ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS;
+    camera_metadata_ro_entry_t availableStreamConfigs =
+            metadata.find(scalerSizesTag);
+    if (availableStreamConfigs.count == 0 ||
+            availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) {
+        return camera3::Size(0, 0);
     }
-    switch (defaultTag) {
-        case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS:
-            return ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS:
-            return ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_SCALER_AVAILABLE_STALL_DURATIONS:
-            return ANDROID_SCALER_AVAILABLE_STALL_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS:
-            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS:
-            return ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS:
-            return ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS:
-            return ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS_MAXIMUM_RESOLUTION;
-        case ANDROID_SENSOR_OPAQUE_RAW_SIZE:
-            return ANDROID_SENSOR_OPAQUE_RAW_SIZE_MAXIMUM_RESOLUTION;
-        case ANDROID_LENS_INTRINSIC_CALIBRATION:
-            return ANDROID_LENS_INTRINSIC_CALIBRATION_MAXIMUM_RESOLUTION;
-        case ANDROID_LENS_DISTORTION:
-            return ANDROID_LENS_DISTORTION_MAXIMUM_RESOLUTION;
-        default:
-            ALOGE("%s: Tag %d doesn't have a maximum resolution counterpart", __FUNCTION__,
-                    defaultTag);
-            return -1;
+
+    // Get max jpeg size (area-wise).
+    for (size_t i= 0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
+        int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
+        int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
+        int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
+        int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
+        if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT
+                && format == HAL_PIXEL_FORMAT_BLOB &&
+                (width * height > maxJpegWidth * maxJpegHeight)) {
+            maxJpegWidth = width;
+            maxJpegHeight = height;
+        }
     }
-    return -1;
+
+    return camera3::Size(maxJpegWidth, maxJpegHeight);
 }
 
-bool SessionConfigurationUtils::getArrayWidthAndHeight(const CameraMetadata *deviceInfo,
-        int32_t arrayTag, int32_t *width, int32_t *height) {
-    if (width == nullptr || height == nullptr) {
-        ALOGE("%s: width / height nullptr", __FUNCTION__);
-        return false;
-    }
-    camera_metadata_ro_entry_t entry;
-    entry = deviceInfo->find(arrayTag);
-    if (entry.count != 4) return false;
-    *width = entry.data.i32[2];
-    *height = entry.data.i32[3];
-    return true;
+size_t getUHRMaxJpegBufferSize(camera3::Size uhrMaxJpegSize,
+        camera3::Size defaultMaxJpegSize, size_t defaultMaxJpegBufferSize) {
+    return (uhrMaxJpegSize.width * uhrMaxJpegSize.height) /
+            (defaultMaxJpegSize.width * defaultMaxJpegSize.height) * defaultMaxJpegBufferSize;
 }
 
 StreamConfigurationPair
-SessionConfigurationUtils::getStreamConfigurationPair(const CameraMetadata &staticInfo) {
+getStreamConfigurationPair(const CameraMetadata &staticInfo) {
     camera3::StreamConfigurationPair streamConfigurationPair;
     camera3::StreamConfiguration::getStreamConfigurations(staticInfo, false,
             &streamConfigurationPair.mDefaultStreamConfigurationMap);
@@ -193,13 +142,13 @@
     return streamConfigurationPair;
 }
 
-int64_t SessionConfigurationUtils::euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
+int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
     int64_t d0 = x0 - x1;
     int64_t d1 = y0 - y1;
     return d0 * d0 + d1 * d1;
 }
 
-bool SessionConfigurationUtils::roundBufferDimensionNearest(int32_t width, int32_t height,
+bool roundBufferDimensionNearest(int32_t width, int32_t height,
         int32_t format, android_dataspace dataSpace,
         const CameraMetadata& info, bool maxResolution, /*out*/int32_t* outWidth,
         /*out*/int32_t* outHeight) {
@@ -260,7 +209,81 @@
     return true;
 }
 
-bool SessionConfigurationUtils::isPublicFormat(int32_t format)
+// Check if the format is 10-bit output compatible
+bool is10bitCompatibleFormat(int32_t format) {
+    switch(format) {
+        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+        case HAL_PIXEL_FORMAT_YCBCR_P010:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool isDynamicRangeProfileSupported(int dynamicRangeProfile, const CameraMetadata& staticInfo) {
+    if (dynamicRangeProfile == ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+        // Supported by default
+        return true;
+    }
+
+    camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    bool is10bitDynamicRangeSupported = false;
+    for (size_t i = 0; i < entry.count; ++i) {
+        uint8_t capability = entry.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT) {
+            is10bitDynamicRangeSupported = true;
+            break;
+        }
+    }
+
+    if (!is10bitDynamicRangeSupported) {
+        return false;
+    }
+
+    switch (dynamicRangeProfile) {
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_OEM:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_OEM_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_REF:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_8B_HDR_REF_PO:
+            entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP);
+            for (size_t i = 0; i < entry.count; i += 2) {
+                if (dynamicRangeProfile == entry.data.i32[i]) {
+                    return true;
+                }
+            }
+
+            return false;
+        default:
+            return false;
+    }
+
+    return false;
+}
+
+// Check if the dynamic range profile requires 10-bit output
+bool is10bitDynamicRangeProfile(int32_t dynamicRangeProfile) {
+    switch (dynamicRangeProfile) {
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF_PO:
+            return true;
+        default:
+            return false;
+    }
+}
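
Taken together, these helpers gate 10-bit stream creation: a non-STANDARD profile must be advertised by the device, and a 10-bit profile additionally needs a 10-bit-capable output format. A short sketch of that combined check (the helper name is illustrative; it mirrors the validation applied later in createSurfaceFromGbp()):

    // Illustrative helper only; this function is not part of the change.
    bool isProfileUsableWithFormat(int dynamicRangeProfile, int32_t format,
            const CameraMetadata& staticInfo) {
        if (!isDynamicRangeProfileSupported(dynamicRangeProfile, staticInfo)) {
            return false;  // device does not advertise this dynamic range profile
        }
        if (is10bitDynamicRangeProfile(dynamicRangeProfile) &&
                !is10bitCompatibleFormat(format)) {
            return false;  // e.g. HDR10 requested on a format that cannot carry 10-bit data
        }
        return true;
    }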
+
+bool isPublicFormat(int32_t format)
 {
     switch(format) {
         case HAL_PIXEL_FORMAT_RGBA_8888:
@@ -287,11 +310,11 @@
     }
 }
 
-binder::Status SessionConfigurationUtils::createSurfaceFromGbp(
+binder::Status createSurfaceFromGbp(
         OutputStreamInfo& streamInfo, bool isStreamInfoValid,
         sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
         const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
-        const std::vector<int32_t> &sensorPixelModesUsed){
+        const std::vector<int32_t> &sensorPixelModesUsed, int dynamicRangeProfile){
     // bufferProducer must be non-null
     if (gbp == nullptr) {
         String8 msg = String8::format("Camera %s: Surface is NULL", logicalCameraId.string());
@@ -389,6 +412,21 @@
         ALOGE("%s: %s", __FUNCTION__, msg.string());
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
+    if (!SessionConfigurationUtils::isDynamicRangeProfileSupported(dynamicRangeProfile,
+                physicalCameraMetadata)) {
+        String8 msg = String8::format("Camera %s: Dynamic range profile 0x%x not supported,"
+                " failed to create output stream", logicalCameraId.string(), dynamicRangeProfile);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+    if (SessionConfigurationUtils::is10bitDynamicRangeProfile(dynamicRangeProfile) &&
+            !SessionConfigurationUtils::is10bitCompatibleFormat(format)) {
+        String8 msg = String8::format("Camera %s: No 10-bit supported stream configurations with "
+                "format %#x defined and profile %#x, failed to create output stream",
+                logicalCameraId.string(), format, dynamicRangeProfile);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
 
     if (!isStreamInfoValid) {
         streamInfo.width = width;
@@ -397,6 +435,7 @@
         streamInfo.dataSpace = dataSpace;
         streamInfo.consumerUsage = consumerUsage;
         streamInfo.sensorPixelModesUsed = overriddenSensorPixelModes;
+        streamInfo.dynamicRangeProfile = dynamicRangeProfile;
         return binder::Status::ok();
     }
     if (width != streamInfo.width) {
@@ -437,35 +476,38 @@
     return binder::Status::ok();
 }
 
-void SessionConfigurationUtils::mapStreamInfo(const OutputStreamInfo &streamInfo,
+void mapStreamInfo(const OutputStreamInfo &streamInfo,
             camera3::camera_stream_rotation_t rotation, String8 physicalId,
-            int32_t groupId, hardware::camera::device::V3_7::Stream *stream /*out*/) {
+            int32_t groupId, hardware::camera::device::V3_8::Stream *stream /*out*/) {
     if (stream == nullptr) {
         return;
     }
 
-    stream->v3_4.v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
-    stream->v3_4.v3_2.width = streamInfo.width;
-    stream->v3_4.v3_2.height = streamInfo.height;
-    stream->v3_4.v3_2.format = Camera3Device::mapToPixelFormat(streamInfo.format);
+    stream->v3_7.v3_4.v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
+    stream->v3_7.v3_4.v3_2.width = streamInfo.width;
+    stream->v3_7.v3_4.v3_2.height = streamInfo.height;
+    stream->v3_7.v3_4.v3_2.format = Camera3Device::mapToPixelFormat(streamInfo.format);
     auto u = streamInfo.consumerUsage;
     camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
-    stream->v3_4.v3_2.usage = Camera3Device::mapToConsumerUsage(u);
-    stream->v3_4.v3_2.dataSpace = Camera3Device::mapToHidlDataspace(streamInfo.dataSpace);
-    stream->v3_4.v3_2.rotation = Camera3Device::mapToStreamRotation(rotation);
-    stream->v3_4.v3_2.id = -1; // Invalid stream id
-    stream->v3_4.physicalCameraId = std::string(physicalId.string());
-    stream->v3_4.bufferSize = 0;
-    stream->groupId = groupId;
-    stream->sensorPixelModesUsed.resize(streamInfo.sensorPixelModesUsed.size());
+    stream->v3_7.v3_4.v3_2.usage = Camera3Device::mapToConsumerUsage(u);
+    stream->v3_7.v3_4.v3_2.dataSpace = Camera3Device::mapToHidlDataspace(streamInfo.dataSpace);
+    stream->v3_7.v3_4.v3_2.rotation = Camera3Device::mapToStreamRotation(rotation);
+    stream->v3_7.v3_4.v3_2.id = -1; // Invalid stream id
+    stream->v3_7.v3_4.physicalCameraId = std::string(physicalId.string());
+    stream->v3_7.v3_4.bufferSize = 0;
+    stream->v3_7.groupId = groupId;
+    stream->v3_7.sensorPixelModesUsed.resize(streamInfo.sensorPixelModesUsed.size());
     size_t idx = 0;
     for (auto mode : streamInfo.sensorPixelModesUsed) {
-        stream->sensorPixelModesUsed[idx++] =
+        stream->v3_7.sensorPixelModesUsed[idx++] =
                 static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
     }
+    stream->dynamicRangeProfile =
+        static_cast<CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap> (
+                streamInfo.dynamicRangeProfile);
 }
 
-binder::Status SessionConfigurationUtils::checkPhysicalCameraId(
+binder::Status checkPhysicalCameraId(
         const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
         const String8 &logicalCameraId) {
     if (physicalCameraId.size() == 0) {
@@ -481,7 +523,7 @@
     return binder::Status::ok();
 }
 
-binder::Status SessionConfigurationUtils::checkSurfaceType(size_t numBufferProducers,
+binder::Status checkSurfaceType(size_t numBufferProducers,
         bool deferredConsumer, int surfaceType)  {
     if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
         ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
@@ -503,7 +545,7 @@
     return binder::Status::ok();
 }
 
-binder::Status SessionConfigurationUtils::checkOperatingMode(int operatingMode,
+binder::Status checkOperatingMode(int operatingMode,
         const CameraMetadata &staticInfo, const String8 &cameraId) {
     if (operatingMode < 0) {
         String8 msg = String8::format(
@@ -538,11 +580,11 @@
 }
 
 binder::Status
-SessionConfigurationUtils::convertToHALStreamCombination(
+convertToHALStreamCombination(
         const SessionConfiguration& sessionConfiguration,
         const String8 &logicalCameraId, const CameraMetadata &deviceInfo,
         metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
-        hardware::camera::device::V3_7::StreamConfiguration &streamConfiguration,
+        hardware::camera::device::V3_8::StreamConfiguration &streamConfiguration,
         bool overrideForPerfClass, bool *earlyExit) {
 
     auto operatingMode = sessionConfiguration.getOperatingMode();
@@ -583,7 +625,7 @@
         defaultSensorPixelModes[0] =
                 static_cast<CameraMetadataEnumAndroidSensorPixelMode>(
                         ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
-        streamConfiguration.streams[streamIdx++] = {{{/*streamId*/0,
+        streamConfiguration.streams[streamIdx++].v3_7 = {{{/*streamId*/0,
                 hardware::camera::device::V3_2::StreamType::INPUT,
                 static_cast<uint32_t> (sessionConfiguration.getInputWidth()),
                 static_cast<uint32_t> (sessionConfiguration.getInputHeight()),
@@ -601,6 +643,7 @@
         bool deferredConsumer = it.isDeferred();
         String8 physicalCameraId = String8(it.getPhysicalCameraId());
 
+        int dynamicRangeProfile = it.getDynamicRangeProfile();
         std::vector<int32_t> sensorPixelModesUsed = it.getSensorPixelModesUsed();
         const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId,
                 overrideForPerfClass);
@@ -632,6 +675,7 @@
             if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
                 streamInfo.consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
             }
+            streamInfo.dynamicRangeProfile = it.getDynamicRangeProfile();
             if (checkAndOverrideSensorPixelModesUsed(sensorPixelModesUsed,
                     streamInfo.format, streamInfo.width,
                     streamInfo.height, metadataChosen, false /*flexibleConsumer*/,
@@ -653,7 +697,7 @@
         for (auto& bufferProducer : bufferProducers) {
             sp<Surface> surface;
             res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
-                    logicalCameraId, metadataChosen, sensorPixelModesUsed);
+                    logicalCameraId, metadataChosen, sensorPixelModesUsed, dynamicRangeProfile);
 
             if (!res.isOk())
                 return res;
@@ -729,7 +773,7 @@
     return std::unordered_set<int32_t>(sensorPixelModesUsed.begin(), sensorPixelModesUsed.end());
 }
 
-status_t SessionConfigurationUtils::checkAndOverrideSensorPixelModesUsed(
+status_t checkAndOverrideSensorPixelModesUsed(
         const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
         const CameraMetadata &staticInfo, bool flexibleConsumer,
         std::unordered_set<int32_t> *overriddenSensorPixelModesUsed) {
@@ -795,21 +839,26 @@
     return OK;
 }
 
-bool SessionConfigurationUtils::isUltraHighResolutionSensor(const CameraMetadata &deviceInfo) {
-    camera_metadata_ro_entry_t entryCap;
-    entryCap = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
-    // Go through the capabilities and check if it has
-    // ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR
-    for (size_t i = 0; i < entryCap.count; ++i) {
-        uint8_t capability = entryCap.data.u8[i];
-        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR) {
-            return true;
+bool convertHALStreamCombinationFromV38ToV37(
+        hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37,
+        const hardware::camera::device::V3_8::StreamConfiguration &streamConfigV38) {
+    streamConfigV37.streams.resize(streamConfigV38.streams.size());
+    for (size_t i = 0; i < streamConfigV38.streams.size(); i++) {
+        if (static_cast<int32_t>(streamConfigV38.streams[i].dynamicRangeProfile) !=
+                ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+            // ICameraDevice versions older than 3.8 don't support 10-bit dynamic
+            // range profiles.
+            return false;
         }
+        streamConfigV37.streams[i] = streamConfigV38.streams[i].v3_7;
     }
-    return false;
+    streamConfigV37.operationMode = streamConfigV38.operationMode;
+    streamConfigV37.sessionParams = streamConfigV38.sessionParams;
+
+    return true;
 }
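
As a usage sketch (the wrapper name is illustrative), a caller targeting a pre-3.8 ICameraDeviceSession would first attempt this downgrade and treat failure as an unsupported combination rather than silently dropping the 10-bit profiles:

    // Illustration only: downgrade a V3_8 configuration for an older HAL session.
    static bool downgradeForLegacyHal(
            const hardware::camera::device::V3_8::StreamConfiguration& configV38,
            hardware::camera::device::V3_7::StreamConfiguration* outConfigV37) {
        // Fails if any stream carries a non-STANDARD (10-bit) dynamic range profile,
        // which ICameraDevice versions older than 3.8 cannot express.
        return convertHALStreamCombinationFromV38ToV37(*outConfigV37, configV38);
    }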
 
-bool SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
+bool convertHALStreamCombinationFromV37ToV34(
         hardware::camera::device::V3_4::StreamConfiguration &streamConfigV34,
         const hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37) {
     if (streamConfigV37.multiResolutionInputImage) {
@@ -832,7 +881,7 @@
     return true;
 }
 
-bool SessionConfigurationUtils::targetPerfClassPrimaryCamera(
+bool targetPerfClassPrimaryCamera(
         const std::set<std::string>& perfClassPrimaryCameraIds, const std::string& cameraId,
         int targetSdkVersion) {
     bool isPerfClassPrimaryCamera =
@@ -840,5 +889,6 @@
     return targetSdkVersion >= SDK_VERSION_S && isPerfClassPrimaryCamera;
 }
 
+} // namespace SessionConfigurationUtils
 } // namespace camera3
 } // namespace android
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 192e241..9a5dc2c 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -21,15 +21,18 @@
 #include <camera/camera2/OutputConfiguration.h>
 #include <camera/camera2/SessionConfiguration.h>
 #include <camera/camera2/SubmitInfo.h>
-#include <android/hardware/camera/device/3.7/types.h>
+#include <android/hardware/camera/device/3.8/types.h>
 #include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
 #include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
+#include <android/hardware/camera/device/3.8/ICameraDeviceSession.h>
 
 #include <device3/Camera3StreamInterface.h>
 
 #include <set>
 #include <stdint.h>
 
+#include "SessionConfigurationUtilsHost.h"
+
 // Convenience methods for constructing binder::Status objects for error returns
 
 #define STATUS_ERROR(errorCode, errorString) \
@@ -69,96 +72,105 @@
             mMaximumResolutionStreamConfigurationMap;
 };
 
-class SessionConfigurationUtils {
-public:
-    static camera3::Size getMaxJpegResolution(const CameraMetadata &metadata,
-            bool ultraHighResolution);
+namespace SessionConfigurationUtils {
 
-    static size_t getUHRMaxJpegBufferSize(camera3::Size uhrMaxJpegSize,
-            camera3::Size defaultMaxJpegSize, size_t defaultMaxJpegBufferSize);
+camera3::Size getMaxJpegResolution(const CameraMetadata &metadata,
+        bool ultraHighResolution);
 
-    static int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
+size_t getUHRMaxJpegBufferSize(camera3::Size uhrMaxJpegSize,
+        camera3::Size defaultMaxJpegSize, size_t defaultMaxJpegBufferSize);
 
-    // Find the closest dimensions for a given format in available stream configurations with
-    // a width <= ROUNDING_WIDTH_CAP
-    static bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
-            android_dataspace dataSpace, const CameraMetadata& info, bool maxResolution,
-            /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
+int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
 
-    static bool getArrayWidthAndHeight(const CameraMetadata *deviceInfo, int32_t arrayTag,
-            int32_t *width, int32_t *height);
+// Find the closest dimensions for a given format in available stream configurations with
+// a width <= ROUNDING_WIDTH_CAP
+bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
+        android_dataspace dataSpace, const CameraMetadata& info, bool maxResolution,
+        /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
 
-    //check if format is not custom format
-    static bool isPublicFormat(int32_t format);
+// Check that the format is a public (i.e. non-custom) format
+bool isPublicFormat(int32_t format);
 
-    // Create a Surface from an IGraphicBufferProducer. Returns error if
-    // IGraphicBufferProducer's property doesn't match with streamInfo
-    static binder::Status createSurfaceFromGbp(
-        camera3::OutputStreamInfo& streamInfo, bool isStreamInfoValid,
-        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
-        const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
-        const std::vector<int32_t> &sensorPixelModesUsed);
+// Create a Surface from an IGraphicBufferProducer. Returns an error if the
+// IGraphicBufferProducer's properties don't match with streamInfo
+binder::Status createSurfaceFromGbp(
+        camera3::OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+        const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
+        const std::vector<int32_t> &sensorPixelModesUsed, int dynamicRangeProfile);
+
+void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
+        camera3::camera_stream_rotation_t rotation, String8 physicalId, int32_t groupId,
+        hardware::camera::device::V3_8::Stream *stream /*out*/);
 
-    static void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
-            camera3::camera_stream_rotation_t rotation, String8 physicalId, int32_t groupId,
-            hardware::camera::device::V3_7::Stream *stream /*out*/);
+// Check if the format is 10-bit output compatible
+bool is10bitCompatibleFormat(int32_t format);
 
-    // Check that the physicalCameraId passed in is spported by the camera
-    // device.
-    static binder::Status checkPhysicalCameraId(
-        const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
-        const String8 &logicalCameraId);
+// check if the dynamic range requires 10-bit output
+bool is10bitDynamicRangeProfile(int32_t dynamicRangeProfile);
 
-    static binder::Status checkSurfaceType(size_t numBufferProducers,
-        bool deferredConsumer, int surfaceType);
+// Check if the device supports a given dynamicRangeProfile
+bool isDynamicRangeProfileSupported(int dynamicRangeProfile, const CameraMetadata& staticMeta);
 
-    static binder::Status checkOperatingMode(int operatingMode,
-        const CameraMetadata &staticInfo, const String8 &cameraId);
+// Check that the physicalCameraId passed in is supported by the camera
+// device.
+binder::Status checkPhysicalCameraId(
+        const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
+        const String8 &logicalCameraId);
 
-    // utility function to convert AIDL SessionConfiguration to HIDL
-    // streamConfiguration. Also checks for validity of SessionConfiguration and
-    // returns a non-ok binder::Status if the passed in session configuration
-    // isn't valid.
-    static binder::Status
-    convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
-            const String8 &cameraId, const CameraMetadata &deviceInfo,
-            metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
-            hardware::camera::device::V3_7::StreamConfiguration &streamConfiguration,
-            bool overrideForPerfClass, bool *earlyExit);
+binder::Status checkSurfaceType(size_t numBufferProducers,
+        bool deferredConsumer, int surfaceType);
 
-    // Utility function to convert a V3_7::StreamConfiguration to
-    // V3_4::StreamConfiguration. Return false if the original V3_7 configuration cannot
-    // be used by older version HAL.
-    static bool convertHALStreamCombinationFromV37ToV34(
-            hardware::camera::device::V3_4::StreamConfiguration &streamConfigV34,
-            const hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37);
+binder::Status checkOperatingMode(int operatingMode,
+        const CameraMetadata &staticInfo, const String8 &cameraId);
 
-    static StreamConfigurationPair getStreamConfigurationPair(const CameraMetadata &metadata);
+// Utility function to convert an AIDL SessionConfiguration to a HIDL
+// StreamConfiguration. Also checks the validity of the SessionConfiguration and
+// returns a non-ok binder::Status if the passed-in session configuration
+// isn't valid.
+binder::Status
+convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
+        const String8 &cameraId, const CameraMetadata &deviceInfo,
+        metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
+        hardware::camera::device::V3_8::StreamConfiguration &streamConfiguration,
+        bool overrideForPerfClass, bool *earlyExit);
 
-    static status_t checkAndOverrideSensorPixelModesUsed(
-            const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
-            const CameraMetadata &staticInfo, bool flexibleConsumer,
-            std::unordered_set<int32_t> *overriddenSensorPixelModesUsed);
+// Utility function to convert a V3_8::StreamConfiguration to
+// V3_7::StreamConfiguration. Return false if the original V3_8 configuration cannot
+// be used by older version HAL.
+bool convertHALStreamCombinationFromV38ToV37(
+        hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37,
+        const hardware::camera::device::V3_8::StreamConfiguration &streamConfigV38);
 
-    static bool isUltraHighResolutionSensor(const CameraMetadata &deviceInfo);
+// Utility function to convert a V3_7::StreamConfiguration to
+// V3_4::StreamConfiguration. Return false if the original V3_7 configuration cannot
+// be used by older version HAL.
+bool convertHALStreamCombinationFromV37ToV34(
+        hardware::camera::device::V3_4::StreamConfiguration &streamConfigV34,
+        const hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37);
 
-    static int32_t getAppropriateModeTag(int32_t defaultTag, bool maxResolution = false);
+StreamConfigurationPair getStreamConfigurationPair(const CameraMetadata &metadata);
 
-    static bool targetPerfClassPrimaryCamera(
-            const std::set<std::string>& perfClassPrimaryCameraIds, const std::string& cameraId,
-            int32_t targetSdkVersion);
+status_t checkAndOverrideSensorPixelModesUsed(
+        const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
+        const CameraMetadata &staticInfo, bool flexibleConsumer,
+        std::unordered_set<int32_t> *overriddenSensorPixelModesUsed);
 
-    static const int32_t MAX_SURFACES_PER_STREAM = 4;
+bool targetPerfClassPrimaryCamera(
+        const std::set<std::string>& perfClassPrimaryCameraIds, const std::string& cameraId,
+        int32_t targetSdkVersion);
 
-    static const int32_t ROUNDING_WIDTH_CAP = 1920;
+constexpr int32_t MAX_SURFACES_PER_STREAM = 4;
 
-    static const int32_t SDK_VERSION_S = 31;
-    static int32_t PERF_CLASS_LEVEL;
-    static bool IS_PERF_CLASS;
-    static const int32_t PERF_CLASS_JPEG_THRESH_W = 1920;
-    static const int32_t PERF_CLASS_JPEG_THRESH_H = 1080;
-};
+constexpr int32_t ROUNDING_WIDTH_CAP = 1920;
 
+constexpr int32_t SDK_VERSION_S = 31;
+extern int32_t PERF_CLASS_LEVEL;
+extern bool IS_PERF_CLASS;
+constexpr int32_t PERF_CLASS_JPEG_THRESH_W = 1920;
+constexpr int32_t PERF_CLASS_JPEG_THRESH_H = 1080;
+
+} // SessionConfigurationUtils
 } // camera3
 } // android
+
 #endif
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
new file mode 100644
index 0000000..1efdc60
--- /dev/null
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SessionConfigurationUtilsHost.h"
+
+namespace android {
+namespace camera3 {
+namespace SessionConfigurationUtils {
+
+int32_t getAppropriateModeTag(int32_t defaultTag, bool maxResolution) {
+    if (!maxResolution) {
+        return defaultTag;
+    }
+    switch (defaultTag) {
+        case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS:
+            return ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS:
+            return ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_SCALER_AVAILABLE_STALL_DURATIONS:
+            return ANDROID_SCALER_AVAILABLE_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS:
+            return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS:
+            return ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS:
+            return ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS:
+            return ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+        case ANDROID_SENSOR_OPAQUE_RAW_SIZE:
+            return ANDROID_SENSOR_OPAQUE_RAW_SIZE_MAXIMUM_RESOLUTION;
+        case ANDROID_LENS_INTRINSIC_CALIBRATION:
+            return ANDROID_LENS_INTRINSIC_CALIBRATION_MAXIMUM_RESOLUTION;
+        case ANDROID_LENS_DISTORTION:
+            return ANDROID_LENS_DISTORTION_MAXIMUM_RESOLUTION;
+        default:
+            ALOGE("%s: Tag %d doesn't have a maximum resolution counterpart", __FUNCTION__,
+                    defaultTag);
+            return -1;
+    }
+    return -1;
+}
+
+bool isUltraHighResolutionSensor(const CameraMetadata &deviceInfo) {
+    camera_metadata_ro_entry_t entryCap;
+    entryCap = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    // Go through the capabilities and check if it has
+    // ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR
+    for (size_t i = 0; i < entryCap.count; ++i) {
+        uint8_t capability = entryCap.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool getArrayWidthAndHeight(const CameraMetadata *deviceInfo,
+        int32_t arrayTag, int32_t *width, int32_t *height) {
+    if (width == nullptr || height == nullptr) {
+        ALOGE("%s: width / height nullptr", __FUNCTION__);
+        return false;
+    }
+    camera_metadata_ro_entry_t entry;
+    entry = deviceInfo->find(arrayTag);
+    if (entry.count != 4) return false;
+    *width = entry.data.i32[2];
+    *height = entry.data.i32[3];
+    return true;
+}
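
A short sketch of how these host-buildable helpers compose (the wrapper name and metadata object are placeholders): pick the MAXIMUM_RESOLUTION variant of a tag only when the ultra-high-resolution capability is present.

    // Illustration only: look up the stream configurations appropriate for the sensor.
    static camera_metadata_ro_entry_t findScalerStreamConfigs(const CameraMetadata& deviceInfo) {
        const bool maxResolution = isUltraHighResolutionSensor(deviceInfo);
        const int32_t tag = getAppropriateModeTag(
                ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, maxResolution);
        // On ultra-high-resolution sensors this resolves to
        // ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION.
        return deviceInfo.find(static_cast<uint32_t>(tag));
    }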
+
+} // namespace SessionConfigurationUtils
+} // namespace camera3
+} // namespace android
\ No newline at end of file
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.h b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.h
new file mode 100644
index 0000000..45b1e91
--- /dev/null
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_SERVERS_CAMERA_SESSION_CONFIGURATION_UTILS_HOST_H
+#define ANDROID_SERVERS_CAMERA_SESSION_CONFIGURATION_UTILS_HOST_H
+
+#include "camera/CameraMetadata.h"
+
+namespace android {
+namespace camera3 {
+namespace SessionConfigurationUtils {
+
+bool isUltraHighResolutionSensor(const CameraMetadata &deviceInfo);
+
+int32_t getAppropriateModeTag(int32_t defaultTag, bool maxResolution = false);
+
+bool getArrayWidthAndHeight(const CameraMetadata *deviceInfo, int32_t arrayTag,
+        int32_t *width, int32_t *height);
+
+} // SessionConfigurationUtils
+} // camera3
+} // android
+
+#endif
\ No newline at end of file
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index 262f962..461f5e9 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -24,6 +24,7 @@
 #include <utils/Log.h>
 #include <camera/VendorTagDescriptor.h>
 #include <camera_metadata_hidden.h>
+#include <device3/Camera3Stream.h>
 
 namespace android {
 
@@ -112,11 +113,15 @@
     mLastMonitoredResultValues.clear();
     mLastMonitoredPhysicalRequestKeys.clear();
     mLastMonitoredPhysicalResultKeys.clear();
+    mLastStreamIds.clear();
+    mLastInputStreamId = -1;
 }
 
 void TagMonitor::monitorMetadata(eventSource source, int64_t frameNumber, nsecs_t timestamp,
         const CameraMetadata& metadata,
-        const std::unordered_map<std::string, CameraMetadata>& physicalMetadata) {
+        const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+        const camera3::camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+        int32_t inputStreamId) {
     if (!mMonitoringEnabled) return;
 
     std::lock_guard<std::mutex> lock(mMonitorMutex);
@@ -124,19 +129,27 @@
     if (timestamp == 0) {
         timestamp = systemTime(SYSTEM_TIME_BOOTTIME);
     }
-
+    std::unordered_set<int32_t> outputStreamIds;
+    for (size_t i = 0; i < numOutputBuffers; i++) {
+        const camera3::camera_stream_buffer_t *src = outputBuffers + i;
+        int32_t streamId = camera3::Camera3Stream::cast(src->stream)->getId();
+        outputStreamIds.emplace(streamId);
+    }
     std::string emptyId;
     for (auto tag : mMonitoredTagList) {
-        monitorSingleMetadata(source, frameNumber, timestamp, emptyId, tag, metadata);
+        monitorSingleMetadata(source, frameNumber, timestamp, emptyId, tag, metadata,
+                outputStreamIds, inputStreamId);
 
         for (auto& m : physicalMetadata) {
-            monitorSingleMetadata(source, frameNumber, timestamp, m.first, tag, m.second);
+            monitorSingleMetadata(source, frameNumber, timestamp, m.first, tag, m.second,
+                    outputStreamIds, inputStreamId);
         }
     }
 }
 
 void TagMonitor::monitorSingleMetadata(eventSource source, int64_t frameNumber, nsecs_t timestamp,
-        const std::string& cameraId, uint32_t tag, const CameraMetadata& metadata) {
+        const std::string& cameraId, uint32_t tag, const CameraMetadata& metadata,
+        const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId) {
 
     CameraMetadata &lastValues = (source == REQUEST) ?
             (cameraId.empty() ? mLastMonitoredRequestValues :
@@ -177,13 +190,22 @@
             // No last entry, so always consider to be different
             isDifferent = true;
         }
-
+        // Also monitor when the stream ids change, this helps visually see what
+        // monitored metadata values are for capture requests with different
+        // stream ids.
+        if (source == REQUEST &&
+                (inputStreamId != mLastInputStreamId || outputStreamIds != mLastStreamIds)) {
+            mLastInputStreamId = inputStreamId;
+            mLastStreamIds = outputStreamIds;
+            isDifferent = true;
+        }
         if (isDifferent) {
             ALOGV("%s: Tag %s changed", __FUNCTION__,
                   get_local_camera_metadata_tag_name_vendor_id(
                           tag, mVendorTagId));
             lastValues.update(entry);
-            mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId);
+            mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId,
+                    outputStreamIds, inputStreamId);
         }
     } else if (lastEntry.count > 0) {
         // Value has been removed
@@ -195,7 +217,10 @@
         entry.type = get_local_camera_metadata_tag_type_vendor_id(tag,
                 mVendorTagId);
         entry.count = 0;
-        mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId);
+        mLastInputStreamId = inputStreamId;
+        mLastStreamIds = outputStreamIds;
+        mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId, outputStreamIds,
+                inputStreamId);
     }
 }
 
@@ -214,37 +239,59 @@
     } else {
         dprintf(fd, "     Tag monitoring disabled (enable with -m <name1,..,nameN>)\n");
     }
-    if (mMonitoringEvents.size() > 0) {
-        dprintf(fd, "     Monitored tag event log:\n");
-        for (const auto& event : mMonitoringEvents) {
-            int indentation = (event.source == REQUEST) ? 15 : 30;
-            dprintf(fd, "        f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
-                    event.frameNumber, event.timestamp,
-                    2, event.cameraId.c_str(),
-                    indentation,
-                    event.source == REQUEST ? "REQ:" : "RES:",
-                    get_local_camera_metadata_section_name_vendor_id(event.tag,
-                            mVendorTagId),
-                    get_local_camera_metadata_tag_name_vendor_id(event.tag,
-                            mVendorTagId));
-            if (event.newData.size() == 0) {
-                dprintf(fd, " (Removed)\n");
-            } else {
-                printData(fd, event.newData.data(), event.tag,
-                        event.type, event.newData.size() / camera_metadata_type_size[event.type],
-                        indentation + 18);
-            }
-        }
-    }
 
+    if (mMonitoringEvents.size() == 0) { return; }
+
+    dprintf(fd, "     Monitored tag event log:\n");
+
+    std::vector<std::string> eventStrs;
+    dumpMonitoredTagEventsToVectorLocked(eventStrs);
+    for (const std::string &eventStr : eventStrs) {
+        dprintf(fd, "        %s", eventStr.c_str());
+    }
 }
 
-// TODO: Consolidate with printData from camera_metadata.h
+void TagMonitor::getLatestMonitoredTagEvents(std::vector<std::string> &out) {
+    std::lock_guard<std::mutex> lock(mMonitorMutex);
+    dumpMonitoredTagEventsToVectorLocked(out);
+}
+
+void TagMonitor::dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &vec) {
+    if (mMonitoringEvents.size() == 0) { return; }
+
+    for (const auto& event : mMonitoringEvents) {
+        int indentation = (event.source == REQUEST) ? 15 : 30;
+        String8 eventString = String8::format("f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
+                event.frameNumber, event.timestamp,
+                2, event.cameraId.c_str(),
+                indentation,
+                event.source == REQUEST ? "REQ:" : "RES:",
+                get_local_camera_metadata_section_name_vendor_id(event.tag, mVendorTagId),
+                get_local_camera_metadata_tag_name_vendor_id(event.tag, mVendorTagId));
+        if (event.newData.size() == 0) {
+            eventString += " (Removed)";
+        } else {
+            eventString += getEventDataString(event.newData.data(),
+                                    event.tag,
+                                    event.type,
+                                    event.newData.size() / camera_metadata_type_size[event.type],
+                                    indentation + 18,
+                                    event.outputStreamIds,
+                                    event.inputStreamId);
+        }
+        vec.emplace_back(eventString.string());
+    }
+}
 
 #define CAMERA_METADATA_ENUM_STRING_MAX_SIZE 29
 
-void TagMonitor::printData(int fd, const uint8_t *data_ptr, uint32_t tag,
-        int type, int count, int indentation) {
+String8 TagMonitor::getEventDataString(const uint8_t* data_ptr,
+                                    uint32_t tag,
+                                    int type,
+                                    int count,
+                                    int indentation,
+                                    const std::unordered_set<int32_t>& outputStreamIds,
+                                    int32_t inputStreamId) {
     static int values_per_line[NUM_TYPES] = {
         [TYPE_BYTE]     = 16,
         [TYPE_INT32]    = 8,
@@ -253,6 +300,7 @@
         [TYPE_DOUBLE]   = 4,
         [TYPE_RATIONAL] = 4,
     };
+
     size_t type_size = camera_metadata_type_size[type];
     char value_string_tmp[CAMERA_METADATA_ENUM_STRING_MAX_SIZE];
     uint32_t value;
@@ -260,10 +308,11 @@
     int lines = count / values_per_line[type];
     if (count % values_per_line[type] != 0) lines++;
 
+    String8 returnStr = String8();
     int index = 0;
     int j, k;
     for (j = 0; j < lines; j++) {
-        dprintf(fd, "%*s[", (j != 0) ? indentation + 4 : 0, "");
+        returnStr.appendFormat("%*s[", (j != 0) ? indentation + 4 : 0, "");
         for (k = 0;
              k < values_per_line[type] && count > 0;
              k++, count--, index += type_size) {
@@ -276,10 +325,9 @@
                                                      value_string_tmp,
                                                      sizeof(value_string_tmp))
                         == OK) {
-                        dprintf(fd, "%s ", value_string_tmp);
+                        returnStr.appendFormat("%s ", value_string_tmp);
                     } else {
-                        dprintf(fd, "%hhu ",
-                                *(data_ptr + index));
+                        returnStr.appendFormat("%hhu ", *(data_ptr + index));
                     }
                     break;
                 case TYPE_INT32:
@@ -290,49 +338,57 @@
                                                      value_string_tmp,
                                                      sizeof(value_string_tmp))
                         == OK) {
-                        dprintf(fd, "%s ", value_string_tmp);
+                        returnStr.appendFormat("%s ", value_string_tmp);
                     } else {
-                        dprintf(fd, "%" PRId32 " ",
-                                *(int32_t*)(data_ptr + index));
+                        returnStr.appendFormat("%" PRId32 " ", *(int32_t*)(data_ptr + index));
                     }
                     break;
                 case TYPE_FLOAT:
-                    dprintf(fd, "%0.8f ",
-                            *(float*)(data_ptr + index));
+                    returnStr.appendFormat("%0.8f ", *(float*)(data_ptr + index));
                     break;
                 case TYPE_INT64:
-                    dprintf(fd, "%" PRId64 " ",
-                            *(int64_t*)(data_ptr + index));
+                    returnStr.appendFormat("%" PRId64 " ", *(int64_t*)(data_ptr + index));
                     break;
                 case TYPE_DOUBLE:
-                    dprintf(fd, "%0.8f ",
-                            *(double*)(data_ptr + index));
+                    returnStr.appendFormat("%0.8f ", *(double*)(data_ptr + index));
                     break;
                 case TYPE_RATIONAL: {
                     int32_t numerator = *(int32_t*)(data_ptr + index);
                     int32_t denominator = *(int32_t*)(data_ptr + index + 4);
-                    dprintf(fd, "(%d / %d) ",
-                            numerator, denominator);
+                    returnStr.appendFormat("(%d / %d) ", numerator, denominator);
                     break;
                 }
                 default:
-                    dprintf(fd, "??? ");
+                    returnStr += "??? ";
             }
         }
-        dprintf(fd, "]\n");
+        returnStr += "] ";
+        if (!outputStreamIds.empty()) {
+            returnStr += "output stream ids: ";
+            for (const auto &id : outputStreamIds) {
+                returnStr.appendFormat(" %d ", id);
+            }
+        }
+        if (inputStreamId != -1) {
+            returnStr.appendFormat("input stream id: %d", inputStreamId);
+        }
+        returnStr += "\n";
     }
+    return returnStr;
 }
 
 template<typename T>
 TagMonitor::MonitorEvent::MonitorEvent(eventSource src, uint32_t frameNumber, nsecs_t timestamp,
-        const T &value, const std::string& cameraId) :
+        const T &value, const std::string& cameraId,
+        const std::unordered_set<int32_t> &outputStreamIds,
+        int32_t inputStreamId) :
         source(src),
         frameNumber(frameNumber),
         timestamp(timestamp),
         tag(value.tag),
         type(value.type),
         newData(value.data.u8, value.data.u8 + camera_metadata_type_size[value.type] * value.count),
-        cameraId(cameraId) {
+        cameraId(cameraId), outputStreamIds(outputStreamIds), inputStreamId(inputStreamId) {
 }
 
 TagMonitor::MonitorEvent::~MonitorEvent() {
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index 413f502..088d6fe 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -30,6 +30,7 @@
 #include <system/camera_metadata.h>
 #include <system/camera_vendor_tags.h>
 #include <camera/CameraMetadata.h>
+#include <device3/InFlightRequest.h>
 
 namespace android {
 
@@ -66,19 +67,35 @@
     // Scan through the metadata and update the monitoring information
     void monitorMetadata(eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const CameraMetadata& metadata,
-            const std::unordered_map<std::string, CameraMetadata>& physicalMetadata);
+            const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+            const camera3::camera_stream_buffer_t *outputBuffers = nullptr,
+            uint32_t numOutputBuffers = 0, int32_t inputStreamId = -1);
 
     // Dump current event log to the provided fd
     void dumpMonitoredMetadata(int fd);
 
-  private:
+    // Dumps the latest monitored tag events to the passed vector.
+    // NOTE: The events are appended to the vector in reverse chronological order
+    // (i.e. most recent first).
+    void getLatestMonitoredTagEvents(std::vector<std::string> &out);
 
-    static void printData(int fd, const uint8_t *data_ptr, uint32_t tag,
-            int type, int count, int indentation);
+  private:
+    // Dumps monitored tag events to the passed vector without acquiring
+    // mMonitorMutex. mMonitorMutex must be acquired before calling this
+    // function.
+    void dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &out);
+
+    static String8 getEventDataString(const uint8_t *data_ptr,
+                                       uint32_t tag, int type,
+                                       int count,
+                                       int indentation,
+                                       const std::unordered_set<int32_t> &outputStreamIds,
+                                       int32_t inputStreamId);
 
     void monitorSingleMetadata(TagMonitor::eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const std::string& cameraId, uint32_t tag,
-            const CameraMetadata& metadata);
+            const CameraMetadata& metadata, const std::unordered_set<int32_t> &outputStreamIds,
+            int32_t inputStreamId);
 
     std::atomic<bool> mMonitoringEnabled;
     std::mutex mMonitorMutex;
@@ -93,6 +110,9 @@
     std::unordered_map<std::string, CameraMetadata> mLastMonitoredPhysicalRequestKeys;
     std::unordered_map<std::string, CameraMetadata> mLastMonitoredPhysicalResultKeys;
 
+    int32_t mLastInputStreamId = -1;
+    std::unordered_set<int32_t> mLastStreamIds;
+
     /**
      * A monitoring event
      * Stores a new metadata field value and the timestamp at which it changed.
@@ -101,7 +121,8 @@
     struct MonitorEvent {
         template<typename T>
         MonitorEvent(eventSource src, uint32_t frameNumber, nsecs_t timestamp,
-                const T &newValue, const std::string& cameraId);
+                const T &newValue, const std::string& cameraId,
+                const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId);
         ~MonitorEvent();
 
         eventSource source;
@@ -111,6 +132,8 @@
         uint8_t type;
         std::vector<uint8_t> newData;
         std::string cameraId;
+        std::unordered_set<int32_t> outputStreamIds;
+        int32_t inputStreamId = -1; // -1 indicates no input stream
     };
 
     // A ring buffer for tracking the last kMaxMonitorEvents metadata changes
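
A hedged sketch of consuming the new getLatestMonitoredTagEvents() accessor outside of dumpsys (the surrounding function name and include path are illustrative):

    #include <string>
    #include <vector>
    #include "TagMonitor.h"  // include path is illustrative

    // Illustration only: snapshot the formatted tag-change log into strings, e.g.
    // for attaching to a bug report, instead of writing straight to a dump fd.
    static std::vector<std::string> snapshotTagEvents(android::TagMonitor& monitor) {
        std::vector<std::string> events;
        monitor.getLatestMonitoredTagEvents(events);  // takes mMonitorMutex internally
        return events;
    }
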
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
index e151a06..4317ccc 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
@@ -80,4 +80,14 @@
 getgroups: 1
 sysinfo: 1
 
+# Android profiler (heapprofd, traced_perf) additions, where not already
+# covered by the rest of the file, or by builtin minijail allow-listing of
+# logging-related syscalls.
+# TODO(b/197184220): this is a targeted addition for a specific investigation,
+# and addresses just the arm64 framework av service policies. In the future, we
+# should make this more general (e.g. a central file that can be @included in
+# other policy files).
+setsockopt: 1
+sendmsg: 1
+
 @include /apex/com.android.media.swcodec/etc/seccomp_policy/code_coverage.arm64.policy
diff --git a/services/mediaextractor/TEST_MAPPING b/services/mediaextractor/TEST_MAPPING
new file mode 100644
index 0000000..7a66eeb
--- /dev/null
+++ b/services/mediaextractor/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+    "presubmit": [
+        {
+            "name": "CtsMediaTranscodingTestCases"
+        }
+    ]
+}
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
index 9bbd53b..e54c918 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
@@ -46,5 +46,16 @@
 # Required by Sanitizers
 sched_yield: 1
 
+# Android profiler (heapprofd, traced_perf) additions, where not already
+# covered by the rest of the file, or by builtin minijail allow-listing of
+# logging-related syscalls.
+# TODO(b/197184220): this is a targeted addition for a specific investigation,
+# and addresses just the arm64 framework av service policies. In the future, we
+# should make this more general (e.g. a central file that can be @included in
+# other policy files).
+setsockopt: 1
+sendmsg: 1
+set_tid_address: 1
+
 @include /apex/com.android.media/etc/seccomp_policy/crash_dump.arm64.policy
 @include /apex/com.android.media/etc/seccomp_policy/code_coverage.arm64.policy
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index c98d5fc..0fa24cd 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -181,15 +181,16 @@
         "libstatssocket",
     ],
 
+    export_include_dirs: [
+        ".",
+    ],
+
     static_libs: [
         "libplatformprotos",
     ],
 
     header_libs: [
         "libaaudio_headers",
-    ],
-
-    include_dirs: [
-        "system/media/audio_utils/include",
+        "libaudioutils_headers",
     ],
 }
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 21768f8..0e4dfcf 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -66,16 +66,7 @@
 }
 
 // The status variable contains status_t codes which are used by
-// the core audio framework.
-//
-// We also consider AAudio status codes as they are non-overlapping with status_t
-// and compiler checked here.
-//
-// Caution: As AAUDIO_ERROR codes have a unique range (AAUDIO_ERROR_BASE = -900),
-// overlap with status_t should not present an issue.
-//
-// See: system/core/libutils/include/utils/Errors.h
-//      frameworks/av/media/libaaudio/include/aaudio/AAudio.h
+// the core audio framework. We also consider AAudio status codes.
 //
 // Compare with mediametrics::statusToStatusString
 //
@@ -184,6 +175,24 @@
     "log_session_id",
 };
 
+static constexpr const char * const AudioTrackStatusFields[] {
+    "mediametrics_audiotrackstatus_reported",
+    "status",
+    "debug_message",
+    "sub_code",
+    "uid",
+    "event",
+    "flags",
+    "content_type",
+    "usage",
+    "encoding",
+    "channel_mask",
+    "buffer_frame_count",
+    "sample_rate",
+    "speed",
+    "pitch",
+};
+
 static constexpr const char * const AudioDeviceConnectionFields[] = {
     "mediametrics_audiodeviceconnection_reported",
     "input_devices",
@@ -525,6 +534,86 @@
     // Add to the heat map - we automatically track every item's status to see
     // the types of errors and the frequency of errors.
     mHeatMap.add(prefixKey, suffixKey, eventStr, statusString, uid, message, subCode);
+
+    // Certain keys/event pairs are sent to statsd.
+    // Note that the prefixes often end with a '.' so we use startsWith.
+    if (startsWith(key, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK)
+            && eventStr == AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE) {
+        const int atom_status = types::lookup<types::STATUS, int32_t>(statusString);
+
+        // currently we only send create status events.
+        const int32_t event =
+                android::util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__EVENT__EVENT_CREATE;
+
+        // The following fields should all be present in a create event.
+        std::string flagsStr;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_ORIGINALFLAGS, &flagsStr),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_ORIGINALFLAGS);
+        const auto flags = types::lookup<types::OUTPUT_FLAG, int32_t>(flagsStr);
+
+        // AMEDIAMETRICS_PROP_SESSIONID omitted from atom
+
+        std::string contentTypeStr;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_CONTENTTYPE, &contentTypeStr),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_CONTENTTYPE);
+        const auto contentType = types::lookup<types::CONTENT_TYPE, int32_t>(contentTypeStr);
+
+        std::string usageStr;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_USAGE, &usageStr),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_USAGE);
+        const auto usage = types::lookup<types::USAGE, int32_t>(usageStr);
+
+        // AMEDIAMETRICS_PROP_SELECTEDDEVICEID omitted from atom
+
+        std::string encodingStr;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_ENCODING, &encodingStr),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_ENCODING);
+        const auto encoding = types::lookup<types::ENCODING, int32_t>(encodingStr);
+
+        int32_t channelMask = 0;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_CHANNELMASK, &channelMask),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_CHANNELMASK);
+        int32_t frameCount = 0;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_FRAMECOUNT, &frameCount),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_FRAMECOUNT);
+        int32_t sampleRate = 0;
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_SAMPLERATE, &sampleRate),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_SAMPLERATE);
+        double speed = 0.f;  // default is 1.f
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, &speed),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_PLAYBACK_SPEED);
+        double pitch = 0.f;  // default is 1.f
+        ALOGD_IF(!item->get(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, &pitch),
+                "%s: %s missing %s field",
+                __func__, AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK, AMEDIAMETRICS_PROP_PLAYBACK_PITCH);
+        const auto [ result, str ] = sendToStatsd(AudioTrackStatusFields,
+                CONDITION(android::util::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED)
+                , atom_status
+                , message.c_str()
+                , subCode
+                , uid
+                , event
+                , flags
+                , contentType
+                , usage
+                , encoding
+                , (int64_t)channelMask
+                , frameCount
+                , sampleRate
+                , (float)speed
+                , (float)pitch
+                );
+        ALOGV("%s: statsd %s", __func__, str.c_str());
+        mStatsdLog->log(android::util::MEDIAMETRICS_AUDIOTRACKSTATUS_REPORTED, str);
+    }
 }
 
 // HELPER METHODS
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index b67967b..7e406cc 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -192,6 +192,31 @@
     return map;
 }
 
+const std::unordered_map<std::string, int32_t>& getStatusMap() {
+    // DO NOT MODIFY VALUES (OK to add new ones).
+    static std::unordered_map<std::string, int32_t> map {
+        {"",
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__OK},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_OK,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__OK},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_ARGUMENT},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_IO,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_IO},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_MEMORY},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_SECURITY},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_STATE,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_STATE},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_TIMEOUT},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN,
+            util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN},
+    };
+    return map;
+}
+
 // Helper: Create the corresponding int32 from string flags split with '|'.
 template <typename Traits>
 int32_t int32FromFlags(const std::string &flags)
@@ -433,6 +458,17 @@
 }
 
 template <>
+int32_t lookup<STATUS>(const std::string &status)
+{
+    auto& map = getStatusMap();
+    auto it = map.find(status);
+    if (it == map.end()) {
+        return util::MEDIAMETRICS_AUDIO_TRACK_STATUS_REPORTED__STATUS__ERROR_UNKNOWN;
+    }
+    return it->second;
+}
+
+template <>
 int32_t lookup<THREAD_TYPE>(const std::string &threadType)
 {
     auto& map = getAudioThreadTypeMap();
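For reference, a minimal sketch of how the new STATUS lookup above is meant to be called, mirroring the statsd handler earlier in this change. The wrapper function name is illustrative, and it assumes the android::mediametrics::types namespace declared in AudioTypes.h:

#include <string>

#include "AudioTypes.h"

// Illustrative wrapper: unknown keys fall back to ERROR_UNKNOWN via getStatusMap().
int32_t statusToAtomEnum(const std::string& statusString) {
    return android::mediametrics::types::lookup<
            android::mediametrics::types::STATUS, int32_t>(statusString);
}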
diff --git a/services/mediametrics/AudioTypes.h b/services/mediametrics/AudioTypes.h
index 4394d79..5dbff9b 100644
--- a/services/mediametrics/AudioTypes.h
+++ b/services/mediametrics/AudioTypes.h
@@ -39,6 +39,10 @@
 };
 
 // Enumeration for all the string translations to integers (generally int32_t) unless noted.
+// This is used to index the template method below:
+// template <AudioEnumCategory C, typename T, typename S>  T lookup(const S &str);
+//
+// Okay to add new translations in the middle so that AudioEnumCategory stays alphabetical.
 enum AudioEnumCategory {
     AAUDIO_DIRECTION,
     AAUDIO_PERFORMANCE_MODE,
@@ -51,6 +55,7 @@
     OUTPUT_DEVICE, // int64_t
     OUTPUT_FLAG,
     SOURCE_TYPE,
+    STATUS,
     STREAM_TYPE,
     THREAD_TYPE,
     TRACK_TRAITS,
diff --git a/services/mediametrics/fuzzer/Android.bp b/services/mediametrics/fuzzer/Android.bp
index 9da7282..84d494e 100644
--- a/services/mediametrics/fuzzer/Android.bp
+++ b/services/mediametrics/fuzzer/Android.bp
@@ -59,9 +59,8 @@
         "packagemanager_aidl-cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/services/mediametrics",
-        "system/media/audio_utils/include",
+    header_libs: [
+        "libaudioutils_headers",
     ],
 
     fuzz_config: {
diff --git a/services/oboeservice/AAudioCommandQueue.cpp b/services/oboeservice/AAudioCommandQueue.cpp
new file mode 100644
index 0000000..9bd18b3
--- /dev/null
+++ b/services/oboeservice/AAudioCommandQueue.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioCommandQueue"
+//#define LOG_NDEBUG 0
+
+#include <chrono>
+
+#include <utils/Log.h>
+
+#include "AAudioCommandQueue.h"
+
+namespace aaudio {
+
+aaudio_result_t AAudioCommandQueue::sendCommand(std::shared_ptr<AAudioCommand> command) {
+    {
+        std::scoped_lock<std::mutex> _l(mLock);
+        if (!mRunning) {
+            ALOGE("Tried to send command while the command queue was not running");
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+        mCommands.push(command);
+        mWaitWorkCond.notify_one();
+    }
+
+    std::unique_lock _cl(command->lock);
+    android::base::ScopedLockAssertion lockAssertion(command->lock);
+    ALOGV("Sending command %d, wait for reply(%d) with timeout %jd",
+           command->operationCode, command->isWaitingForReply, command->timeoutNanoseconds);
+    // `isWaitingForReply` is initialized when the command is constructed. It is flipped
+    // when the command completes or times out.
+    auto timeoutExpire = std::chrono::steady_clock::now()
+            + std::chrono::nanoseconds(command->timeoutNanoseconds);
+    while (command->isWaitingForReply) {
+        if (command->conditionVariable.wait_until(_cl, timeoutExpire)
+                == std::cv_status::timeout) {
+            ALOGD("Command %d timed out", command->operationCode);
+            command->result = AAUDIO_ERROR_TIMEOUT;
+            command->isWaitingForReply = false;
+        }
+    }
+    ALOGV("Command %d completed with result %d", command->operationCode, command->result);
+    return command->result;
+}
+
+std::shared_ptr<AAudioCommand> AAudioCommandQueue::waitForCommand(int64_t timeoutNanos) {
+    std::shared_ptr<AAudioCommand> command;
+    {
+        std::unique_lock _l(mLock);
+        android::base::ScopedLockAssertion lockAssertion(mLock);
+        if (timeoutNanos >= 0) {
+            mWaitWorkCond.wait_for(_l, std::chrono::nanoseconds(timeoutNanos), [this]() {
+                android::base::ScopedLockAssertion lockAssertion(mLock);
+                return !mRunning || !mCommands.empty();
+            });
+        } else {
+            mWaitWorkCond.wait(_l, [this]() {
+                android::base::ScopedLockAssertion lockAssertion(mLock);
+                return !mRunning || !mCommands.empty();
+            });
+        }
+        if (!mCommands.empty() && mRunning) {
+            command = mCommands.front();
+            mCommands.pop();
+        }
+    }
+    return command;
+}
+
+void AAudioCommandQueue::startWaiting() {
+    std::scoped_lock<std::mutex> _l(mLock);
+    mRunning = true;
+}
+
+void AAudioCommandQueue::stopWaiting() {
+    std::scoped_lock<std::mutex> _l(mLock);
+    mRunning = false;
+    // Clear all commands in the queue as the command thread is stopped.
+    while (!mCommands.empty()) {
+        auto command = mCommands.front();
+        mCommands.pop();
+        std::scoped_lock<std::mutex> _cl(command->lock);
+        // If a caller is waiting for a reply, return AAUDIO_ERROR_INVALID_STATE
+        // because the command will never be processed.
+        if (command->isWaitingForReply) {
+            command->result = AAUDIO_ERROR_INVALID_STATE;
+            command->isWaitingForReply = false;
+            command->conditionVariable.notify_one();
+        }
+    }
+    mWaitWorkCond.notify_one();
+}
+
+} // namespace aaudio
\ No newline at end of file
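For orientation, a rough sketch of the consumer side of this new queue, shaped like the command thread that AAudioServiceStreamBase gains later in this change. processCommand() and the enabled flag are illustrative stand-ins, not part of the change:

#include <atomic>
#include <mutex>

#include "AAudioCommandQueue.h"

// Stand-in for the caller's own dispatch logic.
aaudio_result_t processCommand(const aaudio::AAudioCommand& command);

void commandLoop(aaudio::AAudioCommandQueue& queue, std::atomic<bool>& enabled) {
    queue.startWaiting();                         // producers may now push commands
    while (enabled.load()) {
        auto command = queue.waitForCommand(-1);  // blocks until a command or stopWaiting()
        if (command == nullptr) continue;         // woken without work, e.g. by stopWaiting()
        std::scoped_lock<std::mutex> lock(command->lock);
        command->result = processCommand(*command);
        if (command->isWaitingForReply) {         // wake the sender blocked in sendCommand()
            command->isWaitingForReply = false;
            command->conditionVariable.notify_one();
        }
    }
    queue.stopWaiting();                          // reject anything still queued
}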
diff --git a/services/oboeservice/AAudioCommandQueue.h b/services/oboeservice/AAudioCommandQueue.h
new file mode 100644
index 0000000..64442a3
--- /dev/null
+++ b/services/oboeservice/AAudioCommandQueue.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <queue>
+
+#include <aaudio/AAudio.h>
+#include <android-base/thread_annotations.h>
+
+namespace aaudio {
+
+typedef int32_t aaudio_command_opcode;
+
+class AAudioCommandParam {
+public:
+    AAudioCommandParam() = default;
+    virtual ~AAudioCommandParam() = default;
+};
+
+class AAudioCommand {
+public:
+    explicit AAudioCommand(
+            aaudio_command_opcode opCode, std::shared_ptr<AAudioCommandParam> param = nullptr,
+            bool waitForReply = false, int64_t timeoutNanos = 0)
+            : operationCode(opCode), parameter(param), isWaitingForReply(waitForReply),
+              timeoutNanoseconds(timeoutNanos) { }
+    virtual ~AAudioCommand() = default;
+
+    std::mutex lock;
+    std::condition_variable conditionVariable;
+
+    const aaudio_command_opcode operationCode;
+    std::shared_ptr<AAudioCommandParam> parameter;
+    bool isWaitingForReply GUARDED_BY(lock);
+    const int64_t timeoutNanoseconds;
+    aaudio_result_t result GUARDED_BY(lock) = AAUDIO_OK;
+};
+
+class AAudioCommandQueue {
+public:
+    AAudioCommandQueue() = default;
+    ~AAudioCommandQueue() = default;
+
+    /**
+     * Send a command to the command queue. If the command requires a reply, this call blocks
+     * until the reply arrives or the command's timeout expires.
+     *
+     * @param command the command to send to the command queue.
+     * @return the result of sending the command, or the result of executing the command if the
+     *         command waits for a reply. AAUDIO_ERROR_TIMEOUT is returned if the timeout expires.
+     */
+    aaudio_result_t sendCommand(std::shared_ptr<AAudioCommand> command);
+
+    /**
+     * Wait for the next available command or until the timeout expires.
+     *
+     * @param timeoutNanos the maximum time to wait for the next command (0 means return
+     *                     immediately in any case), negative to wait forever.
+     * @return the next available command, or nullptr if none is available.
+     */
+    std::shared_ptr<AAudioCommand> waitForCommand(int64_t timeoutNanos = -1);
+
+    /**
+     * Start waiting for commands. Commands can only be pushed into the command queue after it
+     * starts waiting.
+     */
+    void startWaiting();
+
+    /**
+     * Force stop waiting for next command
+     */
+    void stopWaiting();
+
+private:
+    std::mutex mLock;
+    std::condition_variable mWaitWorkCond;
+
+    std::queue<std::shared_ptr<AAudioCommand>> mCommands GUARDED_BY(mLock);
+    bool mRunning GUARDED_BY(mLock) = false;
+};
+
+} // namespace aaudio
\ No newline at end of file
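And the producer side, a minimal sketch of blocking for a reply. The opcode value and the 3-second timeout are illustrative; AAudioServiceStreamBase below defines its own opcode enum and TIMEOUT_NANOS:

#include <memory>

#include "AAudioCommandQueue.h"

aaudio_result_t sendPauseAndWait(aaudio::AAudioCommandQueue& queue) {
    constexpr aaudio::aaudio_command_opcode kPauseOpCode = 1;    // caller-defined opcode
    constexpr int64_t kTimeoutNanos = 3LL * 1000 * 1000 * 1000;  // 3 seconds
    auto command = std::make_shared<aaudio::AAudioCommand>(
            kPauseOpCode, nullptr /*param*/, true /*waitForReply*/, kTimeoutNanos);
    // Blocks until the command thread replies or the timeout expires (AAUDIO_ERROR_TIMEOUT).
    return queue.sendCommand(command);
}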
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 40a664e..2679b2e 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -280,6 +280,22 @@
     AIDL_RETURN(serviceStream->unregisterAudioThread(clientThreadId));
 }
 
+Status AAudioService::exitStandby(int32_t streamHandle, Endpoint* endpoint, int32_t *_aidl_return) {
+    static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+    sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+    if (serviceStream.get() == nullptr) {
+        ALOGE("exitStandby(), illegal stream handle = 0x%0x", streamHandle);
+        AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+    }
+    AudioEndpointParcelable endpointParcelable;
+    aaudio_result_t result = serviceStream->exitStandby(&endpointParcelable);
+    if (result == AAUDIO_OK) {
+        *endpoint = std::move(endpointParcelable).parcelable();
+    }
+    AIDL_RETURN(result);
+}
+
 bool AAudioService::isCallerInService() {
     pid_t clientPid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mAudioClient.attributionSource.pid));
     uid_t clientUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAudioClient.attributionSource.uid));
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index 7c1b796..0a111fb 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -82,6 +82,9 @@
     binder::Status unregisterAudioThread(int32_t streamHandle, int32_t clientThreadId,
                                          int32_t* _aidl_return) override;
 
+    binder::Status exitStandby(int32_t streamHandle, ::aaudio::Endpoint* endpoint,
+                               int32_t* _aidl_return) override;
+
     aaudio_result_t startClient(aaudio::aaudio_handle_t streamHandle,
                                 const android::AudioClient& client,
                                 const audio_attributes_t *attr,
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index a7f63d3..92004c5 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -77,6 +77,16 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
+    virtual aaudio_result_t standby() {
+        ALOGD("AAudioServiceEndpoint::standby() AAUDIO_ERROR_UNAVAILABLE");
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    virtual aaudio_result_t exitStandby(AudioEndpointParcelable* parcelable) {
+        ALOGD("AAudioServiceEndpoint::exitStandby() AAUDIO_ERROR_UNAVAILABLE");
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
     /**
      * @param positionFrames
      * @param timeNanos
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 35a0890..a266d5b 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -80,16 +80,16 @@
 
     audio_format_t audioFormat = getFormat();
 
-    // FLOAT is not directly supported by the HAL so ask for a 32-bit.
-    if (audioFormat == AUDIO_FORMAT_PCM_FLOAT) {
-        // TODO remove these logs when finished debugging.
-        ALOGD("%s() change format from %d to 32_BIT", __func__, audioFormat);
-        audioFormat = AUDIO_FORMAT_PCM_32_BIT;
-    }
-
     result = openWithFormat(audioFormat);
     if (result == AAUDIO_OK) return result;
 
+    if (result == AAUDIO_ERROR_UNAVAILABLE && audioFormat == AUDIO_FORMAT_PCM_FLOAT) {
+        ALOGD("%s() FLOAT failed, perhaps due to format. Try again with 32_BIT", __func__);
+        audioFormat = AUDIO_FORMAT_PCM_32_BIT;
+        result = openWithFormat(audioFormat);
+    }
+    if (result == AAUDIO_OK) return result;
+
     if (result == AAUDIO_ERROR_UNAVAILABLE && audioFormat == AUDIO_FORMAT_PCM_32_BIT) {
         ALOGD("%s() 32_BIT failed, perhaps due to format. Try again with 24_BIT_PACKED", __func__);
         audioFormat = AUDIO_FORMAT_PCM_24_BIT_PACKED;
@@ -186,37 +186,8 @@
     ALOGD("%s() deviceId = %d, sessionId = %d", __func__, getDeviceId(), getSessionId());
 
     // Create MMAP/NOIRQ buffer.
-    int32_t minSizeFrames = getBufferCapacity();
-    if (minSizeFrames <= 0) { // zero will get rejected
-        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
-    }
-    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
-    bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
-    if (status != OK) {
-        ALOGE("%s() - createMmapBuffer() failed with status %d %s",
-              __func__, status, strerror(-status));
-        result = AAUDIO_ERROR_UNAVAILABLE;
+    if (createMmapBuffer(&mAudioDataFileDescriptor) != AAUDIO_OK) {
         goto error;
-    } else {
-        ALOGD("%s() createMmapBuffer() buffer_size = %d fr, burst_size %d fr"
-                      ", Sharable FD: %s",
-              __func__,
-              mMmapBufferinfo.buffer_size_frames,
-              mMmapBufferinfo.burst_size_frames,
-              isBufferShareable ? "Yes" : "No");
-    }
-
-    setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
-    if (!isBufferShareable) {
-        // Exclusive mode can only be used by the service because the FD cannot be shared.
-        int32_t audioServiceUid =
-            VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
-        if ((mMmapClient.attributionSource.uid != audioServiceUid) &&
-            getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
-            ALOGW("%s() - exclusive FD cannot be used by client", __func__);
-            result = AAUDIO_ERROR_UNAVAILABLE;
-            goto error;
-        }
     }
 
     // Get information about the stream and pass it back to the caller.
@@ -224,24 +195,17 @@
             config.channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
             AAudio_isChannelIndexMask(config.channel_mask)));
 
-    // AAudio creates a copy of this FD and retains ownership of the copy.
-    // Assume that AudioFlinger will close the original shared_memory_fd.
-    mAudioDataFileDescriptor.reset(dup(mMmapBufferinfo.shared_memory_fd));
-    if (mAudioDataFileDescriptor.get() == -1) {
-        ALOGE("%s() - could not dup shared_memory_fd", __func__);
-        result = AAUDIO_ERROR_INTERNAL;
-        goto error;
-    }
-    // Call to HAL to make sure the transport FD was able to be closed by binder.
-    // This is a tricky workaround for a problem in Binder.
-    // TODO:[b/192048842] When that problem is fixed we may be able to remove or change this code.
-    struct audio_mmap_position position;
-    mMmapStream->getMmapPosition(&position);
-
-    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
     setFormat(config.format);
     setSampleRate(config.sample_rate);
 
+    // If the position does not advance while the timestamp keeps advancing for longer than a
+    // certain amount of time, the timestamp reported by the HAL may not be accurate. Here, the
+    // timestamp grace period is set to the duration of 5 bursts. We may want to update this value
+    // if OEMs report that it is too short.
+    static constexpr int kTimestampGraceBurstCount = 5;
+    mTimestampGracePeriodMs = ((int64_t) kTimestampGraceBurstCount * mFramesPerBurst
+            * AAUDIO_MILLIS_PER_SECOND) / getSampleRate();
+
     ALOGD("%s() actual rate = %d, channels = %d channelMask = %#x, deviceId = %d, capacity = %d\n",
           __func__, getSampleRate(), getSamplesPerFrame(), getChannelMask(),
           deviceId, getBufferCapacity());
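For a sense of scale of the grace period computed above, with illustrative numbers (96-frame bursts at 48 kHz, not taken from this change) it works out to 10 ms:

// Illustrative arithmetic only; the real values come from mFramesPerBurst and getSampleRate().
constexpr int32_t kFramesPerBurst = 96;
constexpr int32_t kSampleRate = 48000;
constexpr int32_t kTimestampGraceBurstCount = 5;
constexpr int64_t kGracePeriodMs =
        (int64_t)kTimestampGraceBurstCount * kFramesPerBurst * 1000 / kSampleRate;
static_assert(kGracePeriodMs == 10, "5 bursts of 96 frames at 48 kHz is 10 ms");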
@@ -315,6 +279,32 @@
     return result;
 }
 
+aaudio_result_t AAudioServiceEndpointMMAP::standby() {
+    if (mMmapStream == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    }
+    aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->standby());
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::exitStandby(AudioEndpointParcelable* parcelable) {
+    if (mMmapStream == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    }
+    mAudioDataFileDescriptor.reset();
+    aaudio_result_t result = createMmapBuffer(&mAudioDataFileDescriptor);
+    if (result == AAUDIO_OK) {
+        int32_t bytesPerFrame = calculateBytesPerFrame();
+        int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
+        int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+        parcelable->mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+        parcelable->mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+        parcelable->mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+        parcelable->mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
+    }
+    return result;
+}
+
 // Get free-running DSP or DMA hardware position from the HAL.
 aaudio_result_t AAudioServiceEndpointMMAP::getFreeRunningPosition(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
@@ -401,30 +391,150 @@
 /**
  * Get an immutable description of the data queue from the HAL.
  */
-aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(
+        AudioEndpointParcelable* parcelable)
 {
     // Gather information on the data queue based on HAL info.
     int32_t bytesPerFrame = calculateBytesPerFrame();
     int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
-    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
-    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
-    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
-    parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
+    int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+    parcelable->mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+    parcelable->mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+    parcelable->mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+    parcelable->mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
     return AAUDIO_OK;
 }
 
 aaudio_result_t AAudioServiceEndpointMMAP::getExternalPosition(uint64_t *positionFrames,
                                                                int64_t *timeNanos)
 {
-    if (!mExternalPositionSupported) {
-        return AAUDIO_ERROR_INVALID_STATE;
+    if (mHalExternalPositionStatus != AAUDIO_OK) {
+        return mHalExternalPositionStatus;
     }
-    status_t status = mMmapStream->getExternalPosition(positionFrames, timeNanos);
-    if (status == INVALID_OPERATION) {
-        // getExternalPosition is not supported. Set mExternalPositionSupported as false
+    uint64_t tempPositionFrames;
+    int64_t tempTimeNanos;
+    status_t status = mMmapStream->getExternalPosition(&tempPositionFrames, &tempTimeNanos);
+    if (status != OK) {
+        // getExternalPosition reports error. The HAL may not support the API. Cache the result
         // so that the call will not go to the HAL next time.
-        mExternalPositionSupported = false;
+        mHalExternalPositionStatus = AAudioConvert_androidToAAudioResult(status);
+        return mHalExternalPositionStatus;
     }
-    return AAudioConvert_androidToAAudioResult(status);
+
+    // If the HAL keeps reporting the same position or timestamp, it may be having trouble
+    // reporting a correct external position, and its values should not be trusted. Ideally,
+    // we would stop querying the external position once the HAL fails to report a correct
+    // position within some period, but it is not a good idea to query the system time too
+    // often. Instead, a maximum count of frozen external positions is defined: if the HAL
+    // keeps reporting the same timestamp or position that many times in a row, its values
+    // are no longer trusted.
+    static constexpr int kMaxFrozenCount = 20;
+    // If the HAL version is less than 7.0, getPresentationPosition is an optional API.
+    // If the HAL version is 7.0 or later, getPresentationPosition is a mandatory API, but
+    // even a NO_ERROR status does not guarantee that the returned position is valid. Do a
+    // simple validation here, checking that the position keeps moving forward within the
+    // grace period, so that this function can return an error when the validation fails.
+    // Note that this validation is not applied only to HAL APIs below 7.0; there is always
+    // a chance that the HAL does not report the timestamp and position correctly even on
+    // newer versions.
+    if (mLastPositionFrames > tempPositionFrames) {
+        // If the position is going backwards, there must be something wrong with the HAL.
+        // In that case, we do not trust the values reported by the HAL.
+        ALOGW("%s position is going backwards, last position(%jd) current position(%jd)",
+              __func__, mLastPositionFrames, tempPositionFrames);
+        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
+        return mHalExternalPositionStatus;
+    } else if (mLastPositionFrames == tempPositionFrames) {
+        if (tempTimeNanos - mTimestampNanosForLastPosition >
+                AAUDIO_NANOS_PER_MILLISECOND * mTimestampGracePeriodMs) {
+            ALOGW("%s, the reported position has not changed within %d msec. "
+                  "Marking the external position as not supported", __func__, mTimestampGracePeriodMs);
+            mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
+            return mHalExternalPositionStatus;
+        }
+        mFrozenPositionCount++;
+    } else {
+        mFrozenPositionCount = 0;
+    }
+
+    if (mTimestampNanosForLastPosition > tempTimeNanos) {
+        // If the timestamp is going backwards, there must be something wrong with the HAL.
+        // In that case, we do not trust the values reported by the HAL.
+        ALOGW("%s timestamp is going backwards, last timestamp(%jd), current timestamp(%jd)",
+              __func__, mTimestampNanosForLastPosition, tempTimeNanos);
+        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
+        return mHalExternalPositionStatus;
+    } else if (mTimestampNanosForLastPosition == tempTimeNanos) {
+        mFrozenTimestampCount++;
+    } else {
+        mFrozenTimestampCount = 0;
+    }
+
+    if (mFrozenTimestampCount + mFrozenPositionCount > kMaxFrozenCount) {
+        ALOGW("%s too many frozen external positions from the HAL.", __func__);
+        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
+        return mHalExternalPositionStatus;
+    }
+
+    mLastPositionFrames = tempPositionFrames;
+    mTimestampNanosForLastPosition = tempTimeNanos;
+
+    // Only update the timestamp and position when they look valid.
+    *positionFrames = tempPositionFrames;
+    *timeNanos = tempTimeNanos;
+    return mHalExternalPositionStatus;
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::createMmapBuffer(
+        android::base::unique_fd* fileDescriptor)
+{
+    memset(&mMmapBufferinfo, 0, sizeof(struct audio_mmap_buffer_info));
+    int32_t minSizeFrames = getBufferCapacity();
+    if (minSizeFrames <= 0) { // zero will get rejected
+        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
+    }
+    status_t status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+    bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
+    if (status != OK) {
+        ALOGE("%s() - createMmapBuffer() failed with status %d %s",
+              __func__, status, strerror(-status));
+        return AAUDIO_ERROR_UNAVAILABLE;
+    } else {
+        ALOGD("%s() createMmapBuffer() buffer_size = %d fr, burst_size %d fr"
+                      ", Sharable FD: %s",
+              __func__,
+              mMmapBufferinfo.buffer_size_frames,
+              mMmapBufferinfo.burst_size_frames,
+              isBufferShareable ? "Yes" : "No");
+    }
+
+    setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
+    if (!isBufferShareable) {
+        // Exclusive mode can only be used by the service because the FD cannot be shared.
+        int32_t audioServiceUid =
+            VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+        if ((mMmapClient.attributionSource.uid != audioServiceUid) &&
+            getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+            ALOGW("%s() - exclusive FD cannot be used by client", __func__);
+            return AAUDIO_ERROR_UNAVAILABLE;
+        }
+    }
+
+    // AAudio creates a copy of this FD and retains ownership of the copy.
+    // Assume that AudioFlinger will close the original shared_memory_fd.
+    fileDescriptor->reset(dup(mMmapBufferinfo.shared_memory_fd));
+    if (fileDescriptor->get() == -1) {
+        ALOGE("%s() - could not dup shared_memory_fd", __func__);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+
+    // Call to HAL to make sure the transport FD was able to be closed by binder.
+    // This is a tricky workaround for a problem in Binder.
+    // TODO:[b/192048842] When that problem is fixed we may be able to remove or change this code.
+    struct audio_mmap_position position;
+    mMmapStream->getMmapPosition(&position);
+
+    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
+
+    return AAUDIO_OK;
 }
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 5a53885..3e7f2c7 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -64,6 +64,10 @@
 
     aaudio_result_t stopClient(audio_port_handle_t clientHandle)  override;
 
+    aaudio_result_t standby() override;
+
+    aaudio_result_t exitStandby(AudioEndpointParcelable* parcelable) override;
+
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
 
     aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
@@ -79,7 +83,7 @@
     void onRoutingChanged(audio_port_handle_t portHandle) override;
     // ------------------------------------------------------------------------------
 
-    aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable);
+    aaudio_result_t getDownDataDescription(AudioEndpointParcelable* parcelable);
 
     int64_t getHardwareTimeOffsetNanos() const {
         return mHardwareTimeOffsetNanos;
@@ -91,6 +95,8 @@
 
     aaudio_result_t openWithFormat(audio_format_t audioFormat);
 
+    aaudio_result_t createMmapBuffer(android::base::unique_fd* fileDescriptor);
+
     MonotonicCounter                          mFramesTransferred;
 
     // Interface to the AudioFlinger MMAP support.
@@ -106,7 +112,12 @@
 
     int64_t                                   mHardwareTimeOffsetNanos = 0; // TODO get from HAL
 
-    bool                                      mExternalPositionSupported = true;
+    aaudio_result_t                           mHalExternalPositionStatus = AAUDIO_OK;
+    uint64_t                                  mLastPositionFrames = 0;
+    int64_t                                   mTimestampNanosForLastPosition = 0;
+    int32_t                                   mTimestampGracePeriodMs;
+    int32_t                                   mFrozenPositionCount = 0;
+    int32_t                                   mFrozenTimestampCount = 0;
 
 };
 
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 4e46033..f590fc8 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -21,6 +21,7 @@
 #include <assert.h>
 #include <map>
 #include <mutex>
+#include <media/AudioSystem.h>
 #include <utils/Singleton.h>
 
 #include "AAudioEndpointManager.h"
@@ -51,7 +52,7 @@
         mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
                         getStreamInternal()->getFramesPerBurst());
 
-        int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
+        int32_t burstsPerBuffer = AudioSystem::getAAudioMixerBurstCount();
         if (burstsPerBuffer == 0) {
             mLatencyTuningEnabled = true;
             burstsPerBuffer = BURSTS_PER_BUFFER_DEFAULT;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 4ffc127..9f48f80 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -34,20 +34,23 @@
 #include "AAudioService.h"
 #include "AAudioServiceEndpoint.h"
 #include "AAudioServiceStreamBase.h"
-#include "TimestampScheduler.h"
 
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
 
 using content::AttributionSourceState;
 
+static const int64_t TIMEOUT_NANOS = 3LL * 1000 * 1000 * 1000;
+// If the stream is idle for more than `IDLE_TIMEOUT_NANOS`, the stream will be put into standby.
+static const int64_t IDLE_TIMEOUT_NANOS = 3LL * 1000 * 1000 * 1000;
+
 /**
  * Base class for streams in the service.
  * @return
  */
 
 AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
-        : mTimestampThread("AATime")
+        : mCommandThread("AACommand")
         , mAtomicStreamTimestamp()
         , mAudioService(audioService) {
     mMmapClient.attributionSource = AttributionSourceState();
@@ -70,6 +73,13 @@
                         || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED),
                         "service stream %p still open, state = %d",
                         this, getState());
+
+    // Stop the command thread before destroying.
+    if (mThreadEnabled) {
+        mThreadEnabled = false;
+        mCommandQueue.stopWaiting();
+        mCommandThread.stop();
+    }
 }
 
 std::string AAudioServiceStreamBase::dumpHeader() {
@@ -166,16 +176,36 @@
         mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
         copyFrom(*mServiceEndpoint);
     }
+
+    // Make sure this object does not get deleted before the run() method
+    // can protect it by making a strong pointer.
+    mCommandQueue.startWaiting();
+    mThreadEnabled = true;
+    incStrong(nullptr); // See run() method.
+    result = mCommandThread.start(this);
+    if (result != AAUDIO_OK) {
+        decStrong(nullptr); // run() can't do it so we have to do it here.
+        goto error;
+    }
     return result;
 
 error:
-    close();
+    closeAndClear();
+    mThreadEnabled = false;
+    mCommandQueue.stopWaiting();
+    mCommandThread.stop();
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return close_l();
+    aaudio_result_t result = sendCommand(CLOSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+
+    // Stop the command thread as the stream is closed.
+    mThreadEnabled = false;
+    mCommandQueue.stopWaiting();
+    mCommandThread.stop();
+
+    return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close_l() {
@@ -183,29 +213,10 @@
         return AAUDIO_OK;
     }
 
-    // This will call stopTimestampThread() and also stop the stream,
-    // just in case it was not already stopped.
+    // This will stop the stream, just in case it was not already stopped.
     stop_l();
 
-    aaudio_result_t result = AAUDIO_OK;
-    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
-    if (endpoint == nullptr) {
-        result = AAUDIO_ERROR_INVALID_STATE;
-    } else {
-        endpoint->unregisterStream(this);
-        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
-        endpointManager.closeEndpoint(endpoint);
-
-        // AAudioService::closeStream() prevents two threads from closing at the same time.
-        mServiceEndpoint.clear(); // endpoint will hold the pointer after this method returns.
-    }
-
-    setState(AAUDIO_STREAM_STATE_CLOSED);
-
-    mediametrics::LogItem(mMetricsId)
-        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CLOSE)
-        .record();
-    return result;
+    return closeAndClear();
 }
 
 aaudio_result_t AAudioServiceStreamBase::startDevice() {
@@ -224,8 +235,10 @@
  * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
  */
 aaudio_result_t AAudioServiceStreamBase::start() {
-    std::lock_guard<std::mutex> lock(mLock);
+    return sendCommand(START, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+}
 
+aaudio_result_t AAudioServiceStreamBase::start_l() {
     const int64_t beginNs = AudioClock::getNanoseconds();
     aaudio_result_t result = AAUDIO_OK;
 
@@ -236,6 +249,12 @@
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
+    if (mStandby) {
+        ALOGW("%s() the stream is in standby, returning ERROR_STANDBY; "
+              "the client is expected to call exitStandby before start", __func__);
+        return AAUDIO_ERROR_STANDBY;
+    }
+
     mediametrics::Defer defer([&] {
         mediametrics::LogItem(mMetricsId)
             .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_START)
@@ -261,15 +280,6 @@
     // This should happen at the end of the start.
     sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
     setState(AAUDIO_STREAM_STATE_STARTED);
-    mThreadEnabled.store(true);
-    // Make sure this object does not get deleted before the run() method
-    // can protect it by making a strong pointer.
-    incStrong(nullptr); // See run() method.
-    result = mTimestampThread.start(this);
-    if (result != AAUDIO_OK) {
-        decStrong(nullptr); // run() can't do it so we have to do it here.
-        goto error;
-    }
 
     return result;
 
@@ -279,8 +289,7 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return pause_l();
+    return sendCommand(PAUSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause_l() {
@@ -298,12 +307,6 @@
             .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)result)
             .record(); });
 
-    result = stopTimestampThread();
-    if (result != AAUDIO_OK) {
-        disconnect_l();
-        return result;
-    }
-
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
         ALOGE("%s() has no endpoint", __func__);
@@ -322,8 +325,7 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::stop() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return stop_l();
+    return sendCommand(STOP, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
 aaudio_result_t AAudioServiceStreamBase::stop_l() {
@@ -343,12 +345,6 @@
 
     setState(AAUDIO_STREAM_STATE_STOPPING);
 
-    // Temporarily unlock because we are joining the timestamp thread and it may try
-    // to acquire mLock.
-    mLock.unlock();
-    result = stopTimestampThread();
-    mLock.lock();
-
     if (result != AAUDIO_OK) {
         disconnect_l();
         return result;
@@ -373,17 +369,11 @@
     return result;
 }
 
-aaudio_result_t AAudioServiceStreamBase::stopTimestampThread() {
-    aaudio_result_t result = AAUDIO_OK;
-    // clear flag that tells thread to loop
-    if (mThreadEnabled.exchange(false)) {
-        result = mTimestampThread.stop();
-    }
-    return result;
+aaudio_result_t AAudioServiceStreamBase::flush() {
+    return sendCommand(FLUSH, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
-aaudio_result_t AAudioServiceStreamBase::flush() {
-    std::lock_guard<std::mutex> lock(mLock);
+aaudio_result_t AAudioServiceStreamBase::flush_l() {
     aaudio_result_t result = AAudio_isFlushAllowed(getState());
     if (result != AAUDIO_OK) {
         return result;
@@ -404,48 +394,122 @@
     return AAUDIO_OK;
 }
 
-// implement Runnable, periodically send timestamps to client
+// implement Runnable, periodically send timestamps to client and process commands from queue.
 __attribute__((no_sanitize("integer")))
 void AAudioServiceStreamBase::run() {
-    ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
+    ALOGD("%s() %s entering >>>>>>>>>>>>>> COMMANDS", __func__, getTypeText());
     // Hold onto the ref counted stream until the end.
     android::sp<AAudioServiceStreamBase> holdStream(this);
     TimestampScheduler timestampScheduler;
+    int64_t nextTime = 0;  // set when a START command starts the timestamp scheduler
+    int64_t standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
     // Balance the incStrong from when the thread was launched.
     holdStream->decStrong(nullptr);
 
-    timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
-    timestampScheduler.start(AudioClock::getNanoseconds());
-    int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+    // Take mLock when the command thread starts and hold it for the life of the loop.
+    // Every operation handled here must be able to run while holding the lock.
+    std::scoped_lock<std::mutex> _l(mLock);
+
     int32_t loopCount = 0;
-    aaudio_result_t result = AAUDIO_OK;
-    while(mThreadEnabled.load()) {
+    while (mThreadEnabled.load()) {
         loopCount++;
-        if (AudioClock::getNanoseconds() >= nextTime) {
-            result = sendCurrentTimestamp();
-            if (result != AAUDIO_OK) {
-                ALOGE("%s() timestamp thread got result = %d", __func__, result);
-                break;
+        int64_t timeoutNanos = -1;
+        if (isRunning() || (isIdle_l() && !isStandby_l())) {
+            timeoutNanos = (isRunning() ? nextTime : standbyTime) - AudioClock::getNanoseconds();
+            timeoutNanos = std::max<int64_t>(0, timeoutNanos);
+        }
+
+        auto command = mCommandQueue.waitForCommand(timeoutNanos);
+        if (!mThreadEnabled) {
+            // Break the loop if the thread is disabled.
+            break;
+        }
+
+        if (isRunning() && AudioClock::getNanoseconds() >= nextTime) {
+            // It is time to update the timestamp.
+            if (sendCurrentTimestamp_l() != AAUDIO_OK) {
+                ALOGE("Failed to send current timestamp, disconnecting the stream");
+                disconnect_l();
+            } else {
+                nextTime = timestampScheduler.nextAbsoluteTime();
             }
-            nextTime = timestampScheduler.nextAbsoluteTime();
-        } else  {
-            // Sleep until it is time to send the next timestamp.
-            // TODO Wait for a signal with a timeout so that we can stop more quickly.
-            AudioClock::sleepUntilNanoTime(nextTime);
+        }
+        if (isIdle_l() && AudioClock::getNanoseconds() >= standbyTime) {
+            standby_l();
+        }
+
+        if (command != nullptr) {
+            std::scoped_lock<std::mutex> _commandLock(command->lock);
+            switch (command->operationCode) {
+                case START:
+                    command->result = start_l();
+                    timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
+                    timestampScheduler.start(AudioClock::getNanoseconds());
+                    nextTime = timestampScheduler.nextAbsoluteTime();
+                    break;
+                case PAUSE:
+                    command->result = pause_l();
+                    standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
+                    break;
+                case STOP:
+                    command->result = stop_l();
+                    standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
+                    break;
+                case FLUSH:
+                    command->result = flush_l();
+                    break;
+                case CLOSE:
+                    command->result = close_l();
+                    break;
+                case DISCONNECT:
+                    disconnect_l();
+                    break;
+                case REGISTER_AUDIO_THREAD: {
+                    RegisterAudioThreadParam *param =
+                            (RegisterAudioThreadParam *) command->parameter.get();
+                    command->result =
+                            param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                             : registerAudioThread_l(param->mOwnerPid,
+                                                                     param->mClientThreadId,
+                                                                     param->mPriority);
+                }
+                    break;
+                case UNREGISTER_AUDIO_THREAD: {
+                    UnregisterAudioThreadParam *param =
+                            (UnregisterAudioThreadParam *) command->parameter.get();
+                    command->result =
+                            param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                             : unregisterAudioThread_l(param->mClientThreadId);
+                }
+                    break;
+                case GET_DESCRIPTION: {
+                    GetDescriptionParam *param = (GetDescriptionParam *) command->parameter.get();
+                    command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                                        : getDescription_l(param->mParcelable);
+                }
+                    break;
+                case EXIT_STANDBY: {
+                    ExitStandbyParam *param = (ExitStandbyParam *) command->parameter.get();
+                    command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                                       : exitStandby_l(param->mParcelable);
+                    standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
+                } break;
+                default:
+                    ALOGE("Invalid command op code: %d", command->operationCode);
+                    break;
+            }
+            if (command->isWaitingForReply) {
+                command->isWaitingForReply = false;
+                command->conditionVariable.notify_one();
+            }
         }
     }
-    // This was moved from the calls in stop_l() and pause_l(), which could cause a deadlock
-    // if it resulted in a call to disconnect.
-    if (result == AAUDIO_OK) {
-        (void) sendCurrentTimestamp();
-    }
-    ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< TIMESTAMPS",
+    ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< COMMANDS",
           __func__, getTypeText(), loopCount);
 }
 
 void AAudioServiceStreamBase::disconnect() {
-    std::lock_guard<std::mutex> lock(mLock);
-    disconnect_l();
+    sendCommand(DISCONNECT);
 }
 
 void AAudioServiceStreamBase::disconnect_l() {
@@ -461,15 +525,21 @@
     }
 }
 
-aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId,
-        int priority) {
-    std::lock_guard<std::mutex> lock(mLock);
+aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId, int priority) {
+    const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+    return sendCommand(REGISTER_AUDIO_THREAD,
+            std::make_shared<RegisterAudioThreadParam>(ownerPid, clientThreadId, priority),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+}
+
+aaudio_result_t AAudioServiceStreamBase::registerAudioThread_l(
+        pid_t ownerPid, pid_t clientThreadId, int priority) {
     aaudio_result_t result = AAUDIO_OK;
     if (getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
         ALOGE("AAudioService::registerAudioThread(), thread already registered");
         result = AAUDIO_ERROR_INVALID_STATE;
     } else {
-        const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
         setRegisteredThread(clientThreadId);
         int err = android::requestPriority(ownerPid, clientThreadId,
                                            priority, true /* isForApp */);
@@ -483,7 +553,13 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread(pid_t clientThreadId) {
-    std::lock_guard<std::mutex> lock(mLock);
+    return sendCommand(UNREGISTER_AUDIO_THREAD,
+            std::make_shared<UnregisterAudioThreadParam>(clientThreadId),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+}
+
+aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread_l(pid_t clientThreadId) {
     aaudio_result_t result = AAUDIO_OK;
     if (getRegisteredThread() != clientThreadId) {
         ALOGE("%s(), wrong thread", __func__);
@@ -552,7 +628,7 @@
     return sendServiceEvent(AAUDIO_SERVICE_EVENT_XRUN, (int64_t) xRunCount);
 }
 
-aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
+aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp_l() {
     AAudioServiceMessage command;
     // It is not worth filling up the queue with timestamps.
     // That can cause the stream to get suspended.
@@ -562,8 +638,8 @@
     }
 
     // Send a timestamp for the clock model.
-    aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
-                                                    &command.timestamp.timestamp);
+    aaudio_result_t result = getFreeRunningPosition_l(&command.timestamp.position,
+                                                      &command.timestamp.timestamp);
     if (result == AAUDIO_OK) {
         ALOGV("%s() SERVICE  %8lld at %lld", __func__,
               (long long) command.timestamp.position,
@@ -573,8 +649,8 @@
 
         if (result == AAUDIO_OK) {
             // Send a hardware timestamp for presentation time.
-            result = getHardwareTimestamp(&command.timestamp.position,
-                                          &command.timestamp.timestamp);
+            result = getHardwareTimestamp_l(&command.timestamp.position,
+                                            &command.timestamp.timestamp);
             if (result == AAUDIO_OK) {
                 ALOGV("%s() HARDWARE %8lld at %lld", __func__,
                       (long long) command.timestamp.position,
@@ -596,7 +672,14 @@
  * used to communicate with the underlying HAL or Service.
  */
 aaudio_result_t AAudioServiceStreamBase::getDescription(AudioEndpointParcelable &parcelable) {
-    std::lock_guard<std::mutex> lock(mLock);
+    return sendCommand(
+            GET_DESCRIPTION,
+            std::make_shared<GetDescriptionParam>(&parcelable),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+}
+
+aaudio_result_t AAudioServiceStreamBase::getDescription_l(AudioEndpointParcelable* parcelable) {
     {
         std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
         if (mUpMessageQueue == nullptr) {
@@ -605,11 +688,50 @@
         }
         // Gather information on the message queue.
         mUpMessageQueue->fillParcelable(parcelable,
-                                        parcelable.mUpMessageQueueParcelable);
+                                        parcelable->mUpMessageQueueParcelable);
     }
-    return getAudioDataDescription(parcelable);
+    return getAudioDataDescription_l(parcelable);
+}
+
+aaudio_result_t AAudioServiceStreamBase::exitStandby(AudioEndpointParcelable *parcelable) {
+    auto command = std::make_shared<AAudioCommand>(
+            EXIT_STANDBY,
+            std::make_shared<ExitStandbyParam>(parcelable),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
 }
 
 void AAudioServiceStreamBase::onVolumeChanged(float volume) {
     sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
 }
+
+aaudio_result_t AAudioServiceStreamBase::sendCommand(aaudio_command_opcode opCode,
+                                                     std::shared_ptr<AAudioCommandParam> param,
+                                                     bool waitForReply,
+                                                     int64_t timeoutNanos) {
+    return mCommandQueue.sendCommand(std::make_shared<AAudioCommand>(
+            opCode, param, waitForReply, timeoutNanos));
+}
+
+aaudio_result_t AAudioServiceStreamBase::closeAndClear() {
+    aaudio_result_t result = AAUDIO_OK;
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        result = AAUDIO_ERROR_INVALID_STATE;
+    } else {
+        endpoint->unregisterStream(this);
+        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+        endpointManager.closeEndpoint(endpoint);
+
+        // AAudioService::closeStream() prevents two threads from closing at the same time.
+        mServiceEndpoint.clear(); // endpoint will hold the pointer after this method returns.
+    }
+
+    setState(AAUDIO_STREAM_STATE_CLOSED);
+
+    mediametrics::LogItem(mMetricsId)
+        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CLOSE)
+        .record();
+    return result;
+}
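The pattern used throughout this file is now: a public entry point packages its arguments into an AAudioCommandParam subclass and blocks in sendCommand(), and the command thread executes the matching *_l() method under mLock. A condensed sketch of adding one more command in that style; FOO, FooParam and foo_l() are purely illustrative and do not exist in this change:

// Hypothetical param class and entry point following the pattern above.
class FooParam : public AAudioCommandParam {
public:
    explicit FooParam(int32_t value) : mValue(value) { }
    int32_t mValue;
};

aaudio_result_t AAudioServiceStreamBase::foo(int32_t value) {
    // Post to the command thread and wait up to TIMEOUT_NANOS for foo_l()'s result.
    return sendCommand(FOO, std::make_shared<FooParam>(value),
                       true /*waitForReply*/, TIMEOUT_NANOS);
}

// In run(), inside the switch on command->operationCode:
//     case FOO: {
//         auto* param = (FooParam*) command->parameter.get();
//         command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
//                                            : foo_l(param->mValue);
//     } break;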
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 976996d..b2ba725 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -33,8 +33,10 @@
 #include "utility/AAudioUtilities.h"
 #include "utility/AudioClock.h"
 
-#include "SharedRingBuffer.h"
+#include "AAudioCommandQueue.h"
 #include "AAudioThread.h"
+#include "SharedRingBuffer.h"
+#include "TimestampScheduler.h"
 
 namespace android {
     class AAudioService;
@@ -114,6 +116,11 @@
      */
     aaudio_result_t flush() EXCLUDES(mLock);
 
+    /**
+     * Exit standby mode. The MMAP buffer will be reallocated.
+     */
+    aaudio_result_t exitStandby(AudioEndpointParcelable *parcelable) EXCLUDES(mLock);
+
     virtual aaudio_result_t startClient(const android::AudioClient& client,
                                         const audio_attributes_t *attr __unused,
                                         audio_port_handle_t *clientHandle __unused) {
@@ -235,10 +242,46 @@
     aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
                          aaudio_sharing_mode_t sharingMode);
 
+    aaudio_result_t start_l() REQUIRES(mLock);
     virtual aaudio_result_t close_l() REQUIRES(mLock);
     virtual aaudio_result_t pause_l() REQUIRES(mLock);
     virtual aaudio_result_t stop_l() REQUIRES(mLock);
     void disconnect_l() REQUIRES(mLock);
+    aaudio_result_t flush_l() REQUIRES(mLock);
+
+    class RegisterAudioThreadParam : public AAudioCommandParam {
+    public:
+        RegisterAudioThreadParam(pid_t ownerPid, pid_t clientThreadId, int priority)
+                : AAudioCommandParam(), mOwnerPid(ownerPid),
+                  mClientThreadId(clientThreadId), mPriority(priority) { }
+        ~RegisterAudioThreadParam() = default;
+
+        pid_t mOwnerPid;
+        pid_t mClientThreadId;
+        int mPriority;
+    };
+    aaudio_result_t registerAudioThread_l(
+            pid_t ownerPid, pid_t clientThreadId, int priority) REQUIRES(mLock);
+
+    class UnregisterAudioThreadParam : public AAudioCommandParam {
+    public:
+        UnregisterAudioThreadParam(pid_t clientThreadId)
+                : AAudioCommandParam(), mClientThreadId(clientThreadId) { }
+        ~UnregisterAudioThreadParam() = default;
+
+        pid_t mClientThreadId;
+    };
+    aaudio_result_t unregisterAudioThread_l(pid_t clientThreadId) REQUIRES(mLock);
+
+    class GetDescriptionParam : public AAudioCommandParam {
+    public:
+        GetDescriptionParam(AudioEndpointParcelable* parcelable)
+                : AAudioCommandParam(), mParcelable(parcelable) { }
+        ~GetDescriptionParam() = default;
+
+        AudioEndpointParcelable* mParcelable;
+    };
+    aaudio_result_t getDescription_l(AudioEndpointParcelable* parcelable) REQUIRES(mLock);
 
     void setState(aaudio_stream_state_t state);
 
@@ -250,7 +293,7 @@
 
     aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
 
-    aaudio_result_t sendCurrentTimestamp() EXCLUDES(mLock);
+    aaudio_result_t sendCurrentTimestamp_l() REQUIRES(mLock);
 
     aaudio_result_t sendXRunCount(int32_t xRunCount);
 
@@ -259,11 +302,13 @@
      * @param timeNanos
      * @return AAUDIO_OK or AAUDIO_ERROR_UNAVAILABLE or other negative error
      */
-    virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
+    virtual aaudio_result_t getFreeRunningPosition_l(
+            int64_t *positionFrames, int64_t *timeNanos) = 0;
 
-    virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) = 0;
+    virtual aaudio_result_t getHardwareTimestamp_l(int64_t *positionFrames, int64_t *timeNanos) = 0;
 
-    virtual aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) = 0;
+    virtual aaudio_result_t getAudioDataDescription_l(AudioEndpointParcelable* parcelable) = 0;
+
 
     aaudio_stream_state_t   mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
@@ -274,14 +319,53 @@
         mDisconnected = flag;
     }
 
+    virtual aaudio_result_t standby_l() REQUIRES(mLock) {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+    class ExitStandbyParam : public AAudioCommandParam {
+    public:
+        ExitStandbyParam(AudioEndpointParcelable* parcelable)
+                : AAudioCommandParam(), mParcelable(parcelable) { }
+        ~ExitStandbyParam() = default;
+
+        AudioEndpointParcelable* mParcelable;
+    };
+    virtual aaudio_result_t exitStandby_l(
+            AudioEndpointParcelable* parcelable __unused) REQUIRES(mLock) {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+    bool isStandby_l() const REQUIRES(mLock) {
+        return mStandby;
+    }
+    void setStandby_l(bool standby) REQUIRES(mLock) {
+        mStandby = standby;
+    }
+
+    bool isIdle_l() const REQUIRES(mLock) {
+        return mState == AAUDIO_STREAM_STATE_OPEN || mState == AAUDIO_STREAM_STATE_PAUSED
+                || mState == AAUDIO_STREAM_STATE_STOPPED;
+    }
+
     pid_t                   mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
     std::mutex              mUpMessageQueueLock;
     std::shared_ptr<SharedRingBuffer> mUpMessageQueue;
 
-    AAudioThread            mTimestampThread;
-    // This is used by one thread to tell another thread to exit. So it must be atomic.
+    enum : int32_t {
+        START,
+        PAUSE,
+        STOP,
+        FLUSH,
+        CLOSE,
+        DISCONNECT,
+        REGISTER_AUDIO_THREAD,
+        UNREGISTER_AUDIO_THREAD,
+        GET_DESCRIPTION,
+        EXIT_STANDBY,
+    };
+    AAudioThread            mCommandThread;
     std::atomic<bool>       mThreadEnabled{false};
+    AAudioCommandQueue      mCommandQueue;
 
     int32_t                 mFramesPerBurst = 0;
     android::AudioClient    mMmapClient; // set in open, used in MMAP start()
@@ -315,6 +399,13 @@
     aaudio_result_t sendServiceEvent(aaudio_service_event_t event,
                                      double dataDouble);
 
+    aaudio_result_t sendCommand(aaudio_command_opcode opCode,
+                                std::shared_ptr<AAudioCommandParam> param = nullptr,
+                                bool waitForReply = false,
+                                int64_t timeoutNanos = 0);
+
+    aaudio_result_t closeAndClear();
+
     /**
      * @return true if the queue is getting full.
      */
@@ -333,9 +424,13 @@
 
     bool                    mDisconnected GUARDED_BY(mLock) {false};
 
+    bool                    mStandby GUARDED_BY(mLock) = false;
+
 protected:
     // Locking order is important.
     // Acquire mLock before acquiring AAudioServiceEndpoint::mLockStreams
+    // The lock will be held by the command thread. All operations needing the lock must run from
+    // the command thread.
     std::mutex              mLock; // Prevent start/stop/close etcetera from colliding
 };
 
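Editorial note on the header changes above: per-stream control (start, stop, flush, register/unregister audio thread, get description, exit standby) is now funneled through a single command thread. Binder-side entry points enqueue an opcode plus an optional AAudioCommandParam subclass, and the command thread executes the matching *_l() handler; the comment added to mLock reflects that the lock is effectively owned by that thread. The following is a minimal, self-contained sketch of that producer/consumer shape under hypothetical names (CommandQueue, Command, sendCommand are illustrative only, not the real AAudioCommandQueue API, and the reply/timeout plumbing is simplified):

    // Hypothetical sketch of a command queue driven by one worker thread.
    #include <condition_variable>
    #include <cstdint>
    #include <deque>
    #include <memory>
    #include <mutex>

    struct CommandParam { virtual ~CommandParam() = default; };   // payload base class

    struct Command {
        int32_t opCode = 0;                     // e.g. START, STOP, GET_DESCRIPTION, ...
        std::shared_ptr<CommandParam> param;    // optional payload
        bool waitForReply = false;
        std::mutex replyLock;
        std::condition_variable replyCond;
        bool done = false;
        int result = 0;
    };

    class CommandQueue {
    public:
        // Called from binder threads: enqueue, then optionally block for the result.
        int sendCommand(int32_t opCode, std::shared_ptr<CommandParam> param, bool waitForReply) {
            auto cmd = std::make_shared<Command>();
            cmd->opCode = opCode;
            cmd->param = std::move(param);
            cmd->waitForReply = waitForReply;
            {
                std::lock_guard<std::mutex> lock(mQueueLock);
                mCommands.push_back(cmd);
            }
            mWorkAvailable.notify_one();
            if (!waitForReply) {
                return 0;
            }
            std::unique_lock<std::mutex> lock(cmd->replyLock);
            cmd->replyCond.wait(lock, [&] { return cmd->done; });
            return cmd->result;
        }

        // Runs on the single command thread. Because only this thread executes
        // commands, the per-command handlers never race with each other.
        void threadLoop() {
            for (;;) {
                std::shared_ptr<Command> cmd;
                {
                    std::unique_lock<std::mutex> lock(mQueueLock);
                    mWorkAvailable.wait(lock, [&] { return !mCommands.empty(); });
                    cmd = mCommands.front();
                    mCommands.pop_front();
                }
                const bool exitLoop = (cmd->opCode < 0);     // negative opcode: stop the loop
                const int result = exitLoop ? 0 : execute(*cmd);
                if (cmd->waitForReply) {
                    std::lock_guard<std::mutex> lock(cmd->replyLock);
                    cmd->result = result;
                    cmd->done = true;
                    cmd->replyCond.notify_all();
                }
                if (exitLoop) {
                    break;
                }
            }
        }

    private:
        // A real implementation would switch on the opcode and invoke the matching
        // locked handler (start, stop, register thread, get description, ...).
        int execute(const Command& cmd) { (void)cmd; return 0; }

        std::mutex mQueueLock;
        std::condition_variable mWorkAvailable;
        std::deque<std::shared_ptr<Command>> mCommands;
    };

Serializing everything onto one consumer thread is what makes the REQUIRES(mLock) annotations on the new *_l() variants workable: there is exactly one caller, so operations such as sendCurrentTimestamp_l() and getDescription_l() never contend with start/stop from binder threads.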
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 57dc1ab..ec9b2e2 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -117,6 +117,35 @@
     return result;
 }
 
+aaudio_result_t AAudioServiceStreamMMAP::standby_l() {
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_result_t result = endpoint->standby();
+    if (result == AAUDIO_OK) {
+        setStandby_l(true);
+    }
+    return result;
+}
+
+aaudio_result_t AAudioServiceStreamMMAP::exitStandby_l(AudioEndpointParcelable* parcelable) {
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_result_t result = endpoint->exitStandby(parcelable);
+    if (result == AAUDIO_OK) {
+        setStandby_l(false);
+    } else {
+        ALOGE("%s failed, result %d, disconnecting stream.", __func__, result);
+        disconnect_l();
+    }
+    return result;
+}
+
 aaudio_result_t AAudioServiceStreamMMAP::startClient(const android::AudioClient& client,
                                                      const audio_attributes_t *attr,
                                                      audio_port_handle_t *clientHandle) {
@@ -141,7 +170,7 @@
 }
 
 // Get free-running DSP or DMA hardware position from the HAL.
-aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
+aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition_l(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
@@ -158,16 +187,15 @@
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
     } else if (result != AAUDIO_ERROR_UNAVAILABLE) {
-        disconnect();
+        disconnect_l();
     }
     return result;
 }
 
 // Get timestamp from presentation position.
 // If it fails, get timestamp that was written by getFreeRunningPosition()
-aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
+aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp_l(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
-
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
         ALOGE("%s() has no endpoint", __func__);
@@ -176,17 +204,17 @@
     sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
             static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
 
-    // Disable this code temporarily because the HAL is not returning
-    // a useful result.
-#if 0
     uint64_t position;
-    if (serviceEndpointMMAP->getExternalPosition(&position, timeNanos) == AAUDIO_OK) {
-        ALOGD("%s() getExternalPosition() says pos = %" PRIi64 ", time = %" PRIi64,
+    aaudio_result_t result = serviceEndpointMMAP->getExternalPosition(&position, timeNanos);
+    if (result == AAUDIO_OK) {
+        ALOGV("%s() getExternalPosition() says pos = %" PRIi64 ", time = %" PRIi64,
                 __func__, position, *timeNanos);
         *positionFrames = (int64_t) position;
         return AAUDIO_OK;
-    } else
-#endif
+    } else {
+        ALOGV("%s() getExternalPosition() returns error %d", __func__, result);
+    }
+
     if (mAtomicStreamTimestamp.isValid()) {
         Timestamp timestamp = mAtomicStreamTimestamp.read();
         *positionFrames = timestamp.getPosition();
@@ -198,8 +226,8 @@
 }
 
 // Get an immutable description of the data queue from the HAL.
-aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription(
-        AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription_l(
+        AudioEndpointParcelable* parcelable)
 {
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index 667465a..cd8c91e 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -71,12 +71,18 @@
 
     aaudio_result_t stop_l() REQUIRES(mLock) override;
 
-    aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
+    aaudio_result_t standby_l() REQUIRES(mLock) override;
 
-    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames,
-            int64_t *timeNanos) EXCLUDES(mLock) override;
+    aaudio_result_t exitStandby_l(AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
 
-    aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getAudioDataDescription_l(
+            AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
+
+    aaudio_result_t getFreeRunningPosition_l(int64_t *positionFrames,
+            int64_t *timeNanos) REQUIRES(mLock) override;
+
+    aaudio_result_t getHardwareTimestamp_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
     /**
      * Device specific startup.
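Editorial note: the standby_l()/exitStandby_l() overrides declared above delegate to the endpoint and track the result in mStandby; a failed exit disconnects the stream because the MMAP buffer can no longer be re-created. A tiny state-machine sketch of that lifecycle, with hypothetical names (releaseBuffer/reallocateBuffer stand in for the real endpoint calls):

    // Hypothetical sketch of the standby lifecycle for an MMAP-style stream.
    #include <cstdio>

    enum class StreamState { Active, Standby, Disconnected };

    class MmapStreamSketch {
    public:
        // Enter standby: release the shared MMAP buffer while the stream is idle.
        bool enterStandby() {
            if (mState != StreamState::Active) return false;
            releaseBuffer();
            mState = StreamState::Standby;
            return true;
        }

        // Exit standby: reallocate the buffer. On failure the stream is unusable
        // and is disconnected, mirroring the error path in exitStandby_l() above.
        bool exitStandby() {
            if (mState != StreamState::Standby) return false;
            if (!reallocateBuffer()) {
                mState = StreamState::Disconnected;
                return false;
            }
            mState = StreamState::Active;
            return true;
        }

        StreamState state() const { return mState; }

    private:
        void releaseBuffer() { std::puts("buffer released"); }
        bool reallocateBuffer() { std::puts("buffer reallocated"); return true; }

        StreamState mState = StreamState::Active;
    };

The base class's isIdle_l() check (OPEN, PAUSED, or STOPPED) is what gates the transition into standby, and the new exitStandby() service call returns a fresh AudioEndpointParcelable so the client can remap the reallocated buffer.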
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index ad06d97..04fcd6d 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -211,8 +211,8 @@
 /**
  * Get an immutable description of the data queue created by this service.
  */
-aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription(
-        AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription_l(
+        AudioEndpointParcelable* parcelable)
 {
     std::lock_guard<std::mutex> lock(audioDataQueueLock);
     if (mAudioDataQueue == nullptr) {
@@ -221,8 +221,8 @@
     }
     // Gather information on the data queue.
     mAudioDataQueue->fillParcelable(parcelable,
-                                    parcelable.mDownDataQueueParcelable);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
+                                    parcelable->mDownDataQueueParcelable);
+    parcelable->mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
     return AAUDIO_OK;
 }
 
@@ -231,8 +231,8 @@
 }
 
 // Get timestamp that was written by mixer or distributor.
-aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
-                                                                  int64_t *timeNanos) {
+aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition_l(int64_t *positionFrames,
+                                                                    int64_t *timeNanos) {
     // TODO Get presentation timestamp from the HAL
     if (mAtomicStreamTimestamp.isValid()) {
         Timestamp timestamp = mAtomicStreamTimestamp.read();
@@ -245,8 +245,8 @@
 }
 
 // Get timestamp from lower level service.
-aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames,
-                                                                int64_t *timeNanos) {
+aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp_l(int64_t *positionFrames,
+                                                                  int64_t *timeNanos) {
 
     int64_t position = 0;
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index 4fae5b4..78f9787 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -88,11 +88,14 @@
 
 protected:
 
-    aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
+    aaudio_result_t getAudioDataDescription_l(
+            AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
 
-    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getFreeRunningPosition_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
-    aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getHardwareTimestamp_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
     /**
      * @param requestedCapacityFrames
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
index 68496ac..549fa59 100644
--- a/services/oboeservice/AAudioThread.cpp
+++ b/services/oboeservice/AAudioThread.cpp
@@ -16,9 +16,10 @@
 
 #define LOG_TAG "AAudioThread"
 //#define LOG_NDEBUG 0
-#include <utils/Log.h>
 
-#include <pthread.h>
+#include <system_error>
+
+#include <utils/Log.h>
 
 #include <aaudio/AAudio.h>
 #include <utility/AAudioUtilities.h>
@@ -38,7 +39,7 @@
 }
 
 AAudioThread::~AAudioThread() {
-    ALOGE_IF(pthread_equal(pthread_self(), mThread),
+    ALOGE_IF(mThread.get_id() == std::this_thread::get_id(),
             "%s() destructor running in thread", __func__);
     ALOGE_IF(mHasThread, "%s() thread never joined", __func__);
 }
@@ -60,32 +61,16 @@
     }
 }
 
-// This is the entry point for the new thread created by createThread_l().
-// It converts the 'C' function call to a C++ method call.
-static void * AAudioThread_internalThreadProc(void *arg) {
-    AAudioThread *aaudioThread = (AAudioThread *) arg;
-    aaudioThread->dispatch();
-    return nullptr;
-}
-
 aaudio_result_t AAudioThread::start(Runnable *runnable) {
     if (mHasThread) {
         ALOGE("start() - mHasThread already true");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // mRunnable will be read by the new thread when it starts.
-    // pthread_create() forces a memory synchronization so mRunnable does not need to be atomic.
+    // mRunnable will be read by the new thread when it starts. A std::thread is created.
     mRunnable = runnable;
-    int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
-    if (err != 0) {
-        ALOGE("start() - pthread_create() returned %d %s", err, strerror(err));
-        return AAudioConvert_androidToAAudioResult(-err);
-    } else {
-        int err = pthread_setname_np(mThread, mName);
-        ALOGW_IF((err != 0), "Could not set name of AAudioThread. err = %d", err);
-        mHasThread = true;
-        return AAUDIO_OK;
-    }
+    mHasThread = true;
+    mThread = std::thread(&AAudioThread::dispatch, this);
+    return AAUDIO_OK;
 }
 
 aaudio_result_t AAudioThread::stop() {
@@ -93,18 +78,18 @@
         ALOGE("stop() but no thread running");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Check to see if the thread is trying to stop itself.
-    if (pthread_equal(pthread_self(), mThread)) {
-        ALOGE("%s() attempt to pthread_join() from launched thread!", __func__);
-        return AAUDIO_ERROR_INTERNAL;
-    }
 
-    int err = pthread_join(mThread, nullptr);
-    if (err != 0) {
-        ALOGE("stop() - pthread_join() returned %d %s", err, strerror(err));
-        return AAudioConvert_androidToAAudioResult(-err);
-    } else {
+    if (mThread.get_id() == std::this_thread::get_id()) {
+        // The thread must not be joined by itself.
+        ALOGE("%s() attempt to join() from launched thread!", __func__);
+        return AAUDIO_ERROR_INTERNAL;
+    } else if (mThread.joinable()) {
+        // Double check if the thread is joinable to avoid exception when calling join.
+        // Double-check that the thread is joinable to avoid an exception when calling join().
+        mThread.join();
         mHasThread = false;
         return AAUDIO_OK;
+    } else {
+        ALOGE("%s() the thread is not joinable", __func__);
+        return AAUDIO_ERROR_INTERNAL;
     }
 }
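Editorial note: the pthread-to-std::thread rewrite above keeps the same contract as before: start() may only be called while no thread is running, and stop() must be called from a different thread because a thread cannot join itself. A compact, self-contained illustration of that contract (a hypothetical Worker class, not the AAudioThread API):

    // Hypothetical Worker showing the start()/stop() contract used above:
    // no self-join, and join() only when the thread is actually joinable.
    #include <cstdio>
    #include <thread>

    class Worker {
    public:
        bool start() {
            if (mHasThread) return false;            // already running
            mHasThread = true;
            mThread = std::thread(&Worker::run, this);
            return true;
        }

        bool stop() {
            if (!mHasThread) return false;           // nothing to join
            if (mThread.get_id() == std::this_thread::get_id()) {
                return false;                        // a thread must not join itself
            }
            if (!mThread.joinable()) return false;   // guard against double join
            mThread.join();
            mHasThread = false;
            return true;
        }

    private:
        void run() { std::puts("worker running"); }

        std::thread mThread;
        bool mHasThread = false;
    };

    int main() {
        Worker w;
        w.start();
        w.stop();        // always called from another thread, never from run()
        return 0;
    }

One design difference from the pthread version: std::thread reports a failed launch by throwing std::system_error rather than returning an error code, which is presumably why <system_error> is now included above.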
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index 08a8a98..b2774e0 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -18,7 +18,7 @@
 #define AAUDIO_THREAD_H
 
 #include <atomic>
-#include <pthread.h>
+#include <thread>
 
 #include <aaudio/AAudio.h>
 
@@ -37,7 +37,6 @@
 
 /**
  * Abstraction for a host dependent thread.
- * TODO Consider using Android "Thread" class or std::thread instead.
  */
 class AAudioThread
 {
@@ -73,7 +72,7 @@
 
     Runnable    *mRunnable = nullptr;
     bool         mHasThread = false;
-    pthread_t    mThread = {};
+    std::thread  mThread;
 
     static std::atomic<uint32_t> mNextThreadIndex;
     char         mName[16]; // max length for a pthread_name
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 3563d66..80e4296 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -27,6 +27,7 @@
 
     srcs: [
         "AAudioClientTracker.cpp",
+        "AAudioCommandQueue.cpp",
         "AAudioEndpointManager.cpp",
         "AAudioMixer.cpp",
         "AAudioService.cpp",
@@ -69,6 +70,7 @@
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "packagemanager_aidl-cpp",
+        "android.media.audio.common.types-V1-cpp",
     ],
 
     export_shared_lib_headers: [
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index c1d4e16..fd2a454 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -85,9 +85,9 @@
     return AAUDIO_OK;
 }
 
-void SharedRingBuffer::fillParcelable(AudioEndpointParcelable &endpointParcelable,
+void SharedRingBuffer::fillParcelable(AudioEndpointParcelable* endpointParcelable,
                     RingBufferParcelable &ringBufferParcelable) {
-    int fdIndex = endpointParcelable.addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
+    int fdIndex = endpointParcelable->addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
     ringBufferParcelable.setupMemory(fdIndex,
                                      SHARED_RINGBUFFER_DATA_OFFSET,
                                      mDataMemorySizeInBytes,
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index c3a9bb7..cff1261 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -45,7 +45,7 @@
 
     aaudio_result_t allocate(android::fifo_frames_t bytesPerFrame, android::fifo_frames_t capacityInFrames);
 
-    void fillParcelable(AudioEndpointParcelable &endpointParcelable,
+    void fillParcelable(AudioEndpointParcelable* endpointParcelable,
                         RingBufferParcelable &ringBufferParcelable);
 
     /**
diff --git a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
index 17e8d36..5e48955 100644
--- a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
+++ b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
@@ -180,6 +180,11 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
+    aaudio_result_t exitStandby(aaudio_handle_t streamHandle UNUSED_PARAM,
+                                AudioEndpointParcelable &parcelable UNUSED_PARAM) override {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
     void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) {}
 
     int getDeathCount() { return mDeathCount; }
diff --git a/services/tuner/.clang-format b/services/tuner/.clang-format
new file mode 100644
index 0000000..f14cc88
--- /dev/null
+++ b/services/tuner/.clang-format
@@ -0,0 +1,33 @@
+---
+BasedOnStyle: Google
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: true
+AllowShortLoopsOnASingleLine: true
+BinPackArguments: true
+BinPackParameters: true
+CommentPragmas: NOLINT:.*
+ContinuationIndentWidth: 8
+DerivePointerAlignment: false
+IndentWidth: 4
+PointerAlignment: Left
+TabWidth: 4
+
+# Deviations from the above file:
+# "Don't indent the section label"
+AccessModifierOffset: -4
+# "Each line of text in your code should be at most 100 columns long."
+ColumnLimit: 100
+# "Constructor initializer lists can be all on one line or with subsequent
+# lines indented eight spaces.". clang-format does not support having the colon
+# on the same line as the constructor function name, so this is the best
+# approximation of that rule, which makes all entries in the list (except the
+# first one) have an eight space indentation.
+ConstructorInitializerIndentWidth: 6
+# There is nothing in go/droidcppstyle about case labels, but there seems to be
+# more code that does not indent the case labels in frameworks/base.
+IndentCaseLabels: false
+# There have been some bugs in which subsequent formatting operations introduce
+# weird comment jumps.
+ReflowComments: false
+# Android does support C++11 now.
+Standard: Cpp11
\ No newline at end of file
diff --git a/services/tuner/Android.bp b/services/tuner/Android.bp
index 1dcfe53..ec62d4e 100644
--- a/services/tuner/Android.bp
+++ b/services/tuner/Android.bp
@@ -7,33 +7,15 @@
     default_applicable_licenses: ["frameworks_av_license"],
 }
 
-filegroup {
-    name: "tv_tuner_aidl",
-    srcs: [
-        "aidl/android/media/tv/tuner/*.aidl",
-    ],
-    path: "aidl",
-}
-
-filegroup {
-    name: "tv_tuner_frontend_info",
-    srcs: [
-        "aidl/android/media/tv/tuner/TunerFrontendInfo.aidl",
-        "aidl/android/media/tv/tuner/TunerFrontend*Capabilities.aidl",
-    ],
-    path: "aidl",
-}
-
 aidl_interface {
     name: "tv_tuner_aidl_interface",
     unstable: true,
     local_include_dir: "aidl",
-    srcs: [
-        ":tv_tuner_aidl",
-    ],
+    srcs: ["aidl/android/media/tv/tuner/*.aidl"],
     imports: [
         "android.hardware.common-V2",
         "android.hardware.common.fmq-V1",
+        "android.hardware.tv.tuner-V1",
     ],
 
     backend: {
@@ -49,37 +31,18 @@
     },
 }
 
-aidl_interface {
-    name: "tv_tuner_frontend_info_aidl_interface",
-    unstable: true,
-    local_include_dir: "aidl",
-    srcs: [
-        ":tv_tuner_frontend_info",
-    ],
-
-    backend: {
-        java: {
-            enabled: true,
-        },
-        cpp: {
-            enabled: true,
-        },
-        ndk: {
-            enabled: true,
-        },
-    },
-}
-
 cc_library {
     name: "libtunerservice",
 
     srcs: [
         "Tuner*.cpp",
+        "hidl/Tuner*.cpp",
     ],
 
     shared_libs: [
         "android.hardware.tv.tuner@1.0",
         "android.hardware.tv.tuner@1.1",
+        "android.hardware.tv.tuner-V1-ndk",
         "libbase",
         "libbinder",
         "libbinder_ndk",
@@ -92,7 +55,6 @@
         "packagemanager_aidl-cpp",
         "tv_tuner_aidl_interface-ndk",
         "tv_tuner_resource_manager_aidl_interface-ndk",
-        "tv_tuner_resource_manager_aidl_interface-cpp",
     ],
 
     static_libs: [
@@ -123,6 +85,7 @@
     shared_libs: [
         "android.hardware.tv.tuner@1.0",
         "android.hardware.tv.tuner@1.1",
+        "android.hardware.tv.tuner-V1-ndk",
         "libbase",
         "libbinder",
         "libfmq",
@@ -130,7 +93,6 @@
         "libtunerservice",
         "libutils",
         "tv_tuner_resource_manager_aidl_interface-ndk",
-        "tv_tuner_resource_manager_aidl_interface-cpp",
     ],
 
     static_libs: [
diff --git a/services/tuner/OWNERS b/services/tuner/OWNERS
index 0ceb8e8..bf9fe34 100644
--- a/services/tuner/OWNERS
+++ b/services/tuner/OWNERS
@@ -1,2 +1,2 @@
-nchalko@google.com
+hgchen@google.com
 quxiangfang@google.com
diff --git a/services/tuner/TunerDemux.cpp b/services/tuner/TunerDemux.cpp
index 1122368..a6f3a2c 100644
--- a/services/tuner/TunerDemux.cpp
+++ b/services/tuner/TunerDemux.cpp
@@ -16,23 +16,32 @@
 
 #define LOG_TAG "TunerDemux"
 
-#include "TunerDvr.h"
 #include "TunerDemux.h"
+
+#include <aidl/android/hardware/tv/tuner/IDvr.h>
+#include <aidl/android/hardware/tv/tuner/IDvrCallback.h>
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
+#include <aidl/android/hardware/tv/tuner/IFilterCallback.h>
+#include <aidl/android/hardware/tv/tuner/ITimeFilter.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerDvr.h"
 #include "TunerTimeFilter.h"
 
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::DvrType;
-using ::android::hardware::tv::tuner::V1_0::Result;
+using ::aidl::android::hardware::tv::tuner::IDvr;
+using ::aidl::android::hardware::tv::tuner::IDvrCallback;
+using ::aidl::android::hardware::tv::tuner::IFilter;
+using ::aidl::android::hardware::tv::tuner::IFilterCallback;
+using ::aidl::android::hardware::tv::tuner::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::Result;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerDemux::TunerDemux(sp<IDemux> demux, int id) {
+TunerDemux::TunerDemux(shared_ptr<IDemux> demux, int id) {
     mDemux = demux;
     mDemuxId = id;
 }
@@ -41,192 +50,143 @@
     mDemux = nullptr;
 }
 
-Status TunerDemux::setFrontendDataSource(const std::shared_ptr<ITunerFrontend>& frontend) {
+::ndk::ScopedAStatus TunerDemux::setFrontendDataSource(
+        const shared_ptr<ITunerFrontend>& in_frontend) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
     int frontendId;
-    frontend->getFrontendId(&frontendId);
-    Result res = mDemux->setFrontendDataSource(frontendId);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    in_frontend->getFrontendId(&frontendId);
+
+    return mDemux->setFrontendDataSource(frontendId);
 }
 
-Status TunerDemux::openFilter(
-        int type, int subType, int bufferSize, const std::shared_ptr<ITunerFilterCallback>& cb,
-        std::shared_ptr<ITunerFilter>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::setFrontendDataSourceById(int frontendId) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mDemux->setFrontendDataSource(frontendId);
+}
+
+::ndk::ScopedAStatus TunerDemux::openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+                                            const shared_ptr<ITunerFilterCallback>& in_cb,
+                                            shared_ptr<ITunerFilter>* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    shared_ptr<IFilter> filter;
+    shared_ptr<TunerFilter::FilterCallback> filterCb =
+            ::ndk::SharedRefBase::make<TunerFilter::FilterCallback>(in_cb);
+    shared_ptr<IFilterCallback> cb = filterCb;
+    auto status = mDemux->openFilter(in_type, in_bufferSize, cb, &filter);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerFilter>(filter, filterCb, in_type);
+    }
+
+    return status;
+}
+
+::ndk::ScopedAStatus TunerDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    DemuxFilterMainType mainType = static_cast<DemuxFilterMainType>(type);
-    DemuxFilterType filterType {
-        .mainType = mainType,
-    };
-
-    switch(mainType) {
-        case DemuxFilterMainType::TS:
-            filterType.subType.tsFilterType(static_cast<DemuxTsFilterType>(subType));
-            break;
-        case DemuxFilterMainType::MMTP:
-            filterType.subType.mmtpFilterType(static_cast<DemuxMmtpFilterType>(subType));
-            break;
-        case DemuxFilterMainType::IP:
-            filterType.subType.ipFilterType(static_cast<DemuxIpFilterType>(subType));
-            break;
-        case DemuxFilterMainType::TLV:
-            filterType.subType.tlvFilterType(static_cast<DemuxTlvFilterType>(subType));
-            break;
-        case DemuxFilterMainType::ALP:
-            filterType.subType.alpFilterType(static_cast<DemuxAlpFilterType>(subType));
-            break;
-    }
-    Result status;
-    sp<IFilter> filterSp;
-    sp<IFilterCallback> cbSp = new TunerFilter::FilterCallback(cb);
-    mDemux->openFilter(filterType, bufferSize, cbSp,
-            [&](Result r, const sp<IFilter>& filter) {
-                filterSp = filter;
-                status = r;
-            });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    shared_ptr<ITimeFilter> filter;
+    auto status = mDemux->openTimeFilter(&filter);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerTimeFilter>(filter);
     }
 
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerFilter>(filterSp, type, subType);
-    return Status::ok();
+    return status;
 }
 
-Status TunerDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter,
+                                               int32_t* _aidl_return) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    sp<ITimeFilter> filterSp;
-    mDemux->openTimeFilter([&](Result r, const sp<ITimeFilter>& filter) {
-        filterSp = filter;
-        status = r;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerTimeFilter>(filterSp);
-    return Status::ok();
+    shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(tunerFilter.get()))->getHalFilter();
+    return mDemux->getAvSyncHwId(halFilter, _aidl_return);
 }
 
-Status TunerDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter, int* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::getAvSyncTime(int32_t avSyncHwId, int64_t* _aidl_return) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    uint32_t avSyncHwId;
-    Result res;
-    sp<IFilter> halFilter = static_cast<TunerFilter*>(tunerFilter.get())->getHalFilter();
-    mDemux->getAvSyncHwId(halFilter,
-            [&](Result r, uint32_t id) {
-                res = r;
-                avSyncHwId = id;
-            });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    *_aidl_return = (int)avSyncHwId;
-    return Status::ok();
+    return mDemux->getAvSyncTime(avSyncHwId, _aidl_return);
 }
 
-Status TunerDemux::getAvSyncTime(int avSyncHwId, int64_t* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+                                         const shared_ptr<ITunerDvrCallback>& in_cb,
+                                         shared_ptr<ITunerDvr>* _aidl_return) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    uint64_t time;
-    Result res;
-    mDemux->getAvSyncTime(static_cast<uint32_t>(avSyncHwId),
-            [&](Result r, uint64_t ts) {
-                res = r;
-                time = ts;
-            });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    shared_ptr<IDvrCallback> callback = ::ndk::SharedRefBase::make<TunerDvr::DvrCallback>(in_cb);
+    shared_ptr<IDvr> halDvr;
+    auto res = mDemux->openDvr(in_dvbType, in_bufferSize, callback, &halDvr);
+    if (res.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerDvr>(halDvr, in_dvbType);
     }
 
-    *_aidl_return = (int64_t)time;
-    return Status::ok();
+    return res;
 }
 
-Status TunerDemux::openDvr(int dvrType, int bufferSize, const shared_ptr<ITunerDvrCallback>& cb,
-        shared_ptr<ITunerDvr>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::connectCiCam(int32_t ciCamId) {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    sp<IDvrCallback> callback = new TunerDvr::DvrCallback(cb);
-    sp<IDvr> hidlDvr;
-    mDemux->openDvr(static_cast<DvrType>(dvrType), bufferSize, callback,
-            [&](Result r, const sp<IDvr>& dvr) {
-                hidlDvr = dvr;
-                res = r;
-            });
-    if (res != Result::SUCCESS) {
-        *_aidl_return = NULL;
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerDvr>(hidlDvr, dvrType);
-    return Status::ok();
+    return mDemux->connectCiCam(ciCamId);
 }
 
-Status TunerDemux::connectCiCam(int ciCamId) {
+::ndk::ScopedAStatus TunerDemux::disconnectCiCam() {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDemux->connectCiCam(static_cast<uint32_t>(ciCamId));
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDemux->disconnectCiCam();
 }
 
-Status TunerDemux::disconnectCiCam() {
+::ndk::ScopedAStatus TunerDemux::close() {
     if (mDemux == nullptr) {
         ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDemux->disconnectCiCam();
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    auto res = mDemux->close();
+    mDemux = nullptr;
+
+    return res;
 }
 
-Status TunerDemux::close() {
-    if (mDemux == nullptr) {
-        ALOGE("IDemux is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result res = mDemux->close();
-    mDemux = NULL;
-
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
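Editorial note: the TunerDemux rewrite above swaps the HIDL calling convention (result and value delivered through a synchronous lambda, then re-wrapped into a binder Status) for the AIDL NDK convention (value returned through an out-parameter, status returned directly and passed straight through). A schematic comparison using hypothetical Status/Handle stand-ins, not the real HIDL or NDK classes:

    // Hypothetical types standing in for the HIDL and AIDL NDK interfaces.
    #include <functional>
    #include <memory>

    struct Status { int code; bool isOk() const { return code == 0; } };
    struct Handle {};

    // HIDL-style: result and value arrive through a synchronous callback.
    struct HidlHal {
        void open(const std::function<void(int, std::shared_ptr<Handle>)>& cb) {
            cb(/*result=*/0, std::make_shared<Handle>());
        }
    };

    // AIDL-NDK-style: value comes back via an out-parameter, status is the return.
    struct AidlHal {
        Status open(std::shared_ptr<Handle>* out) {
            *out = std::make_shared<Handle>();
            return Status{0};
        }
    };

    // Old shape: unpack the lambda results, then translate the enum into a Status.
    Status openViaHidl(HidlHal& hal, std::shared_ptr<Handle>* out) {
        int result = -1;
        hal.open([&](int r, std::shared_ptr<Handle> h) {
            result = r;
            *out = std::move(h);
        });
        return Status{result};
    }

    // New shape: wrap the value only on success, otherwise forward the HAL's
    // status unchanged -- the pattern used by openFilter()/openDvr() above.
    Status openViaAidl(AidlHal& hal, std::shared_ptr<Handle>* out) {
        std::shared_ptr<Handle> halHandle;
        Status status = hal.open(&halHandle);
        if (status.isOk()) {
            *out = std::move(halHandle);
        }
        return status;
    }

Because the NDK backend already hands back a ::ndk::ScopedAStatus, most of the per-method error translation in the old code collapses into a direct return of the HAL call, as seen in connectCiCam(), disconnectCiCam(), and getAvSyncTime() above.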
diff --git a/services/tuner/TunerDemux.h b/services/tuner/TunerDemux.h
index 2a9836b..cdb3aa0 100644
--- a/services/tuner/TunerDemux.h
+++ b/services/tuner/TunerDemux.h
@@ -17,52 +17,55 @@
 #ifndef ANDROID_MEDIA_TUNERDEMUX_H
 #define ANDROID_MEDIA_TUNERDEMUX_H
 
+#include <aidl/android/hardware/tv/tuner/IDemux.h>
 #include <aidl/android/media/tv/tuner/BnTunerDemux.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerDvr;
-using ::aidl::android::media::tv::tuner::ITunerDvrCallback;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
-using ::aidl::android::media::tv::tuner::ITunerFrontend;
-using ::aidl::android::media::tv::tuner::ITunerTimeFilter;
-using ::android::hardware::tv::tuner::V1_0::IDemux;
-using ::android::hardware::tv::tuner::V1_0::IDvr;
-using ::android::hardware::tv::tuner::V1_0::IDvrCallback;
-using ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::aidl::android::hardware::tv::tuner::IDemux;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerDemux : public BnTunerDemux {
 
 public:
-    TunerDemux(sp<IDemux> demux, int demuxId);
+    TunerDemux(shared_ptr<IDemux> demux, int demuxId);
     virtual ~TunerDemux();
-    Status setFrontendDataSource(const shared_ptr<ITunerFrontend>& frontend) override;
-    Status openFilter(
-        int mainType, int subtype, int bufferSize, const shared_ptr<ITunerFilterCallback>& cb,
-        shared_ptr<ITunerFilter>* _aidl_return) override;
-    Status openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
-    Status getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter, int* _aidl_return) override;
-    Status getAvSyncTime(int avSyncHwId, int64_t* _aidl_return) override;
-    Status openDvr(
-        int dvbType, int bufferSize, const shared_ptr<ITunerDvrCallback>& cb,
-        shared_ptr<ITunerDvr>* _aidl_return) override;
-    Status connectCiCam(int ciCamId) override;
-    Status disconnectCiCam() override;
-    Status close() override;
+
+    ::ndk::ScopedAStatus setFrontendDataSource(
+            const shared_ptr<ITunerFrontend>& in_frontend) override;
+    ::ndk::ScopedAStatus setFrontendDataSourceById(int frontendId) override;
+    ::ndk::ScopedAStatus openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+                                    const shared_ptr<ITunerFilterCallback>& in_cb,
+                                    shared_ptr<ITunerFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus getAvSyncHwId(const shared_ptr<ITunerFilter>& in_tunerFilter,
+                                       int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getAvSyncTime(int32_t in_avSyncHwId, int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+                                 const shared_ptr<ITunerDvrCallback>& in_cb,
+                                 shared_ptr<ITunerDvr>* _aidl_return) override;
+    ::ndk::ScopedAStatus connectCiCam(int32_t in_ciCamId) override;
+    ::ndk::ScopedAStatus disconnectCiCam() override;
+    ::ndk::ScopedAStatus close() override;
 
     int getId() { return mDemuxId; }
 
 private:
-    sp<IDemux> mDemux;
+    shared_ptr<IDemux> mDemux;
     int mDemuxId;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERDEMUX_H
diff --git a/services/tuner/TunerDescrambler.cpp b/services/tuner/TunerDescrambler.cpp
index b7ae167..70aee20 100644
--- a/services/tuner/TunerDescrambler.cpp
+++ b/services/tuner/TunerDescrambler.cpp
@@ -16,17 +16,27 @@
 
 #define LOG_TAG "TunerDescrambler"
 
-#include "TunerFilter.h"
-#include "TunerDemux.h"
 #include "TunerDescrambler.h"
 
-using ::android::hardware::tv::tuner::V1_0::Result;
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <utils/Log.h>
+
+#include "TunerDemux.h"
+#include "TunerFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::IFilter;
+using ::aidl::android::hardware::tv::tuner::Result;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerDescrambler::TunerDescrambler(sp<IDescrambler> descrambler) {
+TunerDescrambler::TunerDescrambler(shared_ptr<IDescrambler> descrambler) {
     mDescrambler = descrambler;
 }
 
@@ -34,91 +44,74 @@
     mDescrambler = nullptr;
 }
 
-Status TunerDescrambler::setDemuxSource(const std::shared_ptr<ITunerDemux>& demux) {
+::ndk::ScopedAStatus TunerDescrambler::setDemuxSource(
+        const shared_ptr<ITunerDemux>& in_tunerDemux) {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDescrambler->setDemuxSource(static_cast<TunerDemux*>(demux.get())->getId());
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDescrambler->setDemuxSource((static_cast<TunerDemux*>(in_tunerDemux.get()))->getId());
 }
 
-Status TunerDescrambler::setKeyToken(const vector<uint8_t>& keyToken) {
+::ndk::ScopedAStatus TunerDescrambler::setKeyToken(const vector<uint8_t>& in_keyToken) {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDescrambler->setKeyToken(keyToken);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDescrambler->setKeyToken(in_keyToken);
 }
 
-Status TunerDescrambler::addPid(const TunerDemuxPid& pid,
-        const shared_ptr<ITunerFilter>& optionalSourceFilter) {
+::ndk::ScopedAStatus TunerDescrambler::addPid(
+        const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    sp<IFilter> halFilter = (optionalSourceFilter == NULL)
-            ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
-    Result res = mDescrambler->addPid(getHidlDemuxPid(pid), halFilter);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    shared_ptr<IFilter> halFilter =
+            (in_optionalSourceFilter == nullptr)
+                    ? nullptr
+                    : static_cast<TunerFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+
+    return mDescrambler->addPid(in_pid, halFilter);
 }
 
-Status TunerDescrambler::removePid(const TunerDemuxPid& pid,
-        const shared_ptr<ITunerFilter>& optionalSourceFilter) {
+::ndk::ScopedAStatus TunerDescrambler::removePid(
+        const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    sp<IFilter> halFilter = (optionalSourceFilter == NULL)
-            ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
-    Result res = mDescrambler->removePid(getHidlDemuxPid(pid), halFilter);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    shared_ptr<IFilter> halFilter =
+            (in_optionalSourceFilter == nullptr)
+                    ? nullptr
+                    : static_cast<TunerFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+
+    return mDescrambler->removePid(in_pid, halFilter);
 }
 
-Status TunerDescrambler::close() {
+::ndk::ScopedAStatus TunerDescrambler::close() {
     if (mDescrambler == nullptr) {
         ALOGE("IDescrambler is not initialized.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDescrambler->close();
-    mDescrambler = NULL;
+    auto res = mDescrambler->close();
+    mDescrambler = nullptr;
 
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return res;
 }
 
-DemuxPid TunerDescrambler::getHidlDemuxPid(const TunerDemuxPid& pid) {
-    DemuxPid hidlPid;
-    switch (pid.getTag()) {
-        case TunerDemuxPid::tPid: {
-            hidlPid.tPid((uint16_t)pid.get<TunerDemuxPid::tPid>());
-            break;
-        }
-        case TunerDemuxPid::mmtpPid: {
-            hidlPid.mmtpPid((uint16_t)pid.get<TunerDemuxPid::mmtpPid>());
-            break;
-        }
-    }
-    return hidlPid;
-}
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/TunerDescrambler.h b/services/tuner/TunerDescrambler.h
index 1970fb7..b1d5fb9 100644
--- a/services/tuner/TunerDescrambler.h
+++ b/services/tuner/TunerDescrambler.h
@@ -17,38 +17,43 @@
 #ifndef ANDROID_MEDIA_TUNERDESCRAMBLER_H
 #define ANDROID_MEDIA_TUNERDESCRAMBLER_H
 
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
 #include <aidl/android/media/tv/tuner/BnTunerDescrambler.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerDescrambler;
-using ::aidl::android::media::tv::tuner::ITunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::TunerDemuxPid;
-using ::android::hardware::tv::tuner::V1_0::DemuxPid;
-using ::android::hardware::tv::tuner::V1_0::IDescrambler;
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::aidl::android::hardware::tv::tuner::IDescrambler;
 
+using namespace std;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerDescrambler : public BnTunerDescrambler {
 
 public:
-    TunerDescrambler(sp<IDescrambler> descrambler);
+    TunerDescrambler(shared_ptr<IDescrambler> descrambler);
     virtual ~TunerDescrambler();
-    Status setDemuxSource(const shared_ptr<ITunerDemux>& demux) override;
-    Status setKeyToken(const vector<uint8_t>& keyToken) override;
-    Status addPid(const TunerDemuxPid& pid,
-            const shared_ptr<ITunerFilter>& optionalSourceFilter) override;
-    Status removePid(const TunerDemuxPid& pid,
-            const shared_ptr<ITunerFilter>& optionalSourceFilter) override;
-    Status close() override;
+
+    ::ndk::ScopedAStatus setDemuxSource(const shared_ptr<ITunerDemux>& in_tunerDemux) override;
+    ::ndk::ScopedAStatus setKeyToken(const vector<uint8_t>& in_keyToken) override;
+    ::ndk::ScopedAStatus addPid(const DemuxPid& in_pid,
+                                const shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+    ::ndk::ScopedAStatus removePid(
+            const DemuxPid& in_pid,
+            const shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+    ::ndk::ScopedAStatus close() override;
 
 private:
-    DemuxPid getHidlDemuxPid(const TunerDemuxPid& pid);
-
-    sp<IDescrambler> mDescrambler;
+    shared_ptr<IDescrambler> mDescrambler;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERDESCRAMBLER_H
diff --git a/services/tuner/TunerDvr.cpp b/services/tuner/TunerDvr.cpp
index db4e07b..8776f7e 100644
--- a/services/tuner/TunerDvr.cpp
+++ b/services/tuner/TunerDvr.cpp
@@ -16,194 +16,152 @@
 
 #define LOG_TAG "TunerDvr"
 
-#include <fmq/ConvertMQDescriptors.h>
 #include "TunerDvr.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <utils/Log.h>
+
 #include "TunerFilter.h"
 
-using ::android::hardware::tv::tuner::V1_0::DataFormat;
-using ::android::hardware::tv::tuner::V1_0::Result;
+using ::aidl::android::hardware::tv::tuner::Result;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerDvr::TunerDvr(sp<IDvr> dvr, int type) {
+TunerDvr::TunerDvr(shared_ptr<IDvr> dvr, DvrType type) {
     mDvr = dvr;
-    mType = static_cast<DvrType>(type);
+    mType = type;
 }
 
 TunerDvr::~TunerDvr() {
-    mDvr = NULL;
+    mDvr = nullptr;
 }
 
-Status TunerDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    MQDesc dvrMQDesc;
-    Result res;
-    mDvr->getQueueDesc([&](Result r, const MQDesc& desc) {
-        dvrMQDesc = desc;
-        res = r;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    AidlMQDesc aidlMQDesc;
-    unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
-                dvrMQDesc,  &aidlMQDesc);
-    *_aidl_return = move(aidlMQDesc);
-    return Status::ok();
+    return mDvr->getQueueDesc(_aidl_return);
 }
 
-Status TunerDvr::configure(const TunerDvrSettings& settings) {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::configure(const DvrSettings& in_settings) {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->configure(getHidlDvrSettingsFromAidl(settings));
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDvr->configure(in_settings);
 }
 
-Status TunerDvr::attachFilter(const shared_ptr<ITunerFilter>& filter) {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::attachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    ITunerFilter* tunerFilter = filter.get();
-    sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
-    if (hidlFilter == NULL) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    if (in_filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    Result res = mDvr->attachFilter(hidlFilter);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(in_filter.get()))->getHalFilter();
+    if (halFilter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
-    return Status::ok();
+
+    return mDvr->attachFilter(halFilter);
 }
 
-Status TunerDvr::detachFilter(const shared_ptr<ITunerFilter>& filter) {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::detachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    ITunerFilter* tunerFilter = filter.get();
-    sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
-    if (hidlFilter == NULL) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    if (in_filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    Result res = mDvr->detachFilter(hidlFilter);
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(in_filter.get()))->getHalFilter();
+    if (halFilter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
-    return Status::ok();
+
+    return mDvr->detachFilter(halFilter);
 }
 
-Status TunerDvr::start() {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::start() {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->start();
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDvr->start();
 }
 
-Status TunerDvr::stop() {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::stop() {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->stop();
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDvr->stop();
 }
 
-Status TunerDvr::flush() {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::flush() {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->flush();
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return mDvr->flush();
 }
 
-Status TunerDvr::close() {
-    if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::close() {
+    if (mDvr == nullptr) {
         ALOGE("IDvr is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mDvr->close();
-    mDvr = NULL;
+    auto status = mDvr->close();
+    mDvr = nullptr;
 
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-DvrSettings TunerDvr::getHidlDvrSettingsFromAidl(TunerDvrSettings settings) {
-    DvrSettings s;
-    switch (mType) {
-        case DvrType::PLAYBACK: {
-            s.playback({
-                .statusMask = static_cast<uint8_t>(settings.statusMask),
-                .lowThreshold = static_cast<uint32_t>(settings.lowThreshold),
-                .highThreshold = static_cast<uint32_t>(settings.highThreshold),
-                .dataFormat = static_cast<DataFormat>(settings.dataFormat),
-                .packetSize = static_cast<uint8_t>(settings.packetSize),
-            });
-            return s;
-        }
-        case DvrType::RECORD: {
-            s.record({
-                .statusMask = static_cast<uint8_t>(settings.statusMask),
-                .lowThreshold = static_cast<uint32_t>(settings.lowThreshold),
-                .highThreshold = static_cast<uint32_t>(settings.highThreshold),
-                .dataFormat = static_cast<DataFormat>(settings.dataFormat),
-                .packetSize = static_cast<uint8_t>(settings.packetSize),
-            });
-            return s;
-        }
-        default:
-            break;
-    }
-    return s;
+    return status;
 }
 
 /////////////// IDvrCallback ///////////////////////
-
-Return<void> TunerDvr::DvrCallback::onRecordStatus(const RecordStatus status) {
-    if (mTunerDvrCallback != NULL) {
-        mTunerDvrCallback->onRecordStatus(static_cast<int>(status));
+::ndk::ScopedAStatus TunerDvr::DvrCallback::onRecordStatus(const RecordStatus status) {
+    if (mTunerDvrCallback != nullptr) {
+        mTunerDvrCallback->onRecordStatus(status);
     }
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerDvr::DvrCallback::onPlaybackStatus(const PlaybackStatus status) {
-    if (mTunerDvrCallback != NULL) {
-        mTunerDvrCallback->onPlaybackStatus(static_cast<int>(status));
+::ndk::ScopedAStatus TunerDvr::DvrCallback::onPlaybackStatus(const PlaybackStatus status) {
+    if (mTunerDvrCallback != nullptr) {
+        mTunerDvrCallback->onPlaybackStatus(status);
     }
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
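
A minimal sketch (not part of this change) of how a caller might consume the ::ndk::ScopedAStatus values that TunerDvr now returns straight from the AIDL HAL instead of re-wrapping HIDL Result codes. The helper name startDvrOrLog, the LOG_TAG, and the template parameter are illustrative assumptions only.

#define LOG_TAG "TunerDvrStatusExample"  // illustrative tag, not from the patch

#include <aidl/android/hardware/tv/tuner/Result.h>
#include <android/binder_auto_utils.h>
#include <android/binder_status.h>
#include <utils/Log.h>

using ::aidl::android::hardware::tv::tuner::Result;

// `dvr` stands in for any object exposing the ::ndk::ScopedAStatus-returning
// methods shown above (start/stop/flush/close); the helper is hypothetical.
template <typename Dvr>
bool startDvrOrLog(Dvr& dvr) {
    ::ndk::ScopedAStatus status = dvr.start();
    if (status.isOk()) {
        return true;  // The HAL accepted the request.
    }
    if (status.getExceptionCode() == EX_SERVICE_SPECIFIC) {
        // TunerDvr forwards the HAL status unchanged and reports its own
        // failures (e.g. a null IDvr) as Result codes packed into the
        // service-specific error, so the code can be recovered here.
        Result res = static_cast<Result>(status.getServiceSpecificError());
        ALOGE("DVR start failed with tuner Result %d", static_cast<int>(res));
    } else {
        ALOGE("DVR start failed with binder status %d", status.getStatus());
    }
    return false;
}
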
diff --git a/services/tuner/TunerDvr.h b/services/tuner/TunerDvr.h
index a508e99..1854d08 100644
--- a/services/tuner/TunerDvr.h
+++ b/services/tuner/TunerDvr.h
@@ -17,81 +17,71 @@
 #ifndef ANDROID_MEDIA_TUNERDVR_H
 #define ANDROID_MEDIA_TUNERDVR_H
 
+#include <aidl/android/hardware/tv/tuner/BnDvrCallback.h>
+#include <aidl/android/hardware/tv/tuner/DvrSettings.h>
+#include <aidl/android/hardware/tv/tuner/DvrType.h>
+#include <aidl/android/hardware/tv/tuner/IDvr.h>
+#include <aidl/android/hardware/tv/tuner/PlaybackStatus.h>
+#include <aidl/android/hardware/tv/tuner/RecordStatus.h>
 #include <aidl/android/media/tv/tuner/BnTunerDvr.h>
 #include <aidl/android/media/tv/tuner/ITunerDvrCallback.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <fmq/MessageQueue.h>
 
-#include <TunerFilter.h>
+#include "TunerFilter.h"
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::hardware::common::fmq::GrantorDescriptor;
 using ::aidl::android::hardware::common::fmq::MQDescriptor;
 using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
-using ::aidl::android::media::tv::tuner::BnTunerDvr;
-using ::aidl::android::media::tv::tuner::ITunerDvrCallback;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::TunerDvrSettings;
-
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-
-using ::android::hardware::tv::tuner::V1_0::DvrSettings;
-using ::android::hardware::tv::tuner::V1_0::DvrType;
-using ::android::hardware::tv::tuner::V1_0::IDvr;
-using ::android::hardware::tv::tuner::V1_0::IDvrCallback;
-using ::android::hardware::tv::tuner::V1_0::PlaybackStatus;
-using ::android::hardware::tv::tuner::V1_0::RecordStatus;
+using ::aidl::android::hardware::tv::tuner::BnDvrCallback;
+using ::aidl::android::hardware::tv::tuner::DvrSettings;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::aidl::android::hardware::tv::tuner::IDvr;
+using ::aidl::android::hardware::tv::tuner::PlaybackStatus;
+using ::aidl::android::hardware::tv::tuner::RecordStatus;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-using MQDesc = MQDescriptorSync<uint8_t>;
 using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
 
 class TunerDvr : public BnTunerDvr {
 
 public:
-    TunerDvr(sp<IDvr> dvr, int type);
+    TunerDvr(shared_ptr<IDvr> dvr, DvrType type);
     ~TunerDvr();
 
-    Status getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus configure(const DvrSettings& in_settings) override;
+    ::ndk::ScopedAStatus attachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus detachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus start() override;
+    ::ndk::ScopedAStatus stop() override;
+    ::ndk::ScopedAStatus flush() override;
+    ::ndk::ScopedAStatus close() override;
 
-    Status configure(const TunerDvrSettings& settings) override;
-
-    Status attachFilter(const shared_ptr<ITunerFilter>& filter) override;
-
-    Status detachFilter(const shared_ptr<ITunerFilter>& filter) override;
-
-    Status start() override;
-
-    Status stop() override;
-
-    Status flush() override;
-
-    Status close() override;
-
-    struct DvrCallback : public IDvrCallback {
+    struct DvrCallback : public BnDvrCallback {
         DvrCallback(const shared_ptr<ITunerDvrCallback> tunerDvrCallback)
-                : mTunerDvrCallback(tunerDvrCallback) {};
+              : mTunerDvrCallback(tunerDvrCallback){};
 
-        virtual Return<void> onRecordStatus(const RecordStatus status);
-        virtual Return<void> onPlaybackStatus(const PlaybackStatus status);
+        ::ndk::ScopedAStatus onRecordStatus(const RecordStatus status) override;
+        ::ndk::ScopedAStatus onPlaybackStatus(const PlaybackStatus status) override;
 
-        private:
-            shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
+    private:
+        shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
     };
 
 private:
-    DvrSettings getHidlDvrSettingsFromAidl(TunerDvrSettings settings);
-
-    sp<IDvr> mDvr;
+    shared_ptr<IDvr> mDvr;
     DvrType mType;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERDVR_H
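
A small sketch, under stated assumptions, of how the DvrCallback declared above could be instantiated now that it extends the NDK-backend BnDvrCallback rather than the HIDL IDvrCallback. The helper makeDvrCallback and the clientCallback parameter are illustrative, not part of the patch.

#include <memory>

#include <android/binder_interface_utils.h>

#include "TunerDvr.h"

using ::aidl::android::media::tv::tuner::ITunerDvrCallback;
using ::aidl::android::media::tv::tuner::TunerDvr;

// `clientCallback` is assumed to be the ITunerDvrCallback supplied by the
// framework client; DvrCallback only forwards record/playback status to it.
std::shared_ptr<TunerDvr::DvrCallback> makeDvrCallback(
        const std::shared_ptr<ITunerDvrCallback>& clientCallback) {
    // BnDvrCallback (and therefore DvrCallback) derives from
    // ndk::SharedRefBase, so instances are created with SharedRefBase::make
    // instead of std::make_shared, replacing the former sp<IDvrCallback>.
    return ::ndk::SharedRefBase::make<TunerDvr::DvrCallback>(clientCallback);
}
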
diff --git a/services/tuner/TunerFilter.cpp b/services/tuner/TunerFilter.cpp
index 039fd31..fb5bfa3 100644
--- a/services/tuner/TunerFilter.cpp
+++ b/services/tuner/TunerFilter.cpp
@@ -18,893 +18,460 @@
 
 #include "TunerFilter.h"
 
-using ::aidl::android::media::tv::tuner::TunerFilterSectionCondition;
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <binder/IPCThreadState.h>
 
-using ::android::hardware::hidl_handle;
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpLengthType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpAddress;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpPid;
-using ::android::hardware::tv::tuner::V1_0::DemuxRecordScIndexType;
-using ::android::hardware::tv::tuner::V1_0::DemuxStreamId;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::AudioStreamType;
-using ::android::hardware::tv::tuner::V1_1::Constant;
-using ::android::hardware::tv::tuner::V1_1::VideoStreamType;
+#include "TunerHelper.h"
+#include "TunerService.h"
 
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using ::android::IPCThreadState;
 
 using namespace std;
 
-TunerFilter::TunerFilter(
-        sp<IFilter> filter, int mainType, int subType) {
-    mFilter = filter;
-    mFilter_1_1 = ::android::hardware::tv::tuner::V1_1::IFilter::castFrom(filter);
-    mMainType = mainType;
-    mSubType = subType;
-}
+TunerFilter::TunerFilter(shared_ptr<IFilter> filter, shared_ptr<FilterCallback> cb,
+                         DemuxFilterType type)
+      : mFilter(filter),
+        mType(type),
+        mStarted(false),
+        mShared(false),
+        mClientPid(-1),
+        mFilterCallback(cb) {}
 
 TunerFilter::~TunerFilter() {
+    Mutex::Autolock _l(mLock);
     mFilter = nullptr;
-    mFilter_1_1 = nullptr;
 }
 
-Status TunerFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
-    if (mFilter == NULL) {
-        ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    MQDesc filterMQDesc;
-    Result res;
-    mFilter->getQueueDesc([&](Result r, const MQDesc& desc) {
-        filterMQDesc = desc;
-        res = r;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    AidlMQDesc aidlMQDesc;
-    unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
-                filterMQDesc,  &aidlMQDesc);
-    *_aidl_return = move(aidlMQDesc);
-    return Status::ok();
-}
-
-Status TunerFilter::getId(int32_t* _aidl_return) {
+::ndk::ScopedAStatus TunerFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    mFilter->getId([&](Result r, uint32_t filterId) {
-        res = r;
-        mId = filterId;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
     }
-    *_aidl_return = mId;
-    return Status::ok();
+
+    return mFilter->getQueueDesc(_aidl_return);
 }
 
-Status TunerFilter::getId64Bit(int64_t* _aidl_return) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result res;
-    mFilter_1_1->getId64Bit([&](Result r, uint64_t filterId) {
-        res = r;
-        mId64Bit = filterId;
-    });
-    if (res != Result::SUCCESS) {
-        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    *_aidl_return = mId64Bit;
-    return Status::ok();
-}
-
-Status TunerFilter::configure(const TunerFilterConfiguration& config) {
+::ndk::ScopedAStatus TunerFilter::getId(int32_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    DemuxFilterSettings settings;
-    switch (config.getTag()) {
-        case TunerFilterConfiguration::ts: {
-            getHidlTsSettings(config, settings);
-            break;
-        }
-        case TunerFilterConfiguration::mmtp: {
-            getHidlMmtpSettings(config, settings);
-            break;
-        }
-        case TunerFilterConfiguration::ip: {
-            getHidlIpSettings(config, settings);
-            break;
-        }
-        case TunerFilterConfiguration::tlv: {
-            getHidlTlvSettings(config, settings);
-            break;
-        }
-        case TunerFilterConfiguration::alp: {
-            getHidlAlpSettings(config, settings);
-            break;
-        }
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
     }
 
-    Result res = mFilter->configure(settings);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    auto status = mFilter->getId(&mId);
+    if (status.isOk()) {
+        *_aidl_return = mId;
     }
-    return Status::ok();
+    return status;
 }
 
-Status TunerFilter::configureMonitorEvent(int monitorEventType) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result res = mFilter_1_1->configureMonitorEvent(monitorEventType);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-Status TunerFilter::configureIpFilterContextId(int cid) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result res = mFilter_1_1->configureIpCid(cid);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-Status TunerFilter::configureAvStreamType(int avStreamType) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    AvStreamType type;
-    if (!getHidlAvStreamType(avStreamType, type)) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_STATE));
-    }
-
-    Result res = mFilter_1_1->configureAvStreamType(type);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-Status TunerFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+::ndk::ScopedAStatus TunerFilter::getId64Bit(int64_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    ITunerFilter* tunerFilter = filter.get();
-    sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
-    Result res = mFilter->setDataSource(hidlFilter);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
-}
-
-void TunerFilter::getHidlTsSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto tsConf = config.get<TunerFilterConfiguration::ts>();
-    DemuxTsFilterSettings ts{
-        .tpid = static_cast<uint16_t>(tsConf.tpid),
-    };
-
-    TunerFilterSettings tunerSettings = tsConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::av: {
-            ts.filterSettings.av(getAvSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::section: {
-            ts.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::pesData: {
-            ts.filterSettings.pesData(getPesDataSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::record: {
-            ts.filterSettings.record(getRecordSettings(tunerSettings));
-            break;
-        }
-        default: {
-            ts.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.ts(ts);
-}
-
-void TunerFilter::getHidlMmtpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto mmtpConf = config.get<TunerFilterConfiguration::mmtp>();
-    DemuxMmtpFilterSettings mmtp{
-        .mmtpPid = static_cast<DemuxMmtpPid>(mmtpConf.mmtpPid),
-    };
-
-    TunerFilterSettings tunerSettings = mmtpConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::av: {
-            mmtp.filterSettings.av(getAvSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::section: {
-            mmtp.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::pesData: {
-            mmtp.filterSettings.pesData(getPesDataSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::record: {
-            mmtp.filterSettings.record(getRecordSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::download: {
-            mmtp.filterSettings.download(getDownloadSettings(tunerSettings));
-            break;
-        }
-        default: {
-            mmtp.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.mmtp(mmtp);
-}
-
-void TunerFilter::getHidlIpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto ipConf = config.get<TunerFilterConfiguration::ip>();
-    DemuxIpAddress ipAddr{
-        .srcPort = static_cast<uint16_t>(ipConf.ipAddr.srcPort),
-        .dstPort = static_cast<uint16_t>(ipConf.ipAddr.dstPort),
-    };
-
-    ipConf.ipAddr.srcIpAddress.isIpV6
-            ? ipAddr.srcIpAddress.v6(getIpV6Address(ipConf.ipAddr.srcIpAddress))
-            : ipAddr.srcIpAddress.v4(getIpV4Address(ipConf.ipAddr.srcIpAddress));
-    ipConf.ipAddr.dstIpAddress.isIpV6
-            ? ipAddr.dstIpAddress.v6(getIpV6Address(ipConf.ipAddr.dstIpAddress))
-            : ipAddr.dstIpAddress.v4(getIpV4Address(ipConf.ipAddr.dstIpAddress));
-    DemuxIpFilterSettings ip{
-        .ipAddr = ipAddr,
-    };
-
-    TunerFilterSettings tunerSettings = ipConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::section: {
-            ip.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::isPassthrough: {
-            ip.filterSettings.bPassthrough(tunerSettings.isPassthrough);
-            break;
-        }
-        default: {
-            ip.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.ip(ip);
-}
-
-hidl_array<uint8_t, IP_V6_LENGTH> TunerFilter::getIpV6Address(TunerDemuxIpAddress addr) {
-    hidl_array<uint8_t, IP_V6_LENGTH> ip;
-    if (addr.addr.size() != IP_V6_LENGTH) {
-        return ip;
-    }
-    copy(addr.addr.begin(), addr.addr.end(), ip.data());
-    return ip;
-}
-
-hidl_array<uint8_t, IP_V4_LENGTH> TunerFilter::getIpV4Address(TunerDemuxIpAddress addr) {
-    hidl_array<uint8_t, IP_V4_LENGTH> ip;
-    if (addr.addr.size() != IP_V4_LENGTH) {
-        return ip;
-    }
-    copy(addr.addr.begin(), addr.addr.end(), ip.data());
-    return ip;
-}
-
-void TunerFilter::getHidlTlvSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto tlvConf = config.get<TunerFilterConfiguration::tlv>();
-    DemuxTlvFilterSettings tlv{
-        .packetType = static_cast<uint8_t>(tlvConf.packetType),
-        .isCompressedIpPacket = tlvConf.isCompressedIpPacket,
-    };
-
-    TunerFilterSettings tunerSettings = tlvConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::section: {
-            tlv.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        case TunerFilterSettings::isPassthrough: {
-            tlv.filterSettings.bPassthrough(tunerSettings.isPassthrough);
-            break;
-        }
-        default: {
-            tlv.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.tlv(tlv);
-}
-
-void TunerFilter::getHidlAlpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
-    auto alpConf = config.get<TunerFilterConfiguration::alp>();
-    DemuxAlpFilterSettings alp{
-        .packetType = static_cast<uint8_t>(alpConf.packetType),
-        .lengthType = static_cast<DemuxAlpLengthType>(alpConf.lengthType),
-    };
-
-    TunerFilterSettings tunerSettings = alpConf.filterSettings;
-    switch (tunerSettings.getTag()) {
-        case TunerFilterSettings::section: {
-            alp.filterSettings.section(getSectionSettings(tunerSettings));
-            break;
-        }
-        default: {
-            alp.filterSettings.noinit();
-            break;
-        }
-    }
-    settings.alp(alp);
-}
-
-DemuxFilterAvSettings TunerFilter::getAvSettings(const TunerFilterSettings& settings) {
-    DemuxFilterAvSettings av {
-        .isPassthrough = settings.get<TunerFilterSettings::av>().isPassthrough,
-    };
-    return av;
-}
-
-DemuxFilterSectionSettings TunerFilter::getSectionSettings(const TunerFilterSettings& settings) {
-    auto s = settings.get<TunerFilterSettings::section>();
-    DemuxFilterSectionSettings section{
-        .isCheckCrc = s.isCheckCrc,
-        .isRepeat = s.isRepeat,
-        .isRaw = s.isRaw,
-    };
-
-    switch (s.condition.getTag()) {
-        case TunerFilterSectionCondition::sectionBits: {
-            auto sectionBits = s.condition.get<TunerFilterSectionCondition::sectionBits>();
-            vector<uint8_t> filter(sectionBits.filter.begin(), sectionBits.filter.end());
-            vector<uint8_t> mask(sectionBits.mask.begin(), sectionBits.mask.end());
-            vector<uint8_t> mode(sectionBits.mode.begin(), sectionBits.mode.end());
-            section.condition.sectionBits({
-                .filter = filter,
-                .mask = mask,
-                .mode = mode,
-            });
-            break;
-        }
-        case TunerFilterSectionCondition::tableInfo: {
-            auto tableInfo = s.condition.get<TunerFilterSectionCondition::tableInfo>();
-            section.condition.tableInfo({
-                .tableId = static_cast<uint16_t>(tableInfo.tableId),
-                .version = static_cast<uint16_t>(tableInfo.version),
-            });
-            break;
-        }
-        default: {
-            break;
-        }
-    }
-    return section;
-}
-
-DemuxFilterPesDataSettings TunerFilter::getPesDataSettings(const TunerFilterSettings& settings) {
-    DemuxFilterPesDataSettings pes{
-        .streamId = static_cast<DemuxStreamId>(
-                settings.get<TunerFilterSettings::pesData>().streamId),
-        .isRaw = settings.get<TunerFilterSettings::pesData>().isRaw,
-    };
-    return pes;
-}
-
-DemuxFilterRecordSettings TunerFilter::getRecordSettings(const TunerFilterSettings& settings) {
-    auto r = settings.get<TunerFilterSettings::record>();
-    DemuxFilterRecordSettings record{
-        .tsIndexMask = static_cast<uint32_t>(r.tsIndexMask),
-        .scIndexType = static_cast<DemuxRecordScIndexType>(r.scIndexType),
-    };
-
-    switch (r.scIndexMask.getTag()) {
-        case TunerFilterScIndexMask::sc: {
-            record.scIndexMask.sc(static_cast<uint32_t>(
-                    r.scIndexMask.get<TunerFilterScIndexMask::sc>()));
-            break;
-        }
-        case TunerFilterScIndexMask::scHevc: {
-            record.scIndexMask.scHevc(static_cast<uint32_t>(
-                    r.scIndexMask.get<TunerFilterScIndexMask::scHevc>()));
-            break;
-        }
-    }
-    return record;
-}
-
-DemuxFilterDownloadSettings TunerFilter::getDownloadSettings(const TunerFilterSettings& settings) {
-    DemuxFilterDownloadSettings download {
-        .downloadId = static_cast<uint32_t>(
-                settings.get<TunerFilterSettings::download>().downloadId),
-    };
-    return download;
-}
-
-Status TunerFilter::getAvSharedHandleInfo(TunerFilterSharedHandleInfo* _aidl_return) {
-    if (mFilter_1_1 == nullptr) {
-        ALOGE("IFilter_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
     }
 
-    Result res;
-    mFilter_1_1->getAvSharedHandle([&](Result r, hidl_handle avMemory, uint64_t avMemSize) {
-        res = r;
-        if (res == Result::SUCCESS) {
-            TunerFilterSharedHandleInfo info{
-                .handle = dupToAidl(avMemory),
-                .size = static_cast<int64_t>(avMemSize),
-            };
-            *_aidl_return = move(info);
+    auto status = mFilter->getId64Bit(&mId64Bit);
+    if (status.isOk()) {
+        *_aidl_return = mId64Bit;
+    }
+    return status;
+}
+
+::ndk::ScopedAStatus TunerFilter::configure(const DemuxFilterSettings& in_settings) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->configure(in_settings);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureMonitorEvent(int32_t monitorEventType) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->configureMonitorEvent(monitorEventType);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureIpFilterContextId(int32_t cid) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->configureIpCid(cid);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureAvStreamType(const AvStreamType& in_avStreamType) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->configureAvStreamType(in_avStreamType);
+}
+
+::ndk::ScopedAStatus TunerFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    shared_ptr<IFilter> halFilter = static_cast<TunerFilter*>(filter.get())->getHalFilter();
+    return mFilter->setDataSource(halFilter);
+}
+
+::ndk::ScopedAStatus TunerFilter::getAvSharedHandle(NativeHandle* out_avMemory,
+                                                    int64_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->getAvSharedHandle(out_avMemory, _aidl_return);
+}
+
+::ndk::ScopedAStatus TunerFilter::releaseAvHandle(const NativeHandle& in_handle,
+                                                  int64_t in_avDataId) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    return mFilter->releaseAvHandle(in_handle, in_avDataId);
+}
+
+::ndk::ScopedAStatus TunerFilter::start() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    auto res = mFilter->start();
+    if (res.isOk()) {
+        mStarted = true;
+    }
+    return res;
+}
+
+::ndk::ScopedAStatus TunerFilter::stop() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    auto res = mFilter->stop();
+    mStarted = false;
+
+    return res;
+}
+
+::ndk::ScopedAStatus TunerFilter::flush() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    return mFilter->flush();
+}
+
+::ndk::ScopedAStatus TunerFilter::close() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            TunerService::getTunerService()->removeSharedFilter(this->ref<TunerFilter>());
         } else {
-            _aidl_return = NULL;
+            // Calling from shared process, do not really close this filter.
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            mStarted = false;
+            return ::ndk::ScopedAStatus::ok();
         }
-    });
-
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
     }
-    return Status::ok();
+
+    auto res = mFilter->close();
+    mFilter = nullptr;
+    mStarted = false;
+    mShared = false;
+    mClientPid = -1;
+
+    return res;
 }
 
-Status TunerFilter::releaseAvHandle(
-        const ::aidl::android::hardware::common::NativeHandle& handle, int64_t avDataId) {
+::ndk::ScopedAStatus TunerFilter::acquireSharedFilterToken(string* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mFilter->releaseAvHandle(hidl_handle(makeFromAidl(handle)), avDataId);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    if (mShared || mStarted) {
+        ALOGD("create SharedFilter in wrong state");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
     }
-    return Status::ok();
+
+    IPCThreadState* ipc = IPCThreadState::self();
+    mClientPid = ipc->getCallingPid();
+    string token = TunerService::getTunerService()->addFilterToShared(this->ref<TunerFilter>());
+    _aidl_return->assign(token);
+    mShared = true;
+
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Status TunerFilter::start() {
+::ndk::ScopedAStatus TunerFilter::freeSharedFilterToken(const string& /* in_filterToken */) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    Result res = mFilter->start();
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+
+    if (!mShared) {
+        // The filter is not shared or the shared filter has been closed.
+        return ::ndk::ScopedAStatus::ok();
     }
-    return Status::ok();
+
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+        mFilterCallback->detachSharedFilterCallback();
+    }
+
+    TunerService::getTunerService()->removeSharedFilter(this->ref<TunerFilter>());
+    mShared = false;
+
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Status TunerFilter::stop() {
+::ndk::ScopedAStatus TunerFilter::getFilterType(DemuxFilterType* _aidl_return) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    Result res = mFilter->stop();
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+
+    *_aidl_return = mType;
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Status TunerFilter::flush() {
+::ndk::ScopedAStatus TunerFilter::setDelayHint(const FilterDelayHint& in_hint) {
+    Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    Result res = mFilter->flush();
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+
+    return mFilter->setDelayHint(in_hint);
 }
 
-Status TunerFilter::close() {
-    if (mFilter == nullptr) {
-        ALOGE("IFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-    Result res = mFilter->close();
-    mFilter = NULL;
-    mFilter_1_1 = NULL;
-
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+bool TunerFilter::isSharedFilterAllowed(int callingPid) {
+    return mShared && mClientPid != callingPid;
 }
 
-sp<IFilter> TunerFilter::getHalFilter() {
+void TunerFilter::attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb) {
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->attachSharedFilterCallback(in_cb);
+    }
+}
+
+shared_ptr<IFilter> TunerFilter::getHalFilter() {
     return mFilter;
 }
 
-bool TunerFilter::isAudioFilter() {
-    return (mMainType == (int)DemuxFilterMainType::TS
-                    && mSubType == (int)DemuxTsFilterType::AUDIO)
-            || (mMainType == (int)DemuxFilterMainType::MMTP
-                    && mSubType == (int)DemuxMmtpFilterType::AUDIO);
-}
-
-bool TunerFilter::isVideoFilter() {
-    return (mMainType == (int)DemuxFilterMainType::TS
-                    && mSubType == (int)DemuxTsFilterType::VIDEO)
-            || (mMainType == (int)DemuxFilterMainType::MMTP
-                    && mSubType == (int)DemuxMmtpFilterType::VIDEO);
-}
-
-bool TunerFilter::getHidlAvStreamType(int avStreamType, AvStreamType& type) {
-    if (isAudioFilter()) {
-        type.audio(static_cast<AudioStreamType>(avStreamType));
-        return true;
-    }
-
-    if (isVideoFilter()) {
-        type.video(static_cast<VideoStreamType>(avStreamType));
-        return true;
-    }
-
-    return false;
-}
-
 /////////////// FilterCallback ///////////////////////
-
-Return<void> TunerFilter::FilterCallback::onFilterStatus(DemuxFilterStatus status) {
-    if (mTunerFilterCallback != NULL) {
-        mTunerFilterCallback->onFilterStatus((int)status);
+::ndk::ScopedAStatus TunerFilter::FilterCallback::onFilterStatus(DemuxFilterStatus status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(status);
     }
-    return Void();
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerFilter::FilterCallback::onFilterEvent(const DemuxFilterEvent& filterEvent) {
-    vector<DemuxFilterEventExt::Event> emptyEventsExt;
-    DemuxFilterEventExt emptyFilterEventExt {
-            .events = emptyEventsExt,
-    };
-    onFilterEvent_1_1(filterEvent, emptyFilterEventExt);
-    return Void();
+::ndk::ScopedAStatus TunerFilter::FilterCallback::onFilterEvent(
+        const vector<DemuxFilterEvent>& events) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        mTunerFilterCallback->onFilterEvent(events);
+    }
+    return ::ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerFilter::FilterCallback::onFilterEvent_1_1(const DemuxFilterEvent& filterEvent,
-        const DemuxFilterEventExt& filterEventExt) {
-    if (mTunerFilterCallback != NULL) {
-        vector<DemuxFilterEvent::Event> events = filterEvent.events;
-        vector<DemuxFilterEventExt::Event> eventsExt = filterEventExt.events;
-        vector<TunerFilterEvent> tunerEvent;
-
-        getAidlFilterEvent(events, eventsExt, tunerEvent);
-        mTunerFilterCallback->onFilterEvent(tunerEvent);
-    }
-    return Void();
-}
-
-/////////////// FilterCallback Helper Methods ///////////////////////
-
-void TunerFilter::FilterCallback::getAidlFilterEvent(vector<DemuxFilterEvent::Event>& events,
-        vector<DemuxFilterEventExt::Event>& eventsExt,
-        vector<TunerFilterEvent>& tunerEvent) {
-    if (events.empty() && !eventsExt.empty()) {
-        auto eventExt = eventsExt[0];
-        switch (eventExt.getDiscriminator()) {
-            case DemuxFilterEventExt::Event::hidl_discriminator::monitorEvent: {
-                getMonitorEvent(eventsExt, tunerEvent);
-                return;
-            }
-            case DemuxFilterEventExt::Event::hidl_discriminator::startId: {
-                getRestartEvent(eventsExt, tunerEvent);
-                return;
-            }
-            default: {
-                break;
-            }
-        }
-        return;
-    }
-
-    if (!events.empty()) {
-        auto event = events[0];
-        switch (event.getDiscriminator()) {
-            case DemuxFilterEvent::Event::hidl_discriminator::media: {
-                getMediaEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::section: {
-                getSectionEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::pes: {
-                getPesEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::tsRecord: {
-                getTsRecordEvent(events, eventsExt, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::mmtpRecord: {
-                getMmtpRecordEvent(events, eventsExt, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::download: {
-                getDownloadEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::ipPayload: {
-                getIpPayloadEvent(events, tunerEvent);
-                break;
-            }
-            case DemuxFilterEvent::Event::hidl_discriminator::temi: {
-                getTemiEvent(events, tunerEvent);
-                break;
-            }
-            default: {
-                break;
-            }
-        }
+void TunerFilter::FilterCallback::sendSharedFilterStatus(int32_t status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
     }
 }
 
-void TunerFilter::FilterCallback::getMediaEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterMediaEvent mediaEvent = e.media();
-        TunerFilterMediaEvent tunerMedia;
+void TunerFilter::FilterCallback::attachSharedFilterCallback(
+        const shared_ptr<ITunerFilterCallback>& in_cb) {
+    Mutex::Autolock _l(mCallbackLock);
+    mOriginalCallback = mTunerFilterCallback;
+    mTunerFilterCallback = in_cb;
+}
 
-        tunerMedia.streamId = static_cast<char16_t>(mediaEvent.streamId);
-        tunerMedia.isPtsPresent = mediaEvent.isPtsPresent;
-        tunerMedia.pts = static_cast<long>(mediaEvent.pts);
-        tunerMedia.dataLength = static_cast<int>(mediaEvent.dataLength);
-        tunerMedia.offset = static_cast<int>(mediaEvent.offset);
-        tunerMedia.isSecureMemory = mediaEvent.isSecureMemory;
-        tunerMedia.avDataId = static_cast<long>(mediaEvent.avDataId);
-        tunerMedia.mpuSequenceNumber = static_cast<int>(mediaEvent.mpuSequenceNumber);
-        tunerMedia.isPesPrivateData = mediaEvent.isPesPrivateData;
-
-        if (mediaEvent.extraMetaData.getDiscriminator() ==
-                DemuxFilterMediaEvent::ExtraMetaData::hidl_discriminator::audio) {
-            tunerMedia.isAudioExtraMetaData = true;
-            tunerMedia.audio = {
-                .adFade = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adFade),
-                .adPan = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adPan),
-                .versionTextTag = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().versionTextTag),
-                .adGainCenter = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adGainCenter),
-                .adGainFront = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adGainFront),
-                .adGainSurround = static_cast<int8_t>(
-                        mediaEvent.extraMetaData.audio().adGainSurround),
-            };
-        } else {
-            tunerMedia.isAudioExtraMetaData = false;
-        }
-
-        if (mediaEvent.avMemory.getNativeHandle() != nullptr) {
-            tunerMedia.avMemory = dupToAidl(mediaEvent.avMemory.getNativeHandle());
-        }
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::media>(move(tunerMedia));
-        res.push_back(move(tunerEvent));
+void TunerFilter::FilterCallback::detachSharedFilterCallback() {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback = mOriginalCallback;
+        mOriginalCallback = nullptr;
     }
 }
 
-void TunerFilter::FilterCallback::getSectionEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterSectionEvent sectionEvent = e.section();
-        TunerFilterSectionEvent tunerSection;
-
-        tunerSection.tableId = static_cast<char16_t>(sectionEvent.tableId);
-        tunerSection.version = static_cast<char16_t>(sectionEvent.version);
-        tunerSection.sectionNum = static_cast<char16_t>(sectionEvent.sectionNum);
-        tunerSection.dataLength = static_cast<char16_t>(sectionEvent.dataLength);
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::section>(move(tunerSection));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getPesEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterPesEvent pesEvent = e.pes();
-        TunerFilterPesEvent tunerPes;
-
-        tunerPes.streamId = static_cast<char16_t>(pesEvent.streamId);
-        tunerPes.dataLength = static_cast<char16_t>(pesEvent.dataLength);
-        tunerPes.mpuSequenceNumber = static_cast<int>(pesEvent.mpuSequenceNumber);
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::pes>(move(tunerPes));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getTsRecordEvent(vector<DemuxFilterEvent::Event>& events,
-        vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
-    for (int i = 0; i < events.size(); i++) {
-        TunerFilterTsRecordEvent tunerTsRecord;
-        DemuxFilterTsRecordEvent tsRecordEvent = events[i].tsRecord();
-
-        TunerFilterScIndexMask scIndexMask;
-        if (tsRecordEvent.scIndexMask.getDiscriminator()
-                == DemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::sc) {
-            scIndexMask.set<TunerFilterScIndexMask::sc>(
-                    static_cast<int>(tsRecordEvent.scIndexMask.sc()));
-        } else if (tsRecordEvent.scIndexMask.getDiscriminator()
-                == DemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::scHevc) {
-            scIndexMask.set<TunerFilterScIndexMask::scHevc>(
-                    static_cast<int>(tsRecordEvent.scIndexMask.scHevc()));
-        }
-
-        if (tsRecordEvent.pid.getDiscriminator() == DemuxPid::hidl_discriminator::tPid) {
-            tunerTsRecord.pid = static_cast<char16_t>(tsRecordEvent.pid.tPid());
-        } else {
-            tunerTsRecord.pid = static_cast<char16_t>(Constant::INVALID_TS_PID);
-        }
-
-        tunerTsRecord.scIndexMask = scIndexMask;
-        tunerTsRecord.tsIndexMask = static_cast<int>(tsRecordEvent.tsIndexMask);
-        tunerTsRecord.byteNumber = static_cast<long>(tsRecordEvent.byteNumber);
-
-        if (eventsExt.size() > i && eventsExt[i].getDiscriminator() ==
-                    DemuxFilterEventExt::Event::hidl_discriminator::tsRecord) {
-            tunerTsRecord.isExtended = true;
-            tunerTsRecord.pts = static_cast<long>(eventsExt[i].tsRecord().pts);
-            tunerTsRecord.firstMbInSlice = static_cast<int>(eventsExt[i].tsRecord().firstMbInSlice);
-        } else {
-            tunerTsRecord.isExtended = false;
-        }
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::tsRecord>(move(tunerTsRecord));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getMmtpRecordEvent(vector<DemuxFilterEvent::Event>& events,
-        vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
-    for (int i = 0; i < events.size(); i++) {
-        TunerFilterMmtpRecordEvent tunerMmtpRecord;
-        DemuxFilterMmtpRecordEvent mmtpRecordEvent = events[i].mmtpRecord();
-
-        tunerMmtpRecord.scHevcIndexMask = static_cast<int>(mmtpRecordEvent.scHevcIndexMask);
-        tunerMmtpRecord.byteNumber = static_cast<long>(mmtpRecordEvent.byteNumber);
-
-        if (eventsExt.size() > i && eventsExt[i].getDiscriminator() ==
-                    DemuxFilterEventExt::Event::hidl_discriminator::mmtpRecord) {
-            tunerMmtpRecord.isExtended = true;
-            tunerMmtpRecord.pts = static_cast<long>(eventsExt[i].mmtpRecord().pts);
-            tunerMmtpRecord.mpuSequenceNumber =
-                    static_cast<int>(eventsExt[i].mmtpRecord().mpuSequenceNumber);
-            tunerMmtpRecord.firstMbInSlice =
-                    static_cast<int>(eventsExt[i].mmtpRecord().firstMbInSlice);
-            tunerMmtpRecord.tsIndexMask = static_cast<int>(eventsExt[i].mmtpRecord().tsIndexMask);
-        } else {
-            tunerMmtpRecord.isExtended = false;
-        }
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::mmtpRecord>(move(tunerMmtpRecord));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getDownloadEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterDownloadEvent downloadEvent = e.download();
-        TunerFilterDownloadEvent tunerDownload;
-
-        tunerDownload.itemId = static_cast<int>(downloadEvent.itemId);
-        tunerDownload.itemFragmentIndex = static_cast<int>(downloadEvent.itemFragmentIndex);
-        tunerDownload.mpuSequenceNumber = static_cast<int>(downloadEvent.mpuSequenceNumber);
-        tunerDownload.lastItemFragmentIndex = static_cast<int>(downloadEvent.lastItemFragmentIndex);
-        tunerDownload.dataLength = static_cast<char16_t>(downloadEvent.dataLength);
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::download>(move(tunerDownload));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getIpPayloadEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterIpPayloadEvent ipPayloadEvent = e.ipPayload();
-        TunerFilterIpPayloadEvent tunerIpPayload;
-
-        tunerIpPayload.dataLength = static_cast<char16_t>(ipPayloadEvent.dataLength);
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::ipPayload>(move(tunerIpPayload));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getTemiEvent(
-        vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
-    for (DemuxFilterEvent::Event e : events) {
-        DemuxFilterTemiEvent temiEvent = e.temi();
-        TunerFilterTemiEvent tunerTemi;
-
-        tunerTemi.pts = static_cast<long>(temiEvent.pts);
-        tunerTemi.descrTag = static_cast<int8_t>(temiEvent.descrTag);
-        vector<uint8_t> descrData = temiEvent.descrData;
-        tunerTemi.descrData.resize(descrData.size());
-        copy(descrData.begin(), descrData.end(), tunerTemi.descrData.begin());
-
-        TunerFilterEvent tunerEvent;
-        tunerEvent.set<TunerFilterEvent::temi>(move(tunerTemi));
-        res.push_back(move(tunerEvent));
-    }
-}
-
-void TunerFilter::FilterCallback::getMonitorEvent(
-        vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
-    DemuxFilterMonitorEvent monitorEvent = eventsExt[0].monitorEvent();
-    TunerFilterMonitorEvent tunerMonitor;
-
-    switch (monitorEvent.getDiscriminator()) {
-        case DemuxFilterMonitorEvent::hidl_discriminator::scramblingStatus: {
-            tunerMonitor.set<TunerFilterMonitorEvent::scramblingStatus>(
-                    static_cast<int>(monitorEvent.scramblingStatus()));
-            break;
-        }
-        case DemuxFilterMonitorEvent::hidl_discriminator::cid: {
-            tunerMonitor.set<TunerFilterMonitorEvent::cid>(static_cast<int>(monitorEvent.cid()));
-            break;
-        }
-    }
-
-    TunerFilterEvent tunerEvent;
-    tunerEvent.set<TunerFilterEvent::monitor>(move(tunerMonitor));
-    res.push_back(move(tunerEvent));
-}
-
-void TunerFilter::FilterCallback::getRestartEvent(
-        vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
-    TunerFilterEvent tunerEvent;
-    tunerEvent.set<TunerFilterEvent::startId>(static_cast<int>(eventsExt[0].startId()));
-    res.push_back(move(tunerEvent));
-}
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
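
A sketch (assumption, not from the patch) that factors out the calling-pid gate repeated in the new TunerFilter methods, to make the shared-filter access rule explicit. The helper name callerMayDriveSharedFilter and its parameters are hypothetical; only IPCThreadState::getCallingPid() and the mShared/mClientPid semantics come from the code above.

#include <binder/IPCThreadState.h>

using ::android::IPCThreadState;

// Mirrors TunerFilter::isSharedFilterAllowed() and the per-method checks
// above: `clientPid` is the pid recorded when the owner called
// acquireSharedFilterToken(), `shared` corresponds to mShared.
static bool callerMayDriveSharedFilter(bool shared, int32_t clientPid) {
    if (!shared) {
        return false;  // Not shared: the normal (non-token) path applies.
    }
    // Once shared, data-path calls (start/stop/flush/getQueueDesc) are
    // rejected for the owning process and allowed for the process that
    // resolved the token through the tuner service.
    return IPCThreadState::self()->getCallingPid() != clientPid;
}
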
diff --git a/services/tuner/TunerFilter.h b/services/tuner/TunerFilter.h
index ff4728c..529c191 100644
--- a/services/tuner/TunerFilter.h
+++ b/services/tuner/TunerFilter.h
@@ -17,176 +17,107 @@
 #ifndef ANDROID_MEDIA_TUNERFILTER_H
 #define ANDROID_MEDIA_TUNERFILTER_H
 
+#include <aidl/android/hardware/tv/tuner/AvStreamType.h>
+#include <aidl/android/hardware/tv/tuner/BnFilterCallback.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterType.h>
+#include <aidl/android/hardware/tv/tuner/FilterDelayHint.h>
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
 #include <aidl/android/media/tv/tuner/BnTunerFilter.h>
 #include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
-#include <aidlcommonsupport/NativeHandle.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <android/hardware/tv/tuner/1.1/IFilter.h>
-#include <android/hardware/tv/tuner/1.1/IFilterCallback.h>
-#include <android/hardware/tv/tuner/1.1/types.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <fmq/ConvertMQDescriptors.h>
-#include <fmq/MessageQueue.h>
+#include <utils/Mutex.h>
 
-using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::hardware::common::NativeHandle;
 using ::aidl::android::hardware::common::fmq::MQDescriptor;
 using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::AvStreamType;
+using ::aidl::android::hardware::tv::tuner::BnFilterCallback;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::FilterDelayHint;
+using ::aidl::android::hardware::tv::tuner::IFilter;
 using ::aidl::android::media::tv::tuner::BnTunerFilter;
-using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
-using ::aidl::android::media::tv::tuner::TunerDemuxIpAddress;
-using ::aidl::android::media::tv::tuner::TunerFilterConfiguration;
-using ::aidl::android::media::tv::tuner::TunerFilterDownloadEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterIpPayloadEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMediaEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMmtpRecordEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMonitorEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterPesEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterScIndexMask;
-using ::aidl::android::media::tv::tuner::TunerFilterSectionEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterSharedHandleInfo;
-using ::aidl::android::media::tv::tuner::TunerFilterSettings;
-using ::aidl::android::media::tv::tuner::TunerFilterTemiEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterTsRecordEvent;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_array;
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterIpPayloadEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMediaEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMmtpRecordEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterPesDataSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterPesEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterRecordSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterTemiEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterTsRecordEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxPid;
-using ::android::hardware::tv::tuner::V1_0::IFilter;
-using ::android::hardware::tv::tuner::V1_1::AvStreamType;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterEventExt;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEvent;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterTsRecordEventExt;
-using ::android::hardware::tv::tuner::V1_1::IFilterCallback;
+using ::android::Mutex;
 
+using namespace std;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-using MQDesc = MQDescriptorSync<uint8_t>;
 using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
 
-const static int IP_V4_LENGTH = 4;
-const static int IP_V6_LENGTH = 16;
-
 class TunerFilter : public BnTunerFilter {
 
 public:
-    TunerFilter(sp<IFilter> filter, int mainType, int subTyp);
-    virtual ~TunerFilter();
-    Status getId(int32_t* _aidl_return) override;
-    Status getId64Bit(int64_t* _aidl_return) override;
-    Status getQueueDesc(AidlMQDesc* _aidl_return) override;
-    Status configure(const TunerFilterConfiguration& config) override;
-    Status configureMonitorEvent(int monitorEventType) override;
-    Status configureIpFilterContextId(int cid) override;
-    Status configureAvStreamType(int avStreamType) override;
-    Status getAvSharedHandleInfo(TunerFilterSharedHandleInfo* _aidl_return) override;
-    Status releaseAvHandle(const ::aidl::android::hardware::common::NativeHandle& handle,
-            int64_t avDataId) override;
-    Status setDataSource(const std::shared_ptr<ITunerFilter>& filter) override;
-    Status start() override;
-    Status stop() override;
-    Status flush() override;
-    Status close() override;
-    sp<IFilter> getHalFilter();
+    class FilterCallback : public BnFilterCallback {
+    public:
+        FilterCallback(const shared_ptr<ITunerFilterCallback>& tunerFilterCallback)
+              : mTunerFilterCallback(tunerFilterCallback), mOriginalCallback(nullptr){};
 
-    struct FilterCallback : public IFilterCallback {
-        FilterCallback(const std::shared_ptr<ITunerFilterCallback> tunerFilterCallback)
-                : mTunerFilterCallback(tunerFilterCallback) {};
+        ::ndk::ScopedAStatus onFilterEvent(const vector<DemuxFilterEvent>& events) override;
+        ::ndk::ScopedAStatus onFilterStatus(DemuxFilterStatus status) override;
 
-        virtual Return<void> onFilterEvent(const DemuxFilterEvent& filterEvent);
-        virtual Return<void> onFilterEvent_1_1(const DemuxFilterEvent& filterEvent,
-                const DemuxFilterEventExt& filterEventExt);
-        virtual Return<void> onFilterStatus(DemuxFilterStatus status);
+        void sendSharedFilterStatus(int32_t status);
+        void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+        void detachSharedFilterCallback();
 
-        void getAidlFilterEvent(std::vector<DemuxFilterEvent::Event>& events,
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& tunerEvent);
-
-        void getMediaEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getSectionEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getPesEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getTsRecordEvent(
-                std::vector<DemuxFilterEvent::Event>& events,
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& res);
-        void getMmtpRecordEvent(
-                std::vector<DemuxFilterEvent::Event>& events,
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& res);
-        void getDownloadEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getIpPayloadEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getTemiEvent(
-                std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
-        void getMonitorEvent(
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& res);
-        void getRestartEvent(
-                std::vector<DemuxFilterEventExt::Event>& eventsExt,
-                std::vector<TunerFilterEvent>& res);
-
-        std::shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
+    private:
+        shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
+        shared_ptr<ITunerFilterCallback> mOriginalCallback;
+        Mutex mCallbackLock;
     };
 
+    TunerFilter(shared_ptr<IFilter> filter, shared_ptr<FilterCallback> cb, DemuxFilterType type);
+    virtual ~TunerFilter();
+
+    ::ndk::ScopedAStatus getId(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getId64Bit(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus configure(const DemuxFilterSettings& in_settings) override;
+    ::ndk::ScopedAStatus configureMonitorEvent(int32_t in_monitorEventTypes) override;
+    ::ndk::ScopedAStatus configureIpFilterContextId(int32_t in_cid) override;
+    ::ndk::ScopedAStatus configureAvStreamType(const AvStreamType& in_avStreamType) override;
+    ::ndk::ScopedAStatus getAvSharedHandle(NativeHandle* out_avMemory,
+                                           int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus releaseAvHandle(const NativeHandle& in_handle,
+                                         int64_t in_avDataId) override;
+    ::ndk::ScopedAStatus setDataSource(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus start() override;
+    ::ndk::ScopedAStatus stop() override;
+    ::ndk::ScopedAStatus flush() override;
+    ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus acquireSharedFilterToken(string* _aidl_return) override;
+    ::ndk::ScopedAStatus freeSharedFilterToken(const string& in_filterToken) override;
+    ::ndk::ScopedAStatus getFilterType(DemuxFilterType* _aidl_return) override;
+    ::ndk::ScopedAStatus setDelayHint(const FilterDelayHint& in_hint) override;
+
+    bool isSharedFilterAllowed(int32_t pid);
+    void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+    shared_ptr<IFilter> getHalFilter();
+
 private:
-    DemuxFilterAvSettings getAvSettings(const TunerFilterSettings& settings);
-    DemuxFilterSectionSettings getSectionSettings(const TunerFilterSettings& settings);
-    DemuxFilterPesDataSettings getPesDataSettings(const TunerFilterSettings& settings);
-    DemuxFilterRecordSettings getRecordSettings(const TunerFilterSettings& settings);
-    DemuxFilterDownloadSettings getDownloadSettings(const TunerFilterSettings& settings);
-
-    bool isAudioFilter();
-    bool isVideoFilter();
-    bool getHidlAvStreamType(int avStreamType, AvStreamType& type);
-
-    void getHidlTsSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-    void getHidlMmtpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-    void getHidlIpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-    void getHidlTlvSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-    void getHidlAlpSettings(
-        const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-
-    hidl_array<uint8_t, IP_V4_LENGTH> getIpV4Address(TunerDemuxIpAddress addr);
-    hidl_array<uint8_t, IP_V6_LENGTH> getIpV6Address(TunerDemuxIpAddress addr);
-
-    sp<IFilter> mFilter;
-    sp<::android::hardware::tv::tuner::V1_1::IFilter> mFilter_1_1;
+    shared_ptr<IFilter> mFilter;
     int32_t mId;
     int64_t mId64Bit;
-    int mMainType;
-    int mSubType;
+    DemuxFilterType mType;
+    bool mStarted;
+    bool mShared;
+    int32_t mClientPid;
+    shared_ptr<FilterCallback> mFilterCallback;
+    Mutex mLock;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERFILTER_H
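
Illustrative sketch (not from this change): the rewritten TunerFrontend.cpp below repeatedly applies one guard-and-forward pattern — if the HAL proxy is null, return a service-specific UNAVAILABLE status; otherwise call the AIDL HAL and return its status object unchanged. The standalone C++ below mirrors that shape; Status, Result, IFrontendHal and TunerFrontendSketch are simplified stand-ins, not the real ndk::ScopedAStatus or the AIDL-generated proxy.

    // Illustrative only: mirrors the "if (mFrontend == nullptr) return
    // UNAVAILABLE; otherwise forward and return the HAL status" shape used by
    // the AIDL-based TunerFrontend methods. All types are simplified stand-ins.
    #include <cstdint>
    #include <iostream>
    #include <memory>

    enum class Result : int32_t { SUCCESS = 0, UNAVAILABLE = 1, INVALID_ARGUMENT = 2 };

    struct Status {
        static Status ok() { return Status{Result::SUCCESS}; }
        static Status fromServiceSpecificError(Result r) { return Status{r}; }
        bool isOk() const { return error == Result::SUCCESS; }
        Result error;
    };

    struct IFrontendHal {                       // stand-in for the AIDL HAL proxy
        Status stopTune() { return Status::ok(); }
    };

    class TunerFrontendSketch {
    public:
        explicit TunerFrontendSketch(std::shared_ptr<IFrontendHal> hal)
              : mFrontend(std::move(hal)) {}

        Status stopTune() {
            if (mFrontend == nullptr) {
                // Same guard as the real code: report UNAVAILABLE once the HAL is gone.
                return Status::fromServiceSpecificError(Result::UNAVAILABLE);
            }
            return mFrontend->stopTune();       // forward and return the HAL status as-is
        }

    private:
        std::shared_ptr<IFrontendHal> mFrontend;
    };

    int main() {
        TunerFrontendSketch bound{std::make_shared<IFrontendHal>()};
        TunerFrontendSketch unbound{nullptr};
        std::cout << bound.stopTune().isOk() << ' ' << unbound.stopTune().isOk() << '\n';  // "1 0"
        return 0;
    }

Compared with the removed HIDL path, there is no intermediate Result-to-Status conversion: the status returned by the HAL call is the value the service method returns.
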
diff --git a/services/tuner/TunerFrontend.cpp b/services/tuner/TunerFrontend.cpp
index 74b5519..a5ef2bb 100644
--- a/services/tuner/TunerFrontend.cpp
+++ b/services/tuner/TunerFrontend.cpp
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,1081 +14,186 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
 #define LOG_TAG "TunerFrontend"
 
 #include "TunerFrontend.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
 #include "TunerLnb.h"
 
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3PlpSettings;
-using ::aidl::android::media::tv::tuner::TunerFrontendScanAtsc3PlpInfo;
-using ::aidl::android::media::tv::tuner::TunerFrontendStatusAtsc3PlpInfo;
-using ::aidl::android::media::tv::tuner::TunerFrontendUnionSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendAnalogSifStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendAnalogType;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtscModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Bandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3CodeRate;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3DemodOutputFormat;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Fec;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Modulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3TimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcAnnex;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcOuterFec;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcSpectralInversion;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsPilot;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsRolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsVcmMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtBandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtConstellation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtGuardInterval;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtHierarchy;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtPlpMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtTransmissionMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendInnerFec;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Coderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Modulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Rolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Settings;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsRolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsStreamIdType;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtBandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtGuardInterval;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendModulationStatus;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanAtsc3PlpInfo;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanType;
-using ::android::hardware::tv::tuner::V1_0::FrontendStatusType;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::FrontendAnalogAftFlag;
-using ::android::hardware::tv::tuner::V1_1::FrontendBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendCableTimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDvbcBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbCodeRate;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbGuardInterval;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbModulation;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbTimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbTransmissionMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDvbsScanType;
-using ::android::hardware::tv::tuner::V1_1::FrontendGuardInterval;
-using ::android::hardware::tv::tuner::V1_1::FrontendInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendModulation;
-using ::android::hardware::tv::tuner::V1_1::FrontendRollOff;
-using ::android::hardware::tv::tuner::V1_1::FrontendTransmissionMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendSpectralInversion;
-using ::android::hardware::tv::tuner::V1_1::FrontendStatusTypeExt1_1;
+using ::aidl::android::hardware::tv::tuner::Result;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerFrontend::TunerFrontend(sp<IFrontend> frontend, int id) {
+TunerFrontend::TunerFrontend(shared_ptr<IFrontend> frontend, int id) {
     mFrontend = frontend;
-    mFrontend_1_1 = ::android::hardware::tv::tuner::V1_1::IFrontend::castFrom(mFrontend);
     mId = id;
 }
 
 TunerFrontend::~TunerFrontend() {
-    mFrontend = NULL;
-    mFrontend_1_1 = NULL;
+    mFrontend = nullptr;
     mId = -1;
 }
 
-Status TunerFrontend::setCallback(
+::ndk::ScopedAStatus TunerFrontend::setCallback(
         const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) {
-    if (mFrontend == NULL) {
+    if (mFrontend == nullptr) {
         ALOGE("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    if (tunerFrontendCallback == NULL) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    if (tunerFrontendCallback == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    sp<IFrontendCallback> frontendCallback = new FrontendCallback(tunerFrontendCallback);
-    Result status = mFrontend->setCallback(frontendCallback);
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    shared_ptr<IFrontendCallback> frontendCallback =
+            ::ndk::SharedRefBase::make<FrontendCallback>(tunerFrontendCallback);
+    return mFrontend->setCallback(frontendCallback);
 }
 
-Status TunerFrontend::tune(const TunerFrontendSettings& settings) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::tune(const FrontendSettings& settings) {
+    if (mFrontend == nullptr) {
         ALOGE("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    FrontendSettings frontendSettings = getHidlFrontendSettings(settings);
-    if (settings.isExtended) {
-        if (mFrontend_1_1 == NULL) {
-            ALOGE("IFrontend_1_1 is not initialized");
-            return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-        }
-        FrontendSettingsExt1_1 frontendSettingsExt = getHidlFrontendSettingsExt(settings);
-        status = mFrontend_1_1->tune_1_1(frontendSettings, frontendSettingsExt);
-    } else {
-        status = mFrontend->tune(frontendSettings);
-    }
-
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->tune(settings);
 }
 
-Status TunerFrontend::stopTune() {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::stopTune() {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->stopTune();
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->stopTune();
 }
 
-Status TunerFrontend::scan(const TunerFrontendSettings& settings, int frontendScanType) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::scan(const FrontendSettings& settings,
+                                         FrontendScanType frontendScanType) {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    FrontendSettings frontendSettings = getHidlFrontendSettings(settings);
-    if (settings.isExtended) {
-        if (mFrontend_1_1 == NULL) {
-            ALOGE("IFrontend_1_1 is not initialized");
-            return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-        }
-        FrontendSettingsExt1_1 frontendSettingsExt = getHidlFrontendSettingsExt(settings);
-        status = mFrontend_1_1->scan_1_1(frontendSettings,
-                static_cast<FrontendScanType>(frontendScanType), frontendSettingsExt);
-    } else {
-        status = mFrontend->scan(
-                frontendSettings, static_cast<FrontendScanType>(frontendScanType));
-    }
-
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->scan(settings, frontendScanType);
 }
 
-Status TunerFrontend::stopScan() {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::stopScan() {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->stopScan();
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->stopScan();
 }
 
-Status TunerFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->setLnb(static_cast<TunerLnb*>(lnb.get())->getId());
-    if (status == Result::SUCCESS) {
-        return Status::ok();
+    if (lnb == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->setLnb(static_cast<TunerLnb*>(lnb.get())->getId());
 }
 
-Status TunerFrontend::setLna(bool bEnable) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::linkCiCamToFrontend(int32_t ciCamId, int32_t* _aidl_return) {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->setLna(bEnable);
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    return mFrontend->linkCiCam(ciCamId, _aidl_return);
 }
 
-Status TunerFrontend::linkCiCamToFrontend(int ciCamId, int32_t* _aidl_return) {
-    if (mFrontend_1_1 == NULL) {
-        ALOGD("IFrontend_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    int ltsId;
-    Result status;
-    mFrontend_1_1->linkCiCam(static_cast<uint32_t>(ciCamId),
-            [&](Result r, uint32_t id) {
-                status = r;
-                ltsId = id;
-            });
-
-    if (status == Result::SUCCESS) {
-        *_aidl_return = ltsId;
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-}
-
-Status TunerFrontend::unlinkCiCamToFrontend(int ciCamId) {
-    if (mFrontend_1_1 == NULL) {
-        ALOGD("IFrontend_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result status = mFrontend_1_1->unlinkCiCam(ciCamId);
-    if (status == Result::SUCCESS) {
-        return Status::ok();
-    }
-
-    return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-}
-
-Status TunerFrontend::close() {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::unlinkCiCamToFrontend(int32_t ciCamId) {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mFrontend->close();
-    mFrontend = NULL;
-    mFrontend_1_1 = NULL;
-
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mFrontend->unlinkCiCam(ciCamId);
 }
 
-Status TunerFrontend::getStatus(const vector<int32_t>& statusTypes,
-        vector<TunerFrontendStatus>* _aidl_return) {
-    if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::close() {
+    if (mFrontend == nullptr) {
         ALOGD("IFrontend is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    vector<FrontendStatus> status;
-    vector<FrontendStatusType> types;
-    for (auto s : statusTypes) {
-        types.push_back(static_cast<FrontendStatusType>(s));
-    }
+    auto res = mFrontend->close();
+    mFrontend = nullptr;
 
-    mFrontend->getStatus(types, [&](Result r, const hidl_vec<FrontendStatus>& s) {
-        res = r;
-        status = s;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    getAidlFrontendStatus(status, *_aidl_return);
-    return Status::ok();
+    return res;
 }
 
-Status TunerFrontend::getStatusExtended_1_1(const vector<int32_t>& statusTypes,
-        vector<TunerFrontendStatus>* _aidl_return) {
-    if (mFrontend_1_1 == NULL) {
-        ALOGD("IFrontend_1_1 is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+::ndk::ScopedAStatus TunerFrontend::getStatus(const vector<FrontendStatusType>& in_statusTypes,
+                                              vector<FrontendStatus>* _aidl_return) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    vector<FrontendStatusExt1_1> status;
-    vector<FrontendStatusTypeExt1_1> types;
-    for (auto s : statusTypes) {
-        types.push_back(static_cast<FrontendStatusTypeExt1_1>(s));
-    }
-
-    mFrontend_1_1->getStatusExt1_1(types, [&](Result r, const hidl_vec<FrontendStatusExt1_1>& s) {
-        res = r;
-        status = s;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    getAidlFrontendStatusExt(status, *_aidl_return);
-    return Status::ok();
+    return mFrontend->getStatus(in_statusTypes, _aidl_return);
 }
 
-Status TunerFrontend::getFrontendId(int* _aidl_return) {
+::ndk::ScopedAStatus TunerFrontend::getFrontendId(int32_t* _aidl_return) {
     *_aidl_return = mId;
-    return Status::ok();
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerFrontend::getHardwareInfo(std::string* _aidl_return) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mFrontend->getHardwareInfo(_aidl_return);
 }
 
 /////////////// FrontendCallback ///////////////////////
-
-Return<void> TunerFrontend::FrontendCallback::onEvent(FrontendEventType frontendEventType) {
-    ALOGD("FrontendCallback::onEvent, type=%d", frontendEventType);
-    mTunerFrontendCallback->onEvent((int)frontendEventType);
-    return Void();
+::ndk::ScopedAStatus TunerFrontend::FrontendCallback::onEvent(FrontendEventType frontendEventType) {
+    ALOGV("FrontendCallback::onEvent, type=%d", frontendEventType);
+    if (mTunerFrontendCallback != nullptr) {
+        mTunerFrontendCallback->onEvent(frontendEventType);
+    }
+    return ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerFrontend::FrontendCallback::onScanMessage(
+::ndk::ScopedAStatus TunerFrontend::FrontendCallback::onScanMessage(
         FrontendScanMessageType type, const FrontendScanMessage& message) {
-    ALOGD("FrontendCallback::onScanMessage, type=%d", type);
-    TunerFrontendScanMessage scanMessage;
-    switch(type) {
-        case FrontendScanMessageType::LOCKED: {
-            scanMessage.set<TunerFrontendScanMessage::isLocked>(message.isLocked());
-            break;
-        }
-        case FrontendScanMessageType::END: {
-            scanMessage.set<TunerFrontendScanMessage::isEnd>(message.isEnd());
-            break;
-        }
-        case FrontendScanMessageType::PROGRESS_PERCENT: {
-            scanMessage.set<TunerFrontendScanMessage::progressPercent>(message.progressPercent());
-            break;
-        }
-        case FrontendScanMessageType::FREQUENCY: {
-            auto f = message.frequencies();
-            vector<int> frequencies(begin(f), end(f));
-            scanMessage.set<TunerFrontendScanMessage::frequencies>(frequencies);
-            break;
-        }
-        case FrontendScanMessageType::SYMBOL_RATE: {
-            auto s = message.symbolRates();
-            vector<int> symbolRates(begin(s), end(s));
-            scanMessage.set<TunerFrontendScanMessage::symbolRates>(symbolRates);
-            break;
-        }
-        case FrontendScanMessageType::HIERARCHY: {
-            scanMessage.set<TunerFrontendScanMessage::hierarchy>((int)message.hierarchy());
-            break;
-        }
-        case FrontendScanMessageType::ANALOG_TYPE: {
-            scanMessage.set<TunerFrontendScanMessage::analogType>((int)message.analogType());
-            break;
-        }
-        case FrontendScanMessageType::PLP_IDS: {
-            auto p = message.plpIds();
-            vector<uint8_t> plpIds(begin(p), end(p));
-            scanMessage.set<TunerFrontendScanMessage::plpIds>(plpIds);
-            break;
-        }
-        case FrontendScanMessageType::GROUP_IDS: {
-            auto g = message.groupIds();
-            vector<uint8_t> groupIds(begin(g), end(g));
-            scanMessage.set<TunerFrontendScanMessage::groupIds>(groupIds);
-            break;
-        }
-        case FrontendScanMessageType::INPUT_STREAM_IDS: {
-            auto i = message.inputStreamIds();
-            vector<char16_t> streamIds(begin(i), end(i));
-            scanMessage.set<TunerFrontendScanMessage::inputStreamIds>(streamIds);
-            break;
-        }
-        case FrontendScanMessageType::STANDARD: {
-            FrontendScanMessage::Standard std = message.std();
-            int standard;
-            if (std.getDiscriminator() == FrontendScanMessage::Standard::hidl_discriminator::sStd) {
-                standard = (int) std.sStd();
-            } else if (std.getDiscriminator() ==
-                    FrontendScanMessage::Standard::hidl_discriminator::tStd) {
-                standard = (int) std.tStd();
-            } else if (std.getDiscriminator() ==
-                    FrontendScanMessage::Standard::hidl_discriminator::sifStd) {
-                standard = (int) std.sifStd();
-            }
-            scanMessage.set<TunerFrontendScanMessage::std>(standard);
-            break;
-        }
-        case FrontendScanMessageType::ATSC3_PLP_INFO: {
-            vector<FrontendScanAtsc3PlpInfo> plpInfos = message.atsc3PlpInfos();
-            vector<TunerFrontendScanAtsc3PlpInfo> tunerPlpInfos;
-            for (int i = 0; i < plpInfos.size(); i++) {
-                auto info = plpInfos[i];
-                int8_t plpId = (int8_t) info.plpId;
-                bool lls = (bool) info.bLlsFlag;
-                TunerFrontendScanAtsc3PlpInfo plpInfo{
-                    .plpId = plpId,
-                    .llsFlag = lls,
-                };
-                tunerPlpInfos.push_back(plpInfo);
-            }
-            scanMessage.set<TunerFrontendScanMessage::atsc3PlpInfos>(tunerPlpInfos);
-            break;
-        }
-        default:
-            break;
+    ALOGV("FrontendCallback::onScanMessage, type=%d", type);
+    if (mTunerFrontendCallback != nullptr) {
+        mTunerFrontendCallback->onScanMessage(type, message);
     }
-    mTunerFrontendCallback->onScanMessage((int)type, scanMessage);
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerFrontend::FrontendCallback::onScanMessageExt1_1(
-        FrontendScanMessageTypeExt1_1 type, const FrontendScanMessageExt1_1& message) {
-    ALOGD("onScanMessageExt1_1::onScanMessage, type=%d", type);
-    TunerFrontendScanMessage scanMessage;
-    switch(type) {
-        case FrontendScanMessageTypeExt1_1::MODULATION: {
-            FrontendModulation m = message.modulation();
-            int modulation;
-            switch (m.getDiscriminator()) {
-                case FrontendModulation::hidl_discriminator::dvbc:
-                    modulation = (int) m.dvbc();
-                    break;
-                case FrontendModulation::hidl_discriminator::dvbt:
-                    modulation = (int) m.dvbt();
-                    break;
-                case FrontendModulation::hidl_discriminator::dvbs:
-                    modulation = (int) m.dvbs();
-                    break;
-                case FrontendModulation::hidl_discriminator::isdbs:
-                    modulation = (int) m.isdbs();
-                    break;
-                case FrontendModulation::hidl_discriminator::isdbs3:
-                    modulation = (int) m.isdbs3();
-                    break;
-                case FrontendModulation::hidl_discriminator::isdbt:
-                    modulation = (int) m.isdbt();
-                    break;
-                case FrontendModulation::hidl_discriminator::atsc:
-                    modulation = (int) m.atsc();
-                    break;
-                case FrontendModulation::hidl_discriminator::atsc3:
-                    modulation = (int) m.atsc3();
-                    break;
-                case FrontendModulation::hidl_discriminator::dtmb:
-                    modulation = (int) m.dtmb();
-                    break;
-            }
-            scanMessage.set<TunerFrontendScanMessage::modulation>(modulation);
-            break;
-        }
-        case FrontendScanMessageTypeExt1_1::DVBC_ANNEX: {
-            scanMessage.set<TunerFrontendScanMessage::annex>((int)message.annex());
-            break;
-        }
-        case FrontendScanMessageTypeExt1_1::HIGH_PRIORITY: {
-            scanMessage.set<TunerFrontendScanMessage::isHighPriority>(message.isHighPriority());
-            break;
-        }
-        default:
-            break;
-    }
-    mTunerFrontendCallback->onScanMessage((int)type, scanMessage);
-    return Void();
-}
-
-/////////////// TunerFrontend Helper Methods ///////////////////////
-
-void TunerFrontend::getAidlFrontendStatus(
-        vector<FrontendStatus>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus) {
-    for (FrontendStatus s : hidlStatus) {
-        TunerFrontendStatus status;
-        switch (s.getDiscriminator()) {
-            case FrontendStatus::hidl_discriminator::isDemodLocked: {
-                status.set<TunerFrontendStatus::isDemodLocked>(s.isDemodLocked());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::snr: {
-                status.set<TunerFrontendStatus::snr>((int)s.snr());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::ber: {
-                status.set<TunerFrontendStatus::ber>((int)s.ber());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::per: {
-                status.set<TunerFrontendStatus::per>((int)s.per());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::preBer: {
-                status.set<TunerFrontendStatus::preBer>((int)s.preBer());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::signalQuality: {
-                status.set<TunerFrontendStatus::signalQuality>((int)s.signalQuality());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::signalStrength: {
-                status.set<TunerFrontendStatus::signalStrength>((int)s.signalStrength());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::symbolRate: {
-                status.set<TunerFrontendStatus::symbolRate>((int)s.symbolRate());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::innerFec: {
-                status.set<TunerFrontendStatus::innerFec>((long)s.innerFec());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::modulation: {
-                switch (s.modulation().getDiscriminator()) {
-                    case FrontendModulationStatus::hidl_discriminator::dvbc:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().dvbc());
-                        aidlStatus.push_back(status);
-                        break;
-                    case FrontendModulationStatus::hidl_discriminator::dvbs:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().dvbs());
-                        aidlStatus.push_back(status);
-                        break;
-                    case FrontendModulationStatus::hidl_discriminator::isdbs:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbs());
-                        aidlStatus.push_back(status);
-                        break;
-                    case FrontendModulationStatus::hidl_discriminator::isdbs3:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbs3());
-                        aidlStatus.push_back(status);
-                        break;
-                    case FrontendModulationStatus::hidl_discriminator::isdbt:
-                        status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbt());
-                        aidlStatus.push_back(status);
-                        break;
-                }
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::inversion: {
-                status.set<TunerFrontendStatus::inversion>((int)s.inversion());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::lnbVoltage: {
-                status.set<TunerFrontendStatus::lnbVoltage>((int)s.lnbVoltage());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::plpId: {
-                status.set<TunerFrontendStatus::plpId>((int8_t)s.plpId());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::isEWBS: {
-                status.set<TunerFrontendStatus::isEWBS>(s.isEWBS());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::agc: {
-                status.set<TunerFrontendStatus::agc>((int8_t)s.agc());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::isLnaOn: {
-                status.set<TunerFrontendStatus::isLnaOn>(s.isLnaOn());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::isLayerError: {
-                vector<bool> e(s.isLayerError().begin(), s.isLayerError().end());
-                status.set<TunerFrontendStatus::isLayerError>(e);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::mer: {
-                status.set<TunerFrontendStatus::mer>((int)s.mer());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::freqOffset: {
-                status.set<TunerFrontendStatus::freqOffset>((int)s.freqOffset());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::hierarchy: {
-                status.set<TunerFrontendStatus::hierarchy>((int)s.hierarchy());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::isRfLocked: {
-                status.set<TunerFrontendStatus::isRfLocked>(s.isRfLocked());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatus::hidl_discriminator::plpInfo: {
-                vector<TunerFrontendStatusAtsc3PlpInfo> info;
-                for (auto i : s.plpInfo()) {
-                    info.push_back({
-                        .plpId = (int8_t)i.plpId,
-                        .isLocked = i.isLocked,
-                        .uec = (int)i.uec,
-                    });
-                }
-                status.set<TunerFrontendStatus::plpInfo>(info);
-                aidlStatus.push_back(status);
-                break;
-            }
-        }
-    }
-}
-
-void TunerFrontend::getAidlFrontendStatusExt(
-        vector<FrontendStatusExt1_1>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus) {
-    for (FrontendStatusExt1_1 s : hidlStatus) {
-        TunerFrontendStatus status;
-        switch (s.getDiscriminator()) {
-            case FrontendStatusExt1_1::hidl_discriminator::modulations: {
-                vector<int> aidlMod;
-                for (auto m : s.modulations()) {
-                    switch (m.getDiscriminator()) {
-                        case FrontendModulation::hidl_discriminator::dvbc:
-                            aidlMod.push_back((int)m.dvbc());
-                            break;
-                        case FrontendModulation::hidl_discriminator::dvbs:
-                            aidlMod.push_back((int)m.dvbs());
-                            break;
-                        case FrontendModulation::hidl_discriminator::dvbt:
-                            aidlMod.push_back((int)m.dvbt());
-                            break;
-                        case FrontendModulation::hidl_discriminator::isdbs:
-                            aidlMod.push_back((int)m.isdbs());
-                            break;
-                        case FrontendModulation::hidl_discriminator::isdbs3:
-                            aidlMod.push_back((int)m.isdbs3());
-                            break;
-                        case FrontendModulation::hidl_discriminator::isdbt:
-                            aidlMod.push_back((int)m.isdbt());
-                            break;
-                        case FrontendModulation::hidl_discriminator::atsc:
-                            aidlMod.push_back((int)m.atsc());
-                            break;
-                        case FrontendModulation::hidl_discriminator::atsc3:
-                            aidlMod.push_back((int)m.atsc3());
-                            break;
-                        case FrontendModulation::hidl_discriminator::dtmb:
-                            aidlMod.push_back((int)m.dtmb());
-                            break;
-                    }
-                }
-                status.set<TunerFrontendStatus::modulations>(aidlMod);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::bers: {
-                vector<int> b(s.bers().begin(), s.bers().end());
-                status.set<TunerFrontendStatus::bers>(b);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::codeRates: {
-                vector<int64_t> codeRates;
-                for (auto c : s.codeRates()) {
-                    codeRates.push_back((long)c);
-                }
-                status.set<TunerFrontendStatus::codeRates>(codeRates);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::bandwidth: {
-                switch (s.bandwidth().getDiscriminator()) {
-                    case FrontendBandwidth::hidl_discriminator::atsc3:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().atsc3());
-                        break;
-                    case FrontendBandwidth::hidl_discriminator::dvbc:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dvbc());
-                        break;
-                    case FrontendBandwidth::hidl_discriminator::dvbt:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dvbt());
-                        break;
-                    case FrontendBandwidth::hidl_discriminator::isdbt:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().isdbt());
-                        break;
-                    case FrontendBandwidth::hidl_discriminator::dtmb:
-                        status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dtmb());
-                        break;
-                }
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::interval: {
-                switch (s.interval().getDiscriminator()) {
-                    case FrontendGuardInterval::hidl_discriminator::dvbt:
-                        status.set<TunerFrontendStatus::interval>((int)s.interval().dvbt());
-                        break;
-                    case FrontendGuardInterval::hidl_discriminator::isdbt:
-                        status.set<TunerFrontendStatus::interval>((int)s.interval().isdbt());
-                        break;
-                    case FrontendGuardInterval::hidl_discriminator::dtmb:
-                        status.set<TunerFrontendStatus::interval>((int)s.interval().dtmb());
-                        break;
-                }
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::transmissionMode: {
-                switch (s.transmissionMode().getDiscriminator()) {
-                    case FrontendTransmissionMode::hidl_discriminator::dvbt:
-                        status.set<TunerFrontendStatus::transmissionMode>(
-                                (int)s.transmissionMode().dvbt());
-                        break;
-                    case FrontendTransmissionMode::hidl_discriminator::isdbt:
-                        status.set<TunerFrontendStatus::transmissionMode>(
-                                (int)s.transmissionMode().isdbt());
-                        break;
-                    case FrontendTransmissionMode::hidl_discriminator::dtmb:
-                        status.set<TunerFrontendStatus::transmissionMode>(
-                                (int)s.transmissionMode().dtmb());
-                        break;
-                }
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::uec: {
-                status.set<TunerFrontendStatus::uec>((int)s.uec());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::systemId: {
-                status.set<TunerFrontendStatus::systemId>((char16_t)s.systemId());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::interleaving: {
-                vector<int> aidlInter;
-                for (auto i : s.interleaving()) {
-                    switch (i.getDiscriminator()) {
-                        case FrontendInterleaveMode::hidl_discriminator::atsc3:
-                            aidlInter.push_back((int)i.atsc3());
-                            break;
-                        case FrontendInterleaveMode::hidl_discriminator::dvbc:
-                            aidlInter.push_back((int)i.dvbc());
-                            break;
-                        case FrontendInterleaveMode::hidl_discriminator::dtmb:
-                            aidlInter.push_back((int)i.dtmb());
-                            break;
-                    }
-                }
-                status.set<TunerFrontendStatus::interleaving>(aidlInter);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::isdbtSegment: {
-                auto seg = s.isdbtSegment();
-                vector<uint8_t> i(seg.begin(), seg.end());
-                status.set<TunerFrontendStatus::isdbtSegment>(i);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::tsDataRate: {
-                vector<int> ts(s.tsDataRate().begin(), s.tsDataRate().end());
-                status.set<TunerFrontendStatus::tsDataRate>(ts);
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::rollOff: {
-                switch (s.rollOff().getDiscriminator()) {
-                    case FrontendRollOff::hidl_discriminator::dvbs:
-                        status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().dvbs());
-                        break;
-                    case FrontendRollOff::hidl_discriminator::isdbs:
-                        status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().isdbs());
-                        break;
-                    case FrontendRollOff::hidl_discriminator::isdbs3:
-                        status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().isdbs3());
-                        break;
-                }
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::isMiso: {
-                status.set<TunerFrontendStatus::isMiso>(s.isMiso());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::isLinear: {
-                status.set<TunerFrontendStatus::isLinear>(s.isLinear());
-                aidlStatus.push_back(status);
-                break;
-            }
-            case FrontendStatusExt1_1::hidl_discriminator::isShortFrames: {
-                status.set<TunerFrontendStatus::isShortFrames>(s.isShortFrames());
-                aidlStatus.push_back(status);
-                break;
-            }
-        }
-    }
-}
-
-hidl_vec<FrontendAtsc3PlpSettings> TunerFrontend::getAtsc3PlpSettings(
-        const TunerFrontendAtsc3Settings& settings) {
-    int len = settings.plpSettings.size();
-    hidl_vec<FrontendAtsc3PlpSettings> plps = hidl_vec<FrontendAtsc3PlpSettings>(len);
-    // parse PLP settings
-    for (int i = 0; i < len; i++) {
-        uint8_t plpId = static_cast<uint8_t>(settings.plpSettings[i].plpId);
-        FrontendAtsc3Modulation modulation =
-                static_cast<FrontendAtsc3Modulation>(settings.plpSettings[i].modulation);
-        FrontendAtsc3TimeInterleaveMode interleaveMode =
-                static_cast<FrontendAtsc3TimeInterleaveMode>(
-                        settings.plpSettings[i].interleaveMode);
-        FrontendAtsc3CodeRate codeRate =
-                static_cast<FrontendAtsc3CodeRate>(settings.plpSettings[i].codeRate);
-        FrontendAtsc3Fec fec =
-                static_cast<FrontendAtsc3Fec>(settings.plpSettings[i].fec);
-        FrontendAtsc3PlpSettings frontendAtsc3PlpSettings {
-                .plpId = plpId,
-                .modulation = modulation,
-                .interleaveMode = interleaveMode,
-                .codeRate = codeRate,
-                .fec = fec,
-        };
-        plps[i] = frontendAtsc3PlpSettings;
-    }
-    return plps;
-}
-
-FrontendDvbsCodeRate TunerFrontend::getDvbsCodeRate(const TunerFrontendDvbsCodeRate& codeRate) {
-    FrontendInnerFec innerFec = static_cast<FrontendInnerFec>(codeRate.fec);
-    bool isLinear = codeRate.isLinear;
-    bool isShortFrames = codeRate.isShortFrames;
-    uint32_t bitsPer1000Symbol = static_cast<uint32_t>(codeRate.bitsPer1000Symbol);
-    FrontendDvbsCodeRate coderate {
-            .fec = innerFec,
-            .isLinear = isLinear,
-            .isShortFrames = isShortFrames,
-            .bitsPer1000Symbol = bitsPer1000Symbol,
-    };
-    return coderate;
-}
-
-FrontendSettings TunerFrontend::getHidlFrontendSettings(const TunerFrontendSettings& aidlSettings) {
-    auto settings = aidlSettings.settings;
-    FrontendSettings frontendSettings;
-
-    switch (settings.getTag()) {
-        case TunerFrontendUnionSettings::analog: {
-            auto analog = settings.get<TunerFrontendUnionSettings::analog>();
-            frontendSettings.analog({
-                .frequency = static_cast<uint32_t>(analog.frequency),
-                .type = static_cast<FrontendAnalogType>(analog.signalType),
-                .sifStandard = static_cast<FrontendAnalogSifStandard>(analog.sifStandard),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::atsc: {
-            auto atsc = settings.get<TunerFrontendUnionSettings::atsc>();
-            frontendSettings.atsc({
-                .frequency = static_cast<uint32_t>(atsc.frequency),
-                .modulation = static_cast<FrontendAtscModulation>(atsc.modulation),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::atsc3: {
-            auto atsc3 = settings.get<TunerFrontendUnionSettings::atsc3>();
-            frontendSettings.atsc3({
-                .frequency = static_cast<uint32_t>(atsc3.frequency),
-                .bandwidth = static_cast<FrontendAtsc3Bandwidth>(atsc3.bandwidth),
-                .demodOutputFormat = static_cast<FrontendAtsc3DemodOutputFormat>(
-                        atsc3.demodOutputFormat),
-                .plpSettings = getAtsc3PlpSettings(atsc3),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::cable: {
-            auto dvbc = settings.get<TunerFrontendUnionSettings::cable>();
-            frontendSettings.dvbc({
-                .frequency = static_cast<uint32_t>(dvbc.frequency),
-                .modulation = static_cast<FrontendDvbcModulation>(dvbc.modulation),
-                .fec = static_cast<FrontendInnerFec>(dvbc.innerFec),
-                .symbolRate = static_cast<uint32_t>(dvbc.symbolRate),
-                .outerFec = static_cast<FrontendDvbcOuterFec>(dvbc.outerFec),
-                .annex = static_cast<FrontendDvbcAnnex>(dvbc.annex),
-                .spectralInversion = static_cast<FrontendDvbcSpectralInversion>(
-                        dvbc.spectralInversion),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::dvbs: {
-            auto dvbs = settings.get<TunerFrontendUnionSettings::dvbs>();
-            frontendSettings.dvbs({
-                .frequency = static_cast<uint32_t>(dvbs.frequency),
-                .modulation = static_cast<FrontendDvbsModulation>(dvbs.modulation),
-                .coderate = getDvbsCodeRate(dvbs.codeRate),
-                .symbolRate = static_cast<uint32_t>(dvbs.symbolRate),
-                .rolloff = static_cast<FrontendDvbsRolloff>(dvbs.rolloff),
-                .pilot = static_cast<FrontendDvbsPilot>(dvbs.pilot),
-                .inputStreamId = static_cast<uint32_t>(dvbs.inputStreamId),
-                .standard = static_cast<FrontendDvbsStandard>(dvbs.standard),
-                .vcmMode = static_cast<FrontendDvbsVcmMode>(dvbs.vcm),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::dvbt: {
-            auto dvbt = settings.get<TunerFrontendUnionSettings::dvbt>();
-            frontendSettings.dvbt({
-                .frequency = static_cast<uint32_t>(dvbt.frequency),
-                .transmissionMode = static_cast<FrontendDvbtTransmissionMode>(
-                        dvbt.transmissionMode),
-                .bandwidth = static_cast<FrontendDvbtBandwidth>(dvbt.bandwidth),
-                .constellation = static_cast<FrontendDvbtConstellation>(dvbt.constellation),
-                .hierarchy = static_cast<FrontendDvbtHierarchy>(dvbt.hierarchy),
-                .hpCoderate = static_cast<FrontendDvbtCoderate>(dvbt.hpCodeRate),
-                .lpCoderate = static_cast<FrontendDvbtCoderate>(dvbt.lpCodeRate),
-                .guardInterval = static_cast<FrontendDvbtGuardInterval>(dvbt.guardInterval),
-                .isHighPriority = dvbt.isHighPriority,
-                .standard = static_cast<FrontendDvbtStandard>(dvbt.standard),
-                .isMiso = dvbt.isMiso,
-                .plpMode = static_cast<FrontendDvbtPlpMode>(dvbt.plpMode),
-                .plpId = static_cast<uint8_t>(dvbt.plpId),
-                .plpGroupId = static_cast<uint8_t>(dvbt.plpGroupId),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::isdbs: {
-            auto isdbs = settings.get<TunerFrontendUnionSettings::isdbs>();
-            frontendSettings.isdbs({
-                .frequency = static_cast<uint32_t>(isdbs.frequency),
-                .streamId = static_cast<uint16_t>(isdbs.streamId),
-                .streamIdType = static_cast<FrontendIsdbsStreamIdType>(isdbs.streamIdType),
-                .modulation = static_cast<FrontendIsdbsModulation>(isdbs.modulation),
-                .coderate = static_cast<FrontendIsdbsCoderate>(isdbs.codeRate),
-                .symbolRate = static_cast<uint32_t>(isdbs.symbolRate),
-                .rolloff = static_cast<FrontendIsdbsRolloff>(isdbs.rolloff),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::isdbs3: {
-            auto isdbs3 = settings.get<TunerFrontendUnionSettings::isdbs3>();
-            frontendSettings.isdbs3({
-                .frequency = static_cast<uint32_t>(isdbs3.frequency),
-                .streamId = static_cast<uint16_t>(isdbs3.streamId),
-                .streamIdType = static_cast<FrontendIsdbsStreamIdType>(isdbs3.streamIdType),
-                .modulation = static_cast<FrontendIsdbs3Modulation>(isdbs3.modulation),
-                .coderate = static_cast<FrontendIsdbs3Coderate>(isdbs3.codeRate),
-                .symbolRate = static_cast<uint32_t>(isdbs3.symbolRate),
-                .rolloff = static_cast<FrontendIsdbs3Rolloff>(isdbs3.rolloff),
-            });
-            break;
-        }
-        case TunerFrontendUnionSettings::isdbt: {
-            auto isdbt = settings.get<TunerFrontendUnionSettings::isdbt>();
-            frontendSettings.isdbt({
-                .frequency = static_cast<uint32_t>(isdbt.frequency),
-                .modulation = static_cast<FrontendIsdbtModulation>(isdbt.modulation),
-                .bandwidth = static_cast<FrontendIsdbtBandwidth>(isdbt.bandwidth),
-                .mode = static_cast<FrontendIsdbtMode>(isdbt.mode),
-                .coderate = static_cast<FrontendIsdbtCoderate>(isdbt.codeRate),
-                .guardInterval = static_cast<FrontendIsdbtGuardInterval>(isdbt.guardInterval),
-                .serviceAreaId = static_cast<uint32_t>(isdbt.serviceAreaId),
-            });
-            break;
-        }
-        default:
-            break;
-    }
-
-    return frontendSettings;
-}
-
-FrontendSettingsExt1_1 TunerFrontend::getHidlFrontendSettingsExt(
-        const TunerFrontendSettings& aidlSettings) {
-    FrontendSettingsExt1_1 frontendSettingsExt{
-        .endFrequency = static_cast<uint32_t>(aidlSettings.endFrequency),
-        .inversion = static_cast<FrontendSpectralInversion>(aidlSettings.inversion),
-    };
-
-    auto settings = aidlSettings.settings;
-    switch (settings.getTag()) {
-        case TunerFrontendUnionSettings::analog: {
-            auto analog = settings.get<TunerFrontendUnionSettings::analog>();
-            if (analog.isExtended) {
-                frontendSettingsExt.settingExt.analog({
-                    .aftFlag = static_cast<FrontendAnalogAftFlag>(analog.aftFlag),
-                });
-            } else {
-                frontendSettingsExt.settingExt.noinit();
-            }
-            break;
-        }
-        case TunerFrontendUnionSettings::cable: {
-            auto dvbc = settings.get<TunerFrontendUnionSettings::cable>();
-            if (dvbc.isExtended) {
-                frontendSettingsExt.settingExt.dvbc({
-                    .interleaveMode = static_cast<FrontendCableTimeInterleaveMode>(
-                            dvbc.interleaveMode),
-                    .bandwidth = static_cast<FrontendDvbcBandwidth>(
-                            dvbc.bandwidth),
-                });
-            } else {
-                frontendSettingsExt.settingExt.noinit();
-            }
-            break;
-        }
-        case TunerFrontendUnionSettings::dvbs: {
-            auto dvbs = settings.get<TunerFrontendUnionSettings::dvbs>();
-            if (dvbs.isExtended) {
-                frontendSettingsExt.settingExt.dvbs({
-                    .scanType = static_cast<FrontendDvbsScanType>(dvbs.scanType),
-                    .isDiseqcRxMessage = dvbs.isDiseqcRxMessage,
-                });
-            } else {
-                frontendSettingsExt.settingExt.noinit();
-            }
-            break;
-        }
-        case TunerFrontendUnionSettings::dvbt: {
-            auto dvbt = settings.get<TunerFrontendUnionSettings::dvbt>();
-            if (dvbt.isExtended) {
-                frontendSettingsExt.settingExt.dvbt({
-                    .constellation =
-                            static_cast<hardware::tv::tuner::V1_1::FrontendDvbtConstellation>(
-                                    dvbt.constellation),
-                    .transmissionMode =
-                            static_cast<hardware::tv::tuner::V1_1::FrontendDvbtTransmissionMode>(
-                                    dvbt.transmissionMode),
-                });
-            } else {
-                frontendSettingsExt.settingExt.noinit();
-            }
-            break;
-        }
-        case TunerFrontendUnionSettings::dtmb: {
-            auto dtmb = settings.get<TunerFrontendUnionSettings::dtmb>();
-            frontendSettingsExt.settingExt.dtmb({
-                .frequency = static_cast<uint32_t>(dtmb.frequency),
-                .transmissionMode = static_cast<FrontendDtmbTransmissionMode>(
-                        dtmb.transmissionMode),
-                .bandwidth = static_cast<FrontendDtmbBandwidth>(dtmb.bandwidth),
-                .modulation = static_cast<FrontendDtmbModulation>(dtmb.modulation),
-                .codeRate = static_cast<FrontendDtmbCodeRate>(dtmb.codeRate),
-                .guardInterval = static_cast<FrontendDtmbGuardInterval>(dtmb.guardInterval),
-                .interleaveMode = static_cast<FrontendDtmbTimeInterleaveMode>(dtmb.interleaveMode),
-            });
-            break;
-        }
-        default:
-            frontendSettingsExt.settingExt.noinit();
-            break;
-    }
-
-    return frontendSettingsExt;
-}
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
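The block removed above was the HIDL bridging layer: each AIDL parcelable was rebuilt field by field into the V1_0/V1_1 structs (ATSC3 PLP settings, DVB-S code rates, the per-standard settings union and its Ext1_1 variant). With the AIDL Tuner HAL the same FrontendSettings type is shared end to end, so those converters are no longer needed. A minimal sketch of the resulting pass-through, modeled on the TunerLnb methods later in this change rather than the literal new TunerFrontend.cpp body, and assuming the same Result/ScopedAStatus error convention:

    // Sketch only: the shared AIDL settings are forwarded straight to the HAL.
    ::ndk::ScopedAStatus TunerFrontend::tune(const FrontendSettings& settings) {
        if (mFrontend == nullptr) {
            ALOGE("IFrontend is not initialized");
            return ::ndk::ScopedAStatus::fromServiceSpecificError(
                    static_cast<int32_t>(Result::UNAVAILABLE));
        }
        return mFrontend->tune(settings);
    }
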
diff --git a/services/tuner/TunerFrontend.h b/services/tuner/TunerFrontend.h
index 22fd509..418a751 100644
--- a/services/tuner/TunerFrontend.h
+++ b/services/tuner/TunerFrontend.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,95 +17,73 @@
 #ifndef ANDROID_MEDIA_TUNERFRONTEND_H
 #define ANDROID_MEDIA_TUNERFRONTEND_H
 
+#include <aidl/android/hardware/tv/tuner/BnFrontendCallback.h>
+#include <aidl/android/hardware/tv/tuner/IFrontend.h>
+#include <aidl/android/hardware/tv/tuner/IFrontendCallback.h>
 #include <aidl/android/media/tv/tuner/BnTunerFrontend.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <android/hardware/tv/tuner/1.1/IFrontend.h>
-#include <android/hardware/tv/tuner/1.1/IFrontendCallback.h>
-#include <media/stagefright/foundation/ADebug.h>
 #include <utils/Log.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerFrontend;
-using ::aidl::android::media::tv::tuner::ITunerFrontendCallback;
-using ::aidl::android::media::tv::tuner::ITunerLnb;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3Settings;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbsCodeRate;
-using ::aidl::android::media::tv::tuner::TunerFrontendScanMessage;
-using ::aidl::android::media::tv::tuner::TunerFrontendSettings;
-using ::aidl::android::media::tv::tuner::TunerFrontendStatus;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3PlpSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsCodeRate;
-using ::android::hardware::tv::tuner::V1_0::FrontendEventType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanMessage;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanMessageType;
-using ::android::hardware::tv::tuner::V1_0::FrontendSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendStatus;
-using ::android::hardware::tv::tuner::V1_0::IFrontend;
-using ::android::hardware::tv::tuner::V1_1::IFrontendCallback;
-using ::android::hardware::tv::tuner::V1_1::FrontendScanMessageExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendScanMessageTypeExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendSettingsExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendStatusExt1_1;
+using ::aidl::android::hardware::tv::tuner::BnFrontendCallback;
+using ::aidl::android::hardware::tv::tuner::FrontendEventType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessage;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanType;
+using ::aidl::android::hardware::tv::tuner::FrontendSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusType;
+using ::aidl::android::hardware::tv::tuner::IFrontend;
+using ::aidl::android::hardware::tv::tuner::IFrontendCallback;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerFrontend : public BnTunerFrontend {
 
 public:
-    TunerFrontend(sp<IFrontend> frontend, int id);
+    TunerFrontend(shared_ptr<IFrontend> frontend, int id);
     virtual ~TunerFrontend();
-    Status setCallback(
-            const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) override;
-    Status tune(const TunerFrontendSettings& settings) override;
-    Status stopTune() override;
-    Status scan(const TunerFrontendSettings& settings, int frontendScanType) override;
-    Status stopScan() override;
-    Status setLnb(const shared_ptr<ITunerLnb>& lnb) override;
-    Status setLna(bool bEnable) override;
-    Status linkCiCamToFrontend(int ciCamId, int32_t* _aidl_return) override;
-    Status unlinkCiCamToFrontend(int ciCamId) override;
-    Status close() override;
-    Status getStatus(const vector<int32_t>& statusTypes,
-            vector<TunerFrontendStatus>* _aidl_return) override;
-    Status getStatusExtended_1_1(const vector<int32_t>& statusTypes,
-            vector<TunerFrontendStatus>* _aidl_return) override;
-    Status getFrontendId(int* _aidl_return) override;
 
-    struct FrontendCallback : public IFrontendCallback {
+    ::ndk::ScopedAStatus setCallback(
+            const shared_ptr<ITunerFrontendCallback>& in_tunerFrontendCallback) override;
+    ::ndk::ScopedAStatus tune(const FrontendSettings& in_settings) override;
+    ::ndk::ScopedAStatus stopTune() override;
+    ::ndk::ScopedAStatus scan(const FrontendSettings& in_settings,
+                              FrontendScanType in_frontendScanType) override;
+    ::ndk::ScopedAStatus stopScan() override;
+    ::ndk::ScopedAStatus setLnb(const shared_ptr<ITunerLnb>& in_lnb) override;
+    ::ndk::ScopedAStatus linkCiCamToFrontend(int32_t in_ciCamId, int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus unlinkCiCamToFrontend(int32_t in_ciCamId) override;
+    ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus getStatus(const vector<FrontendStatusType>& in_statusTypes,
+                                   vector<FrontendStatus>* _aidl_return) override;
+    ::ndk::ScopedAStatus getFrontendId(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getHardwareInfo(std::string* _aidl_return) override;
+
+    struct FrontendCallback : public BnFrontendCallback {
         FrontendCallback(const shared_ptr<ITunerFrontendCallback> tunerFrontendCallback)
-                : mTunerFrontendCallback(tunerFrontendCallback) {};
+              : mTunerFrontendCallback(tunerFrontendCallback){};
 
-        virtual Return<void> onEvent(FrontendEventType frontendEventType);
-        virtual Return<void> onScanMessage(
-                FrontendScanMessageType type, const FrontendScanMessage& message);
-        virtual Return<void> onScanMessageExt1_1(
-                FrontendScanMessageTypeExt1_1 type, const FrontendScanMessageExt1_1& message);
+        ::ndk::ScopedAStatus onEvent(FrontendEventType frontendEventType) override;
+        ::ndk::ScopedAStatus onScanMessage(FrontendScanMessageType type,
+                                           const FrontendScanMessage& message) override;
 
         shared_ptr<ITunerFrontendCallback> mTunerFrontendCallback;
     };
 
 private:
-    hidl_vec<FrontendAtsc3PlpSettings> getAtsc3PlpSettings(
-            const TunerFrontendAtsc3Settings& settings);
-    FrontendDvbsCodeRate getDvbsCodeRate(const TunerFrontendDvbsCodeRate& codeRate);
-    FrontendSettings getHidlFrontendSettings(const TunerFrontendSettings& aidlSettings);
-    FrontendSettingsExt1_1 getHidlFrontendSettingsExt(const TunerFrontendSettings& aidlSettings);
-    void getAidlFrontendStatus(
-            vector<FrontendStatus>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus);
-    void getAidlFrontendStatusExt(
-            vector<FrontendStatusExt1_1>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus);
-
     int mId;
-    sp<IFrontend> mFrontend;
-    sp<::android::hardware::tv::tuner::V1_1::IFrontend> mFrontend_1_1;
+    shared_ptr<IFrontend> mFrontend;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERFRONTEND_H
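The FrontendCallback declared above now extends BnFrontendCallback and forwards HAL-side events to the client's ITunerFrontendCallback, replacing the old Return<void> HIDL callbacks. A sketch of that forwarding, modeled on TunerLnb::LnbCallback::onEvent later in this change (not the literal TunerFrontend.cpp body):

    ::ndk::ScopedAStatus TunerFrontend::FrontendCallback::onEvent(
            FrontendEventType frontendEventType) {
        if (mTunerFrontendCallback != nullptr) {
            // With shared AIDL types the event enum is forwarded unchanged.
            mTunerFrontendCallback->onEvent(frontendEventType);
        }
        return ::ndk::ScopedAStatus::ok();
    }
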
diff --git a/services/tuner/TunerHelper.cpp b/services/tuner/TunerHelper.cpp
new file mode 100644
index 0000000..dc67110
--- /dev/null
+++ b/services/tuner/TunerHelper.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TunerHelper.h"
+
+#include <aidl/android/media/tv/tunerresourcemanager/ITunerResourceManager.h>
+#include <android/binder_manager.h>
+#include <android/content/pm/IPackageManagerNative.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+using ::aidl::android::media::tv::tunerresourcemanager::ITunerResourceManager;
+using ::android::defaultServiceManager;
+using ::android::IBinder;
+using ::android::interface_cast;
+using ::android::IServiceManager;
+using ::android::sp;
+using ::android::binder::Status;
+using ::android::content::pm::IPackageManagerNative;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+// System Feature defined in PackageManager
+static const ::android::String16 FEATURE_TUNER(::android::String16("android.hardware.tv.tuner"));
+
+int32_t TunerHelper::sResourceRequestCount = 0;
+
+bool TunerHelper::checkTunerFeature() {
+    sp<IServiceManager> serviceMgr = defaultServiceManager();
+    sp<IPackageManagerNative> packageMgr;
+    if (serviceMgr.get() == nullptr) {
+        ALOGE("%s: Cannot find service manager", __func__);
+        return false;
+    }
+
+    sp<IBinder> binder = serviceMgr->waitForService(String16("package_native"));
+    packageMgr = interface_cast<IPackageManagerNative>(binder);
+    if (packageMgr != nullptr) {
+        bool hasFeature = false;
+        Status status = packageMgr->hasSystemFeature(FEATURE_TUNER, 0, &hasFeature);
+        if (!status.isOk()) {
+            ALOGE("%s: hasSystemFeature failed: %s", __func__, status.exceptionMessage().c_str());
+            return false;
+        }
+        if (!hasFeature) {
+            ALOGD("Current device does not support tuner feaure.");
+            return false;
+        }
+    } else {
+        ALOGD("%s: Cannot find package manager.", __func__);
+        return false;
+    }
+
+    return true;
+}
+
+// TODO: update Demux, Descrambler.
+void TunerHelper::updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
+                                       const vector<int32_t>& lnbHandles) {
+    ::ndk::SpAIBinder binder(AServiceManager_waitForService("tv_tuner_resource_mgr"));
+    shared_ptr<ITunerResourceManager> tunerRM = ITunerResourceManager::fromBinder(binder);
+    if (tunerRM == nullptr) {
+        return;
+    }
+
+    tunerRM->setFrontendInfoList(feInfos);
+    tunerRM->setLnbInfoList(lnbHandles);
+}
+
+// TODO: create a map between resource id and handles.
+int TunerHelper::getResourceIdFromHandle(int resourceHandle, int /*type*/) {
+    return (resourceHandle & 0x00ff0000) >> 16;
+}
+
+int TunerHelper::getResourceHandleFromId(int id, int resourceType) {
+    // TODO: build up randomly generated id to handle mapping
+    return (resourceType & 0x000000ff) << 24 | (id << 16) | (sResourceRequestCount++ & 0xffff);
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
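getResourceHandleFromId() and getResourceIdFromHandle() above define the packed handle format shared with the Tuner Resource Manager: resource type in bits 31..24, resource id in bits 23..16, and a running request count in bits 15..0 (note that an id above 255 would spill into the type byte). A standalone worked example of that layout, for illustration only:

    #include <cassert>
    #include <cstdint>

    int main() {
        int32_t type = 0;   // FRONTEND in the TunerResourceType enum
        int32_t id = 2;     // third frontend reported by the HAL
        int32_t count = 5;  // value of sResourceRequestCount at call time

        // Same packing as TunerHelper::getResourceHandleFromId().
        int32_t handle = (type & 0x000000ff) << 24 | (id << 16) | (count & 0xffff);
        assert(handle == 0x00020005);

        // Same extraction as TunerHelper::getResourceIdFromHandle().
        assert(((handle & 0x00ff0000) >> 16) == id);
        return 0;
    }
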
diff --git a/services/tuner/TunerHelper.h b/services/tuner/TunerHelper.h
new file mode 100644
index 0000000..755df57
--- /dev/null
+++ b/services/tuner/TunerHelper.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHELPER_H
+#define ANDROID_MEDIA_TUNERHELPER_H
+
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <utils/String16.h>
+
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::String16;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+const static int TUNER_HAL_VERSION_UNKNOWN = 0;
+const static int TUNER_HAL_VERSION_1_0 = 1 << 16;
+const static int TUNER_HAL_VERSION_1_1 = (1 << 16) | 1;
+const static int TUNER_HAL_VERSION_2_0 = 2 << 16;
+
+// Keep in sync with ShareFilter.java
+const static int STATUS_INACCESSIBLE = 1 << 7;
+
+const static String16 sSharedFilterPermission("android.permission.ACCESS_TV_SHARED_FILTER");
+
+typedef enum {
+    FRONTEND,
+    DEMUX,
+    DESCRAMBLER,
+    LNB
+} TunerResourceType;
+
+class TunerHelper {
+public:
+    static bool checkTunerFeature();
+
+    // TODO: update Demux, Descrambler.
+    static void updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
+                                     const vector<int32_t>& lnbHandles);
+    // TODO: create a map between resource id and handles.
+    static int getResourceIdFromHandle(int resourceHandle, int type);
+    static int getResourceHandleFromId(int id, int resourceType);
+
+private:
+    static int32_t sResourceRequestCount;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHELPER_H
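The TUNER_HAL_VERSION_* constants above encode the HAL version as (major << 16) | minor, which is the value getTunerHalVersion() in TunerService.cpp reports to clients. A quick standalone check of that encoding, for illustration only:

    #include <cstdio>

    int main() {
        const int version = (1 << 16) | 1;  // TUNER_HAL_VERSION_1_1
        std::printf("major=%d minor=%d\n", version >> 16, version & 0xffff);
        // prints: major=1 minor=1
        return 0;
    }
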
diff --git a/services/tuner/TunerLnb.cpp b/services/tuner/TunerLnb.cpp
index 77248d4..1e143c3 100644
--- a/services/tuner/TunerLnb.cpp
+++ b/services/tuner/TunerLnb.cpp
@@ -18,123 +18,116 @@
 
 #include "TunerLnb.h"
 
-using ::android::hardware::tv::tuner::V1_0::LnbPosition;
-using ::android::hardware::tv::tuner::V1_0::LnbTone;
-using ::android::hardware::tv::tuner::V1_0::LnbVoltage;
-using ::android::hardware::tv::tuner::V1_0::Result;
+#include <aidl/android/hardware/tv/tuner/ILnbCallback.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
 
+using ::aidl::android::hardware::tv::tuner::ILnbCallback;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerLnb::TunerLnb(sp<ILnb> lnb, int id) {
+TunerLnb::TunerLnb(shared_ptr<ILnb> lnb, int id) {
     mLnb = lnb;
     mId = id;
 }
 
 TunerLnb::~TunerLnb() {
-    mLnb = NULL;
+    mLnb = nullptr;
     mId = -1;
 }
 
-Status TunerLnb::setCallback(
-        const shared_ptr<ITunerLnbCallback>& tunerLnbCallback) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setCallback(
+        const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    if (tunerLnbCallback == NULL) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    if (in_tunerLnbCallback == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
     }
 
-    sp<ILnbCallback> lnbCallback = new LnbCallback(tunerLnbCallback);
-    Result status = mLnb->setCallback(lnbCallback);
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    shared_ptr<ILnbCallback> lnbCallback =
+            ::ndk::SharedRefBase::make<LnbCallback>(in_tunerLnbCallback);
+    return mLnb->setCallback(lnbCallback);
 }
 
-Status TunerLnb::setVoltage(int voltage) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setVoltage(LnbVoltage in_voltage) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mLnb->setVoltage(static_cast<LnbVoltage>(voltage));
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mLnb->setVoltage(in_voltage);
 }
 
-Status TunerLnb::setTone(int tone) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setTone(LnbTone in_tone) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mLnb->setTone(static_cast<LnbTone>(tone));
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mLnb->setTone(in_tone);
 }
 
-Status TunerLnb::setSatellitePosition(int position) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setSatellitePosition(LnbPosition in_position) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mLnb->setSatellitePosition(static_cast<LnbPosition>(position));
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mLnb->setSatellitePosition(in_position);
 }
 
-Status TunerLnb::sendDiseqcMessage(const vector<uint8_t>& diseqcMessage) {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mLnb->sendDiseqcMessage(diseqcMessage);
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mLnb->sendDiseqcMessage(in_diseqcMessage);
 }
 
-Status TunerLnb::close() {
-    if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::close() {
+    if (mLnb == nullptr) {
         ALOGE("ILnb is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mLnb->close();
-    mLnb = NULL;
+    auto res = mLnb->close();
+    mLnb = nullptr;
 
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return res;
 }
 
 /////////////// ILnbCallback ///////////////////////
-
-Return<void> TunerLnb::LnbCallback::onEvent(const LnbEventType lnbEventType) {
-    if (mTunerLnbCallback != NULL) {
-        mTunerLnbCallback->onEvent((int)lnbEventType);
+::ndk::ScopedAStatus TunerLnb::LnbCallback::onEvent(const LnbEventType lnbEventType) {
+    if (mTunerLnbCallback != nullptr) {
+        mTunerLnbCallback->onEvent(lnbEventType);
     }
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
 
-Return<void> TunerLnb::LnbCallback::onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) {
-    if (mTunerLnbCallback != NULL && diseqcMessage != NULL) {
-        vector<uint8_t> msg(begin(diseqcMessage), end(diseqcMessage));
-        mTunerLnbCallback->onDiseqcMessage(msg);
+::ndk::ScopedAStatus TunerLnb::LnbCallback::onDiseqcMessage(const vector<uint8_t>& diseqcMessage) {
+    if (mTunerLnbCallback != nullptr) {
+        mTunerLnbCallback->onDiseqcMessage(diseqcMessage);
     }
-    return Void();
+    return ndk::ScopedAStatus::ok();
 }
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
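The rewrite above drops the intermediate HIDL Result checks: each method now returns the HAL's ndk::ScopedAStatus directly, so service-specific error codes travel through unchanged. A hedged client-side sketch of how such a status can be inspected (the helper name is illustrative and not part of this change):

    #include <aidl/android/hardware/tv/tuner/Result.h>
    #include <android/binder_auto_utils.h>

    using ::aidl::android::hardware::tv::tuner::Result;

    // True when a TunerLnb call failed on the UNAVAILABLE path added above,
    // i.e. the underlying ILnb was never initialized.
    static bool isLnbUnavailable(const ::ndk::ScopedAStatus& status) {
        return !status.isOk() &&
               status.getServiceSpecificError() == static_cast<int32_t>(Result::UNAVAILABLE);
    }
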
diff --git a/services/tuner/TunerLnb.h b/services/tuner/TunerLnb.h
index 500d072..72988a6 100644
--- a/services/tuner/TunerLnb.h
+++ b/services/tuner/TunerLnb.h
@@ -17,55 +17,61 @@
 #ifndef ANDROID_MEDIA_TUNERFLNB_H
 #define ANDROID_MEDIA_TUNERFLNB_H
 
+#include <aidl/android/hardware/tv/tuner/BnLnbCallback.h>
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
 #include <aidl/android/media/tv/tuner/BnTunerLnb.h>
-#include <android/hardware/tv/tuner/1.0/ILnb.h>
-#include <android/hardware/tv/tuner/1.0/ILnbCallback.h>
-#include <media/stagefright/foundation/ADebug.h>
 #include <utils/Log.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerLnb;
-using ::aidl::android::media::tv::tuner::ITunerLnbCallback;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::ILnb;
-using ::android::hardware::tv::tuner::V1_0::ILnbCallback;
-using ::android::hardware::tv::tuner::V1_0::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::BnLnbCallback;
+using ::aidl::android::hardware::tv::tuner::ILnb;
+using ::aidl::android::hardware::tv::tuner::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::LnbPosition;
+using ::aidl::android::hardware::tv::tuner::LnbTone;
+using ::aidl::android::hardware::tv::tuner::LnbVoltage;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerLnb : public BnTunerLnb {
 
 public:
-    TunerLnb(sp<ILnb> lnb, int id);
+    TunerLnb(shared_ptr<ILnb> lnb, int id);
     virtual ~TunerLnb();
-    Status setCallback(const shared_ptr<ITunerLnbCallback>& tunerLnbCallback) override;
-    Status setVoltage(int voltage) override;
-    Status setTone(int tone) override;
-    Status setSatellitePosition(int position) override;
-    Status sendDiseqcMessage(const vector<uint8_t>& diseqcMessage) override;
-    Status close() override;
+
+    ::ndk::ScopedAStatus setCallback(
+            const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) override;
+    ::ndk::ScopedAStatus setVoltage(LnbVoltage in_voltage) override;
+    ::ndk::ScopedAStatus setTone(LnbTone in_tone) override;
+    ::ndk::ScopedAStatus setSatellitePosition(LnbPosition in_position) override;
+    ::ndk::ScopedAStatus sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) override;
+    ::ndk::ScopedAStatus close() override;
 
     int getId() { return mId; }
 
-    struct LnbCallback : public ILnbCallback {
+    struct LnbCallback : public BnLnbCallback {
         LnbCallback(const shared_ptr<ITunerLnbCallback> tunerLnbCallback)
-                : mTunerLnbCallback(tunerLnbCallback) {};
+              : mTunerLnbCallback(tunerLnbCallback){};
 
-        virtual Return<void> onEvent(const LnbEventType lnbEventType);
-        virtual Return<void> onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage);
+        ::ndk::ScopedAStatus onEvent(const LnbEventType lnbEventType) override;
+        ::ndk::ScopedAStatus onDiseqcMessage(const vector<uint8_t>& diseqcMessage) override;
 
         shared_ptr<ITunerLnbCallback> mTunerLnbCallback;
     };
 
 private:
     int mId;
-    sp<ILnb> mLnb;
+    shared_ptr<ILnb> mLnb;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERFLNB_H
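TunerLnb objects are created by the service and handed to clients as ITunerLnb binders; the constructor takes the HAL ILnb plus its resource id. A small sketch of that wrapping, modeled on TunerService::openLnb later in this change (the helper name is illustrative):

    #include <memory>

    #include "TunerLnb.h"

    using ::aidl::android::media::tv::tuner::TunerLnb;

    // Wrap a HAL ILnb in the AIDL-facing TunerLnb, the same way openLnb()
    // does after resolving the resource handle to an id.
    static std::shared_ptr<TunerLnb> wrapHalLnb(const std::shared_ptr<ILnb>& halLnb, int id) {
        return ::ndk::SharedRefBase::make<TunerLnb>(halLnb, id);
    }
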
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index 5b4129a..4833aaf 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -1,5 +1,5 @@
 /**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,369 +14,342 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
 #define LOG_TAG "TunerService"
 
-#include <android/binder_manager.h>
-#include <android/content/pm/IPackageManagerNative.h>
-#include <binder/IServiceManager.h>
-#include <utils/Log.h>
 #include "TunerService.h"
-#include "TunerFrontend.h"
-#include "TunerLnb.h"
+
+#include <aidl/android/hardware/tv/tuner/IDemux.h>
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
+#include <aidl/android/hardware/tv/tuner/IFrontend.h>
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <android/binder_manager.h>
+#include <binder/IPCThreadState.h>
+#include <binder/PermissionCache.h>
+#include <utils/Log.h>
+
+#include <string>
+
 #include "TunerDemux.h"
 #include "TunerDescrambler.h"
+#include "TunerFrontend.h"
+#include "TunerHelper.h"
+#include "TunerLnb.h"
 
-using ::aidl::android::media::tv::tuner::TunerFrontendAnalogCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3Capabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtscCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendCableCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbsCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbtCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbs3Capabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbsCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbtCapabilities;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendType;
-using ::android::hardware::tv::tuner::V1_0::IFrontend;
-using ::android::hardware::tv::tuner::V1_0::ILnb;
-using ::android::hardware::tv::tuner::V1_0::LnbId;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbCapabilities;
+using ::aidl::android::hardware::tv::tuner::IDemux;
+using ::aidl::android::hardware::tv::tuner::IDescrambler;
+using ::aidl::android::hardware::tv::tuner::IFrontend;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::android::IPCThreadState;
+using ::android::PermissionCache;
+using ::android::sp;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+shared_ptr<TunerService> TunerService::sTunerService = nullptr;
 
 TunerService::TunerService() {
-    sp<IServiceManager> serviceMgr = defaultServiceManager();
-    sp<content::pm::IPackageManagerNative> packageMgr;
-    if (serviceMgr.get() == nullptr) {
-        ALOGE("%s: Cannot find service manager", __func__);
-        return;
-    } else {
-        sp<IBinder> binder = serviceMgr->waitForService(String16("package_native"));
-        packageMgr = interface_cast<content::pm::IPackageManagerNative>(binder);
-    }
-
-    bool hasFeature = false;
-    if (packageMgr != nullptr) {
-        binder::Status status = packageMgr->hasSystemFeature(FEATURE_TUNER, 0, &hasFeature);
-        if (!status.isOk()) {
-            ALOGE("%s: hasSystemFeature failed: %s",
-                    __func__, status.exceptionMessage().c_str());
-            return;
-        }
-        if (!hasFeature) {
-            ALOGD("Current device does not support tuner feaure.");
-            return;
-        }
-    } else {
-        ALOGD("%s: Cannot find package manager.", __func__);
+    if (!TunerHelper::checkTunerFeature()) {
+        ALOGD("Device doesn't have tuner hardware.");
         return;
     }
 
-    ::ndk::SpAIBinder binder(AServiceManager_waitForService("tv_tuner_resource_mgr"));
-    mTunerResourceManager = ITunerResourceManager::fromBinder(binder);
     updateTunerResources();
 }
 
 TunerService::~TunerService() {}
 
 binder_status_t TunerService::instantiate() {
-    shared_ptr<TunerService> service =
-            ::ndk::SharedRefBase::make<TunerService>();
-    return AServiceManager_addService(service->asBinder().get(), getServiceName());
+    sTunerService = ::ndk::SharedRefBase::make<TunerService>();
+    return AServiceManager_addService(sTunerService->asBinder().get(), getServiceName());
+}
+
+shared_ptr<TunerService> TunerService::getTunerService() {
+    return sTunerService;
 }
 
 bool TunerService::hasITuner() {
-    ALOGD("hasITuner");
+    ALOGV("hasITuner");
     if (mTuner != nullptr) {
         return true;
     }
-    mTuner = ITuner::getService();
-    if (mTuner == nullptr) {
-        ALOGE("Failed to get ITuner service");
+    const string tunerServiceName = string() + ITuner::descriptor + "/default";
+    if (AServiceManager_isDeclared(tunerServiceName.c_str())) {
+        ::ndk::SpAIBinder binder(AServiceManager_waitForService(tunerServiceName.c_str()));
+        mTuner = ITuner::fromBinder(binder);
+    } else {
+        mTuner = nullptr;
+        ALOGE("Failed to get Tuner HAL Service");
         return false;
     }
-    mTunerVersion = TUNER_HAL_VERSION_1_0;
-    mTuner_1_1 = ::android::hardware::tv::tuner::V1_1::ITuner::castFrom(mTuner);
-    if (mTuner_1_1 != nullptr) {
-        mTunerVersion = TUNER_HAL_VERSION_1_1;
-    } else {
-        ALOGE("Failed to get ITuner_1_1 service");
-    }
+
+    mTunerVersion = TUNER_HAL_VERSION_2_0;
+    // TODO: Enable this after Tuner HAL is frozen.
+    // if (mTuner->getInterfaceVersion(&mTunerVersion).isOk()) {
+    //     // Tuner AIDL HAL version 1 will be Tuner HAL 2.0
+    //     mTunerVersion = (mTunerVersion + 1) << 16;
+    // }
+
     return true;
 }
 
-bool TunerService::hasITuner_1_1() {
-    ALOGD("hasITuner_1_1");
-    hasITuner();
-    return (mTunerVersion == TUNER_HAL_VERSION_1_1);
-}
-
-Status TunerService::openDemux(
-        int /* demuxHandle */, std::shared_ptr<ITunerDemux>* _aidl_return) {
-    ALOGD("openDemux");
+::ndk::ScopedAStatus TunerService::openDemux(int32_t /* in_demuxHandle */,
+                                             shared_ptr<ITunerDemux>* _aidl_return) {
+    ALOGV("openDemux");
     if (!hasITuner()) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::NOT_INITIALIZED));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    Result res;
-    uint32_t id;
-    sp<IDemux> demuxSp = nullptr;
-    shared_ptr<ITunerDemux> tunerDemux = nullptr;
-    mTuner->openDemux([&](Result r, uint32_t demuxId, const sp<IDemux>& demux) {
-        demuxSp = demux;
-        id = demuxId;
-        res = r;
-        ALOGD("open demux, id = %d", demuxId);
-    });
-    if (res == Result::SUCCESS) {
-        tunerDemux = ::ndk::SharedRefBase::make<TunerDemux>(demuxSp, id);
-        *_aidl_return = tunerDemux->ref<ITunerDemux>();
-        return Status::ok();
+    vector<int32_t> id;
+    shared_ptr<IDemux> demux;
+    auto status = mTuner->openDemux(&id, &demux);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerDemux>(demux, id[0]);
     }
 
-    ALOGW("open demux failed, res = %d", res);
-    return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    return status;
 }
 
-Status TunerService::getDemuxCaps(TunerDemuxCapabilities* _aidl_return) {
-    ALOGD("getDemuxCaps");
+::ndk::ScopedAStatus TunerService::getDemuxCaps(DemuxCapabilities* _aidl_return) {
+    ALOGV("getDemuxCaps");
     if (!hasITuner()) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::NOT_INITIALIZED));
-    }
-    Result res;
-    DemuxCapabilities caps;
-    mTuner->getDemuxCaps([&](Result r, const DemuxCapabilities& demuxCaps) {
-        caps = demuxCaps;
-        res = r;
-    });
-    if (res == Result::SUCCESS) {
-        *_aidl_return = getAidlDemuxCaps(caps);
-        return Status::ok();
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    ALOGW("Get demux caps failed, res = %d", res);
-    return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    return mTuner->getDemuxCaps(_aidl_return);
 }
 
-Status TunerService::getFrontendIds(vector<int32_t>* ids) {
+::ndk::ScopedAStatus TunerService::getFrontendIds(vector<int32_t>* ids) {
     if (!hasITuner()) {
-        return Status::fromServiceSpecificError(
-                static_cast<int32_t>(Result::NOT_INITIALIZED));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
-    hidl_vec<FrontendId> feIds;
-    Result res = getHidlFrontendIds(feIds);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    ids->resize(feIds.size());
-    copy(feIds.begin(), feIds.end(), ids->begin());
 
-    return Status::ok();
+    return mTuner->getFrontendIds(ids);
 }
 
-Status TunerService::getFrontendInfo(int32_t id, TunerFrontendInfo* _aidl_return) {
+::ndk::ScopedAStatus TunerService::getFrontendInfo(int32_t id, FrontendInfo* _aidl_return) {
     if (!hasITuner()) {
         ALOGE("ITuner service is not init.");
         return ::ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    FrontendInfo info;
-    Result res = getHidlFrontendInfo(id, info);
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-
-    TunerFrontendInfo tunerInfo = convertToAidlFrontendInfo(info);
-    *_aidl_return = tunerInfo;
-    return Status::ok();
+    return mTuner->getFrontendInfo(id, _aidl_return);
 }
 
-Status TunerService::getFrontendDtmbCapabilities(
-        int32_t id, TunerFrontendDtmbCapabilities* _aidl_return) {
-    if (!hasITuner_1_1()) {
-        ALOGE("ITuner_1_1 service is not init.");
+::ndk::ScopedAStatus TunerService::openFrontend(int32_t frontendHandle,
+                                                shared_ptr<ITunerFrontend>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("ITuner service is not init.");
         return ::ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res;
-    FrontendDtmbCapabilities dtmbCaps;
-    mTuner_1_1->getFrontendDtmbCapabilities(id,
-            [&](Result r, const FrontendDtmbCapabilities& caps) {
-        dtmbCaps = caps;
-        res = r;
-    });
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+    int id = TunerHelper::getResourceIdFromHandle(frontendHandle, FRONTEND);
+    shared_ptr<IFrontend> frontend;
+    auto status = mTuner->openFrontendById(id, &frontend);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerFrontend>(frontend, id);
     }
 
-    TunerFrontendDtmbCapabilities aidlDtmbCaps{
-        .transmissionModeCap = (int)dtmbCaps.transmissionModeCap,
-        .bandwidthCap = (int)dtmbCaps.bandwidthCap,
-        .modulationCap = (int)dtmbCaps.modulationCap,
-        .codeRateCap = (int)dtmbCaps.codeRateCap,
-        .guardIntervalCap = (int)dtmbCaps.guardIntervalCap,
-        .interleaveModeCap = (int)dtmbCaps.interleaveModeCap,
-    };
-
-    *_aidl_return = aidlDtmbCaps;
-    return Status::ok();
+    return status;
 }
 
-Status TunerService::openFrontend(
-        int32_t frontendHandle, shared_ptr<ITunerFrontend>* _aidl_return) {
-    if (!hasITuner()) {
-        ALOGE("ITuner service is not init.");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
-    }
-
-    Result status;
-    sp<IFrontend> frontend;
-    int id = getResourceIdFromHandle(frontendHandle, FRONTEND);
-    mTuner->openFrontendById(id, [&](Result result, const sp<IFrontend>& fe) {
-        frontend = fe;
-        status = result;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerFrontend>(frontend, id);
-    return Status::ok();
-}
-
-Status TunerService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
     if (!hasITuner()) {
         ALOGD("get ITuner failed");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    sp<ILnb> lnb;
-    int id = getResourceIdFromHandle(lnbHandle, LNB);
-    mTuner->openLnbById(id, [&](Result result, const sp<ILnb>& lnbSp){
-        lnb = lnbSp;
-        status = result;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    shared_ptr<ILnb> lnb;
+    int id = TunerHelper::getResourceIdFromHandle(lnbHandle, LNB);
+    auto status = mTuner->openLnbById(id, &lnb);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id);
     }
 
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id);
-    return Status::ok();
+    return status;
 }
 
-Status TunerService::openLnbByName(const string& lnbName, shared_ptr<ITunerLnb>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openLnbByName(const string& lnbName,
+                                                 shared_ptr<ITunerLnb>* _aidl_return) {
     if (!hasITuner()) {
         ALOGE("get ITuner failed");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    int lnbId;
-    Result status;
-    sp<ILnb> lnb;
-    mTuner->openLnbByName(lnbName, [&](Result r, LnbId id, const sp<ILnb>& lnbSp) {
-        status = r;
-        lnb = lnbSp;
-        lnbId = (int)id;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    vector<int32_t> id;
+    shared_ptr<ILnb> lnb;
+    auto status = mTuner->openLnbByName(lnbName, &id, &lnb);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id[0]);
     }
 
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, lnbId);
-    return Status::ok();
+    return status;
 }
 
-Status TunerService::openDescrambler(int32_t /*descramblerHandle*/,
-            std::shared_ptr<ITunerDescrambler>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openDescrambler(int32_t /*descramblerHandle*/,
+                                                   shared_ptr<ITunerDescrambler>* _aidl_return) {
     if (!hasITuner()) {
         ALOGD("get ITuner failed");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    sp<IDescrambler> descrambler;
-    //int id = getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
-    mTuner->openDescrambler([&](Result r, const sp<IDescrambler>& descramblerSp) {
-        status = r;
-        descrambler = descramblerSp;
-    });
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+    shared_ptr<IDescrambler> descrambler;
+    // int id = TunerHelper::getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
+    auto status = mTuner->openDescrambler(&descrambler);
+    if (status.isOk()) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerDescrambler>(descrambler);
     }
 
-    *_aidl_return = ::ndk::SharedRefBase::make<TunerDescrambler>(descrambler);
-    return Status::ok();
+    return status;
+}
+
+::ndk::ScopedAStatus TunerService::getTunerHalVersion(int* _aidl_return) {
+    hasITuner();
+    *_aidl_return = mTunerVersion;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerService::openSharedFilter(const string& in_filterToken,
+                                                    const shared_ptr<ITunerFilterCallback>& in_cb,
+                                                    shared_ptr<ITunerFilter>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (!PermissionCache::checkCallingPermission(sSharedFilterPermission)) {
+        ALOGE("Request requires android.permission.ACCESS_TV_SHARED_FILTER");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    Mutex::Autolock _l(mSharedFiltersLock);
+    if (mSharedFilters.find(in_filterToken) == mSharedFilters.end()) {
+        *_aidl_return = nullptr;
+        ALOGD("fail to find %s", in_filterToken.c_str());
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    shared_ptr<TunerFilter> filter = mSharedFilters.at(in_filterToken);
+    IPCThreadState* ipc = IPCThreadState::self();
+    const int pid = ipc->getCallingPid();
+    if (!filter->isSharedFilterAllowed(pid)) {
+        *_aidl_return = nullptr;
+        ALOGD("shared filter %s is opened in the same process", in_filterToken.c_str());
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    filter->attachSharedFilterCallback(in_cb);
+
+    *_aidl_return = filter;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerService::setLna(bool bEnable) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mTuner->setLna(bEnable);
+}
+
+::ndk::ScopedAStatus TunerService::setMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                           int32_t in_maxNumber) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mTuner->setMaxNumberOfFrontends(in_frontendType, in_maxNumber);
+}
+
+::ndk::ScopedAStatus TunerService::getMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                           int32_t* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mTuner->getMaxNumberOfFrontends(in_frontendType, _aidl_return);
+}
+
+string TunerService::addFilterToShared(const shared_ptr<TunerFilter>& sharedFilter) {
+    Mutex::Autolock _l(mSharedFiltersLock);
+
+    // Use sharedFilter address as token.
+    string token = to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get()));
+    mSharedFilters[token] = sharedFilter;
+    return token;
+}
+
+void TunerService::removeSharedFilter(const shared_ptr<TunerFilter>& sharedFilter) {
+    Mutex::Autolock _l(mSharedFiltersLock);
+
+    // Use sharedFilter address as token.
+    mSharedFilters.erase(to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get())));
 }
 
 void TunerService::updateTunerResources() {
-    if (!hasITuner() || mTunerResourceManager == NULL) {
+    if (!hasITuner()) {
         ALOGE("Failed to updateTunerResources");
         return;
     }
 
-    updateFrontendResources();
-    updateLnbResources();
-    // TODO: update Demux, Descrambler.
+    TunerHelper::updateTunerResources(getTRMFrontendInfos(), getTRMLnbHandles());
 }
 
-Status TunerService::getTunerHalVersion(int* _aidl_return) {
-    hasITuner();
-    *_aidl_return = mTunerVersion;
-    return Status::ok();
-}
-
-void TunerService::updateFrontendResources() {
-    hidl_vec<FrontendId> ids;
-    Result res = getHidlFrontendIds(ids);
-    if (res != Result::SUCCESS) {
-        return;
-    }
+vector<TunerFrontendInfo> TunerService::getTRMFrontendInfos() {
     vector<TunerFrontendInfo> infos;
+    vector<int32_t> ids;
+    auto status = mTuner->getFrontendIds(&ids);
+    if (!status.isOk()) {
+        return infos;
+    }
+
     for (int i = 0; i < ids.size(); i++) {
         FrontendInfo frontendInfo;
-        Result res = getHidlFrontendInfo((int)ids[i], frontendInfo);
-        if (res != Result::SUCCESS) {
+        auto res = mTuner->getFrontendInfo(ids[i], &frontendInfo);
+        if (!res.isOk()) {
             continue;
         }
         TunerFrontendInfo tunerFrontendInfo{
-            .handle = getResourceHandleFromId((int)ids[i], FRONTEND),
-            .type = static_cast<int>(frontendInfo.type),
-            .exclusiveGroupId = static_cast<int>(frontendInfo.exclusiveGroupId),
+                .handle = TunerHelper::getResourceHandleFromId((int)ids[i], FRONTEND),
+                .type = static_cast<int>(frontendInfo.type),
+                .exclusiveGroupId = frontendInfo.exclusiveGroupId,
         };
         infos.push_back(tunerFrontendInfo);
     }
-    mTunerResourceManager->setFrontendInfoList(infos);
+
+    return infos;
 }
 
-void TunerService::updateLnbResources() {
-    vector<int> handles = getLnbHandles();
-    if (handles.size() == 0) {
-        return;
-    }
-    mTunerResourceManager->setLnbInfoList(handles);
-}
-
-vector<int> TunerService::getLnbHandles() {
-    vector<int> lnbHandles;
-    if (mTuner != NULL) {
-        Result res;
-        vector<LnbId> lnbIds;
-        mTuner->getLnbIds([&](Result r, const hardware::hidl_vec<LnbId>& ids) {
-            lnbIds = ids;
-            res = r;
-        });
-        if (res != Result::SUCCESS || lnbIds.size() == 0) {
-        } else {
+vector<int32_t> TunerService::getTRMLnbHandles() {
+    vector<int32_t> lnbHandles;
+    if (mTuner != nullptr) {
+        vector<int32_t> lnbIds;
+        auto res = mTuner->getLnbIds(&lnbIds);
+        if (res.isOk()) {
             for (int i = 0; i < lnbIds.size(); i++) {
-                lnbHandles.push_back(getResourceHandleFromId((int)lnbIds[i], LNB));
+                lnbHandles.push_back(TunerHelper::getResourceHandleFromId(lnbIds[i], LNB));
             }
         }
     }
@@ -384,186 +357,8 @@
     return lnbHandles;
 }
 
-Result TunerService::getHidlFrontendIds(hidl_vec<FrontendId>& ids) {
-    if (mTuner == NULL) {
-        return Result::NOT_INITIALIZED;
-    }
-    Result res;
-    mTuner->getFrontendIds([&](Result r, const hidl_vec<FrontendId>& frontendIds) {
-        ids = frontendIds;
-        res = r;
-    });
-    return res;
-}
-
-Result TunerService::getHidlFrontendInfo(int id, FrontendInfo& info) {
-    if (mTuner == NULL) {
-        return Result::NOT_INITIALIZED;
-    }
-    Result res;
-    mTuner->getFrontendInfo(id, [&](Result r, const FrontendInfo& feInfo) {
-        info = feInfo;
-        res = r;
-    });
-    return res;
-}
-
-TunerDemuxCapabilities TunerService::getAidlDemuxCaps(DemuxCapabilities caps) {
-    TunerDemuxCapabilities aidlCaps{
-        .numDemux = (int)caps.numDemux,
-        .numRecord = (int)caps.numRecord,
-        .numPlayback = (int)caps.numPlayback,
-        .numTsFilter = (int)caps.numTsFilter,
-        .numSectionFilter = (int)caps.numSectionFilter,
-        .numAudioFilter = (int)caps.numAudioFilter,
-        .numVideoFilter = (int)caps.numVideoFilter,
-        .numPesFilter = (int)caps.numPesFilter,
-        .numPcrFilter = (int)caps.numPcrFilter,
-        .numBytesInSectionFilter = (int)caps.numBytesInSectionFilter,
-        .filterCaps = (int)caps.filterCaps,
-        .bTimeFilter = caps.bTimeFilter,
-    };
-    aidlCaps.linkCaps.resize(caps.linkCaps.size());
-    copy(caps.linkCaps.begin(), caps.linkCaps.end(), aidlCaps.linkCaps.begin());
-    return aidlCaps;
-}
-
-TunerFrontendInfo TunerService::convertToAidlFrontendInfo(FrontendInfo halInfo) {
-    TunerFrontendInfo info{
-        .type = (int)halInfo.type,
-        .minFrequency = (int)halInfo.minFrequency,
-        .maxFrequency = (int)halInfo.maxFrequency,
-        .minSymbolRate = (int)halInfo.minSymbolRate,
-        .maxSymbolRate = (int)halInfo.maxSymbolRate,
-        .acquireRange = (int)halInfo.acquireRange,
-        .exclusiveGroupId = (int)halInfo.exclusiveGroupId,
-    };
-    for (int i = 0; i < halInfo.statusCaps.size(); i++) {
-        info.statusCaps.push_back((int)halInfo.statusCaps[i]);
-    }
-
-    TunerFrontendCapabilities caps;
-    switch (halInfo.type) {
-        case FrontendType::ANALOG: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendAnalogCapabilities analogCaps{
-                    .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
-                    .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
-                };
-                caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
-            }
-            break;
-        }
-        case FrontendType::ATSC: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendAtscCapabilities atscCaps{
-                    .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
-                };
-                caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
-            }
-            break;
-        }
-        case FrontendType::ATSC3: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendAtsc3Capabilities atsc3Caps{
-                    .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
-                    .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
-                    .timeInterleaveModeCap =
-                            (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
-                    .demodOutputFormatCap
-                        = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
-                    .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
-                };
-                caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
-            }
-            break;
-        }
-        case FrontendType::DVBC: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendCableCapabilities cableCaps{
-                    .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
-                    .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
-                    .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
-                };
-                caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
-            }
-            break;
-        }
-        case FrontendType::DVBS: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendDvbsCapabilities dvbsCaps{
-                    .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
-                    .codeRateCap = (long)halInfo.frontendCaps.dvbsCaps().innerfecCap,
-                    .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
-                };
-                caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
-            }
-            break;
-        }
-        case FrontendType::DVBT: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendDvbtCapabilities dvbtCaps{
-                    .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
-                    .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
-                    .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
-                    .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
-                    .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
-                    .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
-                    .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
-                };
-                caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
-            }
-            break;
-        }
-        case FrontendType::ISDBS: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendIsdbsCapabilities isdbsCaps{
-                    .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
-                };
-                caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
-            }
-            break;
-        }
-        case FrontendType::ISDBS3: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendIsdbs3Capabilities isdbs3Caps{
-                    .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
-                };
-                caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
-            }
-            break;
-        }
-        case FrontendType::ISDBT: {
-            if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps
-                    == halInfo.frontendCaps.getDiscriminator()) {
-                TunerFrontendIsdbtCapabilities isdbtCaps{
-                    .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
-                    .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
-                    .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
-                    .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
-                    .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
-                };
-                caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
-            }
-            break;
-        }
-        default:
-            break;
-    }
-
-    info.caps = caps;
-    return info;
-}
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
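
For context on the shared-filter bookkeeping added above: TunerService keys each shared TunerFilter by the decimal string of its object address and serializes map access with a mutex. A minimal standalone C++ sketch of that pattern follows; the class and method names are illustrative stand-ins, not the AOSP API.

    // Sketch of the pointer-address token scheme used by addFilterToShared() /
    // removeSharedFilter(): the token is the decimal string of the object's
    // address, and a mutex guards the token-to-filter map.
    #include <cstdint>
    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    struct Filter {};  // stand-in for TunerFilter

    class SharedFilterRegistry {
    public:
        std::string add(const std::shared_ptr<Filter>& filter) {
            std::lock_guard<std::mutex> lock(mLock);
            std::string token =
                    std::to_string(reinterpret_cast<std::uintptr_t>(filter.get()));
            mFilters[token] = filter;
            return token;
        }

        void remove(const std::shared_ptr<Filter>& filter) {
            std::lock_guard<std::mutex> lock(mLock);
            mFilters.erase(
                    std::to_string(reinterpret_cast<std::uintptr_t>(filter.get())));
        }

        std::shared_ptr<Filter> find(const std::string& token) {
            std::lock_guard<std::mutex> lock(mLock);
            auto it = mFilters.find(token);
            return it == mFilters.end() ? nullptr : it->second;
        }

    private:
        std::mutex mLock;
        std::map<std::string, std::shared_ptr<Filter>> mFilters;
    };

    int main() {
        SharedFilterRegistry registry;
        auto filter = std::make_shared<Filter>();
        std::string token = registry.add(filter);
        bool found = registry.find(token) != nullptr;
        registry.remove(filter);
        return found ? 0 : 1;
    }
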
diff --git a/services/tuner/TunerService.h b/services/tuner/TunerService.h
index f8e2ee6..7fc2aa4 100644
--- a/services/tuner/TunerService.h
+++ b/services/tuner/TunerService.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,141 +17,95 @@
 #ifndef ANDROID_MEDIA_TUNERSERVICE_H
 #define ANDROID_MEDIA_TUNERSERVICE_H
 
-#include <aidl/android/media/tv/tunerresourcemanager/ITunerResourceManager.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/ITuner.h>
 #include <aidl/android/media/tv/tuner/BnTunerService.h>
-#include <android/hardware/tv/tuner/1.1/ITuner.h>
-#include <fmq/AidlMessageQueue.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <utils/Mutex.h>
 
-using ::aidl::android::hardware::common::fmq::GrantorDescriptor;
-using ::aidl::android::hardware::common::fmq::MQDescriptor;
-using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+#include <map>
+
+#include "TunerFilter.h"
+#include "TunerHelper.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxCapabilities;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendType;
+using ::aidl::android::hardware::tv::tuner::ITuner;
 using ::aidl::android::media::tv::tuner::BnTunerService;
 using ::aidl::android::media::tv::tuner::ITunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerDescrambler;
+using ::aidl::android::media::tv::tuner::ITunerFilter;
+using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
 using ::aidl::android::media::tv::tuner::ITunerFrontend;
 using ::aidl::android::media::tv::tuner::ITunerLnb;
-using ::aidl::android::media::tv::tuner::TunerDemuxCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDtmbCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendInfo;
-using ::aidl::android::media::tv::tunerresourcemanager::ITunerResourceManager;
-
-using ::android::hardware::details::logError;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::kSynchronizedReadWrite;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::tv::tuner::V1_0::DemuxCapabilities;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendInfo;
-using ::android::hardware::tv::tuner::V1_0::IDemux;
-using ::android::hardware::tv::tuner::V1_0::IDescrambler;
-using ::android::hardware::tv::tuner::V1_0::IFilter;
-using ::android::hardware::tv::tuner::V1_0::IFilterCallback;
-using ::android::hardware::tv::tuner::V1_0::ITuner;
-using ::android::hardware::tv::tuner::V1_0::Result;
-
-using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::Mutex;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
-
-const static int TUNER_HAL_VERSION_UNKNOWN = 0;
-const static int TUNER_HAL_VERSION_1_0 = 1 << 16;
-const static int TUNER_HAL_VERSION_1_1 = (1 << 16) | 1;
-// System Feature defined in PackageManager
-static const ::android::String16 FEATURE_TUNER(::android::String16("android.hardware.tv.tuner"));
-
-typedef enum {
-    FRONTEND,
-    LNB,
-    DEMUX,
-    DESCRAMBLER,
-} TunerResourceType;
-
-struct FilterCallback : public IFilterCallback {
-    ~FilterCallback() {}
-    Return<void> onFilterEvent(const DemuxFilterEvent&) {
-        return Void();
-    }
-    Return<void> onFilterStatus(const DemuxFilterStatus) {
-        return Void();
-    }
-};
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerService : public BnTunerService {
-    typedef AidlMessageQueue<int8_t, SynchronizedReadWrite> AidlMessageQueue;
-    typedef MessageQueue<uint8_t, kSynchronizedReadWrite> HidlMessageQueue;
-    typedef MQDescriptor<int8_t, SynchronizedReadWrite> AidlMQDesc;
-
 public:
     static char const *getServiceName() { return "media.tuner"; }
     static binder_status_t instantiate();
     TunerService();
     virtual ~TunerService();
 
-    Status getFrontendIds(vector<int32_t>* ids) override;
-    Status getFrontendInfo(int32_t id, TunerFrontendInfo* _aidl_return) override;
-    Status getFrontendDtmbCapabilities(
-            int32_t id, TunerFrontendDtmbCapabilities* _aidl_return) override;
-    Status openFrontend(
-            int32_t frontendHandle, shared_ptr<ITunerFrontend>* _aidl_return) override;
-    Status openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) override;
-    Status openLnbByName(const string& lnbName, shared_ptr<ITunerLnb>* _aidl_return) override;
-    Status openDemux(int32_t demuxHandle, std::shared_ptr<ITunerDemux>* _aidl_return) override;
-    Status getDemuxCaps(TunerDemuxCapabilities* _aidl_return) override;
-    Status openDescrambler(int32_t descramblerHandle,
-            std::shared_ptr<ITunerDescrambler>* _aidl_return) override;
-    Status getTunerHalVersion(int* _aidl_return) override;
+    ::ndk::ScopedAStatus getFrontendIds(vector<int32_t>* out_ids) override;
+    ::ndk::ScopedAStatus getFrontendInfo(int32_t in_frontendHandle,
+                                         FrontendInfo* _aidl_return) override;
+    ::ndk::ScopedAStatus openFrontend(int32_t in_frontendHandle,
+                                      shared_ptr<ITunerFrontend>* _aidl_return) override;
+    ::ndk::ScopedAStatus openLnb(int32_t in_lnbHandle,
+                                 shared_ptr<ITunerLnb>* _aidl_return) override;
+    ::ndk::ScopedAStatus openLnbByName(const string& in_lnbName,
+                                       shared_ptr<ITunerLnb>* _aidl_return) override;
+    ::ndk::ScopedAStatus openDemux(int32_t in_demuxHandle,
+                                   shared_ptr<ITunerDemux>* _aidl_return) override;
+    ::ndk::ScopedAStatus getDemuxCaps(DemuxCapabilities* _aidl_return) override;
+    ::ndk::ScopedAStatus openDescrambler(int32_t in_descramblerHandle,
+                                         shared_ptr<ITunerDescrambler>* _aidl_return) override;
+    ::ndk::ScopedAStatus getTunerHalVersion(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus openSharedFilter(const string& in_filterToken,
+                                          const shared_ptr<ITunerFilterCallback>& in_cb,
+                                          shared_ptr<ITunerFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus setLna(bool in_bEnable) override;
+    ::ndk::ScopedAStatus setMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                 int32_t in_maxNumber) override;
+    ::ndk::ScopedAStatus getMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                 int32_t* _aidl_return) override;
 
-    // TODO: create a map between resource id and handles.
-    static int getResourceIdFromHandle(int resourceHandle, int /*type*/) {
-        return (resourceHandle & 0x00ff0000) >> 16;
-    }
+    string addFilterToShared(const shared_ptr<TunerFilter>& sharedFilter);
+    void removeSharedFilter(const shared_ptr<TunerFilter>& sharedFilter);
 
-    int getResourceHandleFromId(int id, int resourceType) {
-        // TODO: build up randomly generated id to handle mapping
-        return (resourceType & 0x000000ff) << 24
-                | (id << 16)
-                | (mResourceRequestCount++ & 0xffff);
-    }
+    static shared_ptr<TunerService> getTunerService();
 
 private:
     bool hasITuner();
-    bool hasITuner_1_1();
     void updateTunerResources();
+    vector<TunerFrontendInfo> getTRMFrontendInfos();
+    vector<int32_t> getTRMLnbHandles();
 
-    void updateFrontendResources();
-    void updateLnbResources();
-    Result getHidlFrontendIds(hidl_vec<FrontendId>& ids);
-    Result getHidlFrontendInfo(int id, FrontendInfo& info);
-    vector<int> getLnbHandles();
-
-    TunerDemuxCapabilities getAidlDemuxCaps(DemuxCapabilities caps);
-    TunerFrontendInfo convertToAidlFrontendInfo(FrontendInfo halInfo);
-
-    sp<ITuner> mTuner;
-    sp<::android::hardware::tv::tuner::V1_1::ITuner> mTuner_1_1;
-
-    shared_ptr<ITunerResourceManager> mTunerResourceManager;
-    int mResourceRequestCount = 0;
-
+    shared_ptr<ITuner> mTuner;
     int mTunerVersion = TUNER_HAL_VERSION_UNKNOWN;
+    Mutex mSharedFiltersLock;
+    map<string, shared_ptr<TunerFilter>> mSharedFilters;
+
+    static shared_ptr<TunerService> sTunerService;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERSERVICE_H
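
The handle packing removed from TunerService.h (now obtained through TunerHelper::getResourceHandleFromId) follows the layout visible in the deleted code: resource type in the top byte, resource id in the next byte, and a rolling request count in the low 16 bits. A self-contained sketch of that layout, assuming TunerHelper keeps the same scheme the deleted code used:

    #include <cstdint>
    #include <iostream>

    // Packs a resource handle the same way as the code deleted above:
    //   bits 24-31: resource type, bits 16-23: resource id, bits 0-15: request count.
    static int32_t packResourceHandle(int32_t id, int32_t resourceType,
                                      int32_t& requestCount) {
        return ((resourceType & 0xff) << 24) | (id << 16) | (requestCount++ & 0xffff);
    }

    // Inverse of the packing above, matching the removed getResourceIdFromHandle().
    static int32_t resourceIdFromHandle(int32_t handle) {
        return (handle & 0x00ff0000) >> 16;
    }

    int main() {
        int32_t requestCount = 0;
        int32_t handle = packResourceHandle(/*id=*/3, /*resourceType=*/0 /*FRONTEND*/,
                                            requestCount);
        std::cout << std::hex << handle << " -> id "
                  << resourceIdFromHandle(handle) << std::endl;
        return 0;
    }
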
diff --git a/services/tuner/TunerTimeFilter.cpp b/services/tuner/TunerTimeFilter.cpp
index ea9da30..73cd6b4 100644
--- a/services/tuner/TunerTimeFilter.cpp
+++ b/services/tuner/TunerTimeFilter.cpp
@@ -18,97 +18,91 @@
 
 #include "TunerTimeFilter.h"
 
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::Constant64Bit;
+#include <aidl/android/hardware/tv/tuner/Constant64Bit.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
 
+using ::aidl::android::hardware::tv::tuner::Constant64Bit;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
-TunerTimeFilter::TunerTimeFilter(sp<ITimeFilter> timeFilter) {
+TunerTimeFilter::TunerTimeFilter(shared_ptr<ITimeFilter> timeFilter) {
     mTimeFilter = timeFilter;
 }
 
 TunerTimeFilter::~TunerTimeFilter() {
-    mTimeFilter = NULL;
+    mTimeFilter = nullptr;
 }
 
-Status TunerTimeFilter::setTimeStamp(int64_t timeStamp) {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::setTimeStamp(int64_t timeStamp) {
+    if (mTimeFilter == nullptr) {
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mTimeFilter->setTimeStamp(timeStamp);
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mTimeFilter->setTimeStamp(timeStamp);
 }
 
-Status TunerTimeFilter::clearTimeStamp() {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::clearTimeStamp() {
+    if (mTimeFilter == nullptr) {
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status = mTimeFilter->clearTimeStamp();
-    if (status != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-    }
-    return Status::ok();
+    return mTimeFilter->clearTimeStamp();
 }
 
-Status TunerTimeFilter::getSourceTime(int64_t* _aidl_return) {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::getSourceTime(int64_t* _aidl_return) {
+    if (mTimeFilter == nullptr) {
         *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    mTimeFilter->getSourceTime(
-            [&](Result r, uint64_t t) {
-                status = r;
-                *_aidl_return = t;
-            });
-    if (status != Result::SUCCESS) {
+    auto status = mTimeFilter->getSourceTime(_aidl_return);
+    if (!status.isOk()) {
         *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
     }
-    return Status::ok();
+    return status;
 }
 
-Status TunerTimeFilter::getTimeStamp(int64_t* _aidl_return) {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::getTimeStamp(int64_t* _aidl_return) {
+    if (mTimeFilter == nullptr) {
         *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result status;
-    mTimeFilter->getTimeStamp(
-            [&](Result r, uint64_t t) {
-                status = r;
-                *_aidl_return = t;
-            });
-    if (status != Result::SUCCESS) {
+    auto status = mTimeFilter->getTimeStamp(_aidl_return);
+    if (!status.isOk()) {
         *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
-        return Status::fromServiceSpecificError(static_cast<int32_t>(status));
     }
-    return Status::ok();
+    return status;
 }
 
-Status TunerTimeFilter::close() {
-    if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::close() {
+    if (mTimeFilter == nullptr) {
         ALOGE("ITimeFilter is not initialized");
-        return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
     }
 
-    Result res = mTimeFilter->close();
-    mTimeFilter = NULL;
+    auto status = mTimeFilter->close();
+    mTimeFilter = nullptr;
 
-    if (res != Result::SUCCESS) {
-        return Status::fromServiceSpecificError(static_cast<int32_t>(res));
-    }
-    return Status::ok();
+    return status;
 }
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
 }  // namespace android
+}  // namespace aidl
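
The TunerTimeFilter changes above follow the conversion pattern used throughout this change: the HIDL "result delivered through a synchronous callback" idiom is replaced by AIDL NDK methods that fill an out parameter and return a status object, so the wrapper can forward the status instead of re-wrapping a Result enum. A minimal sketch of the two idioms side by side; the types are simplified stand-ins, not the real HAL interfaces.

    #include <cstdint>
    #include <functional>
    #include <iostream>

    // Simplified stand-in for the HAL result type (not the real AIDL/HIDL type).
    enum class Result { SUCCESS, UNAVAILABLE };

    // Old HIDL style: the value and the Result come back through a callback.
    void hidlGetTimeStamp(const std::function<void(Result, uint64_t)>& cb) {
        cb(Result::SUCCESS, 42);
    }

    // New AIDL NDK style: the value goes to an out parameter, the status is returned
    // and can be forwarded to the caller as-is.
    Result aidlGetTimeStamp(int64_t* out) {
        *out = 42;
        return Result::SUCCESS;
    }

    int main() {
        Result status = Result::UNAVAILABLE;
        uint64_t hidlValue = 0;
        hidlGetTimeStamp([&](Result r, uint64_t t) { status = r; hidlValue = t; });

        int64_t aidlValue = 0;
        Result aidlStatus = aidlGetTimeStamp(&aidlValue);

        std::cout << hidlValue << " " << aidlValue << " ok="
                  << (status == Result::SUCCESS && aidlStatus == Result::SUCCESS)
                  << std::endl;
        return 0;
    }
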
diff --git a/services/tuner/TunerTimeFilter.h b/services/tuner/TunerTimeFilter.h
index d675319..31a47cd 100644
--- a/services/tuner/TunerTimeFilter.h
+++ b/services/tuner/TunerTimeFilter.h
@@ -17,38 +17,40 @@
 #ifndef ANDROID_MEDIA_TUNERFTIMEFILTER_H
 #define ANDROID_MEDIA_TUNERFTIMEFILTER_H
 
+#include <aidl/android/hardware/tv/tuner/ITimeFilter.h>
 #include <aidl/android/media/tv/tuner/BnTunerTimeFilter.h>
-#include <android/hardware/tv/tuner/1.0/ITimeFilter.h>
-#include <android/hardware/tv/tuner/1.1/types.h>
-#include <media/stagefright/foundation/ADebug.h>
 #include <utils/Log.h>
 
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerTimeFilter;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::ITimeFilter;
 
 using namespace std;
 
+namespace aidl {
 namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
 
 class TunerTimeFilter : public BnTunerTimeFilter {
 
 public:
-    TunerTimeFilter(sp<ITimeFilter> timeFilter);
+    TunerTimeFilter(shared_ptr<ITimeFilter> timeFilter);
     virtual ~TunerTimeFilter();
-    Status setTimeStamp(int64_t timeStamp) override;
-    Status clearTimeStamp() override;
-    Status getSourceTime(int64_t* _aidl_return) override;
-    Status getTimeStamp(int64_t* _aidl_return) override;
-    Status close() override;
+
+    ::ndk::ScopedAStatus setTimeStamp(int64_t in_timeStamp) override;
+    ::ndk::ScopedAStatus clearTimeStamp() override;
+    ::ndk::ScopedAStatus getSourceTime(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getTimeStamp(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus close() override;
 
 private:
-    sp<ITimeFilter> mTimeFilter;
+    shared_ptr<ITimeFilter> mTimeFilter;
 };
 
-} // namespace android
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
 
 #endif // ANDROID_MEDIA_TUNERFTIMEFILTER_H
diff --git a/services/tuner/aidl/android/media/tv/OWNERS b/services/tuner/aidl/android/media/tv/OWNERS
index 0ceb8e8..bf9fe34 100644
--- a/services/tuner/aidl/android/media/tv/OWNERS
+++ b/services/tuner/aidl/android/media/tv/OWNERS
@@ -1,2 +1,2 @@
-nchalko@google.com
+hgchen@google.com
 quxiangfang@google.com
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
index 73b00ae..fa326b2 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
@@ -16,6 +16,8 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.DemuxFilterType;
+import android.hardware.tv.tuner.DvrType;
 import android.media.tv.tuner.ITunerDvr;
 import android.media.tv.tuner.ITunerDvrCallback;
 import android.media.tv.tuner.ITunerFilter;
@@ -36,10 +38,15 @@
     void setFrontendDataSource(in ITunerFrontend frontend);
 
     /**
+     * Set a frontend resource by ID as the data input of the demux.
+     */
+    void setFrontendDataSourceById(in int frontendId);
+
+    /**
      * Open a new filter in the demux
      */
-    ITunerFilter openFilter(
-        in int mainType, in int subtype, in int bufferSize, in ITunerFilterCallback cb);
+    ITunerFilter openFilter(in DemuxFilterType type, in int bufferSize,
+        in ITunerFilterCallback cb);
 
     /**
      * Open time filter of the demux.
@@ -59,7 +66,7 @@
     /**
      * Open a DVR (Digital Video Record) instance in the demux.
      */
-    ITunerDvr openDvr(in int dvbType, in int bufferSize, in ITunerDvrCallback cb);
+    ITunerDvr openDvr(in DvrType dvbType, in int bufferSize, in ITunerDvrCallback cb);
 
     /**
      * Connect Conditional Access Modules (CAM) through Common Interface (CI).
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
index 7370eee..39d193c 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
@@ -16,9 +16,9 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.DemuxPid;
 import android.media.tv.tuner.ITunerDemux;
 import android.media.tv.tuner.ITunerFilter;
-import android.media.tv.tuner.TunerDemuxPid;
 
 /**
  * Tuner Demux interface handles tuner related operations.
@@ -39,12 +39,12 @@
     /**
      * Add packets' PID to the descrambler for descrambling.
      */
-    void addPid(in TunerDemuxPid pid, in ITunerFilter optionalSourceFilter);
+    void addPid(in DemuxPid pid, in ITunerFilter optionalSourceFilter);
 
     /**
      * Remove packets' PID from the descrambler.
      */
-    void removePid(in TunerDemuxPid pid, in ITunerFilter optionalSourceFilter);
+    void removePid(in DemuxPid pid, in ITunerFilter optionalSourceFilter);
 
     /**
      * Close a new interface of ITunerDescrambler.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
index 8f1601b..2c01c4e 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
@@ -18,8 +18,8 @@
 
 import android.hardware.common.fmq.MQDescriptor;
 import android.hardware.common.fmq.SynchronizedReadWrite;
+import android.hardware.tv.tuner.DvrSettings;
 import android.media.tv.tuner.ITunerFilter;
-import android.media.tv.tuner.TunerDvrSettings;
 
 /**
  * Tuner Dvr interface handles tuner related operations.
@@ -35,7 +35,7 @@
     /**
      * Configure the DVR.
      */
-    void configure(in TunerDvrSettings settings);
+    void configure(in DvrSettings settings);
 
     /**
      * Attach one filter to DVR interface for recording.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
index e234fe5..3043d24 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
@@ -16,6 +16,9 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.PlaybackStatus;
+import android.hardware.tv.tuner.RecordStatus;
+
 /**
  * TunerDvrCallback interface handles tuner dvr related callbacks.
  *
@@ -25,10 +28,10 @@
     /**
      * Notify the client a new status of the demux's record.
      */
-    void onRecordStatus(in int status);
+    void onRecordStatus(in RecordStatus status);
 
     /**
      * Notify the client a new status of the demux's playback.
      */
-    void onPlaybackStatus(in int status);
+    void onPlaybackStatus(in PlaybackStatus status);
 }
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
index 10d4c3b..dc40f03 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
@@ -19,8 +19,11 @@
 import android.hardware.common.fmq.MQDescriptor;
 import android.hardware.common.fmq.SynchronizedReadWrite;
 import android.hardware.common.NativeHandle;
-import android.media.tv.tuner.TunerFilterConfiguration;
-import android.media.tv.tuner.TunerFilterSharedHandleInfo;
+import android.hardware.tv.tuner.DemuxFilterSettings;
+import android.hardware.tv.tuner.DemuxFilterType;
+import android.hardware.tv.tuner.AvStreamType;
+import android.hardware.tv.tuner.DemuxFilterMonitorEventType;
+import android.hardware.tv.tuner.FilterDelayHint;
 
 /**
  * Tuner Filter interface handles tuner related operations.
@@ -46,12 +49,12 @@
     /**
      * Configure the filter.
      */
-    void configure(in TunerFilterConfiguration config);
+    void configure(in DemuxFilterSettings settings);
 
     /**
      * Configure the monitor event of the Filter.
      */
-    void configureMonitorEvent(in int monitorEventType);
+    void configureMonitorEvent(in int monitorEventTypes);
 
     /**
      * Configure the context id of the IP Filter.
@@ -61,12 +64,12 @@
     /**
      * Configure the stream type of the media Filter.
      */
-    void configureAvStreamType(in int avStreamType);
+    void configureAvStreamType(in AvStreamType avStreamType);
 
     /**
      * Get the a/v shared memory handle
      */
-    TunerFilterSharedHandleInfo getAvSharedHandleInfo();
+    long getAvSharedHandle(out NativeHandle avMemory);
 
     /**
      * Release the handle reported by the HAL for AV memory.
@@ -97,4 +100,28 @@
      * Close the filter.
      */
     void close();
+
+    /**
+     * Acquire a new SharedFilter token.
+     *
+     * @return a token of the newly created SharedFilter instance.
+     */
+    String acquireSharedFilterToken();
+
+    /**
+     * Free a SharedFilter token.
+     *
+     * @param filterToken the SharedFilter token to be released.
+     *     Once released, the token can no longer be used to open a shared filter.
+     */
+    void freeSharedFilterToken(in String filterToken);
+
+    /**
+     * Get filter type.
+     *
+     * @return filter type.
+     */
+    DemuxFilterType getFilterType();
+
+    void setDelayHint(in FilterDelayHint hint);
 }
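
The new ITunerFilter methods above, together with ITunerService.openSharedFilter, define a token hand-off: the filter's owner mints a token, another client opens the same filter through the service with that token, and the owner frees the token when sharing ends. A hedged C++ sketch of that call sequence; the proxy types and their trivial bodies are placeholders, only the sequence itself is taken from the interface documentation in this change.

    #include <memory>
    #include <string>

    struct ITunerFilterCallback {};  // stand-in for the generated callback proxy

    struct ITunerFilter {            // stand-in for the generated filter proxy
        std::string acquireSharedFilterToken() { return "token-from-service"; }
        void freeSharedFilterToken(const std::string&) {}
    };

    struct ITunerService {           // stand-in for the generated service proxy
        std::shared_ptr<ITunerFilter> openSharedFilter(
                const std::string&, const std::shared_ptr<ITunerFilterCallback>&) {
            return std::make_shared<ITunerFilter>();
        }
    };

    void shareFilterAcrossClients(ITunerService& service, ITunerFilter& ownerFilter,
                                  const std::shared_ptr<ITunerFilterCallback>& otherClientCb) {
        // Owner process: mint a token for its filter.
        std::string token = ownerFilter.acquireSharedFilterToken();

        // Other process: open the same filter through the service using the token.
        std::shared_ptr<ITunerFilter> shared = service.openSharedFilter(token, otherClientCb);
        (void)shared;

        // Owner process: release the token when sharing is no longer needed.
        ownerFilter.freeSharedFilterToken(token);
    }

    int main() {
        ITunerService service;
        ITunerFilter ownerFilter;
        shareFilterAcrossClients(service, ownerFilter,
                                 std::make_shared<ITunerFilterCallback>());
        return 0;
    }
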
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
index e7a52a7..6c53042 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
@@ -16,7 +16,8 @@
 
 package android.media.tv.tuner;
 
-import android.media.tv.tuner.TunerFilterEvent;
+import android.hardware.tv.tuner.DemuxFilterEvent;
+import android.hardware.tv.tuner.DemuxFilterStatus;
 
 /**
  * TunerFilterCallback interface handles tuner filter related callbacks.
@@ -27,10 +28,10 @@
     /**
      * Notify the client a new status of a filter.
      */
-    void onFilterStatus(int status);
+    void onFilterStatus(in DemuxFilterStatus status);
 
     /**
      * Notify the client that a new filter event happened.
      */
-    void onFilterEvent(in TunerFilterEvent[] filterEvent);
+    void onFilterEvent(in DemuxFilterEvent[] events);
 }
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
index ef0255a..96f285f 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,10 +16,12 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.FrontendScanType;
+import android.hardware.tv.tuner.FrontendSettings;
+import android.hardware.tv.tuner.FrontendStatus;
+import android.hardware.tv.tuner.FrontendStatusType;
 import android.media.tv.tuner.ITunerFrontendCallback;
 import android.media.tv.tuner.ITunerLnb;
-import android.media.tv.tuner.TunerFrontendSettings;
-import android.media.tv.tuner.TunerFrontendStatus;
 
 /**
  * Tuner Frontend interface handles frontend related operations.
@@ -39,7 +41,7 @@
      *
      * @param settings the settings to tune with.
      */
-    void tune(in TunerFrontendSettings settings);
+    void tune(in FrontendSettings settings);
 
     /**
      * Stop the previous tuning.
@@ -52,7 +54,7 @@
      * @param settings the settings to scan with.
      * @param frontendScanType scan with given type.
      */
-    void scan(in TunerFrontendSettings settings, in int frontendScanType);
+    void scan(in FrontendSettings settings, in FrontendScanType frontendScanType);
 
     /**
      * Stop the previous scanning.
@@ -67,13 +69,6 @@
     void setLnb(in ITunerLnb lnb);
 
     /**
-     * Enable or Disable Low Noise Amplifier (LNA).
-     *
-     * @param bEnable enable Lna or not.
-     */
-    void setLna(in boolean bEnable);
-
-    /**
      * Link Frontend to the cicam with given id.
      *
      * @return lts id
@@ -93,15 +88,15 @@
     /**
      * Gets the statuses of the frontend.
      */
-    TunerFrontendStatus[] getStatus(in int[] statusTypes);
-
-    /**
-     * Gets the 1.1 extended statuses of the frontend.
-     */
-    TunerFrontendStatus[] getStatusExtended_1_1(in int[] statusTypes);
+    FrontendStatus[] getStatus(in FrontendStatusType[] statusTypes);
 
     /**
      * Gets the id of the frontend.
      */
     int getFrontendId();
+
+    /**
+     * Request hardware information about the frontend.
+     */
+    String getHardwareInfo();
 }
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
index c92f5ee..d0ab11d 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,7 +16,9 @@
 
 package android.media.tv.tuner;
 
-import android.media.tv.tuner.TunerFrontendScanMessage;
+import android.hardware.tv.tuner.FrontendEventType;
+import android.hardware.tv.tuner.FrontendScanMessage;
+import android.hardware.tv.tuner.FrontendScanMessageType;
 
 /**
  * TunerFrontendCallback interface handles tuner frontend related callbacks.
@@ -24,13 +26,14 @@
  * {@hide}
  */
 interface ITunerFrontendCallback {
-        /**
+    /**
      * Notify the client that a new event happened on the frontend.
      */
-    void onEvent(in int frontendEventType);
+    void onEvent(in FrontendEventType frontendEventType);
 
     /**
      * notify the client of scan messages.
      */
-    void onScanMessage(in int messageType, in TunerFrontendScanMessage message);
+    void onScanMessage(in FrontendScanMessageType messageType,
+        in FrontendScanMessage message);
 }
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
index d62145e..79f0761 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
@@ -16,6 +16,9 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.LnbPosition;
+import android.hardware.tv.tuner.LnbTone;
+import android.hardware.tv.tuner.LnbVoltage;
 import android.media.tv.tuner.ITunerLnbCallback;
 
 /**
@@ -32,17 +35,17 @@
     /**
      * Set the lnb's power voltage.
      */
-    void setVoltage(in int voltage);
+    void setVoltage(in LnbVoltage voltage);
 
     /**
      * Set the lnb's tone mode.
      */
-    void setTone(in int tone);
+    void setTone(in LnbTone tone);
 
     /**
      * Select the lnb's position.
      */
-    void setSatellitePosition(in int position);
+    void setSatellitePosition(in LnbPosition position);
 
     /**
      * Sends DiSEqC (Digital Satellite Equipment Control) message.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
index 117352f..2b6eb5f 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
@@ -16,6 +16,8 @@
 
 package android.media.tv.tuner;
 
+import android.hardware.tv.tuner.LnbEventType;
+
 /**
  * TuneLnbCallback interface handles tuner lnb related callbacks.
  *
@@ -25,7 +27,7 @@
     /**
      * Notify the client that a new event happened on the Lnb.
      */
-    void onEvent(in int lnbEventType);
+    void onEvent(in LnbEventType lnbEventType);
 
     /**
      * notify the client of new DiSEqC message.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
index 755b152..b8084ab 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
@@ -1,5 +1,5 @@
 /**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,16 +16,15 @@
 
 package android.media.tv.tuner;
 
-import android.hardware.common.fmq.MQDescriptor;
-import android.hardware.common.fmq.SynchronizedReadWrite;
-import android.hardware.common.fmq.UnsynchronizedWrite;
+import android.hardware.tv.tuner.DemuxCapabilities;
+import android.hardware.tv.tuner.FrontendInfo;
+import android.hardware.tv.tuner.FrontendType;
 import android.media.tv.tuner.ITunerDemux;
 import android.media.tv.tuner.ITunerDescrambler;
+import android.media.tv.tuner.ITunerFilter;
+import android.media.tv.tuner.ITunerFilterCallback;
 import android.media.tv.tuner.ITunerFrontend;
 import android.media.tv.tuner.ITunerLnb;
-import android.media.tv.tuner.TunerDemuxCapabilities;
-import android.media.tv.tuner.TunerFrontendDtmbCapabilities;
-import android.media.tv.tuner.TunerFrontendInfo;
 
 /**
  * TunerService interface handles tuner related operations.
@@ -33,8 +32,8 @@
  * {@hide}
  */
 //@VintfStability
+@SuppressWarnings(value={"out-array"})
 interface ITunerService {
-
     /**
      * Gets frontend IDs.
      */
@@ -43,15 +42,10 @@
     /**
      * Retrieve the frontend's information.
      *
-     * @param frontendHandle the handle of the frontend granted by TRM.
+     * @param frontendId the ID of the frontend.
      * @return the information of the frontend.
      */
-    TunerFrontendInfo getFrontendInfo(in int frontendHandle);
-
-    /**
-     * Get Dtmb Frontend Capabilities.
-     */
-    TunerFrontendDtmbCapabilities getFrontendDtmbCapabilities(in int id);
+    FrontendInfo getFrontendInfo(in int frontendId);
 
     /**
      * Open a Tuner Frontend interface.
@@ -87,7 +81,7 @@
      *
      * @return the demux’s capabilities.
      */
-    TunerDemuxCapabilities getDemuxCaps();
+    DemuxCapabilities getDemuxCaps();
 
     /* Open a new interface of ITunerDescrambler given a descramblerHandle.
      *
@@ -102,4 +96,38 @@
      * value is unknown version 0.
      */
     int getTunerHalVersion();
+
+    /**
+     * Open a new SharedFilter instance of ITunerFilter.
+     *
+     * @param filterToken the SharedFilter token created by ITunerFilter.
+     * @param cb the ITunerFilterCallback used to receive filter callback events.
+     * @return a newly created ITunerFilter interface.
+     */
+    ITunerFilter openSharedFilter(in String filterToken, in ITunerFilterCallback cb);
+
+    /**
+     * Enable or disable the Low Noise Amplifier (LNA).
+     *
+     * @param bEnable true to enable the LNA; false to disable it.
+     */
+    void setLna(in boolean bEnable);
+
+    /**
+     * Set the maximum number of usable frontends for a given frontend type. Clients use this
+     * to enable or disable frontends when the user changes the cable connection status.
+     *
+     * @param frontendType the frontend type for which the maximum usable number will be set.
+     * @param maxNumber the new maximum usable number.
+     */
+    void setMaxNumberOfFrontends(in FrontendType frontendType, in int maxNumber);
+
+    /**
+     * Get the maximum number of usable frontends for a given frontend type.
+     *
+     * @param frontendType the frontend type for which the maximum usable number will be queried.
+     *
+     * @return the maximum usable number of the queried frontend type.
+     */
+    int getMaxNumberOfFrontends(in FrontendType frontendType);
 }
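
The setMaxNumberOfFrontends/getMaxNumberOfFrontends pair added above implies per-type bookkeeping of how many frontends may be handed out. The following is an illustrative-only sketch of that idea (not the actual TunerService implementation); the "unset means unlimited" rule and the enum subset are assumptions made for the example.

    #include <cstdint>
    #include <iostream>
    #include <map>

    enum class FrontendType { DVBT, DVBS, DVBC };  // subset, for illustration only

    class FrontendLimits {
    public:
        void setMax(FrontendType type, int32_t maxNumber) { mMax[type] = maxNumber; }

        int32_t getMax(FrontendType type) const {
            auto it = mMax.find(type);
            // Assumption for this sketch: a type with no configured cap is unlimited.
            return it == mMax.end() ? INT32_MAX : it->second;
        }

        // A resource manager could consult this before granting a frontend.
        bool canGrant(FrontendType type, int32_t currentlyInUse) const {
            return currentlyInUse < getMax(type);
        }

    private:
        std::map<FrontendType, int32_t> mMax;
    };

    int main() {
        FrontendLimits limits;
        limits.setMax(FrontendType::DVBC, 0);  // e.g. the user unplugged the cable input
        std::cout << limits.canGrant(FrontendType::DVBC, 0) << " "
                  << limits.canGrant(FrontendType::DVBT, 1) << std::endl;
        return 0;
    }
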
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl
deleted file mode 100644
index df3374a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Extra Meta Data from AD (Audio Descriptor) according to ETSI TS 101 154 V2.1.1.
- *
- * {@hide}
- */
-parcelable TunerAudioExtraMetaData {
-    byte adFade;
-
-    byte adPan;
-
-    byte versionTextTag;
-
-    byte adGainCenter;
-
-    byte adGainFront;
-
-    byte adGainSurround;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl
deleted file mode 100644
index 71ab151..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Tuner Demux capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerDemuxCapabilities {
-    int numDemux;
-
-    int numRecord;
-
-    int numPlayback;
-
-    int numTsFilter;
-
-    int numSectionFilter;
-
-    int numAudioFilter;
-
-    int numVideoFilter;
-
-    int numPesFilter;
-
-    int numPcrFilter;
-
-    int numBytesInSectionFilter;
-
-    int filterCaps;
-
-    int[] linkCaps;
-
-    boolean bTimeFilter;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl
deleted file mode 100644
index b65f404..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Demux ip address configure.
- *
- * {@hide}
- */
-parcelable TunerDemuxIpAddress {
-    boolean isIpV6;
-
-    byte[] addr;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl
deleted file mode 100644
index b244388..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerDemuxIpAddress;
-
-/**
- * Filter Settings for an Ip filter.
- *
- * {@hide}
- */
-parcelable TunerDemuxIpAddressSettings {
-    TunerDemuxIpAddress srcIpAddress;
-
-    TunerDemuxIpAddress dstIpAddress;
-
-    char srcPort;
-
-    char dstPort;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
deleted file mode 100644
index 8b238b6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Demux PID interface.
- *
- * {@hide}
- */
-union TunerDemuxPid {
-    char tPid;
-
-    char mmtpPid;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl
deleted file mode 100644
index 4ec4d75..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvr Settings interface.
- *
- * {@hide}
- */
-parcelable TunerDvrSettings {
-    int statusMask;
-
-    int lowThreshold;
-
-    int highThreshold;
-
-    int dataFormat;
-
-    int packetSize;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl
deleted file mode 100644
index 4c9e3af..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for an ALP filter.
- *
- * {@hide}
- */
-parcelable TunerFilterAlpConfiguration {
-    byte packetType;
-
-    byte lengthType;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl
deleted file mode 100644
index 6bf88f0..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for a Video and Audio.
- *
- * {@hide}
- */
-parcelable TunerFilterAvSettings {
-    /**
-     * true if the filter output goes to decoder directly in pass through mode.
-     */
-    boolean isPassthrough;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
deleted file mode 100644
index 808cfd1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterAlpConfiguration;
-import android.media.tv.tuner.TunerFilterIpConfiguration;
-import android.media.tv.tuner.TunerFilterMmtpConfiguration;
-import android.media.tv.tuner.TunerFilterTlvConfiguration;
-import android.media.tv.tuner.TunerFilterTsConfiguration;
-
-/**
- * Filter configuration.
- *
- * {@hide}
- */
-union TunerFilterConfiguration {
-    TunerFilterTsConfiguration ts;
-
-    TunerFilterMmtpConfiguration mmtp;
-
-    TunerFilterIpConfiguration ip;
-
-    TunerFilterTlvConfiguration tlv;
-
-    TunerFilterAlpConfiguration alp;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl
deleted file mode 100644
index b971dd3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Download data.
- *
- * {@hide}
- */
-parcelable TunerFilterDownloadEvent {
-    int itemId;
-
-    /**
-     * MPU sequence number of filtered data (only for MMTP)
-     */
-    int mpuSequenceNumber;
-
-    int itemFragmentIndex;
-
-    int lastItemFragmentIndex;
-
-    /**
-     * Data size in bytes of filtered data
-     */
-    char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl
deleted file mode 100644
index 417a5fe..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for downloading.
- *
- * {@hide}
- */
-parcelable TunerFilterDownloadSettings {
-    int downloadId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
deleted file mode 100644
index 1305510..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterDownloadEvent;
-import android.media.tv.tuner.TunerFilterIpPayloadEvent;
-import android.media.tv.tuner.TunerFilterMediaEvent;
-import android.media.tv.tuner.TunerFilterMmtpRecordEvent;
-import android.media.tv.tuner.TunerFilterMonitorEvent;
-import android.media.tv.tuner.TunerFilterPesEvent;
-import android.media.tv.tuner.TunerFilterSectionEvent;
-import android.media.tv.tuner.TunerFilterTemiEvent;
-import android.media.tv.tuner.TunerFilterTsRecordEvent;
-
-/**
- * Filter events.
- *
- * {@hide}
- */
-union TunerFilterEvent {
-    TunerFilterMediaEvent media;
-
-    TunerFilterSectionEvent section;
-
-    TunerFilterPesEvent pes;
-
-    TunerFilterTsRecordEvent tsRecord;
-
-    TunerFilterMmtpRecordEvent mmtpRecord;
-
-    TunerFilterDownloadEvent download;
-
-    TunerFilterIpPayloadEvent ipPayload;
-
-    TunerFilterTemiEvent temi;
-
-    TunerFilterMonitorEvent monitor;
-
-    int startId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl
deleted file mode 100644
index 8b4d889..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerDemuxIpAddressSettings;
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for an IP filter.
- *
- * {@hide}
- */
-parcelable TunerFilterIpConfiguration {
-    TunerDemuxIpAddressSettings ipAddr;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl
deleted file mode 100644
index d5bda93..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for IP payload data.
- *
- * {@hide}
- */
-parcelable TunerFilterIpPayloadEvent {
-    /**
-     * Data size in bytes of ip data
-     */
-    char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
deleted file mode 100644
index c3dbce9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.hardware.common.NativeHandle;
-import android.media.tv.tuner.TunerAudioExtraMetaData;
-
-/**
- * Filter Event for Audio or Video Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMediaEvent {
-    char streamId;
-
-    /**
-     * true if PTS is present in PES header.
-     */
-    boolean isPtsPresent;
-
-    /**
-     * The Presentation Time Stamp (PTS) for the audio or video frame. It is based on 90KHz
-     * and has the same format as the PTS in ISO/IEC 13818-1.
-     */
-    long pts;
-
-    /**
-     * Data size in bytes of audio or video frame
-     */
-    int dataLength;
-
-    /**
-     *  The offset in the memory block which is shared among multiple
-     *  MediaEvents.
-     */
-    int offset;
-
-    /**
-     * A handle associated with the memory where the audio or video data stays.
-     */
-    NativeHandle avMemory;
-
-    /**
-     * True if the avMemory is in a secure area and isn't mappable.
-     */
-    boolean isSecureMemory;
-
-    /**
-     * An ID used by the HAL to provide additional information for the AV data.
-     * For secure audio, it's the audio handle used by AudioTrack.
-     */
-    long avDataId;
-
-    /**
-     * MPU sequence number of filtered data (only for MMTP)
-     */
-    int mpuSequenceNumber;
-
-    boolean isPesPrivateData;
-
-    /**
-     * If TunerAudioExtraMetaData field is valid or not
-     */
-    boolean isAudioExtraMetaData;
-
-    /**
-     * Only valid when isAudioExtraMetaData is true
-     */
-    TunerAudioExtraMetaData audio;
-}
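
The media event above points into a shared AV memory block: avMemory is the handle, offset and dataLength delimit the frame, and the audio field is only meaningful when isAudioExtraMetaData is true. A minimal C++ sketch of that consumption pattern follows; it uses a hypothetical mirror struct rather than the generated AIDL binding.

    // Illustrative only: a hypothetical mirror of a few TunerFilterMediaEvent fields,
    // showing how offset/dataLength delimit a frame inside a shared AV memory block
    // and how the optional audio metadata is guarded by isAudioExtraMetaData.
    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <vector>

    struct AudioExtraMetaData { uint8_t adFade; uint8_t adPan; };  // hypothetical subset

    struct MediaEventView {
        int64_t pts;                 // 90KHz-based presentation time stamp
        int32_t offset;              // byte offset into the shared memory block
        int32_t dataLength;          // frame size in bytes
        bool isSecureMemory;         // secure buffers are not mappable
        bool isAudioExtraMetaData;   // guards the audio field below
        std::optional<AudioExtraMetaData> audio;
    };

    // Returns the frame bytes if the event can be safely read from a mapped block.
    std::optional<std::vector<uint8_t>> readFrame(const MediaEventView& ev,
                                                  const std::vector<uint8_t>& mappedAvBlock) {
        if (ev.isSecureMemory) return std::nullopt;  // cannot map secure memory
        if (ev.offset < 0 || ev.dataLength < 0) return std::nullopt;
        const size_t end = static_cast<size_t>(ev.offset) + static_cast<size_t>(ev.dataLength);
        if (end > mappedAvBlock.size()) return std::nullopt;  // out of bounds
        return std::vector<uint8_t>(mappedAvBlock.begin() + ev.offset, mappedAvBlock.begin() + end);
    }

    int main() {
        std::vector<uint8_t> block(4096, 0xAB);
        MediaEventView ev{/*pts=*/900000, /*offset=*/128, /*dataLength=*/512,
                          /*isSecureMemory=*/false, /*isAudioExtraMetaData=*/false, std::nullopt};
        auto frame = readFrame(ev, block);
        std::printf("frame bytes: %zu\n", frame ? frame->size() : 0);
        return 0;
    }
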
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl
deleted file mode 100644
index 162ca8e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for an MMTP filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMmtpConfiguration {
-    char mmtpPid;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl
deleted file mode 100644
index b8871cf..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for an MMTP Record Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMmtpRecordEvent {
-    int scHevcIndexMask;
-
-    /**
-     * Byte number from beginning of the filter's output
-     */
-    long byteNumber;
-
-    /**
-     * If the current event contains extended information or not
-     */
-    boolean isExtended;
-
-    /**
-     * The Presentation Time Stamp(PTS) for the audio or video frame. It is based on 90KHz
-     * and has the same format as the PTS in ISO/IEC 13818-1.
-     */
-    long pts;
-
-    /**
-     * MPU sequence number of the filtered data. This is only used for MMTP.
-     */
-    int mpuSequenceNumber;
-
-    /**
-     * Specifies the address of the first macroblock in the slice defined in ITU-T Rec. H.264.
-     */
-    int firstMbInSlice;
-
-    /**
-     * TS index mask.
-     */
-    int tsIndexMask;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl
deleted file mode 100644
index 31ab5e6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter monitor events.
- *
- * {@hide}
- */
-union TunerFilterMonitorEvent {
-    /**
-     * New scrambling status.
-     */
-    int scramblingStatus;
-
-    /**
-     * New cid for the IP filter.
-     */
-    int cid;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl
deleted file mode 100644
index 312f314..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for PES data.
- *
- * {@hide}
- */
-parcelable TunerFilterPesDataSettings {
-    char streamId;
-
-    boolean isRaw;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
deleted file mode 100644
index dc1ecc6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for PES Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterPesEvent {
-    char streamId;
-
-    /**
-     * Data size in bytes of PES data
-     */
-    char dataLength;
-
-    /**
-     * MPU sequence number of filtered data
-     */
-    int mpuSequenceNumber;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl
deleted file mode 100644
index 29be624..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterScIndexMask;
-
-/**
- * Filter Settings for recording.
- *
- * {@hide}
- */
-parcelable TunerFilterRecordSettings {
-    int tsIndexMask;
-
-    int scIndexType;
-
-    TunerFilterScIndexMask scIndexMask;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl
deleted file mode 100644
index ed37fce..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter SC Index Mask
- *
- * {@hide}
- */
-union TunerFilterScIndexMask {
-    int sc;
-
-    int scHevc;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl
deleted file mode 100644
index dd4f842..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Bit settings of a section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionBits {
-    byte[] filter;
-
-    byte[] mask;
-
-    byte[] mode;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl
deleted file mode 100644
index 00aabe4..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSectionBits;
-import android.media.tv.tuner.TunerFilterSectionTableInfo;
-
-/**
- * Section filter condition settings.
- *
- * {@hide}
- */
-union TunerFilterSectionCondition {
-    TunerFilterSectionBits sectionBits;
-
-    TunerFilterSectionTableInfo tableInfo;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl
deleted file mode 100644
index 5f20926..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionEvent {
-    /**
-     * Table ID of filtered data
-     */
-    char tableId;
-
-    /**
-     * Version number of filtered data
-     */
-    char version;
-
-    /**
-     * Section number of filtered data
-     */
-    char sectionNum;
-
-    /**
-     * Data size in bytes of filtered data
-     */
-    char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl
deleted file mode 100644
index 22129b6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSectionCondition;
-
-/**
- * Filter Settings for a section filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionSettings {
-    TunerFilterSectionCondition condition;
-
-    boolean isCheckCrc;
-
-    boolean isRepeat;
-
-    boolean isRaw;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl
deleted file mode 100644
index cc78c9d..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Table info settings of a section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionTableInfo {
-    char tableId;
-
-    char version;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
deleted file mode 100644
index eb7eaa5..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterAvSettings;
-import android.media.tv.tuner.TunerFilterDownloadSettings;
-import android.media.tv.tuner.TunerFilterPesDataSettings;
-import android.media.tv.tuner.TunerFilterRecordSettings;
-import android.media.tv.tuner.TunerFilterSectionSettings;
-
-/**
- * Filter Settings.
- *
- * {@hide}
- */
-union TunerFilterSettings {
-    boolean nothing;
-
-    TunerFilterAvSettings av;
-
-    TunerFilterSectionSettings section;
-
-    TunerFilterPesDataSettings pesData;
-
-    TunerFilterRecordSettings record;
-
-    TunerFilterDownloadSettings download;
-
-    boolean isPassthrough;
-}
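
TunerFilterSettings is a union, so exactly one member is active at a time; the per-transport configurations in this directory (TS, MMTP, IP, TLV, ALP) pair it with addressing information such as a PID. A minimal sketch follows, using std::variant as a stand-in for the AIDL union rather than the generated binding.

    // Illustrative only: std::variant used as a stand-in for the TunerFilterSettings
    // AIDL union, to show that exactly one alternative is active at a time.
    #include <cstdio>
    #include <variant>

    struct AvSettings { bool isPassthrough; };
    struct SectionSettings { bool isCheckCrc; bool isRepeat; bool isRaw; };
    struct DownloadSettings { int downloadId; };

    using FilterSettings = std::variant<std::monostate,   // the "nothing" case
                                        AvSettings,
                                        SectionSettings,
                                        DownloadSettings>;

    void describe(const FilterSettings& s) {
        if (std::holds_alternative<std::monostate>(s)) {
            std::printf("no settings\n");
        } else if (auto* av = std::get_if<AvSettings>(&s)) {
            std::printf("AV settings, passthrough=%d\n", av->isPassthrough);
        } else if (auto* sec = std::get_if<SectionSettings>(&s)) {
            std::printf("section settings, checkCrc=%d\n", sec->isCheckCrc);
        } else if (auto* dl = std::get_if<DownloadSettings>(&s)) {
            std::printf("download settings, id=%d\n", dl->downloadId);
        }
    }

    int main() {
        describe(FilterSettings{AvSettings{/*isPassthrough=*/true}});
        describe(FilterSettings{SectionSettings{true, false, false}});
        return 0;
    }
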
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl
deleted file mode 100644
index 122dfc3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.hardware.common.NativeHandle;
-
-/**
- * Filter Shared Handle Information.
- *
- * {@hide}
- */
-parcelable TunerFilterSharedHandleInfo {
-    NativeHandle handle;
-    long size;
-}
\ No newline at end of file
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl
deleted file mode 100644
index 4c4e993..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Timed External Media Information (TEMI) data.
- *
- * {@hide}
- */
-parcelable TunerFilterTemiEvent {
-    /**
-     * The Presentation Time Stamp (PTS) for the audio or video frame. It is based on 90KHz
-     * and has the same format as the PTS in ISO/IEC 13818-1.
-     */
-    long pts;
-
-    /**
-     * TEMI Descriptor Tag
-     */
-    byte descrTag;
-
-    /**
-     * TEMI Descriptor
-     */
-    byte[] descrData;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl
deleted file mode 100644
index 0b237b4..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for a TLV filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTlvConfiguration {
-    byte packetType;
-
-    boolean isCompressedIpPacket;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
deleted file mode 100644
index 2e386e6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for a TS filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTsConfiguration {
-    char tpid;
-
-    TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl
deleted file mode 100644
index c52a749..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterScIndexMask;
-
-/**
- * Filter Event for TS Record Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTsRecordEvent {
-    char pid;
-
-    int tsIndexMask;
-
-    /**
-     * Indexes of record output
-     */
-    TunerFilterScIndexMask scIndexMask;
-
-    /**
-     * Byte number from beginning of the filter's output
-     */
-    long byteNumber;
-
-    /**
-     * If the current event contains extended information or not
-     */
-    boolean isExtended;
-
-    /**
-     * The Presentation Time Stamp(PTS) for the audio or video frame. It is based on 90KHz
-     * and has the same format as the PTS in ISO/IEC 13818-1.
-     */
-    long pts;
-
-    /**
-     * Specifies the address of the first macroblock in the slice defined in ITU-T Rec. H.264.
-     */
-    int firstMbInSlice;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl
deleted file mode 100644
index 74bf04e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Analog Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAnalogCapabilities {
-    /**
-     * Signal Type capability
-     */
-    int typeCap;
-
-    /**
-     * Standard Interchange Format (SIF) capability
-     */
-    int sifStandardCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
deleted file mode 100644
index 40cd8c9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Analog Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAnalogSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int signalType;
-
-    /**
-     * Standard Interchange Format (SIF) setting
-     */
-    int sifStandard;
-
-    /**
-     * Fields after isExtended are only valid when isExtended is true
-     */
-    boolean isExtended;
-
-    int aftFlag;
-}
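
Several settings parcelables here share one convention: fields declared after isExtended are only valid when isExtended is true. A small, hypothetical C++ sketch of that guard (a mirror struct, not the generated binding):

    // Illustrative only: shows the "fields after isExtended are only valid when
    // isExtended is true" convention used by the analog (and other) settings.
    #include <cstdio>

    struct AnalogSettingsView {
        int frequency;    // signal frequency in Hertz
        int signalType;
        int sifStandard;
        bool isExtended;  // guards the extension fields below
        int aftFlag;      // only meaningful when isExtended is true
    };

    void apply(const AnalogSettingsView& s) {
        std::printf("tune to %d Hz, signalType=%d\n", s.frequency, s.signalType);
        if (s.isExtended) {
            std::printf("AFT flag: %d\n", s.aftFlag);  // read extension fields only when present
        }
    }

    int main() {
        apply(AnalogSettingsView{473000000, 1, 0, /*isExtended=*/false, 0});
        apply(AnalogSettingsView{473000000, 1, 0, /*isExtended=*/true, 2});
        return 0;
    }
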
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl
deleted file mode 100644
index 6c9be77..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ATSC3 Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3Capabilities {
-    /**
-     * Bandwidth capability
-     */
-    int bandwidthCap;
-
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * TimeInterleaveMode capability
-     */
-    int timeInterleaveModeCap;
-
-    /**
-     * CodeRate capability
-     */
-    int codeRateCap;
-
-    /**
-     * FEC capability
-     */
-    int fecCap;
-
-    /**
-     * Demodulator Output Format capability
-     */
-    int demodOutputFormatCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl
deleted file mode 100644
index b29e1f7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3PlpSettings {
-    int plpId;
-
-    int modulation;
-
-    int interleaveMode;
-
-    int codeRate;
-
-    /**
-     * Forward Error Correction Type.
-     */
-    int fec;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl
deleted file mode 100644
index 32fb8c7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAtsc3PlpSettings;
-
-/**
- * Atsc3 Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3Settings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    /**
-     * Bandwidth of tuning band.
-     */
-    int bandwidth;
-
-    int demodOutputFormat;
-
-    TunerFrontendAtsc3PlpSettings[] plpSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl
deleted file mode 100644
index 2b6c2fc..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ATSC Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtscCapabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-}
\ No newline at end of file
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl
deleted file mode 100644
index c7a8c07..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtscSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int modulation;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl
deleted file mode 100644
index b880c60..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Cable (DVBC) Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendCableCapabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    long codeRateCap; // inner FEC will converge to codeRate
-
-    /**
-     * Annex capability
-     */
-    int annexCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
deleted file mode 100644
index b9bcf29..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Cable Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendCableSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int modulation;
-
-    /**
-     * Inner Forward Error Correction type as specified in ETSI EN 300 468 V1.15.1
-     * and ETSI EN 302 307-2 V1.1.1.
-     */
-    long innerFec;
-
-    /**
-     * Symbols per second
-     */
-    int symbolRate;
-
-    /**
-     * Outer Forward Error Correction (FEC) Type.
-     */
-    int outerFec;
-
-    int annex;
-
-    /**
-     * Spectral Inversion Type.
-     */
-    int spectralInversion;
-
-    /**
-     * Fields after isExtended are only valid when isExtended is true
-     */
-    boolean isExtended;
-
-    int interleaveMode;
-
-    int bandwidth;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl
deleted file mode 100644
index 19f31f1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAnalogCapabilities;
-import android.media.tv.tuner.TunerFrontendAtscCapabilities;
-import android.media.tv.tuner.TunerFrontendAtsc3Capabilities;
-import android.media.tv.tuner.TunerFrontendCableCapabilities;
-import android.media.tv.tuner.TunerFrontendDvbsCapabilities;
-import android.media.tv.tuner.TunerFrontendDvbtCapabilities;
-import android.media.tv.tuner.TunerFrontendIsdbsCapabilities;
-import android.media.tv.tuner.TunerFrontendIsdbs3Capabilities;
-import android.media.tv.tuner.TunerFrontendIsdbtCapabilities;
-
-/**
- * Frontend Capabilities interface.
- *
- * Use a group of vectors as a workaround for the Union structure, which is not fully
- * supported in AIDL currently.
- *
- * Clients may use FrontendInfo.type as the discriminator to check the corresponding vector.
- * If the vector is not null, it contains a valid value.
- *
- * {@hide}
- */
-union TunerFrontendCapabilities {
-    /**
-     * Analog Frontend Capabilities
-     */
-    TunerFrontendAnalogCapabilities analogCaps;
-
-    /**
-     * ATSC Frontend Capabilities
-     */
-    TunerFrontendAtscCapabilities atscCaps;
-
-    /**
-     * ATSC3 Frontend Capabilities
-     */
-    TunerFrontendAtsc3Capabilities atsc3Caps;
-
-    /**
-     * Cable Frontend Capabilities
-     */
-    TunerFrontendCableCapabilities cableCaps;
-
-    /**
-     * DVBS Frontend Capabilities
-     */
-    TunerFrontendDvbsCapabilities dvbsCaps;
-
-    /**
-     * DVBT Frontend Capabilities
-     */
-    TunerFrontendDvbtCapabilities dvbtCaps;
-
-    /**
-     * ISDB-S Frontend Capabilities
-     */
-    TunerFrontendIsdbsCapabilities isdbsCaps;
-
-    /**
-     * ISDB-S3 Frontend Capabilities
-     */
-    TunerFrontendIsdbs3Capabilities isdbs3Caps;
-
-    /**
-     * ISDB-T Frontend Capabilities
-     */
-    TunerFrontendIsdbtCapabilities isdbtCaps;
-}
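
As the comment above notes, clients use the frontend type as the discriminator when reading these capabilities. A sketch of that dispatch follows, with placeholder type constants and mirror structs rather than the real FrontendInfo/TunerFrontendCapabilities bindings.

    // Illustrative only: dispatch on a frontend type to read the matching
    // capabilities member. The enum and structs here are placeholders.
    #include <cstdio>
    #include <variant>

    enum class FrontendType { Analog, Atsc, Dvbt };  // illustrative subset

    struct AnalogCaps { int typeCap; int sifStandardCap; };
    struct AtscCaps   { int modulationCap; };
    struct DvbtCaps   { int transmissionModeCap; int bandwidthCap; };

    using FrontendCaps = std::variant<AnalogCaps, AtscCaps, DvbtCaps>;

    // Throws std::bad_variant_access if type and caps disagree, which mirrors
    // reading the wrong member of the union.
    void printCaps(FrontendType type, const FrontendCaps& caps) {
        switch (type) {
            case FrontendType::Analog:
                std::printf("analog typeCap=%d\n", std::get<AnalogCaps>(caps).typeCap);
                break;
            case FrontendType::Atsc:
                std::printf("atsc modulationCap=%d\n", std::get<AtscCaps>(caps).modulationCap);
                break;
            case FrontendType::Dvbt:
                std::printf("dvbt bandwidthCap=%d\n", std::get<DvbtCaps>(caps).bandwidthCap);
                break;
        }
    }

    int main() {
        printCaps(FrontendType::Atsc, FrontendCaps{AtscCaps{/*modulationCap=*/0x7}});
        return 0;
    }
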
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl
deleted file mode 100644
index e8e4933..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DTMB Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDtmbCapabilities {
-    int transmissionModeCap;
-
-    int bandwidthCap;
-
-    int modulationCap;
-
-    int codeRateCap;
-
-    int guardIntervalCap;
-
-    int interleaveModeCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl
deleted file mode 100644
index 45e7ff9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DTMB Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDtmbSettings {
-    int frequency;
-
-    int transmissionMode;
-
-    int bandwidth;
-
-    int modulation;
-
-    int codeRate;
-
-    int guardInterval;
-
-    int interleaveMode;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl
deleted file mode 100644
index 5e4322c..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DVBS Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsCapabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    long codeRateCap;  // inner FEC will converge to codeRate
-
-    /**
-     * Sub standards capability
-     */
-    int standard;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl
deleted file mode 100644
index 59b7de3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvbs Frontend CodeRate interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsCodeRate {
-    /**
-     * Inner Forward Error Correction type as specified in ETSI EN 300 468 V1.15.1
-     * and ETSI EN 302 307-2 V1.1.1.
-     */
-    long fec;
-
-    boolean isLinear;
-
-    /**
-     * true if short frames are enabled
-     */
-    boolean isShortFrames;
-
-    /**
-     * Number of bits per 1000 symbols. 0 to use the default.
-     */
-    int bitsPer1000Symbol;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
deleted file mode 100644
index ec3e4b9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendDvbsCodeRate;
-
-/**
- * Dvbs Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int modulation;
-
-    TunerFrontendDvbsCodeRate codeRate;
-
-    int symbolRate;
-
-    /**
-     * Roll off type.
-     */
-    int rolloff;
-
-    /**
-     * Pilot mode.
-     */
-    int pilot;
-
-    int inputStreamId;
-
-    int standard;
-
-    /**
-     * Vcm mode.
-     */
-    int vcm;
-
-    /**
-     * Fields after isExtended are only valid when isExtended is true
-     */
-    boolean isExtended;
-
-    int scanType;
-
-    boolean isDiseqcRxMessage;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl
deleted file mode 100644
index 73f16dd..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DVBT Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbtCapabilities {
-    /**
-     * Transmission Mode capability
-     */
-    int transmissionModeCap;
-
-    /**
-     * Bandwidth capability
-     */
-    int bandwidthCap;
-
-    /**
-     * Constellation capability
-     */
-    int constellationCap;
-
-    /**
-     * Code Rate capability
-     */
-    int codeRateCap;
-
-    /**
-     * Hierarchy Type capability
-     */
-    int hierarchyCap;
-
-    /**
-     * Guard Interval capability
-     */
-    int guardIntervalCap;
-
-    /**
-     * T2 Support capability
-     */
-    boolean isT2Supported;
-
-    /**
-     * Miso Support capability
-     */
-    boolean isMisoSupported;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
deleted file mode 100644
index 14c942a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvbt Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbtSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int transmissionMode;
-
-    int bandwidth;
-
-    int constellation;
-
-    int hierarchy;
-
-    /**
-     * Code Rate for High Priority level
-     */
-    int hpCodeRate;
-
-    /**
-     * Code Rate for Low Priority level
-     */
-    int lpCodeRate;
-
-    int guardInterval;
-
-    boolean isHighPriority;
-
-    int standard;
-
-    boolean isMiso;
-
-    /**
-     * Physical Layer Pipe (PLP) mode
-     */
-    int plpMode;
-
-    /**
-     * Physical Layer Pipe (PLP) Id
-     */
-    int plpId;
-
-    /**
-     * Physical Layer Pipe (PLP) Group Id
-     */
-    int plpGroupId;
-
-    /**
-     * Fields after isExtended are only valid when isExtended is true
-     */
-    boolean isExtended;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl
deleted file mode 100644
index 4bccd56..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendCapabilities;
-
-/**
- * FrontendInfo interface that carries tuner frontend information.
- *
- * <p>This is used to update the TunerResourceManager and pass Frontend
- * information from HAL to the client side.
- *
- * {@hide}
- */
-parcelable TunerFrontendInfo {
-    /**
-     * Frontend Handle
-     */
-    int handle;
-
-    /**
-     * Frontend Type
-     */
-    int type;
-
-    /**
-     * Minimum Frequency in Hertz
-     */
-    int minFrequency;
-
-    /**
-     * Maximum Frequency in Hertz
-     */
-    int maxFrequency;
-
-    /**
-     * Minimum symbols per second
-     */
-    int minSymbolRate;
-
-    /**
-     * Maximum symbols per second
-     */
-    int maxSymbolRate;
-
-    /**
-     * Range in Hertz
-     */
-    int acquireRange;
-
-    /**
-     * Frontends are assigned the same exclusiveGroupId if they cannot function
-     * at the same time, for instance because they share the same hardware module.
-     */
-    int exclusiveGroupId;
-
-    /**
-     * A list of supported status types that the client can query
-     */
-    int[] statusCaps;
-
-    /**
-     * Frontend Capabilities
-     */
-    TunerFrontendCapabilities caps;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl
deleted file mode 100644
index 84dd67a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-S3 Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbs3Capabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    int codeRateCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
deleted file mode 100644
index 9a11fd5..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbs3 Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbs3Settings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    char streamId;
-
-    int streamIdType;
-
-    int modulation;
-
-    int codeRate;
-
-    /**
-     * Symbols per second
-     */
-    int symbolRate;
-
-    int rolloff;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl
deleted file mode 100644
index 15dfdf7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-S Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbsCapabilities {
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    int codeRateCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
deleted file mode 100644
index dff9f4a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbs Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbsSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    char streamId;
-
-    int streamIdType;
-
-    int modulation;
-
-    int codeRate;
-
-    /**
-     * Symbols per second
-     */
-    int symbolRate;
-
-    int rolloff;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl
deleted file mode 100644
index c9295d8..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-T Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbtCapabilities {
-    /**
-     * ISDB-T Mode capability
-     */
-    int modeCap;
-
-    /**
-     * Bandwidth capability
-     */
-    int bandwidthCap;
-
-    /**
-     * Modulation capability
-     */
-    int modulationCap;
-
-    /**
-     * Code Rate capability
-     */
-    int codeRateCap;
-
-    /**
-     * Guard Interval capability
-     */
-    int guardIntervalCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl
deleted file mode 100644
index 191f3a6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbt Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbtSettings {
-    /**
-     * Signal frequency in Hertz
-     */
-    int frequency;
-
-    int modulation;
-
-    int bandwidth;
-
-    int mode;
-
-    int codeRate;
-
-    int guardInterval;
-
-    int serviceAreaId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
deleted file mode 100644
index 1b8fcbb..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Info.
- *
- * {@hide}
- */
-parcelable TunerFrontendScanAtsc3PlpInfo {
-    byte plpId;
-
-    boolean llsFlag;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl
deleted file mode 100644
index 9921ca1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendScanAtsc3PlpInfo;
-
-/**
- * Tuner Frontend Scan Message interface.
- *
- * {@hide}
- */
-union TunerFrontendScanMessage {
-    boolean isLocked;
-
-    boolean isEnd;
-
-    byte progressPercent;
-
-    int[] frequencies;
-
-    int[] symbolRates;
-
-    int hierarchy;
-
-    int analogType;
-
-    byte[] plpIds;
-
-    byte[] groupIds;
-
-    char[] inputStreamIds;
-
-    int std;
-
-    TunerFrontendScanAtsc3PlpInfo[] atsc3PlpInfos;
-
-    int modulation;
-
-    int annex;
-
-    boolean isHighPriority;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
deleted file mode 100644
index 70a5f3e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendUnionSettings;
-
-/**
- * Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendSettings {
-    TunerFrontendUnionSettings settings;
-
-    boolean isExtended;
-
-    int endFrequency;
-
-    int inversion;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
deleted file mode 100644
index 2b3c01b..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendStatusAtsc3PlpInfo;
-
-/**
- * Tuner Frontend Status interface.
- *
- * {@hide}
- */
-union TunerFrontendStatus {
-    /**
-     * Lock status for Demod in True/False.
-     */
-    boolean isDemodLocked;
-
-    /**
-     * SNR value measured by 0.001 dB.
-     */
-    int snr;
-
-    /**
-     * The number of error bits per 1 billion bits.
-     */
-    int ber;
-
-    /**
-     * The number of error packets per 1 billion packets.
-     */
-    int per;
-
-    /**
-     * The number of error bits per 1 billion bits before FEC.
-     */
-    int preBer;
-
-    /**
-     * Signal Quality in percent.
-     */
-    int signalQuality;
-
-    /**
-     * Signal Strength measured by 0.001 dBm.
-     */
-    int signalStrength;
-
-    /**
-     * Symbols per second
-     */
-    int symbolRate;
-
-    long innerFec;
-
-    /**
-     * Check frontend type to decide the hidl type value
-     */
-    int modulation;
-
-    int inversion;
-
-    int lnbVoltage;
-
-    byte plpId;
-
-    boolean isEWBS;
-
-    /**
-     * AGC value is normalized from 0 to 255.
-     */
-    byte agc;
-
-    boolean isLnaOn;
-
-    boolean[] isLayerError;
-
-    /**
-     * MER value measured by 0.001 dB
-     */
-    int mer;
-
-    /**
-     * Frequency difference in Hertz.
-     */
-    int freqOffset;
-
-    int hierarchy;
-
-    boolean isRfLocked;
-
-    /**
-     * A list of PLP status for tuned PLPs for ATSC3 frontend.
-     */
-    TunerFrontendStatusAtsc3PlpInfo[] plpInfo;
-
-    // 1.1 Extension Starting
-
-    /**
-     * Extended modulation status. Check frontend type to decide the hidl type value.
-     */
-    int[] modulations;
-
-    /**
-     * Extended bit error ratio status.
-     */
-    int[] bers;
-
-    /**
-     * Extended code rate status.
-     */
-    long[] codeRates;
-
-    /**
-     * Extended bandwidth status. Check frontend type to decide the hidl type value.
-     */
-    int bandwidth;
-
-    /**
-     * Extended guard interval status. Check frontend type to decide the hidl type value.
-     */
-    int interval;
-
-    /**
-     * Extended transmission mode status. Check frontend type to decide the hidl type value.
-     */
-    int transmissionMode;
-
-    /**
-     * Uncorrectable Error Counts of the frontend's Physical Layer Pipe (PLP)
-     * since the last tune operation.
-     */
-    int uec;
-
-    /**
-     * The current DVB-T2 system id status.
-     */
-    char systemId;
-
-    /**
-     * Frontend Interleaving Modes. Check frontend type to decide the hidl type value.
-     */
-    int[] interleaving;
-
-    /**
-     * Segments in ISDB-T Specification of all the channels.
-     */
-    byte[] isdbtSegment;
-
-    /**
-     * Transport Stream Data Rate in BPS of the current channel.
-     */
-    int[] tsDataRate;
-
-    /**
-     * Roll Off Type status of the frontend. Check frontend type to decide the hidl type value.
-     */
-    int rollOff;
-
-    /**
-     * If the frontend currently supports MISO or not.
-     */
-    boolean isMiso;
-
-    /**
-     * If the frontend code rate is linear or not.
-     */
-    boolean isLinear;
-
-    /**
-     * If short frames are enabled or not.
-     */
-    boolean isShortFrames;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl
deleted file mode 100644
index 4116c34..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Info in Frontend status.
- *
- * {@hide}
- */
-parcelable TunerFrontendStatusAtsc3PlpInfo {
-    /**
-     * PLP Id value.
-     */
-    byte plpId;
-
-    /**
-     * Demod Lock/Unlock status of this particular PLP.
-     */
-    boolean isLocked;
-
-    /**
-     * Uncorrectable Error Counts (UEC) of this particular PLP since last tune operation.
-     */
-    int uec;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl
deleted file mode 100644
index c362c2a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAnalogSettings;
-import android.media.tv.tuner.TunerFrontendAtscSettings;
-import android.media.tv.tuner.TunerFrontendAtsc3Settings;
-import android.media.tv.tuner.TunerFrontendCableSettings;
-import android.media.tv.tuner.TunerFrontendDtmbSettings;
-import android.media.tv.tuner.TunerFrontendDvbsSettings;
-import android.media.tv.tuner.TunerFrontendDvbtSettings;
-import android.media.tv.tuner.TunerFrontendIsdbsSettings;
-import android.media.tv.tuner.TunerFrontendIsdbs3Settings;
-import android.media.tv.tuner.TunerFrontendIsdbtSettings;
-
-/**
- * Frontend Settings Union interface.
- *
- * {@hide}
- */
-union TunerFrontendUnionSettings {
-    TunerFrontendAnalogSettings analog;
-
-    TunerFrontendAtscSettings atsc;
-
-    TunerFrontendAtsc3Settings atsc3;
-
-    TunerFrontendCableSettings cable;
-
-    TunerFrontendDvbsSettings dvbs;
-
-    TunerFrontendDvbtSettings dvbt;
-
-    TunerFrontendIsdbsSettings isdbs;
-
-    TunerFrontendIsdbs3Settings isdbs3;
-
-    TunerFrontendIsdbtSettings isdbt;
-
-    TunerFrontendDtmbSettings dtmb;
-}
diff --git a/services/tuner/hidl/TunerHidlDemux.cpp b/services/tuner/hidl/TunerHidlDemux.cpp
new file mode 100644
index 0000000..a8151d2
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDemux.cpp
@@ -0,0 +1,278 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDemux"
+
+#include "TunerHidlDemux.h"
+
+#include "TunerHidlDvr.h"
+#include "TunerHidlFilter.h"
+#include "TunerHidlTimeFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSubType;
+
+using HidlDemuxAlpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterType;
+using HidlDemuxFilterMainType = ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
+using HidlDemuxFilterType = ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
+using HidlDemuxIpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxIpFilterType;
+using HidlDemuxMmtpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
+using HidlDemuxTlvFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterType;
+using HidlDemuxTsFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
+using HidlDvrType = ::android::hardware::tv::tuner::V1_0::DvrType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDemux::TunerHidlDemux(sp<IDemux> demux, int id) {
+    mDemux = demux;
+    mDemuxId = id;
+}
+
+TunerHidlDemux::~TunerHidlDemux() {
+    mDemux = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::setFrontendDataSource(
+        const shared_ptr<ITunerFrontend>& in_frontend) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
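+    // Resolve the frontend's numeric id from the AIDL wrapper and hand it to the HAL demux.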
+    int frontendId;
+    in_frontend->getFrontendId(&frontendId);
+    HidlResult res = mDemux->setFrontendDataSource(frontendId);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::setFrontendDataSourceById(int frontendId) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res = mDemux->setFrontendDataSource(frontendId);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openFilter(const DemuxFilterType& in_type,
+                                                int32_t in_bufferSize,
+                                                const shared_ptr<ITunerFilterCallback>& in_cb,
+                                                shared_ptr<ITunerFilter>* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlDemuxFilterMainType mainType = static_cast<HidlDemuxFilterMainType>(in_type.mainType);
+    HidlDemuxFilterType filterType{
+            .mainType = mainType,
+    };
+
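+    // Map the AIDL filter sub-type onto the matching HIDL 1.0 sub-type for the
+    // requested main type before asking the HAL to open the filter.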
+    switch (mainType) {
+    case HidlDemuxFilterMainType::TS:
+        filterType.subType.tsFilterType(static_cast<HidlDemuxTsFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::tsFilterType>()));
+        break;
+    case HidlDemuxFilterMainType::MMTP:
+        filterType.subType.mmtpFilterType(static_cast<HidlDemuxMmtpFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::mmtpFilterType>()));
+        break;
+    case HidlDemuxFilterMainType::IP:
+        filterType.subType.ipFilterType(static_cast<HidlDemuxIpFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::ipFilterType>()));
+        break;
+    case HidlDemuxFilterMainType::TLV:
+        filterType.subType.tlvFilterType(static_cast<HidlDemuxTlvFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::tlvFilterType>()));
+        break;
+    case HidlDemuxFilterMainType::ALP:
+        filterType.subType.alpFilterType(static_cast<HidlDemuxAlpFilterType>(
+                in_type.subType.get<DemuxFilterSubType::Tag::alpFilterType>()));
+        break;
+    }
+    HidlResult status;
+    sp<HidlIFilter> filterSp;
+    sp<TunerHidlFilter::FilterCallback> filterCb = new TunerHidlFilter::FilterCallback(in_cb);
+    sp<::android::hardware::tv::tuner::V1_0::IFilterCallback> cbSp = filterCb;
+    mDemux->openFilter(filterType, static_cast<uint32_t>(in_bufferSize), cbSp,
+                       [&](HidlResult r, const sp<HidlIFilter>& filter) {
+                           filterSp = filter;
+                           status = r;
+                       });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlFilter>(filterSp, filterCb, in_type);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    sp<HidlITimeFilter> filterSp;
+    mDemux->openTimeFilter([&](HidlResult r, const sp<HidlITimeFilter>& filter) {
+        filterSp = filter;
+        status = r;
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlTimeFilter>(filterSp);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter,
+                                                   int32_t* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    uint32_t avSyncHwId;
+    HidlResult res;
+    sp<HidlIFilter> halFilter = static_cast<TunerHidlFilter*>(tunerFilter.get())->getHalFilter();
+    mDemux->getAvSyncHwId(halFilter, [&](HidlResult r, uint32_t id) {
+        res = r;
+        avSyncHwId = id;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    *_aidl_return = (int)avSyncHwId;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::getAvSyncTime(int32_t avSyncHwId, int64_t* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    uint64_t time;
+    HidlResult res;
+    mDemux->getAvSyncTime(static_cast<uint32_t>(avSyncHwId), [&](HidlResult r, uint64_t ts) {
+        res = r;
+        time = ts;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    *_aidl_return = (int64_t)time;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+                                             const shared_ptr<ITunerDvrCallback>& in_cb,
+                                             shared_ptr<ITunerDvr>* _aidl_return) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res;
+    sp<HidlIDvrCallback> callback = new TunerHidlDvr::DvrCallback(in_cb);
+    sp<HidlIDvr> hidlDvr;
+    mDemux->openDvr(static_cast<HidlDvrType>(in_dvbType), in_bufferSize, callback,
+                    [&](HidlResult r, const sp<HidlIDvr>& dvr) {
+                        hidlDvr = dvr;
+                        res = r;
+                    });
+    if (res != HidlResult::SUCCESS) {
+        *_aidl_return = nullptr;
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDvr>(hidlDvr, in_dvbType);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::connectCiCam(int32_t ciCamId) {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res = mDemux->connectCiCam(static_cast<uint32_t>(ciCamId));
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::disconnectCiCam() {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
+    HidlResult res = mDemux->disconnectCiCam();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::close() {
+    if (mDemux == nullptr) {
+        ALOGE("IDemux is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(HidlResult::UNAVAILABLE));
+    }
+
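+    // Close the HAL demux and drop the reference so this wrapper cannot be reused.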
+    HidlResult res = mDemux->close();
+    mDemux = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlDemux.h b/services/tuner/hidl/TunerHidlDemux.h
new file mode 100644
index 0000000..d535da6
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDemux.h
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDEMUX_H
+#define ANDROID_MEDIA_TUNERHIDLDEMUX_H
+
+#include <aidl/android/media/tv/tuner/BnTunerDemux.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::tv::tuner::V1_0::IDemux;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlIDemux = ::android::hardware::tv::tuner::V1_0::IDemux;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
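+// AIDL ITunerDemux implementation backed by a HIDL tv.tuner@1.0 IDemux instance.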
+class TunerHidlDemux : public BnTunerDemux {
+public:
+    TunerHidlDemux(sp<HidlIDemux> demux, int demuxId);
+    virtual ~TunerHidlDemux();
+
+    ::ndk::ScopedAStatus setFrontendDataSource(
+            const shared_ptr<ITunerFrontend>& in_frontend) override;
+    ::ndk::ScopedAStatus setFrontendDataSourceById(int frontendId) override;
+    ::ndk::ScopedAStatus openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+                                    const shared_ptr<ITunerFilterCallback>& in_cb,
+                                    shared_ptr<ITunerFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus getAvSyncHwId(const shared_ptr<ITunerFilter>& in_tunerFilter,
+                                       int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getAvSyncTime(int32_t in_avSyncHwId, int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+                                 const shared_ptr<ITunerDvrCallback>& in_cb,
+                                 shared_ptr<ITunerDvr>* _aidl_return) override;
+    ::ndk::ScopedAStatus connectCiCam(int32_t in_ciCamId) override;
+    ::ndk::ScopedAStatus disconnectCiCam() override;
+    ::ndk::ScopedAStatus close() override;
+
+    int getId() { return mDemuxId; }
+
+private:
+    sp<HidlIDemux> mDemux;
+    int mDemuxId;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLDEMUX_H
diff --git a/services/tuner/hidl/TunerHidlDescrambler.cpp b/services/tuner/hidl/TunerHidlDescrambler.cpp
new file mode 100644
index 0000000..dd8cd9c
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDescrambler.cpp
@@ -0,0 +1,149 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDescrambler"
+
+#include "TunerHidlDescrambler.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerHidlDemux.h"
+#include "TunerHidlFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDescrambler::TunerHidlDescrambler(sp<HidlIDescrambler> descrambler) {
+    mDescrambler = descrambler;
+}
+
+TunerHidlDescrambler::~TunerHidlDescrambler() {
+    mDescrambler = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::setDemuxSource(
+        const shared_ptr<ITunerDemux>& in_tunerDemux) {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDescrambler->setDemuxSource(
+            static_cast<TunerHidlDemux*>(in_tunerDemux.get())->getId());
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::setKeyToken(const vector<uint8_t>& in_keyToken) {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDescrambler->setKeyToken(in_keyToken);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::addPid(
+        const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    sp<HidlIFilter> halFilter =
+            (in_optionalSourceFilter == nullptr)
+                    ? nullptr
+                    : static_cast<TunerHidlFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+    HidlResult res = mDescrambler->addPid(getHidlDemuxPid(in_pid), halFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::removePid(
+        const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    sp<HidlIFilter> halFilter =
+            (in_optionalSourceFilter == nullptr)
+                    ? nullptr
+                    : static_cast<TunerHidlFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+    HidlResult res = mDescrambler->removePid(getHidlDemuxPid(in_pid), halFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::close() {
+    if (mDescrambler == nullptr) {
+        ALOGE("IDescrambler is not initialized.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDescrambler->close();
+    mDescrambler = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+HidlDemuxPid TunerHidlDescrambler::getHidlDemuxPid(const DemuxPid& pid) {
+    HidlDemuxPid hidlPid;
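+    // DemuxPid is a union of a TS PID and an MMTP PID; copy whichever member is set
+    // into the corresponding HIDL safe_union field.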
+    switch (pid.getTag()) {
+    case DemuxPid::tPid: {
+        hidlPid.tPid((uint16_t)pid.get<DemuxPid::Tag::tPid>());
+        break;
+    }
+    case DemuxPid::mmtpPid: {
+        hidlPid.mmtpPid((uint16_t)pid.get<DemuxPid::Tag::mmtpPid>());
+        break;
+    }
+    }
+    return hidlPid;
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlDescrambler.h b/services/tuner/hidl/TunerHidlDescrambler.h
new file mode 100644
index 0000000..9494968
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDescrambler.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
+#define ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
+
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
+#include <aidl/android/media/tv/tuner/BnTunerDescrambler.h>
+#include <android/hardware/tv/tuner/1.0/IDescrambler.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+using HidlDemuxPid = ::android::hardware::tv::tuner::V1_0::DemuxPid;
+using HidlIDescrambler = ::android::hardware::tv::tuner::V1_0::IDescrambler;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
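+// AIDL ITunerDescrambler implementation backed by a HIDL tv.tuner@1.0 IDescrambler instance.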
+class TunerHidlDescrambler : public BnTunerDescrambler {
+public:
+    TunerHidlDescrambler(sp<HidlIDescrambler> descrambler);
+    virtual ~TunerHidlDescrambler();
+
+    ::ndk::ScopedAStatus setDemuxSource(const std::shared_ptr<ITunerDemux>& in_tunerDemux) override;
+    ::ndk::ScopedAStatus setKeyToken(const std::vector<uint8_t>& in_keyToken) override;
+    ::ndk::ScopedAStatus addPid(
+            const DemuxPid& in_pid,
+            const std::shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+    ::ndk::ScopedAStatus removePid(
+            const DemuxPid& in_pid,
+            const std::shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+    ::ndk::ScopedAStatus close() override;
+
+private:
+    HidlDemuxPid getHidlDemuxPid(const DemuxPid& pid);
+
+    sp<HidlIDescrambler> mDescrambler;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
diff --git a/services/tuner/hidl/TunerHidlDvr.cpp b/services/tuner/hidl/TunerHidlDvr.cpp
new file mode 100644
index 0000000..1a619d5
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDvr.cpp
@@ -0,0 +1,257 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDvr"
+
+#include "TunerHidlDvr.h"
+
+#include <aidl/android/hardware/tv/tuner/DataFormat.h>
+#include <aidl/android/hardware/tv/tuner/PlaybackStatus.h>
+#include <aidl/android/hardware/tv/tuner/RecordStatus.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <fmq/ConvertMQDescriptors.h>
+
+using ::aidl::android::hardware::tv::tuner::DataFormat;
+using ::aidl::android::hardware::tv::tuner::PlaybackStatus;
+using ::aidl::android::hardware::tv::tuner::RecordStatus;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::android::unsafeHidlToAidlMQDescriptor;
+using ::android::hardware::MessageQueue;
+using ::android::hardware::MQDescriptorSync;
+
+using HidlDataFormat = ::android::hardware::tv::tuner::V1_0::DataFormat;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using MQDesc = MQDescriptorSync<uint8_t>;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDvr::TunerHidlDvr(sp<HidlIDvr> dvr, DvrType type) {
+    mDvr = dvr;
+    mType = type;
+}
+
+TunerHidlDvr::~TunerHidlDvr() {
+    mDvr = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    MQDesc dvrMQDesc;
+    HidlResult res;
+    mDvr->getQueueDesc([&](HidlResult r, const MQDesc& desc) {
+        dvrMQDesc = desc;
+        res = r;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
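+    // Convert the HIDL FMQ descriptor returned by the HAL into its AIDL representation.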
+    AidlMQDesc aidlMQDesc;
+    unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(dvrMQDesc, &aidlMQDesc);
+    *_aidl_return = move(aidlMQDesc);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::configure(const DvrSettings& in_settings) {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->configure(getHidlDvrSettings(in_settings));
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::attachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (in_filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    sp<HidlIFilter> hidlFilter = static_cast<TunerHidlFilter*>(in_filter.get())->getHalFilter();
+    if (hidlFilter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    HidlResult res = mDvr->attachFilter(hidlFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::detachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (in_filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    sp<HidlIFilter> halFilter = (static_cast<TunerHidlFilter*>(in_filter.get()))->getHalFilter();
+    if (halFilter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    HidlResult res = mDvr->detachFilter(halFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::start() {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->start();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::stop() {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->stop();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::flush() {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->flush();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::close() {
+    if (mDvr == nullptr) {
+        ALOGE("IDvr is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mDvr->close();
+    mDvr = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+HidlDvrSettings TunerHidlDvr::getHidlDvrSettings(const DvrSettings& settings) {
+    HidlDvrSettings s;
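+    // mType records whether this DVR was opened for playback or record; convert the
+    // matching branch of the AIDL DvrSettings union into its HIDL 1.0 counterpart.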
+    switch (mType) {
+    case DvrType::PLAYBACK: {
+        s.playback({
+                .statusMask =
+                        static_cast<uint8_t>(settings.get<DvrSettings::playback>().statusMask),
+                .lowThreshold =
+                        static_cast<uint32_t>(settings.get<DvrSettings::playback>().lowThreshold),
+                .highThreshold =
+                        static_cast<uint32_t>(settings.get<DvrSettings::playback>().highThreshold),
+                .dataFormat = static_cast<HidlDataFormat>(
+                        settings.get<DvrSettings::playback>().dataFormat),
+                .packetSize =
+                        static_cast<uint8_t>(settings.get<DvrSettings::playback>().packetSize),
+        });
+        return s;
+    }
+    case DvrType::RECORD: {
+        s.record({
+                .statusMask = static_cast<uint8_t>(settings.get<DvrSettings::record>().statusMask),
+                .lowThreshold =
+                        static_cast<uint32_t>(settings.get<DvrSettings::record>().lowThreshold),
+                .highThreshold =
+                        static_cast<uint32_t>(settings.get<DvrSettings::record>().highThreshold),
+                .dataFormat =
+                        static_cast<HidlDataFormat>(settings.get<DvrSettings::record>().dataFormat),
+                .packetSize = static_cast<uint8_t>(settings.get<DvrSettings::record>().packetSize),
+        });
+        return s;
+    }
+    default:
+        break;
+    }
+    return s;
+}
+
+/////////////// IDvrCallback ///////////////////////
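+// Forward HIDL DVR status callbacks to the AIDL client callback.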
+Return<void> TunerHidlDvr::DvrCallback::onRecordStatus(const HidlRecordStatus status) {
+    if (mTunerDvrCallback != nullptr) {
+        mTunerDvrCallback->onRecordStatus(static_cast<RecordStatus>(status));
+    }
+    return Void();
+}
+
+Return<void> TunerHidlDvr::DvrCallback::onPlaybackStatus(const HidlPlaybackStatus status) {
+    if (mTunerDvrCallback != nullptr) {
+        mTunerDvrCallback->onPlaybackStatus(static_cast<PlaybackStatus>(status));
+    }
+    return Void();
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlDvr.h b/services/tuner/hidl/TunerHidlDvr.h
new file mode 100644
index 0000000..a280ff7
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDvr.h
@@ -0,0 +1,91 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDVR_H
+#define ANDROID_MEDIA_TUNERHIDLDVR_H
+
+#include <aidl/android/hardware/tv/tuner/DvrSettings.h>
+#include <aidl/android/hardware/tv/tuner/DvrType.h>
+#include <aidl/android/media/tv/tuner/BnTunerDvr.h>
+#include <aidl/android/media/tv/tuner/ITunerDvrCallback.h>
+#include <android/hardware/tv/tuner/1.0/IDvr.h>
+#include <android/hardware/tv/tuner/1.0/IDvrCallback.h>
+
+#include "TunerHidlFilter.h"
+
+using ::aidl::android::hardware::common::fmq::MQDescriptor;
+using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::DvrSettings;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlDvrSettings = ::android::hardware::tv::tuner::V1_0::DvrSettings;
+using HidlIDvr = ::android::hardware::tv::tuner::V1_0::IDvr;
+using HidlIDvrCallback = ::android::hardware::tv::tuner::V1_0::IDvrCallback;
+using HidlPlaybackStatus = ::android::hardware::tv::tuner::V1_0::PlaybackStatus;
+using HidlRecordStatus = ::android::hardware::tv::tuner::V1_0::RecordStatus;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
+
+class TunerHidlDvr : public BnTunerDvr {
+public:
+    TunerHidlDvr(sp<HidlIDvr> dvr, DvrType type);
+    ~TunerHidlDvr();
+
+    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus configure(const DvrSettings& in_settings) override;
+    ::ndk::ScopedAStatus attachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus detachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus start() override;
+    ::ndk::ScopedAStatus stop() override;
+    ::ndk::ScopedAStatus flush() override;
+    ::ndk::ScopedAStatus close() override;
+
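+    // Relays record/playback status callbacks from the HIDL DVR to the
+    // AIDL ITunerDvrCallback client.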
+    struct DvrCallback : public HidlIDvrCallback {
+        DvrCallback(const shared_ptr<ITunerDvrCallback> tunerDvrCallback)
+              : mTunerDvrCallback(tunerDvrCallback) {}
+
+        virtual Return<void> onRecordStatus(const HidlRecordStatus status);
+        virtual Return<void> onPlaybackStatus(const HidlPlaybackStatus status);
+
+    private:
+        shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
+    };
+
+private:
+    HidlDvrSettings getHidlDvrSettings(const DvrSettings& settings);
+
+    sp<HidlIDvr> mDvr;
+    DvrType mType;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLDVR_H
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
new file mode 100644
index 0000000..a5bbf39
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -0,0 +1,1275 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlFilter"
+
+#include "TunerHidlFilter.h"
+
+#include <aidl/android/hardware/tv/tuner/Constant.h>
+#include <aidl/android/hardware/tv/tuner/DemuxScIndex.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <aidlcommonsupport/NativeHandle.h>
+#include <binder/IPCThreadState.h>
+#include <fmq/ConvertMQDescriptors.h>
+
+#include "TunerHelper.h"
+#include "TunerHidlService.h"
+
+using ::aidl::android::hardware::tv::tuner::AudioExtraMetaData;
+using ::aidl::android::hardware::tv::tuner::AudioStreamType;
+using ::aidl::android::hardware::tv::tuner::Constant;
+using ::aidl::android::hardware::tv::tuner::DemuxAlpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxAlpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterDownloadEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterIpPayloadEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMainType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMediaEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMediaEventExtraMetaData;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMmtpRecordEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMonitorEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterPesEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterScIndexMask;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionBits;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettingsCondition;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettingsConditionTableInfo;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSubType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterTemiEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterTsRecordEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddress;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddressIpAddress;
+using ::aidl::android::hardware::tv::tuner::DemuxIpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxIpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterType;
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::aidl::android::hardware::tv::tuner::DemuxScIndex;
+using ::aidl::android::hardware::tv::tuner::DemuxTlvFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTlvFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterType;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::aidl::android::hardware::tv::tuner::ScramblingStatus;
+using ::android::dupToAidl;
+using ::android::IPCThreadState;
+using ::android::makeFromAidl;
+using ::android::unsafeHidlToAidlMQDescriptor;
+using ::android::hardware::hidl_handle;
+
+using HidlDemuxAlpLengthType = ::android::hardware::tv::tuner::V1_0::DemuxAlpLengthType;
+using HidlDemuxFilterMainType = ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
+using HidlDemuxIpAddress = ::android::hardware::tv::tuner::V1_0::DemuxIpAddress;
+using HidlDemuxMmtpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
+using HidlDemuxMmtpPid = ::android::hardware::tv::tuner::V1_0::DemuxMmtpPid;
+using HidlDemuxRecordScIndexType = ::android::hardware::tv::tuner::V1_0::DemuxRecordScIndexType;
+using HidlDemuxStreamId = ::android::hardware::tv::tuner::V1_0::DemuxStreamId;
+using HidlDemuxTsFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlAudioStreamType = ::android::hardware::tv::tuner::V1_1::AudioStreamType;
+using HidlConstant = ::android::hardware::tv::tuner::V1_1::Constant;
+using HidlVideoStreamType = ::android::hardware::tv::tuner::V1_1::VideoStreamType;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlFilter::TunerHidlFilter(sp<HidlIFilter> filter, sp<FilterCallback> cb,
+                                 DemuxFilterType type)
+      : mFilter(filter),
+        mType(type),
+        mStarted(false),
+        mShared(false),
+        mClientPid(-1),
+        mFilterCallback(cb) {
+    mFilter_1_1 = ::android::hardware::tv::tuner::V1_1::IFilter::castFrom(filter);
+}
+
+TunerHidlFilter::~TunerHidlFilter() {
+    Mutex::Autolock _l(mLock);
+    mFilter = nullptr;
+    mFilter_1_1 = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    MQDesc filterMQDesc;
+    HidlResult res;
+    mFilter->getQueueDesc([&](HidlResult r, const MQDesc& desc) {
+        filterMQDesc = desc;
+        res = r;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
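+    // Re-describe the HIDL uint8_t queue as an AIDL int8_t queue; the underlying FMQ is
+    // shared, not copied.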
+    AidlMQDesc aidlMQDesc;
+    unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(filterMQDesc, &aidlMQDesc);
+    *_aidl_return = move(aidlMQDesc);
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getId(int32_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res;
+    mFilter->getId([&](HidlResult r, uint32_t filterId) {
+        res = r;
+        mId = filterId;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    *_aidl_return = mId;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getId64Bit(int64_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res;
+    mFilter_1_1->getId64Bit([&](HidlResult r, uint64_t filterId) {
+        res = r;
+        mId64Bit = filterId;
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    *_aidl_return = mId64Bit;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configure(const DemuxFilterSettings& in_settings) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
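+    // Translate the AIDL settings union into the equivalent HIDL 1.0 representation before
+    // configuring the HAL filter.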
+    HidlDemuxFilterSettings settings;
+    switch (in_settings.getTag()) {
+    case DemuxFilterSettings::ts: {
+        getHidlTsSettings(in_settings, settings);
+        break;
+    }
+    case DemuxFilterSettings::mmtp: {
+        getHidlMmtpSettings(in_settings, settings);
+        break;
+    }
+    case DemuxFilterSettings::ip: {
+        getHidlIpSettings(in_settings, settings);
+        break;
+    }
+    case DemuxFilterSettings::tlv: {
+        getHidlTlvSettings(in_settings, settings);
+        break;
+    }
+    case DemuxFilterSettings::alp: {
+        getHidlAlpSettings(in_settings, settings);
+        break;
+    }
+    }
+
+    HidlResult res = mFilter->configure(settings);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureMonitorEvent(int32_t monitorEventType) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res = mFilter_1_1->configureMonitorEvent(monitorEventType);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureIpFilterContextId(int32_t cid) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res = mFilter_1_1->configureIpCid(cid);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureAvStreamType(const AvStreamType& in_avStreamType) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlAvStreamType type;
+    if (!getHidlAvStreamType(in_avStreamType, type)) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res = mFilter_1_1->configureAvStreamType(type);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (filter == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    sp<HidlIFilter> hidlFilter = static_cast<TunerHidlFilter*>(filter.get())->getHalFilter();
+    HidlResult res = mFilter->setDataSource(hidlFilter);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getAvSharedHandle(NativeHandle* out_avMemory,
+                                                        int64_t* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter_1_1 == nullptr) {
+        ALOGE("IFilter_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res;
+    mFilter_1_1->getAvSharedHandle([&](HidlResult r, hidl_handle avMemory, uint64_t avMemSize) {
+        res = r;
+        if (res == HidlResult::SUCCESS) {
+            *out_avMemory = dupToAidl(avMemory);
+            *_aidl_return = static_cast<int64_t>(avMemSize);
+        }
+    });
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::releaseAvHandle(const NativeHandle& in_handle,
+                                                      int64_t in_avDataId) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        ALOGD("%s is called on a shared filter", __FUNCTION__);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    HidlResult res = mFilter->releaseAvHandle(hidl_handle(makeFromAidl(in_handle)), in_avDataId);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    // Make an extra HAL call so that binder gets a chance to close the transported FD.
+    // This is a workaround for a problem in Binder.
+    // TODO: [b/192048842] Remove or change this code once that problem is fixed.
+    mFilter->getId([&](HidlResult /* r */, uint32_t /* filterId*/){});
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::start() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    HidlResult res = mFilter->start();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    mStarted = true;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::stop() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    HidlResult res = mFilter->stop();
+    mStarted = false;
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::flush() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            ALOGD("%s is called in wrong process", __FUNCTION__);
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::INVALID_STATE));
+        }
+    }
+
+    HidlResult res = mFilter->flush();
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::close() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
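+    // A shared filter closed by its owner is fully torn down: the shared client is notified
+    // that it is no longer accessible and the filter is removed from the shared-filter
+    // registry. A close from the shared client only detaches its callback.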
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            TunerHidlService::getTunerService()->removeSharedFilter(this->ref<TunerHidlFilter>());
+        } else {
+            // Called from the process the filter is shared with; do not actually close the
+            // underlying filter.
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            mStarted = false;
+            return ::ndk::ScopedAStatus::ok();
+        }
+    }
+
+    HidlResult res = mFilter->close();
+    mFilter = nullptr;
+    mFilter_1_1 = nullptr;
+    mStarted = false;
+    mShared = false;
+    mClientPid = -1;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::acquireSharedFilterToken(string* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared || mStarted) {
+        ALOGD("create SharedFilter in wrong state");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
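+    // Record the owning pid and register this filter with the tuner service, which returns a
+    // token that another process can use to access the shared filter.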
+    IPCThreadState* ipc = IPCThreadState::self();
+    mClientPid = ipc->getCallingPid();
+    string token =
+            TunerHidlService::getTunerService()->addFilterToShared(this->ref<TunerHidlFilter>());
+    _aidl_return->assign(token);
+    mShared = true;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::freeSharedFilterToken(const string& /* in_filterToken */) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (!mShared) {
+        // The filter is not shared or the shared filter has been closed.
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+        mFilterCallback->detachSharedFilterCallback();
+    }
+
+    TunerHidlService::getTunerService()->removeSharedFilter(this->ref<TunerHidlFilter>());
+    mShared = false;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getFilterType(DemuxFilterType* _aidl_return) {
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    *_aidl_return = mType;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::setDelayHint(const FilterDelayHint&) {
+    // setDelayHint is not supported in HIDL HAL
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+bool TunerHidlFilter::isSharedFilterAllowed(int callingPid) {
+    return mShared && mClientPid != callingPid;
+}
+
+void TunerHidlFilter::attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb) {
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->attachSharedFilterCallback(in_cb);
+    }
+}
+
+sp<HidlIFilter> TunerHidlFilter::getHalFilter() {
+    return mFilter;
+}
+
+bool TunerHidlFilter::getHidlAvStreamType(const AvStreamType avStreamType, HidlAvStreamType& type) {
+    if (isAudioFilter()) {
+        AudioStreamType audio = avStreamType.get<AvStreamType::audio>();
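+        // DRA is the last audio stream type the HIDL 1.1 HAL understands; newer AIDL values
+        // cannot be represented.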
+        if (static_cast<int32_t>(audio) > static_cast<int32_t>(HidlAudioStreamType::DRA)) {
+            return false;
+        }
+        type.audio(static_cast<HidlAudioStreamType>(audio));
+        return true;
+    }
+
+    if (isVideoFilter()) {
+        type.video(static_cast<HidlVideoStreamType>(avStreamType.get<AvStreamType::video>()));
+        return true;
+    }
+
+    return false;
+}
+
+bool TunerHidlFilter::isAudioFilter() {
+    return (mType.mainType == DemuxFilterMainType::TS &&
+            mType.subType.get<DemuxFilterSubType::tsFilterType>() == DemuxTsFilterType::AUDIO) ||
+           (mType.mainType == DemuxFilterMainType::MMTP &&
+            mType.subType.get<DemuxFilterSubType::mmtpFilterType>() == DemuxMmtpFilterType::AUDIO);
+}
+
+bool TunerHidlFilter::isVideoFilter() {
+    return (mType.mainType == DemuxFilterMainType::TS &&
+            mType.subType.get<DemuxFilterSubType::tsFilterType>() == DemuxTsFilterType::VIDEO) ||
+           (mType.mainType == DemuxFilterMainType::MMTP &&
+            mType.subType.get<DemuxFilterSubType::mmtpFilterType>() == DemuxMmtpFilterType::VIDEO);
+}
+
+void TunerHidlFilter::getHidlTsSettings(const DemuxFilterSettings& settings,
+                                        HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxTsFilterSettings& tsConf = settings.get<DemuxFilterSettings::ts>();
+    HidlDemuxTsFilterSettings ts{
+            .tpid = static_cast<uint16_t>(tsConf.tpid),
+    };
+
+    switch (tsConf.filterSettings.getTag()) {
+    case DemuxTsFilterSettingsFilterSettings::av: {
+        ts.filterSettings.av(getHidlAvSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::av>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::section: {
+        ts.filterSettings.section(getHidlSectionSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::pesData: {
+        ts.filterSettings.pesData(getHidlPesDataSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::pesData>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::record: {
+        ts.filterSettings.record(getHidlRecordSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::record>()));
+        break;
+    }
+    default: {
+        ts.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.ts(ts);
+}
+
+void TunerHidlFilter::getHidlMmtpSettings(const DemuxFilterSettings& settings,
+                                          HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxMmtpFilterSettings& mmtpConf = settings.get<DemuxFilterSettings::mmtp>();
+    HidlDemuxMmtpFilterSettings mmtp{
+            .mmtpPid = static_cast<HidlDemuxMmtpPid>(mmtpConf.mmtpPid),
+    };
+
+    switch (mmtpConf.filterSettings.getTag()) {
+    case DemuxMmtpFilterSettingsFilterSettings::av: {
+        mmtp.filterSettings.av(getHidlAvSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::av>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::section: {
+        mmtp.filterSettings.section(getHidlSectionSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::pesData: {
+        mmtp.filterSettings.pesData(getHidlPesDataSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::pesData>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::record: {
+        mmtp.filterSettings.record(getHidlRecordSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::record>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::download: {
+        mmtp.filterSettings.download(getHidlDownloadSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::download>()));
+        break;
+    }
+    default: {
+        mmtp.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.mmtp(mmtp);
+}
+
+void TunerHidlFilter::getHidlIpSettings(const DemuxFilterSettings& settings,
+                                        HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxIpFilterSettings& ipConf = settings.get<DemuxFilterSettings::ip>();
+    HidlDemuxIpAddress ipAddr{
+            .srcPort = static_cast<uint16_t>(ipConf.ipAddr.srcPort),
+            .dstPort = static_cast<uint16_t>(ipConf.ipAddr.dstPort),
+    };
+
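+    // Each address is a tagged union; copy the v6 or v4 bytes into the matching HIDL member
+    // based on the AIDL tag.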
+    ipConf.ipAddr.srcIpAddress.getTag() == DemuxIpAddressIpAddress::v6
+            ? ipAddr.srcIpAddress.v6(getIpV6Address(ipConf.ipAddr.srcIpAddress))
+            : ipAddr.srcIpAddress.v4(getIpV4Address(ipConf.ipAddr.srcIpAddress));
+    ipConf.ipAddr.dstIpAddress.getTag() == DemuxIpAddressIpAddress::v6
+            ? ipAddr.dstIpAddress.v6(getIpV6Address(ipConf.ipAddr.dstIpAddress))
+            : ipAddr.dstIpAddress.v4(getIpV4Address(ipConf.ipAddr.dstIpAddress));
+
+    HidlDemuxIpFilterSettings ip;
+    ip.ipAddr = ipAddr;
+
+    switch (ipConf.filterSettings.getTag()) {
+    case DemuxIpFilterSettingsFilterSettings::section: {
+        ip.filterSettings.section(getHidlSectionSettings(
+                ipConf.filterSettings.get<DemuxIpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxIpFilterSettingsFilterSettings::bPassthrough: {
+        ip.filterSettings.bPassthrough(
+                ipConf.filterSettings.get<DemuxIpFilterSettingsFilterSettings::bPassthrough>());
+        break;
+    }
+    default: {
+        ip.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.ip(ip);
+}
+
+hidl_array<uint8_t, IP_V6_LENGTH> TunerHidlFilter::getIpV6Address(
+        const DemuxIpAddressIpAddress& addr) {
+    hidl_array<uint8_t, IP_V6_LENGTH> ip;
+    if (addr.get<DemuxIpAddressIpAddress::v6>().size() != IP_V6_LENGTH) {
+        return ip;
+    }
+    copy(addr.get<DemuxIpAddressIpAddress::v6>().begin(),
+         addr.get<DemuxIpAddressIpAddress::v6>().end(), ip.data());
+    return ip;
+}
+
+hidl_array<uint8_t, IP_V4_LENGTH> TunerHidlFilter::getIpV4Address(
+        const DemuxIpAddressIpAddress& addr) {
+    hidl_array<uint8_t, IP_V4_LENGTH> ip;
+    if (addr.get<DemuxIpAddressIpAddress::v4>().size() != IP_V4_LENGTH) {
+        return ip;
+    }
+    copy(addr.get<DemuxIpAddressIpAddress::v4>().begin(),
+         addr.get<DemuxIpAddressIpAddress::v4>().end(), ip.data());
+    return ip;
+}
+
+void TunerHidlFilter::getHidlTlvSettings(const DemuxFilterSettings& settings,
+                                         HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxTlvFilterSettings& tlvConf = settings.get<DemuxFilterSettings::tlv>();
+    HidlDemuxTlvFilterSettings tlv{
+            .packetType = static_cast<uint8_t>(tlvConf.packetType),
+            .isCompressedIpPacket = tlvConf.isCompressedIpPacket,
+    };
+
+    switch (tlvConf.filterSettings.getTag()) {
+    case DemuxTlvFilterSettingsFilterSettings::section: {
+        tlv.filterSettings.section(getHidlSectionSettings(
+                tlvConf.filterSettings.get<DemuxTlvFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxTlvFilterSettingsFilterSettings::bPassthrough: {
+        tlv.filterSettings.bPassthrough(
+                tlvConf.filterSettings.get<DemuxTlvFilterSettingsFilterSettings::bPassthrough>());
+        break;
+    }
+    default: {
+        tlv.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.tlv(tlv);
+}
+
+void TunerHidlFilter::getHidlAlpSettings(const DemuxFilterSettings& settings,
+                                         HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxAlpFilterSettings& alpConf = settings.get<DemuxFilterSettings::alp>();
+    HidlDemuxAlpFilterSettings alp{
+            .packetType = static_cast<uint8_t>(alpConf.packetType),
+            .lengthType = static_cast<HidlDemuxAlpLengthType>(alpConf.lengthType),
+    };
+
+    switch (alpConf.filterSettings.getTag()) {
+    case DemuxAlpFilterSettingsFilterSettings::section: {
+        alp.filterSettings.section(getHidlSectionSettings(
+                alpConf.filterSettings.get<DemuxAlpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    default: {
+        alp.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.alp(alp);
+}
+
+HidlDemuxFilterAvSettings TunerHidlFilter::getHidlAvSettings(
+        const DemuxFilterAvSettings& settings) {
+    HidlDemuxFilterAvSettings av{
+            .isPassthrough = settings.isPassthrough,
+    };
+    return av;
+}
+
+HidlDemuxFilterSectionSettings TunerHidlFilter::getHidlSectionSettings(
+        const DemuxFilterSectionSettings& settings) {
+    HidlDemuxFilterSectionSettings section{
+            .isCheckCrc = settings.isCheckCrc,
+            .isRepeat = settings.isRepeat,
+            .isRaw = settings.isRaw,
+    };
+
+    switch (settings.condition.getTag()) {
+    case DemuxFilterSectionSettingsCondition::sectionBits: {
+        const DemuxFilterSectionBits& sectionBits =
+                settings.condition.get<DemuxFilterSectionSettingsCondition::sectionBits>();
+        vector<uint8_t> filter(sectionBits.filter.begin(), sectionBits.filter.end());
+        vector<uint8_t> mask(sectionBits.mask.begin(), sectionBits.mask.end());
+        vector<uint8_t> mode(sectionBits.mode.begin(), sectionBits.mode.end());
+        section.condition.sectionBits({
+                .filter = filter,
+                .mask = mask,
+                .mode = mode,
+        });
+        break;
+    }
+    case DemuxFilterSectionSettingsCondition::tableInfo: {
+        const DemuxFilterSectionSettingsConditionTableInfo& tableInfo =
+                settings.condition.get<DemuxFilterSectionSettingsCondition::tableInfo>();
+        section.condition.tableInfo({
+                .tableId = static_cast<uint16_t>(tableInfo.tableId),
+                .version = static_cast<uint16_t>(tableInfo.version),
+        });
+        break;
+    }
+    default: {
+        break;
+    }
+    }
+    return section;
+}
+
+HidlDemuxFilterPesDataSettings TunerHidlFilter::getHidlPesDataSettings(
+        const DemuxFilterPesDataSettings& settings) {
+    HidlDemuxFilterPesDataSettings pes{
+            .streamId = static_cast<HidlDemuxStreamId>(settings.streamId),
+            .isRaw = settings.isRaw,
+    };
+    return pes;
+}
+
+HidlDemuxFilterRecordSettings TunerHidlFilter::getHidlRecordSettings(
+        const DemuxFilterRecordSettings& settings) {
+    HidlDemuxFilterRecordSettings record{
+            .tsIndexMask = static_cast<uint32_t>(settings.tsIndexMask),
+    };
+
+    switch (settings.scIndexMask.getTag()) {
+    case DemuxFilterScIndexMask::scIndex: {
+        record.scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType);
+        record.scIndexMask.sc(
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scIndex>()));
+        break;
+    }
+    case DemuxFilterScIndexMask::scAvc: {
+        record.scIndexType = HidlDemuxRecordScIndexType::SC;
+        uint32_t index =
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scAvc>());
+        // AVC start-code indices begin at 1 << 4 in the HIDL HAL but at 1 << 0 in AIDL, so
+        // shift the mask up by 4 bits.
+        index = index << 4;
+        record.scIndexMask.sc(index);
+        break;
+    }
+    case DemuxFilterScIndexMask::scHevc: {
+        record.scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType);
+        record.scIndexMask.scHevc(
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scHevc>()));
+        break;
+    }
+    }
+    return record;
+}
+
+HidlDemuxFilterDownloadSettings TunerHidlFilter::getHidlDownloadSettings(
+        const DemuxFilterDownloadSettings& settings) {
+    HidlDemuxFilterDownloadSettings download{
+            .downloadId = static_cast<uint32_t>(settings.downloadId),
+    };
+    return download;
+}
+
+/////////////// FilterCallback ///////////////////////
+Return<void> TunerHidlFilter::FilterCallback::onFilterStatus(HidlDemuxFilterStatus status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
+    }
+    return Void();
+}
+
+Return<void> TunerHidlFilter::FilterCallback::onFilterEvent(
+        const HidlDemuxFilterEvent& filterEvent) {
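+    // The 1.0 callback carries no extension payload; forward it with an empty 1.1 extension
+    // so both callbacks share a single conversion path.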
+    vector<HidlDemuxFilterEventExt::Event> emptyEventsExt;
+    HidlDemuxFilterEventExt emptyFilterEventExt{
+            .events = emptyEventsExt,
+    };
+    onFilterEvent_1_1(filterEvent, emptyFilterEventExt);
+    return Void();
+}
+
+Return<void> TunerHidlFilter::FilterCallback::onFilterEvent_1_1(
+        const HidlDemuxFilterEvent& filterEvent, const HidlDemuxFilterEventExt& filterEventExt) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        vector<HidlDemuxFilterEvent::Event> events = filterEvent.events;
+        vector<HidlDemuxFilterEventExt::Event> eventsExt = filterEventExt.events;
+        vector<DemuxFilterEvent> tunerEvents;
+
+        getAidlFilterEvent(events, eventsExt, tunerEvents);
+        mTunerFilterCallback->onFilterEvent(tunerEvents);
+    }
+    return Void();
+}
+
+void TunerHidlFilter::FilterCallback::sendSharedFilterStatus(int32_t status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
+    }
+}
+
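+// attachSharedFilterCallback redirects events to the shared client's callback while keeping
+// the original callback so that detachSharedFilterCallback can restore it.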
+void TunerHidlFilter::FilterCallback::attachSharedFilterCallback(
+        const shared_ptr<ITunerFilterCallback>& in_cb) {
+    Mutex::Autolock _l(mCallbackLock);
+    mOriginalCallback = mTunerFilterCallback;
+    mTunerFilterCallback = in_cb;
+}
+
+void TunerHidlFilter::FilterCallback::detachSharedFilterCallback() {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback = mOriginalCallback;
+        mOriginalCallback = nullptr;
+    }
+}
+
+/////////////// FilterCallback Helper Methods ///////////////////////
+void TunerHidlFilter::FilterCallback::getAidlFilterEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events,
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+        vector<DemuxFilterEvent>& aidlEvents) {
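+    // Extension-only events (monitor status, restart) arrive with an empty 1.0 event list, so
+    // translate those first. Regular events are dispatched on the discriminator of the first
+    // entry; all entries in one callback are expected to share that type.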
+    if (events.empty() && !eventsExt.empty()) {
+        switch (eventsExt[0].getDiscriminator()) {
+        case HidlDemuxFilterEventExt::Event::hidl_discriminator::monitorEvent: {
+            getMonitorEvent(eventsExt, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEventExt::Event::hidl_discriminator::startId: {
+            getRestartEvent(eventsExt, aidlEvents);
+            break;
+        }
+        default: {
+            break;
+        }
+        }
+    }
+
+    if (!events.empty()) {
+        switch (events[0].getDiscriminator()) {
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::media: {
+            getMediaEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::section: {
+            getSectionEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::pes: {
+            getPesEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::tsRecord: {
+            getTsRecordEvent(events, eventsExt, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::mmtpRecord: {
+            getMmtpRecordEvent(events, eventsExt, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::download: {
+            getDownloadEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::ipPayload: {
+            getIpPayloadEvent(events, aidlEvents);
+            break;
+        }
+        case HidlDemuxFilterEvent::Event::hidl_discriminator::temi: {
+            getTemiEvent(events, aidlEvents);
+            break;
+        }
+        default: {
+            break;
+        }
+        }
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getMediaEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterMediaEvent& mediaEvent = events[i].media();
+        DemuxFilterMediaEvent media;
+
+        media.streamId = static_cast<int32_t>(mediaEvent.streamId);
+        media.isPtsPresent = mediaEvent.isPtsPresent;
+        media.pts = static_cast<int64_t>(mediaEvent.pts);
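+        // The HIDL media event carries no DTS, so report it as absent.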
+        media.isDtsPresent = false;
+        media.dts = static_cast<int64_t>(-1);
+        media.dataLength = static_cast<int64_t>(mediaEvent.dataLength);
+        media.offset = static_cast<int64_t>(mediaEvent.offset);
+        media.isSecureMemory = mediaEvent.isSecureMemory;
+        media.avDataId = static_cast<int64_t>(mediaEvent.avDataId);
+        media.mpuSequenceNumber = static_cast<int32_t>(mediaEvent.mpuSequenceNumber);
+        media.isPesPrivateData = mediaEvent.isPesPrivateData;
+        media.scIndexMask.set<DemuxFilterScIndexMask::scIndex>(
+                static_cast<int32_t>(DemuxScIndex::UNDEFINED));
+
+        if (mediaEvent.extraMetaData.getDiscriminator() ==
+            HidlDemuxFilterMediaEvent::ExtraMetaData::hidl_discriminator::audio) {
+            AudioExtraMetaData audio;
+            audio.adFade = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adFade);
+            audio.adPan = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adPan);
+            audio.versionTextTag =
+                    static_cast<int16_t>(mediaEvent.extraMetaData.audio().versionTextTag);
+            audio.adGainCenter = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainCenter);
+            audio.adGainFront = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainFront);
+            audio.adGainSurround =
+                    static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainSurround);
+            media.extraMetaData.set<DemuxFilterMediaEventExtraMetaData::audio>(audio);
+        } else {
+            media.extraMetaData.set<DemuxFilterMediaEventExtraMetaData::noinit>(true);
+        }
+
+        if (mediaEvent.avMemory.getNativeHandle() != nullptr) {
+            media.avMemory = dupToAidl(mediaEvent.avMemory.getNativeHandle());
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::media>(move(media));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getSectionEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterSectionEvent& sectionEvent = events[i].section();
+        DemuxFilterSectionEvent section;
+
+        section.tableId = static_cast<int32_t>(sectionEvent.tableId);
+        section.version = static_cast<int32_t>(sectionEvent.version);
+        section.sectionNum = static_cast<int32_t>(sectionEvent.sectionNum);
+        section.dataLength = static_cast<int64_t>(sectionEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::section>(move(section));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getPesEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                                                  vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterPesEvent& pesEvent = events[i].pes();
+        DemuxFilterPesEvent pes;
+
+        pes.streamId = static_cast<int32_t>(pesEvent.streamId);
+        pes.dataLength = static_cast<int32_t>(pesEvent.dataLength);
+        pes.mpuSequenceNumber = static_cast<int32_t>(pesEvent.mpuSequenceNumber);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::pes>(move(pes));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getTsRecordEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events,
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        DemuxFilterTsRecordEvent tsRecord;
+        const HidlDemuxFilterTsRecordEvent& tsRecordEvent = events[i].tsRecord();
+
+        DemuxFilterScIndexMask scIndexMask;
+        if (tsRecordEvent.scIndexMask.getDiscriminator() ==
+            HidlDemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::sc) {
+            int32_t hidlScIndex = static_cast<int32_t>(tsRecordEvent.scIndexMask.sc());
+            if (hidlScIndex <= static_cast<int32_t>(DemuxScIndex::SEQUENCE)) {
+                scIndexMask.set<DemuxFilterScIndexMask::scIndex>(hidlScIndex);
+            } else {
+                // AVC start-code indices begin at 1 << 4 in the HIDL HAL but at 1 << 0 in
+                // AIDL, so shift the mask back down by 4 bits.
+                scIndexMask.set<DemuxFilterScIndexMask::scAvc>(hidlScIndex >> 4);
+            }
+        } else if (tsRecordEvent.scIndexMask.getDiscriminator() ==
+                   HidlDemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::scHevc) {
+            scIndexMask.set<DemuxFilterScIndexMask::scHevc>(
+                    static_cast<int32_t>(tsRecordEvent.scIndexMask.scHevc()));
+        }
+
+        if (tsRecordEvent.pid.getDiscriminator() == HidlDemuxPid::hidl_discriminator::tPid) {
+            DemuxPid pid;
+            pid.set<DemuxPid::tPid>(static_cast<int32_t>(tsRecordEvent.pid.tPid()));
+            tsRecord.pid = pid;
+        } else {
+            DemuxPid pid;
+            pid.set<DemuxPid::tPid>(static_cast<int32_t>(Constant::INVALID_TS_PID));
+            tsRecord.pid = pid;
+        }
+
+        tsRecord.scIndexMask = scIndexMask;
+        tsRecord.tsIndexMask = static_cast<int32_t>(tsRecordEvent.tsIndexMask);
+        tsRecord.byteNumber = static_cast<int64_t>(tsRecordEvent.byteNumber);
+
+        if (eventsExt.size() > i &&
+            eventsExt[i].getDiscriminator() ==
+                    HidlDemuxFilterEventExt::Event::hidl_discriminator::tsRecord) {
+            tsRecord.pts = static_cast<int64_t>(eventsExt[i].tsRecord().pts);
+            tsRecord.firstMbInSlice = static_cast<int32_t>(eventsExt[i].tsRecord().firstMbInSlice);
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::tsRecord>(move(tsRecord));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getMmtpRecordEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events,
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        DemuxFilterMmtpRecordEvent mmtpRecord;
+        const HidlDemuxFilterMmtpRecordEvent& mmtpRecordEvent = events[i].mmtpRecord();
+
+        mmtpRecord.scHevcIndexMask = static_cast<int32_t>(mmtpRecordEvent.scHevcIndexMask);
+        mmtpRecord.byteNumber = static_cast<int64_t>(mmtpRecordEvent.byteNumber);
+
+        if (eventsExt.size() > i &&
+            eventsExt[i].getDiscriminator() ==
+                    HidlDemuxFilterEventExt::Event::hidl_discriminator::mmtpRecord) {
+            mmtpRecord.pts = static_cast<int64_t>(eventsExt[i].mmtpRecord().pts);
+            mmtpRecord.mpuSequenceNumber =
+                    static_cast<int32_t>(eventsExt[i].mmtpRecord().mpuSequenceNumber);
+            mmtpRecord.firstMbInSlice =
+                    static_cast<int32_t>(eventsExt[i].mmtpRecord().firstMbInSlice);
+            mmtpRecord.tsIndexMask = static_cast<int32_t>(eventsExt[i].mmtpRecord().tsIndexMask);
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::mmtpRecord>(move(mmtpRecord));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getDownloadEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterDownloadEvent& downloadEvent = events[i].download();
+        DemuxFilterDownloadEvent download;
+
+        download.itemId = static_cast<int32_t>(downloadEvent.itemId);
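+        // The HIDL download event has no downloadId; mark it as invalid.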
+        download.downloadId = -1;
+        download.itemFragmentIndex = static_cast<int32_t>(downloadEvent.itemFragmentIndex);
+        download.mpuSequenceNumber = static_cast<int32_t>(downloadEvent.mpuSequenceNumber);
+        download.lastItemFragmentIndex = static_cast<int32_t>(downloadEvent.lastItemFragmentIndex);
+        download.dataLength = static_cast<int32_t>(downloadEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::download>(move(download));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getIpPayloadEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterIpPayloadEvent& ipPayloadEvent = events[i].ipPayload();
+        DemuxFilterIpPayloadEvent ipPayload;
+
+        ipPayload.dataLength = static_cast<int32_t>(ipPayloadEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::ipPayload>(move(ipPayload));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getTemiEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterTemiEvent& temiEvent = events[i].temi();
+        DemuxFilterTemiEvent temi;
+
+        temi.pts = static_cast<int64_t>(temiEvent.pts);
+        temi.descrTag = static_cast<int8_t>(temiEvent.descrTag);
+        vector<uint8_t> descrData = temiEvent.descrData;
+        temi.descrData.resize(descrData.size());
+        copy(descrData.begin(), descrData.end(), temi.descrData.begin());
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::temi>(move(temi));
+        res.push_back(move(filterEvent));
+    }
+}
+
+void TunerHidlFilter::FilterCallback::getMonitorEvent(
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    HidlDemuxFilterMonitorEvent monitorEvent = eventsExt[0].monitorEvent();
+    DemuxFilterMonitorEvent monitor;
+
+    switch (monitorEvent.getDiscriminator()) {
+    case HidlDemuxFilterMonitorEvent::hidl_discriminator::scramblingStatus: {
+        monitor.set<DemuxFilterMonitorEvent::scramblingStatus>(
+                static_cast<ScramblingStatus>(monitorEvent.scramblingStatus()));
+        break;
+    }
+    case HidlDemuxFilterMonitorEvent::hidl_discriminator::cid: {
+        monitor.set<DemuxFilterMonitorEvent::cid>(static_cast<int32_t>(monitorEvent.cid()));
+        break;
+    }
+    }
+
+    DemuxFilterEvent filterEvent;
+    filterEvent.set<DemuxFilterEvent::monitorEvent>(move(monitor));
+    res.push_back(move(filterEvent));
+}
+
+void TunerHidlFilter::FilterCallback::getRestartEvent(
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    DemuxFilterEvent filterEvent;
+    filterEvent.set<DemuxFilterEvent::startId>(static_cast<int32_t>(eventsExt[0].startId()));
+    res.push_back(move(filterEvent));
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlFilter.h b/services/tuner/hidl/TunerHidlFilter.h
new file mode 100644
index 0000000..b8fad22
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFilter.h
@@ -0,0 +1,240 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLFILTER_H
+#define ANDROID_MEDIA_TUNERHIDLFILTER_H
+
+#include <aidl/android/hardware/tv/tuner/AvStreamType.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterAvSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterDownloadSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterPesDataSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterRecordSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSectionSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterType.h>
+#include <aidl/android/media/tv/tuner/BnTunerFilter.h>
+#include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <android/hardware/tv/tuner/1.1/IFilter.h>
+#include <android/hardware/tv/tuner/1.1/IFilterCallback.h>
+#include <android/hardware/tv/tuner/1.1/types.h>
+#include <fmq/MessageQueue.h>
+#include <utils/Mutex.h>
+
+#include <map>
+
+using ::aidl::android::hardware::common::NativeHandle;
+using ::aidl::android::hardware::common::fmq::MQDescriptor;
+using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::AvStreamType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterAvSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterDownloadSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterPesDataSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterRecordSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddressIpAddress;
+using ::aidl::android::hardware::tv::tuner::FilterDelayHint;
+using ::aidl::android::media::tv::tuner::BnTunerFilter;
+using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
+using ::android::Mutex;
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::string;
+using ::std::vector;
+
+using HidlAvStreamType = ::android::hardware::tv::tuner::V1_1::AvStreamType;
+using HidlDemuxAlpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterSettings;
+using HidlDemuxFilterAvSettings = ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
+using HidlDemuxFilterDownloadEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadEvent;
+using HidlDemuxFilterDownloadSettings =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadSettings;
+using HidlDemuxFilterIpPayloadEvent =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterIpPayloadEvent;
+using HidlDemuxFilterEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
+using HidlDemuxFilterMediaEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterMediaEvent;
+using HidlDemuxFilterMmtpRecordEvent =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterMmtpRecordEvent;
+using HidlDemuxFilterPesDataSettings =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterPesDataSettings;
+using HidlDemuxFilterPesEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterPesEvent;
+using HidlDemuxFilterRecordSettings =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterRecordSettings;
+using HidlDemuxFilterSectionEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionEvent;
+using HidlDemuxFilterSectionSettings =
+        ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionSettings;
+using HidlDemuxFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
+using HidlDemuxFilterStatus = ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
+using HidlDemuxFilterTemiEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterTemiEvent;
+using HidlDemuxFilterTsRecordEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterTsRecordEvent;
+using HidlDemuxIpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxIpFilterSettings;
+using HidlDemuxMmtpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterSettings;
+using HidlDemuxTlvFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterSettings;
+using HidlDemuxTsFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
+using HidlDemuxPid = ::android::hardware::tv::tuner::V1_0::DemuxPid;
+using HidlIFilter = ::android::hardware::tv::tuner::V1_0::IFilter;
+using HidlDvStreamType = ::android::hardware::tv::tuner::V1_1::AvStreamType;
+using HidlDemuxFilterEventExt = ::android::hardware::tv::tuner::V1_1::DemuxFilterEventExt;
+using HidlDemuxFilterMonitorEvent = ::android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEvent;
+using HidlDemuxFilterTsRecordEventExt =
+        ::android::hardware::tv::tuner::V1_1::DemuxFilterTsRecordEventExt;
+using HidlIFilterCallback = ::android::hardware::tv::tuner::V1_1::IFilterCallback;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using MQDesc = MQDescriptorSync<uint8_t>;
+using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
+
+const static int IP_V4_LENGTH = 4;
+const static int IP_V6_LENGTH = 16;
+
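+// Bridges the AIDL ITunerFilter interface onto a HIDL IFilter (V1_0, with optional V1_1
+// extensions): AIDL DemuxFilterSettings are converted to HIDL settings, and HIDL filter
+// events are converted back into AIDL DemuxFilterEvent via the nested FilterCallback.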
+class TunerHidlFilter : public BnTunerFilter {
+public:
+    class FilterCallback : public HidlIFilterCallback {
+    public:
+        FilterCallback(const shared_ptr<ITunerFilterCallback> tunerFilterCallback)
+              : mTunerFilterCallback(tunerFilterCallback){};
+
+        virtual Return<void> onFilterEvent(const HidlDemuxFilterEvent& filterEvent);
+        virtual Return<void> onFilterEvent_1_1(const HidlDemuxFilterEvent& filterEvent,
+                                               const HidlDemuxFilterEventExt& filterEventExt);
+        virtual Return<void> onFilterStatus(HidlDemuxFilterStatus status);
+
+        void sendSharedFilterStatus(int32_t status);
+        void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+        void detachSharedFilterCallback();
+
+    private:
+        void getAidlFilterEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                                const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                                vector<DemuxFilterEvent>& aidlEvents);
+
+        void getMediaEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                           vector<DemuxFilterEvent>& res);
+        void getSectionEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                             vector<DemuxFilterEvent>& res);
+        void getPesEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                         vector<DemuxFilterEvent>& res);
+        void getTsRecordEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                              const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                              vector<DemuxFilterEvent>& res);
+        void getMmtpRecordEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                                const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                                vector<DemuxFilterEvent>& res);
+        void getDownloadEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                              vector<DemuxFilterEvent>& res);
+        void getIpPayloadEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                               vector<DemuxFilterEvent>& res);
+        void getTemiEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                          vector<DemuxFilterEvent>& res);
+        void getMonitorEvent(const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                             vector<DemuxFilterEvent>& res);
+        void getRestartEvent(const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+                             vector<DemuxFilterEvent>& res);
+
+    private:
+        shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
+        shared_ptr<ITunerFilterCallback> mOriginalCallback;
+        Mutex mCallbackLock;
+    };
+
+    TunerHidlFilter(sp<HidlIFilter> filter, sp<FilterCallback> cb, DemuxFilterType type);
+    virtual ~TunerHidlFilter();
+
+    ::ndk::ScopedAStatus getId(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getId64Bit(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+    ::ndk::ScopedAStatus configure(const DemuxFilterSettings& in_settings) override;
+    ::ndk::ScopedAStatus configureMonitorEvent(int32_t in_monitorEventTypes) override;
+    ::ndk::ScopedAStatus configureIpFilterContextId(int32_t in_cid) override;
+    ::ndk::ScopedAStatus configureAvStreamType(const AvStreamType& in_avStreamType) override;
+    ::ndk::ScopedAStatus getAvSharedHandle(NativeHandle* out_avMemory,
+                                           int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus releaseAvHandle(const NativeHandle& in_handle,
+                                         int64_t in_avDataId) override;
+    ::ndk::ScopedAStatus setDataSource(const shared_ptr<ITunerFilter>& in_filter) override;
+    ::ndk::ScopedAStatus start() override;
+    ::ndk::ScopedAStatus stop() override;
+    ::ndk::ScopedAStatus flush() override;
+    ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus acquireSharedFilterToken(string* _aidl_return) override;
+    ::ndk::ScopedAStatus freeSharedFilterToken(const string& in_filterToken) override;
+    ::ndk::ScopedAStatus getFilterType(DemuxFilterType* _aidl_return) override;
+    ::ndk::ScopedAStatus setDelayHint(const FilterDelayHint& in_hint) override;
+
+    bool isSharedFilterAllowed(int32_t pid);
+    void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+    sp<HidlIFilter> getHalFilter();
+
+private:
+    bool isAudioFilter();
+    bool isVideoFilter();
+
+    HidlDemuxFilterAvSettings getHidlAvSettings(const DemuxFilterAvSettings& settings);
+    HidlDemuxFilterSectionSettings getHidlSectionSettings(
+            const DemuxFilterSectionSettings& settings);
+    HidlDemuxFilterPesDataSettings getHidlPesDataSettings(
+            const DemuxFilterPesDataSettings& settings);
+    HidlDemuxFilterRecordSettings getHidlRecordSettings(const DemuxFilterRecordSettings& settings);
+    HidlDemuxFilterDownloadSettings getHidlDownloadSettings(
+            const DemuxFilterDownloadSettings& settings);
+    bool getHidlAvStreamType(const AvStreamType avStreamType, HidlAvStreamType& type);
+    void getHidlTsSettings(const DemuxFilterSettings& settings,
+                           HidlDemuxFilterSettings& hidlSettings);
+    void getHidlMmtpSettings(const DemuxFilterSettings& settings,
+                             HidlDemuxFilterSettings& hidlSettings);
+    void getHidlIpSettings(const DemuxFilterSettings& settings,
+                           HidlDemuxFilterSettings& hidlSettings);
+    void getHidlTlvSettings(const DemuxFilterSettings& settings,
+                            HidlDemuxFilterSettings& hidlSettings);
+    void getHidlAlpSettings(const DemuxFilterSettings& settings,
+                            HidlDemuxFilterSettings& hidlSettings);
+
+    hidl_array<uint8_t, IP_V4_LENGTH> getIpV4Address(const DemuxIpAddressIpAddress& addr);
+    hidl_array<uint8_t, IP_V6_LENGTH> getIpV6Address(const DemuxIpAddressIpAddress& addr);
+
+    sp<HidlIFilter> mFilter;
+    sp<::android::hardware::tv::tuner::V1_1::IFilter> mFilter_1_1;
+    int32_t mId;
+    int64_t mId64Bit;
+    DemuxFilterType mType;
+    bool mStarted;
+    bool mShared;
+    int32_t mClientPid;
+    sp<FilterCallback> mFilterCallback;
+    Mutex mLock;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLFILTER_H
diff --git a/services/tuner/hidl/TunerHidlFrontend.cpp b/services/tuner/hidl/TunerHidlFrontend.cpp
new file mode 100644
index 0000000..057f24a
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFrontend.cpp
@@ -0,0 +1,1211 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TunerHidlFrontend"
+
+#include "TunerHidlFrontend.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerHidlLnb.h"
+#include "TunerHidlService.h"
+
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogSifStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogType;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Bandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3CodeRate;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Fec;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Modulation;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3PlpSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3TimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendCableTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcAnnex;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsRolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtConstellation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtHierarchy;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::FrontendGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendInnerFec;
+using ::aidl::android::hardware::tv::tuner::FrontendInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Modulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Rolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsRolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtCoderate;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtMode;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendModulationStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendRollOff;
+using ::aidl::android::hardware::tv::tuner::FrontendScanAtsc3PlpInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendSpectralInversion;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusAtsc3PlpInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlFrontendStatusAtsc3PlpInfo =
+        ::aidl::android::hardware::tv::tuner::FrontendStatusAtsc3PlpInfo;
+using HidlFrontendAnalogSifStandard =
+        ::android::hardware::tv::tuner::V1_0::FrontendAnalogSifStandard;
+using HidlFrontendAnalogType = ::android::hardware::tv::tuner::V1_0::FrontendAnalogType;
+using HidlFrontendAtscModulation = ::android::hardware::tv::tuner::V1_0::FrontendAtscModulation;
+using HidlFrontendAtsc3Bandwidth = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Bandwidth;
+using HidlFrontendAtsc3CodeRate = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3CodeRate;
+using HidlFrontendAtsc3DemodOutputFormat =
+        ::android::hardware::tv::tuner::V1_0::FrontendAtsc3DemodOutputFormat;
+using HidlFrontendAtsc3Fec = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Fec;
+using HidlFrontendAtsc3Modulation = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Modulation;
+using HidlFrontendAtsc3TimeInterleaveMode =
+        ::android::hardware::tv::tuner::V1_0::FrontendAtsc3TimeInterleaveMode;
+using HidlFrontendDvbcAnnex = ::android::hardware::tv::tuner::V1_0::FrontendDvbcAnnex;
+using HidlFrontendDvbcModulation = ::android::hardware::tv::tuner::V1_0::FrontendDvbcModulation;
+using HidlFrontendDvbcOuterFec = ::android::hardware::tv::tuner::V1_0::FrontendDvbcOuterFec;
+using HidlFrontendDvbcSpectralInversion =
+        ::android::hardware::tv::tuner::V1_0::FrontendDvbcSpectralInversion;
+using HidlFrontendDvbsModulation = ::android::hardware::tv::tuner::V1_0::FrontendDvbsModulation;
+using HidlFrontendDvbsPilot = ::android::hardware::tv::tuner::V1_0::FrontendDvbsPilot;
+using HidlFrontendDvbsRolloff = ::android::hardware::tv::tuner::V1_0::FrontendDvbsRolloff;
+using HidlFrontendDvbsSettings = ::android::hardware::tv::tuner::V1_0::FrontendDvbsSettings;
+using HidlFrontendDvbsStandard = ::android::hardware::tv::tuner::V1_0::FrontendDvbsStandard;
+using HidlFrontendDvbsVcmMode = ::android::hardware::tv::tuner::V1_0::FrontendDvbsVcmMode;
+using HidlFrontendDvbtBandwidth = ::android::hardware::tv::tuner::V1_0::FrontendDvbtBandwidth;
+using HidlFrontendDvbtCoderate = ::android::hardware::tv::tuner::V1_0::FrontendDvbtCoderate;
+using HidlFrontendDvbtConstellation =
+        ::android::hardware::tv::tuner::V1_0::FrontendDvbtConstellation;
+using HidlFrontendDvbtGuardInterval =
+        ::android::hardware::tv::tuner::V1_0::FrontendDvbtGuardInterval;
+using HidlFrontendDvbtHierarchy = ::android::hardware::tv::tuner::V1_0::FrontendDvbtHierarchy;
+using HidlFrontendDvbtPlpMode = ::android::hardware::tv::tuner::V1_0::FrontendDvbtPlpMode;
+using HidlFrontendDvbtSettings = ::android::hardware::tv::tuner::V1_0::FrontendDvbtSettings;
+using HidlFrontendDvbtStandard = ::android::hardware::tv::tuner::V1_0::FrontendDvbtStandard;
+using HidlFrontendDvbtTransmissionMode =
+        ::android::hardware::tv::tuner::V1_0::FrontendDvbtTransmissionMode;
+using HidlFrontendInnerFec = ::android::hardware::tv::tuner::V1_0::FrontendInnerFec;
+using HidlFrontendIsdbs3Coderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Coderate;
+using HidlFrontendIsdbs3Modulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Modulation;
+using HidlFrontendIsdbs3Rolloff = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Rolloff;
+using HidlFrontendIsdbs3Settings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Settings;
+using HidlFrontendIsdbsCoderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsCoderate;
+using HidlFrontendIsdbsModulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsModulation;
+using HidlFrontendIsdbsRolloff = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsRolloff;
+using HidlFrontendIsdbsSettings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsSettings;
+using HidlFrontendIsdbsStreamIdType =
+        ::android::hardware::tv::tuner::V1_0::FrontendIsdbsStreamIdType;
+using HidlFrontendIsdbtBandwidth = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtBandwidth;
+using HidlFrontendIsdbtCoderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtCoderate;
+using HidlFrontendIsdbtGuardInterval =
+        ::android::hardware::tv::tuner::V1_0::FrontendIsdbtGuardInterval;
+using HidlFrontendIsdbtMode = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtMode;
+using HidlFrontendIsdbtModulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtModulation;
+using HidlFrontendIsdbtSettings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtSettings;
+using HidlFrontendModulationStatus = ::android::hardware::tv::tuner::V1_0::FrontendModulationStatus;
+using HidlFrontendScanAtsc3PlpInfo = ::android::hardware::tv::tuner::V1_0::FrontendScanAtsc3PlpInfo;
+using HidlFrontendScanType = ::android::hardware::tv::tuner::V1_0::FrontendScanType;
+using HidlFrontendStatusType = ::android::hardware::tv::tuner::V1_0::FrontendStatusType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlFrontendAnalogAftFlag = ::android::hardware::tv::tuner::V1_1::FrontendAnalogAftFlag;
+using HidlFrontendBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendBandwidth;
+using HidlFrontendCableTimeInterleaveMode =
+        ::android::hardware::tv::tuner::V1_1::FrontendCableTimeInterleaveMode;
+using HidlFrontendDvbcBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendDvbcBandwidth;
+using HidlFrontendDtmbBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendDtmbBandwidth;
+using HidlFrontendDtmbCodeRate = ::android::hardware::tv::tuner::V1_1::FrontendDtmbCodeRate;
+using HidlFrontendDtmbGuardInterval =
+        ::android::hardware::tv::tuner::V1_1::FrontendDtmbGuardInterval;
+using HidlFrontendDtmbModulation = ::android::hardware::tv::tuner::V1_1::FrontendDtmbModulation;
+using HidlFrontendDtmbTimeInterleaveMode =
+        ::android::hardware::tv::tuner::V1_1::FrontendDtmbTimeInterleaveMode;
+using HidlFrontendDtmbTransmissionMode =
+        ::android::hardware::tv::tuner::V1_1::FrontendDtmbTransmissionMode;
+using HidlFrontendDvbsScanType = ::android::hardware::tv::tuner::V1_1::FrontendDvbsScanType;
+using HidlFrontendGuardInterval = ::android::hardware::tv::tuner::V1_1::FrontendGuardInterval;
+using HidlFrontendInterleaveMode = ::android::hardware::tv::tuner::V1_1::FrontendInterleaveMode;
+using HidlFrontendModulation = ::android::hardware::tv::tuner::V1_1::FrontendModulation;
+using HidlFrontendRollOff = ::android::hardware::tv::tuner::V1_1::FrontendRollOff;
+using HidlFrontendTransmissionMode = ::android::hardware::tv::tuner::V1_1::FrontendTransmissionMode;
+using HidlFrontendSpectralInversion =
+        ::android::hardware::tv::tuner::V1_1::FrontendSpectralInversion;
+using HidlFrontendStatusTypeExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendStatusTypeExt1_1;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
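+// TunerHidlFrontend forwards AIDL ITunerFrontend calls to the HIDL IFrontend HAL, using the
+// V1_1 interface when the HAL provides it and falling back to V1_0 otherwise.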
+TunerHidlFrontend::TunerHidlFrontend(sp<HidlIFrontend> frontend, int id) {
+    mFrontend = frontend;
+    mFrontend_1_1 = ::android::hardware::tv::tuner::V1_1::IFrontend::castFrom(mFrontend);
+    mId = id;
+}
+
+TunerHidlFrontend::~TunerHidlFrontend() {
+    mFrontend = nullptr;
+    mFrontend_1_1 = nullptr;
+    mId = -1;
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::setCallback(
+        const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) {
+    if (mFrontend == nullptr) {
+        ALOGE("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (tunerFrontendCallback == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    sp<HidlIFrontendCallback> frontendCallback = new FrontendCallback(tunerFrontendCallback);
+    HidlResult status = mFrontend->setCallback(frontendCallback);
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::tune(const FrontendSettings& settings) {
+    if (mFrontend == nullptr) {
+        ALOGE("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    HidlFrontendSettings frontendSettings;
+    HidlFrontendSettingsExt1_1 frontendSettingsExt;
+    getHidlFrontendSettings(settings, frontendSettings, frontendSettingsExt);
+    if (mFrontend_1_1 != nullptr) {
+        status = mFrontend_1_1->tune_1_1(frontendSettings, frontendSettingsExt);
+    } else {
+        status = mFrontend->tune(frontendSettings);
+    }
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::stopTune() {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mFrontend->stopTune();
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::scan(const FrontendSettings& settings,
+                                             FrontendScanType frontendScanType) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    HidlFrontendSettings frontendSettings;
+    HidlFrontendSettingsExt1_1 frontendSettingsExt;
+    getHidlFrontendSettings(settings, frontendSettings, frontendSettingsExt);
+    if (mFrontend_1_1 != nullptr) {
+        status = mFrontend_1_1->scan_1_1(frontendSettings,
+                                         static_cast<HidlFrontendScanType>(frontendScanType),
+                                         frontendSettingsExt);
+    } else {
+        status = mFrontend->scan(frontendSettings,
+                                 static_cast<HidlFrontendScanType>(frontendScanType));
+    }
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::stopScan() {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mFrontend->stopScan();
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (lnb == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    HidlResult status = mFrontend->setLnb(static_cast<TunerHidlLnb*>(lnb.get())->getId());
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::linkCiCamToFrontend(int32_t ciCamId,
+                                                            int32_t* _aidl_return) {
+    if (mFrontend_1_1 == nullptr) {
+        ALOGD("IFrontend_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    // Default to an error so a failed HIDL transaction is reported rather than leaving these
+    // values uninitialized.
+    int ltsId = -1;
+    HidlResult status = HidlResult::UNKNOWN_ERROR;
+    mFrontend_1_1->linkCiCam(static_cast<uint32_t>(ciCamId), [&](HidlResult r, uint32_t id) {
+        status = r;
+        ltsId = id;
+    });
+
+    if (status == HidlResult::SUCCESS) {
+        *_aidl_return = ltsId;
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::unlinkCiCamToFrontend(int32_t ciCamId) {
+    if (mFrontend_1_1 == nullptr) {
+        ALOGD("IFrontend_1_1 is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mFrontend_1_1->unlinkCiCam(ciCamId);
+    if (status == HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::close() {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    TunerHidlService::getTunerService()->removeFrontend(this->ref<TunerHidlFrontend>());
+
+    HidlResult status = mFrontend->close();
+    mFrontend = nullptr;
+    mFrontend_1_1 = nullptr;
+
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::getStatus(const vector<FrontendStatusType>& in_statusTypes,
+                                                  vector<FrontendStatus>* _aidl_return) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = HidlResult::UNKNOWN_ERROR;
+    vector<HidlFrontendStatus> status;
+    vector<HidlFrontendStatusExt1_1> statusExt;
+    vector<HidlFrontendStatusType> types;
+    vector<HidlFrontendStatusTypeExt1_1> typesExt;
+    for (auto s : in_statusTypes) {
+        if (static_cast<int32_t>(s) <=
+            static_cast<int32_t>(HidlFrontendStatusType::ATSC3_PLP_INFO)) {
+            types.push_back(static_cast<HidlFrontendStatusType>(s));
+        } else {
+            typesExt.push_back(static_cast<HidlFrontendStatusTypeExt1_1>(s));
+        }
+    }
+
+    mFrontend->getStatus(types, [&](HidlResult r, const hidl_vec<HidlFrontendStatus>& ss) {
+        res = r;
+        for (auto s : ss) {
+            status.push_back(s);
+        }
+    });
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    if (mFrontend_1_1 != nullptr) {
+        mFrontend_1_1->getStatusExt1_1(
+                typesExt, [&](HidlResult r, const hidl_vec<HidlFrontendStatusExt1_1>& ss) {
+                    res = r;
+                    for (auto s : ss) {
+                        statusExt.push_back(s);
+                    }
+                });
+        if (res != HidlResult::SUCCESS) {
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+        }
+    }
+
+    getAidlFrontendStatus(status, statusExt, *_aidl_return);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::getFrontendId(int32_t* _aidl_return) {
+    *_aidl_return = mId;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::getHardwareInfo(std::string* _aidl_return) {
+    _aidl_return->clear();
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+void TunerHidlFrontend::setLna(bool bEnable) {
+    if (mFrontend == nullptr) {
+        ALOGD("IFrontend is not initialized");
+        return;
+    }
+
+    mFrontend->setLna(bEnable);
+}
+
+/////////////// FrontendCallback ///////////////////////
+Return<void> TunerHidlFrontend::FrontendCallback::onEvent(HidlFrontendEventType frontendEventType) {
+    ALOGV("FrontendCallback::onEvent, type=%d", frontendEventType);
+    mTunerFrontendCallback->onEvent(static_cast<FrontendEventType>(frontendEventType));
+    return Void();
+}
+
+Return<void> TunerHidlFrontend::FrontendCallback::onScanMessage(
+        HidlFrontendScanMessageType type, const HidlFrontendScanMessage& message) {
+    ALOGV("FrontendCallback::onScanMessage, type=%d", type);
+    FrontendScanMessage scanMessage;
+    switch (type) {
+    case HidlFrontendScanMessageType::LOCKED: {
+        scanMessage.set<FrontendScanMessage::isLocked>(message.isLocked());
+        break;
+    }
+    case HidlFrontendScanMessageType::END: {
+        scanMessage.set<FrontendScanMessage::isEnd>(message.isEnd());
+        break;
+    }
+    case HidlFrontendScanMessageType::PROGRESS_PERCENT: {
+        scanMessage.set<FrontendScanMessage::progressPercent>(message.progressPercent());
+        break;
+    }
+    case HidlFrontendScanMessageType::FREQUENCY: {
+        const vector<uint32_t>& f = message.frequencies();
+        vector<int64_t> lf(begin(f), end(f));
+        scanMessage.set<FrontendScanMessage::frequencies>(lf);
+        break;
+    }
+    case HidlFrontendScanMessageType::SYMBOL_RATE: {
+        const vector<uint32_t>& s = message.symbolRates();
+        vector<int32_t> symbolRates(begin(s), end(s));
+        scanMessage.set<FrontendScanMessage::symbolRates>(symbolRates);
+        break;
+    }
+    case HidlFrontendScanMessageType::HIERARCHY: {
+        scanMessage.set<FrontendScanMessage::hierarchy>(
+                static_cast<FrontendDvbtHierarchy>(message.hierarchy()));
+        break;
+    }
+    case HidlFrontendScanMessageType::ANALOG_TYPE: {
+        scanMessage.set<FrontendScanMessage::analogType>(
+                static_cast<FrontendAnalogType>(message.analogType()));
+        break;
+    }
+    case HidlFrontendScanMessageType::PLP_IDS: {
+        const vector<uint8_t>& p = message.plpIds();
+        vector<int32_t> plpIds(begin(p), end(p));
+        scanMessage.set<FrontendScanMessage::plpIds>(plpIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::GROUP_IDS: {
+        const vector<uint8_t>& g = message.groupIds();
+        vector<int32_t> groupIds(begin(g), end(g));
+        scanMessage.set<FrontendScanMessage::groupIds>(groupIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::INPUT_STREAM_IDS: {
+        const vector<uint16_t>& i = message.inputStreamIds();
+        vector<int32_t> streamIds(begin(i), end(i));
+        scanMessage.set<FrontendScanMessage::inputStreamIds>(streamIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::STANDARD: {
+        const HidlFrontendScanMessage::Standard& std = message.std();
+        FrontendScanMessageStandard standard;
+        if (std.getDiscriminator() == HidlFrontendScanMessage::Standard::hidl_discriminator::sStd) {
+            standard.set<FrontendScanMessageStandard::sStd>(
+                    static_cast<FrontendDvbsStandard>(std.sStd()));
+        } else if (std.getDiscriminator() ==
+                   HidlFrontendScanMessage::Standard::hidl_discriminator::tStd) {
+            standard.set<FrontendScanMessageStandard::tStd>(
+                    static_cast<FrontendDvbtStandard>(std.tStd()));
+        } else if (std.getDiscriminator() ==
+                   HidlFrontendScanMessage::Standard::hidl_discriminator::sifStd) {
+            standard.set<FrontendScanMessageStandard::sifStd>(
+                    static_cast<FrontendAnalogSifStandard>(std.sifStd()));
+        }
+        scanMessage.set<FrontendScanMessage::std>(standard);
+        break;
+    }
+    case HidlFrontendScanMessageType::ATSC3_PLP_INFO: {
+        const vector<HidlFrontendScanAtsc3PlpInfo>& plpInfos = message.atsc3PlpInfos();
+        vector<FrontendScanAtsc3PlpInfo> tunerPlpInfos;
+        for (size_t i = 0; i < plpInfos.size(); i++) {
+            FrontendScanAtsc3PlpInfo plpInfo{
+                    .plpId = static_cast<int32_t>(plpInfos[i].plpId),
+                    .bLlsFlag = plpInfos[i].bLlsFlag,
+            };
+            tunerPlpInfos.push_back(plpInfo);
+        }
+        scanMessage.set<FrontendScanMessage::atsc3PlpInfos>(tunerPlpInfos);
+        break;
+    }
+    default:
+        break;
+    }
+    mTunerFrontendCallback->onScanMessage(static_cast<FrontendScanMessageType>(type), scanMessage);
+    return Void();
+}
+
+Return<void> TunerHidlFrontend::FrontendCallback::onScanMessageExt1_1(
+        HidlFrontendScanMessageTypeExt1_1 type, const HidlFrontendScanMessageExt1_1& message) {
+    ALOGV("onScanMessageExt1_1::onScanMessage, type=%d", type);
+    FrontendScanMessage scanMessage;
+    switch (type) {
+    case HidlFrontendScanMessageTypeExt1_1::MODULATION: {
+        HidlFrontendModulation m = message.modulation();
+        FrontendModulation modulation;
+        switch (m.getDiscriminator()) {
+        case HidlFrontendModulation::hidl_discriminator::dvbc: {
+            modulation.set<FrontendModulation::dvbc>(static_cast<FrontendDvbcModulation>(m.dvbc()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dvbt: {
+            modulation.set<FrontendModulation::dvbt>(
+                    static_cast<FrontendDvbtConstellation>(m.dvbt()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dvbs: {
+            modulation.set<FrontendModulation::dvbs>(static_cast<FrontendDvbsModulation>(m.dvbs()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbs: {
+            modulation.set<FrontendModulation::isdbs>(
+                    static_cast<FrontendIsdbsModulation>(m.isdbs()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbs3: {
+            modulation.set<FrontendModulation::isdbs3>(
+                    static_cast<FrontendIsdbs3Modulation>(m.isdbs3()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbt: {
+            modulation.set<FrontendModulation::isdbt>(
+                    static_cast<FrontendIsdbtModulation>(m.isdbt()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::atsc: {
+            modulation.set<FrontendModulation::atsc>(static_cast<FrontendAtscModulation>(m.atsc()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::atsc3: {
+            modulation.set<FrontendModulation::atsc3>(
+                    static_cast<FrontendAtsc3Modulation>(m.atsc3()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dtmb: {
+            modulation.set<FrontendModulation::dtmb>(static_cast<FrontendDtmbModulation>(m.dtmb()));
+            break;
+        }
+        }
+        scanMessage.set<FrontendScanMessage::modulation>(modulation);
+        break;
+    }
+    case HidlFrontendScanMessageTypeExt1_1::DVBC_ANNEX: {
+        scanMessage.set<FrontendScanMessage::annex>(
+                static_cast<FrontendDvbcAnnex>(message.annex()));
+        break;
+    }
+    case HidlFrontendScanMessageTypeExt1_1::HIGH_PRIORITY: {
+        scanMessage.set<FrontendScanMessage::isHighPriority>(message.isHighPriority());
+        break;
+    }
+    default: {
+        break;
+    }
+    }
+    mTunerFrontendCallback->onScanMessage(static_cast<FrontendScanMessageType>(type), scanMessage);
+    return Void();
+}
+
+/////////////// TunerHidlFrontend Helper Methods ///////////////////////
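+// Flattens the HIDL V1_0 FrontendStatus and V1_1 FrontendStatusExt1_1 entries into a single
+// vector of AIDL FrontendStatus values.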
+void TunerHidlFrontend::getAidlFrontendStatus(const vector<HidlFrontendStatus>& hidlStatus,
+                                              const vector<HidlFrontendStatusExt1_1>& hidlStatusExt,
+                                              vector<FrontendStatus>& aidlStatus) {
+    for (HidlFrontendStatus s : hidlStatus) {
+        FrontendStatus status;
+        switch (s.getDiscriminator()) {
+        case HidlFrontendStatus::hidl_discriminator::isDemodLocked: {
+            status.set<FrontendStatus::isDemodLocked>(s.isDemodLocked());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::snr: {
+            status.set<FrontendStatus::snr>((int)s.snr());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::ber: {
+            status.set<FrontendStatus::ber>((int)s.ber());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::per: {
+            status.set<FrontendStatus::per>((int)s.per());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::preBer: {
+            status.set<FrontendStatus::preBer>((int)s.preBer());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::signalQuality: {
+            status.set<FrontendStatus::signalQuality>((int)s.signalQuality());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::signalStrength: {
+            status.set<FrontendStatus::signalStrength>((int)s.signalStrength());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::symbolRate: {
+            status.set<FrontendStatus::symbolRate>((int)s.symbolRate());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::innerFec: {
+            status.set<FrontendStatus::innerFec>(static_cast<FrontendInnerFec>(s.innerFec()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::modulation: {
+            FrontendModulationStatus modulationStatus;
+            switch (s.modulation().getDiscriminator()) {
+            case HidlFrontendModulationStatus::hidl_discriminator::dvbc:
+                modulationStatus.set<FrontendModulationStatus::dvbc>(
+                        static_cast<FrontendDvbcModulation>(s.modulation().dvbc()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::dvbs:
+                modulationStatus.set<FrontendModulationStatus::dvbs>(
+                        static_cast<FrontendDvbsModulation>(s.modulation().dvbs()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbs:
+                modulationStatus.set<FrontendModulationStatus::isdbs>(
+                        static_cast<FrontendIsdbsModulation>(s.modulation().isdbs()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbs3:
+                modulationStatus.set<FrontendModulationStatus::isdbs3>(
+                        static_cast<FrontendIsdbs3Modulation>(s.modulation().isdbs3()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbt:
+                modulationStatus.set<FrontendModulationStatus::isdbt>(
+                        static_cast<FrontendIsdbtModulation>(s.modulation().isdbt()));
+                break;
+            }
+            status.set<FrontendStatus::modulationStatus>(modulationStatus);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::inversion: {
+            status.set<FrontendStatus::inversion>(
+                    static_cast<FrontendSpectralInversion>(s.inversion()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::lnbVoltage: {
+            status.set<FrontendStatus::lnbVoltage>(static_cast<LnbVoltage>(s.lnbVoltage()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::plpId: {
+            status.set<FrontendStatus::plpId>((int32_t)s.plpId());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isEWBS: {
+            status.set<FrontendStatus::isEWBS>(s.isEWBS());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::agc: {
+            status.set<FrontendStatus::agc>((int32_t)s.agc());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isLnaOn: {
+            status.set<FrontendStatus::isLnaOn>(s.isLnaOn());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isLayerError: {
+            vector<bool> e(s.isLayerError().begin(), s.isLayerError().end());
+            status.set<FrontendStatus::isLayerError>(e);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::mer: {
+            status.set<FrontendStatus::mer>(static_cast<int32_t>(s.mer()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::freqOffset: {
+            status.set<FrontendStatus::freqOffset>(static_cast<int64_t>(s.freqOffset()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::hierarchy: {
+            status.set<FrontendStatus::hierarchy>(
+                    static_cast<FrontendDvbtHierarchy>(s.hierarchy()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isRfLocked: {
+            status.set<FrontendStatus::isRfLocked>(s.isRfLocked());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::plpInfo: {
+            vector<FrontendStatusAtsc3PlpInfo> info;
+            for (auto i : s.plpInfo()) {
+                info.push_back({
+                        .plpId = static_cast<int32_t>(i.plpId),
+                        .isLocked = i.isLocked,
+                        .uec = static_cast<int32_t>(i.uec),
+                });
+            }
+            status.set<FrontendStatus::plpInfo>(info);
+            aidlStatus.push_back(status);
+            break;
+        }
+        }
+    }
+
+    for (HidlFrontendStatusExt1_1 s : hidlStatusExt) {
+        FrontendStatus status;
+        switch (s.getDiscriminator()) {
+        case HidlFrontendStatusExt1_1::hidl_discriminator::modulations: {
+            vector<FrontendModulation> aidlMod;
+            for (auto m : s.modulations()) {
+                switch (m.getDiscriminator()) {
+                case HidlFrontendModulation::hidl_discriminator::dvbc:
+                    aidlMod.push_back(static_cast<FrontendDvbcModulation>(m.dvbc()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dvbs:
+                    aidlMod.push_back(static_cast<FrontendDvbsModulation>(m.dvbs()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dvbt:
+                    aidlMod.push_back(static_cast<FrontendDvbtConstellation>(m.dvbt()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbs:
+                    aidlMod.push_back(static_cast<FrontendIsdbsModulation>(m.isdbs()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbs3:
+                    aidlMod.push_back(static_cast<FrontendIsdbs3Modulation>(m.isdbs3()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbt:
+                    aidlMod.push_back(static_cast<FrontendIsdbtModulation>(m.isdbt()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::atsc:
+                    aidlMod.push_back(static_cast<FrontendAtscModulation>(m.atsc()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::atsc3:
+                    aidlMod.push_back(static_cast<FrontendAtsc3Modulation>(m.atsc3()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dtmb:
+                    aidlMod.push_back(static_cast<FrontendDtmbModulation>(m.dtmb()));
+                    break;
+                }
+            }
+            status.set<FrontendStatus::modulations>(aidlMod);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::bers: {
+            vector<int> b(s.bers().begin(), s.bers().end());
+            status.set<FrontendStatus::bers>(b);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::codeRates: {
+            vector<FrontendInnerFec> codeRates;
+            for (auto c : s.codeRates()) {
+                codeRates.push_back(static_cast<FrontendInnerFec>(c));
+            }
+            status.set<FrontendStatus::codeRates>(codeRates);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::bandwidth: {
+            FrontendBandwidth bandwidth;
+            switch (s.bandwidth().getDiscriminator()) {
+            case HidlFrontendBandwidth::hidl_discriminator::atsc3:
+                bandwidth.set<FrontendBandwidth::atsc3>(
+                        static_cast<FrontendAtsc3Bandwidth>(s.bandwidth().atsc3()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dvbc:
+                bandwidth.set<FrontendBandwidth::dvbc>(
+                        static_cast<FrontendDvbcBandwidth>(s.bandwidth().dvbc()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dvbt:
+                bandwidth.set<FrontendBandwidth::dvbt>(
+                        static_cast<FrontendDvbtBandwidth>(s.bandwidth().dvbt()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::isdbt:
+                bandwidth.set<FrontendBandwidth::isdbt>(
+                        static_cast<FrontendIsdbtBandwidth>(s.bandwidth().isdbt()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dtmb:
+                bandwidth.set<FrontendBandwidth::dtmb>(
+                        static_cast<FrontendDtmbBandwidth>(s.bandwidth().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::bandwidth>(bandwidth);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::interval: {
+            FrontendGuardInterval interval;
+            switch (s.interval().getDiscriminator()) {
+            case HidlFrontendGuardInterval::hidl_discriminator::dvbt:
+                interval.set<FrontendGuardInterval::dvbt>(
+                        static_cast<FrontendDvbtGuardInterval>(s.interval().dvbt()));
+                break;
+            case HidlFrontendGuardInterval::hidl_discriminator::isdbt:
+                interval.set<FrontendGuardInterval::isdbt>(
+                        static_cast<FrontendIsdbtGuardInterval>(s.interval().isdbt()));
+                break;
+            case HidlFrontendGuardInterval::hidl_discriminator::dtmb:
+                interval.set<FrontendGuardInterval::dtmb>(
+                        static_cast<FrontendDtmbGuardInterval>(s.interval().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::interval>(interval);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::transmissionMode: {
+            FrontendTransmissionMode transmissionMode;
+            switch (s.transmissionMode().getDiscriminator()) {
+            case HidlFrontendTransmissionMode::hidl_discriminator::dvbt:
+                transmissionMode.set<FrontendTransmissionMode::dvbt>(
+                        static_cast<FrontendDvbtTransmissionMode>(s.transmissionMode().dvbt()));
+                break;
+            case HidlFrontendTransmissionMode::hidl_discriminator::isdbt:
+                transmissionMode.set<FrontendTransmissionMode::isdbt>(
+                        static_cast<FrontendIsdbtMode>(s.transmissionMode().isdbt()));
+                break;
+            case HidlFrontendTransmissionMode::hidl_discriminator::dtmb:
+                transmissionMode.set<FrontendTransmissionMode::dtmb>(
+                        static_cast<FrontendDtmbTransmissionMode>(s.transmissionMode().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::transmissionMode>(transmissionMode);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::uec: {
+            status.set<FrontendStatus::uec>(static_cast<int32_t>(s.uec()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::systemId: {
+            status.set<FrontendStatus::systemId>(static_cast<int32_t>(s.systemId()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::interleaving: {
+            vector<FrontendInterleaveMode> aidlInter;
+            for (auto i : s.interleaving()) {
+                FrontendInterleaveMode leaveMode;
+                switch (i.getDiscriminator()) {
+                case HidlFrontendInterleaveMode::hidl_discriminator::atsc3:
+                    leaveMode.set<FrontendInterleaveMode::atsc3>(
+                            static_cast<FrontendAtsc3TimeInterleaveMode>(i.atsc3()));
+                    break;
+                case HidlFrontendInterleaveMode::hidl_discriminator::dvbc:
+                    leaveMode.set<FrontendInterleaveMode::dvbc>(
+                            static_cast<FrontendCableTimeInterleaveMode>(i.dvbc()));
+                    break;
+                case HidlFrontendInterleaveMode::hidl_discriminator::dtmb:
+                    leaveMode.set<FrontendInterleaveMode::dtmb>(
+                            static_cast<FrontendDtmbTimeInterleaveMode>(i.dtmb()));
+                    break;
+                }
+                aidlInter.push_back(leaveMode);
+            }
+            status.set<FrontendStatus::interleaving>(aidlInter);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isdbtSegment: {
+            const vector<uint8_t>& seg = s.isdbtSegment();
+            vector<int32_t> i(seg.begin(), seg.end());
+            status.set<FrontendStatus::isdbtSegment>(i);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::tsDataRate: {
+            vector<int32_t> ts(s.tsDataRate().begin(), s.tsDataRate().end());
+            status.set<FrontendStatus::tsDataRate>(ts);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::rollOff: {
+            FrontendRollOff rollOff;
+            switch (s.rollOff().getDiscriminator()) {
+            case HidlFrontendRollOff::hidl_discriminator::dvbs:
+                rollOff.set<FrontendRollOff::dvbs>(
+                        static_cast<FrontendDvbsRolloff>(s.rollOff().dvbs()));
+                break;
+            case HidlFrontendRollOff::hidl_discriminator::isdbs:
+                rollOff.set<FrontendRollOff::isdbs>(
+                        static_cast<FrontendIsdbsRolloff>(s.rollOff().isdbs()));
+                break;
+            case HidlFrontendRollOff::hidl_discriminator::isdbs3:
+                rollOff.set<FrontendRollOff::isdbs3>(
+                        static_cast<FrontendIsdbs3Rolloff>(s.rollOff().isdbs3()));
+                break;
+            }
+            status.set<FrontendStatus::rollOff>(rollOff);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isMiso: {
+            status.set<FrontendStatus::isMiso>(s.isMiso());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isLinear: {
+            status.set<FrontendStatus::isLinear>(s.isLinear());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isShortFrames: {
+            status.set<FrontendStatus::isShortFrames>(s.isShortFrames());
+            aidlStatus.push_back(status);
+            break;
+        }
+        }
+    }
+}
+
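+// Converts the AIDL ATSC3 PLP settings into their HIDL V1_0 equivalents, one entry per PLP.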
+hidl_vec<HidlFrontendAtsc3PlpSettings> TunerHidlFrontend::getAtsc3PlpSettings(
+        const FrontendAtsc3Settings& settings) {
+    int len = settings.plpSettings.size();
+    hidl_vec<HidlFrontendAtsc3PlpSettings> plps = hidl_vec<HidlFrontendAtsc3PlpSettings>(len);
+    // parse PLP settings
+    for (int i = 0; i < len; i++) {
+        uint8_t plpId = static_cast<uint8_t>(settings.plpSettings[i].plpId);
+        HidlFrontendAtsc3Modulation modulation =
+                static_cast<HidlFrontendAtsc3Modulation>(settings.plpSettings[i].modulation);
+        HidlFrontendAtsc3TimeInterleaveMode interleaveMode =
+                static_cast<HidlFrontendAtsc3TimeInterleaveMode>(
+                        settings.plpSettings[i].interleaveMode);
+        HidlFrontendAtsc3CodeRate codeRate =
+                static_cast<HidlFrontendAtsc3CodeRate>(settings.plpSettings[i].codeRate);
+        HidlFrontendAtsc3Fec fec = static_cast<HidlFrontendAtsc3Fec>(settings.plpSettings[i].fec);
+        HidlFrontendAtsc3PlpSettings frontendAtsc3PlpSettings{
+                .plpId = plpId,
+                .modulation = modulation,
+                .interleaveMode = interleaveMode,
+                .codeRate = codeRate,
+                .fec = fec,
+        };
+        plps[i] = frontendAtsc3PlpSettings;
+    }
+    return plps;
+}
+
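+// Convert an AIDL DVB-S code rate into the HIDL 1.0 representation.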
+HidlFrontendDvbsCodeRate TunerHidlFrontend::getDvbsCodeRate(const FrontendDvbsCodeRate& codeRate) {
+    HidlFrontendInnerFec innerFec = static_cast<HidlFrontendInnerFec>(codeRate.fec);
+    bool isLinear = codeRate.isLinear;
+    bool isShortFrames = codeRate.isShortFrames;
+    uint32_t bitsPer1000Symbol = static_cast<uint32_t>(codeRate.bitsPer1000Symbol);
+    HidlFrontendDvbsCodeRate coderate{
+            .fec = innerFec,
+            .isLinear = isLinear,
+            .isShortFrames = isShortFrames,
+            .bitsPer1000Symbol = bitsPer1000Symbol,
+    };
+    return coderate;
+}
+
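+// Translate AIDL frontend settings into HIDL 1.0 settings plus the 1.1 extension for each standard.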
+void TunerHidlFrontend::getHidlFrontendSettings(const FrontendSettings& aidlSettings,
+                                                HidlFrontendSettings& settings,
+                                                HidlFrontendSettingsExt1_1& settingsExt) {
+    switch (aidlSettings.getTag()) {
+    case FrontendSettings::analog: {
+        const FrontendAnalogSettings& analog = aidlSettings.get<FrontendSettings::analog>();
+        settings.analog({
+                .frequency = static_cast<uint32_t>(analog.frequency),
+                .type = static_cast<HidlFrontendAnalogType>(analog.type),
+                .sifStandard = static_cast<HidlFrontendAnalogSifStandard>(analog.sifStandard),
+        });
+        settingsExt.settingExt.analog({
+                .aftFlag = static_cast<HidlFrontendAnalogAftFlag>(analog.aftFlag),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(analog.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(analog.inversion);
+        break;
+    }
+    case FrontendSettings::atsc: {
+        const FrontendAtscSettings& atsc = aidlSettings.get<FrontendSettings::atsc>();
+        settings.atsc({
+                .frequency = static_cast<uint32_t>(atsc.frequency),
+                .modulation = static_cast<HidlFrontendAtscModulation>(atsc.modulation),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(atsc.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(atsc.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::atsc3: {
+        const FrontendAtsc3Settings& atsc3 = aidlSettings.get<FrontendSettings::atsc3>();
+        settings.atsc3({
+                .frequency = static_cast<uint32_t>(atsc3.frequency),
+                .bandwidth = static_cast<HidlFrontendAtsc3Bandwidth>(atsc3.bandwidth),
+                .demodOutputFormat =
+                        static_cast<HidlFrontendAtsc3DemodOutputFormat>(atsc3.demodOutputFormat),
+                .plpSettings = getAtsc3PlpSettings(atsc3),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(atsc3.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(atsc3.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::dvbc: {
+        const FrontendDvbcSettings& dvbc = aidlSettings.get<FrontendSettings::dvbc>();
+        settings.dvbc({
+                .frequency = static_cast<uint32_t>(dvbc.frequency),
+                .modulation = static_cast<HidlFrontendDvbcModulation>(dvbc.modulation),
+                .fec = static_cast<HidlFrontendInnerFec>(dvbc.fec),
+                .symbolRate = static_cast<uint32_t>(dvbc.symbolRate),
+                .outerFec = static_cast<HidlFrontendDvbcOuterFec>(dvbc.outerFec),
+                .annex = static_cast<HidlFrontendDvbcAnnex>(dvbc.annex),
+                .spectralInversion = static_cast<HidlFrontendDvbcSpectralInversion>(dvbc.inversion),
+        });
+        settingsExt.settingExt.dvbc({
+                .interleaveMode =
+                        static_cast<HidlFrontendCableTimeInterleaveMode>(dvbc.interleaveMode),
+                .bandwidth = static_cast<HidlFrontendDvbcBandwidth>(dvbc.bandwidth),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbc.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbc.inversion);
+        break;
+    }
+    case FrontendSettings::dvbs: {
+        const FrontendDvbsSettings& dvbs = aidlSettings.get<FrontendSettings::dvbs>();
+        settings.dvbs({
+                .frequency = static_cast<uint32_t>(dvbs.frequency),
+                .modulation = static_cast<HidlFrontendDvbsModulation>(dvbs.modulation),
+                .coderate = getDvbsCodeRate(dvbs.coderate),
+                .symbolRate = static_cast<uint32_t>(dvbs.symbolRate),
+                .rolloff = static_cast<HidlFrontendDvbsRolloff>(dvbs.rolloff),
+                .pilot = static_cast<HidlFrontendDvbsPilot>(dvbs.pilot),
+                .inputStreamId = static_cast<uint32_t>(dvbs.inputStreamId),
+                .standard = static_cast<HidlFrontendDvbsStandard>(dvbs.standard),
+                .vcmMode = static_cast<HidlFrontendDvbsVcmMode>(dvbs.vcmMode),
+        });
+        settingsExt.settingExt.dvbs({
+                .scanType = static_cast<HidlFrontendDvbsScanType>(dvbs.scanType),
+                .isDiseqcRxMessage = dvbs.isDiseqcRxMessage,
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbs.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbs.inversion);
+        break;
+    }
+    case FrontendSettings::dvbt: {
+        const FrontendDvbtSettings& dvbt = aidlSettings.get<FrontendSettings::dvbt>();
+        settings.dvbt({
+                .frequency = static_cast<uint32_t>(dvbt.frequency),
+                .transmissionMode =
+                        static_cast<HidlFrontendDvbtTransmissionMode>(dvbt.transmissionMode),
+                .bandwidth = static_cast<HidlFrontendDvbtBandwidth>(dvbt.bandwidth),
+                .constellation = static_cast<HidlFrontendDvbtConstellation>(dvbt.constellation),
+                .hierarchy = static_cast<HidlFrontendDvbtHierarchy>(dvbt.hierarchy),
+                .hpCoderate = static_cast<HidlFrontendDvbtCoderate>(dvbt.hpCoderate),
+                .lpCoderate = static_cast<HidlFrontendDvbtCoderate>(dvbt.lpCoderate),
+                .guardInterval = static_cast<HidlFrontendDvbtGuardInterval>(dvbt.guardInterval),
+                .isHighPriority = dvbt.isHighPriority,
+                .standard = static_cast<HidlFrontendDvbtStandard>(dvbt.standard),
+                .isMiso = dvbt.isMiso,
+                .plpMode = static_cast<HidlFrontendDvbtPlpMode>(dvbt.plpMode),
+                .plpId = static_cast<uint8_t>(dvbt.plpId),
+                .plpGroupId = static_cast<uint8_t>(dvbt.plpGroupId),
+        });
+        settingsExt.settingExt.dvbt({
+                .constellation = static_cast<
+                        ::android::hardware::tv::tuner::V1_1::FrontendDvbtConstellation>(
+                        dvbt.constellation),
+                .transmissionMode = static_cast<
+                        ::android::hardware::tv::tuner::V1_1::FrontendDvbtTransmissionMode>(
+                        dvbt.transmissionMode),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbt.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbt.inversion);
+        break;
+    }
+    case FrontendSettings::isdbs: {
+        const FrontendIsdbsSettings& isdbs = aidlSettings.get<FrontendSettings::isdbs>();
+        settings.isdbs({
+                .frequency = static_cast<uint32_t>(isdbs.frequency),
+                .streamId = static_cast<uint16_t>(isdbs.streamId),
+                .streamIdType = static_cast<HidlFrontendIsdbsStreamIdType>(isdbs.streamIdType),
+                .modulation = static_cast<HidlFrontendIsdbsModulation>(isdbs.modulation),
+                .coderate = static_cast<HidlFrontendIsdbsCoderate>(isdbs.coderate),
+                .symbolRate = static_cast<uint32_t>(isdbs.symbolRate),
+                .rolloff = static_cast<HidlFrontendIsdbsRolloff>(isdbs.rolloff),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbs.endFrequency);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::isdbs3: {
+        const FrontendIsdbs3Settings& isdbs3 = aidlSettings.get<FrontendSettings::isdbs3>();
+        settings.isdbs3({
+                .frequency = static_cast<uint32_t>(isdbs3.frequency),
+                .streamId = static_cast<uint16_t>(isdbs3.streamId),
+                .streamIdType = static_cast<HidlFrontendIsdbsStreamIdType>(isdbs3.streamIdType),
+                .modulation = static_cast<HidlFrontendIsdbs3Modulation>(isdbs3.modulation),
+                .coderate = static_cast<HidlFrontendIsdbs3Coderate>(isdbs3.coderate),
+                .symbolRate = static_cast<uint32_t>(isdbs3.symbolRate),
+                .rolloff = static_cast<HidlFrontendIsdbs3Rolloff>(isdbs3.rolloff),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbs3.endFrequency);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::isdbt: {
+        const FrontendIsdbtSettings& isdbt = aidlSettings.get<FrontendSettings::isdbt>();
+        HidlFrontendIsdbtModulation modulation = HidlFrontendIsdbtModulation::UNDEFINED;
+        HidlFrontendIsdbtCoderate coderate = HidlFrontendIsdbtCoderate::UNDEFINED;
+        if (isdbt.layerSettings.size() > 0) {
+            modulation =
+                    static_cast<HidlFrontendIsdbtModulation>(isdbt.layerSettings[0].modulation);
+            coderate = static_cast<HidlFrontendIsdbtCoderate>(isdbt.layerSettings[0].coderate);
+        }
+        settings.isdbt({
+                .frequency = static_cast<uint32_t>(isdbt.frequency),
+                .modulation = modulation,
+                .bandwidth = static_cast<HidlFrontendIsdbtBandwidth>(isdbt.bandwidth),
+                .mode = static_cast<HidlFrontendIsdbtMode>(isdbt.mode),
+                .coderate = coderate,
+                .guardInterval = static_cast<HidlFrontendIsdbtGuardInterval>(isdbt.guardInterval),
+                .serviceAreaId = static_cast<uint32_t>(isdbt.serviceAreaId),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbt.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(isdbt.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::dtmb: {
+        const FrontendDtmbSettings& dtmb = aidlSettings.get<FrontendSettings::dtmb>();
+        settingsExt.settingExt.dtmb({
+                .frequency = static_cast<uint32_t>(dtmb.frequency),
+                .transmissionMode =
+                        static_cast<HidlFrontendDtmbTransmissionMode>(dtmb.transmissionMode),
+                .bandwidth = static_cast<HidlFrontendDtmbBandwidth>(dtmb.bandwidth),
+                .modulation = static_cast<HidlFrontendDtmbModulation>(dtmb.modulation),
+                .codeRate = static_cast<HidlFrontendDtmbCodeRate>(dtmb.codeRate),
+                .guardInterval = static_cast<HidlFrontendDtmbGuardInterval>(dtmb.guardInterval),
+                .interleaveMode =
+                        static_cast<HidlFrontendDtmbTimeInterleaveMode>(dtmb.interleaveMode),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dtmb.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dtmb.inversion);
+        break;
+    }
+    default:
+        break;
+    }
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlFrontend.h b/services/tuner/hidl/TunerHidlFrontend.h
new file mode 100644
index 0000000..7ff278c
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFrontend.h
@@ -0,0 +1,124 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLFRONTEND_H
+#define ANDROID_MEDIA_TUNERHIDLFRONTEND_H
+
+#include <aidl/android/hardware/tv/tuner/IFrontendCallback.h>
+#include <aidl/android/media/tv/tuner/BnTunerFrontend.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <android/hardware/tv/tuner/1.1/IFrontend.h>
+#include <android/hardware/tv/tuner/1.1/IFrontendCallback.h>
+#include <utils/Log.h>
+
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsCodeRate;
+using ::aidl::android::hardware::tv::tuner::FrontendEventType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessage;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanType;
+using ::aidl::android::hardware::tv::tuner::FrontendSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusType;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlFrontendAtsc3PlpSettings = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3PlpSettings;
+using HidlFrontendDvbsCodeRate = ::android::hardware::tv::tuner::V1_0::FrontendDvbsCodeRate;
+using HidlFrontendEventType = ::android::hardware::tv::tuner::V1_0::FrontendEventType;
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+using HidlFrontendScanMessage = ::android::hardware::tv::tuner::V1_0::FrontendScanMessage;
+using HidlFrontendScanMessageType = ::android::hardware::tv::tuner::V1_0::FrontendScanMessageType;
+using HidlFrontendSettings = ::android::hardware::tv::tuner::V1_0::FrontendSettings;
+using HidlFrontendStatus = ::android::hardware::tv::tuner::V1_0::FrontendStatus;
+using HidlIFrontend = ::android::hardware::tv::tuner::V1_0::IFrontend;
+using HidlIFrontendCallback = ::android::hardware::tv::tuner::V1_1::IFrontendCallback;
+using HidlFrontendScanMessageExt1_1 =
+        ::android::hardware::tv::tuner::V1_1::FrontendScanMessageExt1_1;
+using HidlFrontendScanMessageTypeExt1_1 =
+        ::android::hardware::tv::tuner::V1_1::FrontendScanMessageTypeExt1_1;
+using HidlFrontendSettingsExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendSettingsExt1_1;
+using HidlFrontendStatusExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendStatusExt1_1;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlFrontend : public BnTunerFrontend {
+public:
+    TunerHidlFrontend(sp<HidlIFrontend> frontend, int id);
+    virtual ~TunerHidlFrontend();
+
+    ::ndk::ScopedAStatus setCallback(
+            const shared_ptr<ITunerFrontendCallback>& in_tunerFrontendCallback) override;
+    ::ndk::ScopedAStatus tune(const FrontendSettings& in_settings) override;
+    ::ndk::ScopedAStatus stopTune() override;
+    ::ndk::ScopedAStatus scan(const FrontendSettings& in_settings,
+                              FrontendScanType in_frontendScanType) override;
+    ::ndk::ScopedAStatus stopScan() override;
+    ::ndk::ScopedAStatus setLnb(const shared_ptr<ITunerLnb>& in_lnb) override;
+    ::ndk::ScopedAStatus linkCiCamToFrontend(int32_t in_ciCamId, int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus unlinkCiCamToFrontend(int32_t in_ciCamId) override;
+    ::ndk::ScopedAStatus close() override;
+    ::ndk::ScopedAStatus getStatus(const vector<FrontendStatusType>& in_statusTypes,
+                                   vector<FrontendStatus>* _aidl_return) override;
+    ::ndk::ScopedAStatus getFrontendId(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getHardwareInfo(std::string* _aidl_return) override;
+
+    void setLna(bool in_bEnable);
+
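+    // Relays HIDL frontend events and scan messages to the AIDL ITunerFrontendCallback.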
+    struct FrontendCallback : public HidlIFrontendCallback {
+        FrontendCallback(const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback)
+              : mTunerFrontendCallback(tunerFrontendCallback) {}
+
+        virtual Return<void> onEvent(HidlFrontendEventType frontendEventType);
+        virtual Return<void> onScanMessage(HidlFrontendScanMessageType type,
+                                           const HidlFrontendScanMessage& message);
+        virtual Return<void> onScanMessageExt1_1(HidlFrontendScanMessageTypeExt1_1 type,
+                                                 const HidlFrontendScanMessageExt1_1& message);
+
+        shared_ptr<ITunerFrontendCallback> mTunerFrontendCallback;
+    };
+
+private:
+    hidl_vec<HidlFrontendAtsc3PlpSettings> getAtsc3PlpSettings(
+            const FrontendAtsc3Settings& settings);
+    HidlFrontendDvbsCodeRate getDvbsCodeRate(const FrontendDvbsCodeRate& codeRate);
+    void getHidlFrontendSettings(const FrontendSettings& aidlSettings,
+                                 HidlFrontendSettings& settings,
+                                 HidlFrontendSettingsExt1_1& settingsExt);
+    void getAidlFrontendStatus(const vector<HidlFrontendStatus>& hidlStatus,
+                               const vector<HidlFrontendStatusExt1_1>& hidlStatusExt,
+                               vector<FrontendStatus>& aidlStatus);
+
+    int mId;
+    sp<HidlIFrontend> mFrontend;
+    sp<::android::hardware::tv::tuner::V1_1::IFrontend> mFrontend_1_1;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLFRONTEND_H
diff --git a/services/tuner/hidl/TunerHidlLnb.cpp b/services/tuner/hidl/TunerHidlLnb.cpp
new file mode 100644
index 0000000..a7e20bb
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlLnb.cpp
@@ -0,0 +1,160 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlLnb"
+
+#include "TunerHidlLnb.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+using ::aidl::android::hardware::tv::tuner::Result;
+using HidlLnbPosition = ::android::hardware::tv::tuner::V1_0::LnbPosition;
+using HidlLnbTone = ::android::hardware::tv::tuner::V1_0::LnbTone;
+using HidlLnbVoltage = ::android::hardware::tv::tuner::V1_0::LnbVoltage;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlLnb::TunerHidlLnb(sp<HidlILnb> lnb, int id) {
+    mLnb = lnb;
+    mId = id;
+}
+
+TunerHidlLnb::~TunerHidlLnb() {
+    mLnb = nullptr;
+    mId = -1;
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setCallback(
+        const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (in_tunerLnbCallback == nullptr) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_ARGUMENT));
+    }
+
+    sp<HidlILnbCallback> lnbCallback = new LnbCallback(in_tunerLnbCallback);
+    HidlResult status = mLnb->setCallback(lnbCallback);
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setVoltage(LnbVoltage in_voltage) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mLnb->setVoltage(static_cast<HidlLnbVoltage>(in_voltage));
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setTone(LnbTone in_tone) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mLnb->setTone(static_cast<HidlLnbTone>(in_tone));
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setSatellitePosition(LnbPosition in_position) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mLnb->setSatellitePosition(static_cast<HidlLnbPosition>(in_position));
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mLnb->sendDiseqcMessage(in_diseqcMessage);
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::close() {
+    if (mLnb == nullptr) {
+        ALOGE("ILnb is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mLnb->close();
+    mLnb = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+/////////////// ILnbCallback ///////////////////////
+Return<void> TunerHidlLnb::LnbCallback::onEvent(const HidlLnbEventType lnbEventType) {
+    if (mTunerLnbCallback != nullptr) {
+        mTunerLnbCallback->onEvent(static_cast<LnbEventType>(lnbEventType));
+    }
+    return Void();
+}
+
+Return<void> TunerHidlLnb::LnbCallback::onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) {
+    if (mTunerLnbCallback != nullptr) {
+        vector<uint8_t> msg(begin(diseqcMessage), end(diseqcMessage));
+        mTunerLnbCallback->onDiseqcMessage(msg);
+    }
+    return Void();
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlLnb.h b/services/tuner/hidl/TunerHidlLnb.h
new file mode 100644
index 0000000..becf848
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlLnb.h
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLLNB_H
+#define ANDROID_MEDIA_TUNERHIDLLNB_H
+
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
+#include <aidl/android/media/tv/tuner/BnTunerLnb.h>
+#include <android/hardware/tv/tuner/1.0/ILnb.h>
+#include <android/hardware/tv/tuner/1.0/ILnbCallback.h>
+#include <utils/Log.h>
+
+using ::aidl::android::hardware::tv::tuner::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::LnbPosition;
+using ::aidl::android::hardware::tv::tuner::LnbTone;
+using ::aidl::android::hardware::tv::tuner::LnbVoltage;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlILnb = ::android::hardware::tv::tuner::V1_0::ILnb;
+using HidlILnbCallback = ::android::hardware::tv::tuner::V1_0::ILnbCallback;
+using HidlLnbEventType = ::android::hardware::tv::tuner::V1_0::LnbEventType;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlLnb : public BnTunerLnb {
+public:
+    TunerHidlLnb(sp<HidlILnb> lnb, int id);
+    virtual ~TunerHidlLnb();
+
+    ::ndk::ScopedAStatus setCallback(
+            const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) override;
+    ::ndk::ScopedAStatus setVoltage(LnbVoltage in_voltage) override;
+    ::ndk::ScopedAStatus setTone(LnbTone in_tone) override;
+    ::ndk::ScopedAStatus setSatellitePosition(LnbPosition in_position) override;
+    ::ndk::ScopedAStatus sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) override;
+    ::ndk::ScopedAStatus close() override;
+
+    int getId() { return mId; }
+
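+    // Relays HIDL LNB events and DiSEqC messages to the AIDL ITunerLnbCallback.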
+    struct LnbCallback : public HidlILnbCallback {
+        LnbCallback(const shared_ptr<ITunerLnbCallback>& tunerLnbCallback)
+              : mTunerLnbCallback(tunerLnbCallback) {}
+
+        virtual Return<void> onEvent(const HidlLnbEventType lnbEventType);
+        virtual Return<void> onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage);
+
+        shared_ptr<ITunerLnbCallback> mTunerLnbCallback;
+    };
+
+private:
+    int mId;
+    sp<HidlILnb> mLnb;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLLNB_H
diff --git a/services/tuner/hidl/TunerHidlService.cpp b/services/tuner/hidl/TunerHidlService.cpp
new file mode 100644
index 0000000..6f55f1e
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlService.cpp
@@ -0,0 +1,711 @@
+/**
+ * Copyright (c) 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TunerHidlService"
+
+#include "TunerHidlService.h"
+
+#include <aidl/android/hardware/tv/tuner/FrontendIsdbtTimeInterleaveMode.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <android/binder_manager.h>
+#include <binder/IPCThreadState.h>
+#include <binder/PermissionCache.h>
+#include <utils/Log.h>
+
+#include "TunerHelper.h"
+#include "TunerHidlDemux.h"
+#include "TunerHidlDescrambler.h"
+#include "TunerHidlFrontend.h"
+#include "TunerHidlLnb.h"
+
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Capabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Capabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendType;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::IPCThreadState;
+using ::android::PermissionCache;
+using ::android::hardware::hidl_vec;
+
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+using HidlLnbId = ::android::hardware::tv::tuner::V1_0::LnbId;
+using HidlFrontendType = ::android::hardware::tv::tuner::V1_1::FrontendType;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+shared_ptr<TunerHidlService> TunerHidlService::sTunerService = nullptr;
+
+TunerHidlService::TunerHidlService() {
+    if (!TunerHelper::checkTunerFeature()) {
+        ALOGD("Device doesn't have tuner hardware.");
+        return;
+    }
+
+    updateTunerResources();
+}
+
+TunerHidlService::~TunerHidlService() {
+    mOpenedFrontends.clear();
+    mLnaStatus = -1;
+}
+
+binder_status_t TunerHidlService::instantiate() {
+    if (HidlITuner::getService() == nullptr) {
+        ALOGD("Failed to get ITuner HIDL HAL");
+        return STATUS_NAME_NOT_FOUND;
+    }
+
+    sTunerService = ::ndk::SharedRefBase::make<TunerHidlService>();
+    return AServiceManager_addService(sTunerService->asBinder().get(), getServiceName());
+}
+
+shared_ptr<TunerHidlService> TunerHidlService::getTunerService() {
+    return sTunerService;
+}
+
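+// Lazily connect to the HIDL ITuner service and probe for the optional 1.1 interface.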
+bool TunerHidlService::hasITuner() {
+    ALOGV("hasITuner");
+    if (mTuner != nullptr) {
+        return true;
+    }
+
+    mTuner = HidlITuner::getService();
+    if (mTuner == nullptr) {
+        ALOGE("Failed to get ITuner service");
+        return false;
+    }
+    mTunerVersion = TUNER_HAL_VERSION_1_0;
+
+    mTuner_1_1 = ::android::hardware::tv::tuner::V1_1::ITuner::castFrom(mTuner);
+    if (mTuner_1_1 != nullptr) {
+        mTunerVersion = TUNER_HAL_VERSION_1_1;
+    } else {
+        ALOGD("Failed to get ITuner_1_1 service");
+    }
+
+    return true;
+}
+
+bool TunerHidlService::hasITuner_1_1() {
+    ALOGV("hasITuner_1_1");
+    hasITuner();
+    return (mTunerVersion == TUNER_HAL_VERSION_1_1);
+}
+
+::ndk::ScopedAStatus TunerHidlService::openDemux(int32_t /* in_demuxHandle */,
+                                                 shared_ptr<ITunerDemux>* _aidl_return) {
+    ALOGV("openDemux");
+    if (!hasITuner()) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = HidlResult::UNKNOWN_ERROR;
+    uint32_t id = 0;
+    sp<IDemux> demuxSp = nullptr;
+    mTuner->openDemux([&](HidlResult r, uint32_t demuxId, const sp<IDemux>& demux) {
+        demuxSp = demux;
+        id = demuxId;
+        res = r;
+        ALOGD("open demux, id = %d", demuxId);
+    });
+    if (res == HidlResult::SUCCESS) {
+        *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDemux>(demuxSp, id);
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    ALOGW("open demux failed, res = %d", res);
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getDemuxCaps(DemuxCapabilities* _aidl_return) {
+    ALOGV("getDemuxCaps");
+    if (!hasITuner()) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res;
+    HidlDemuxCapabilities caps;
+    mTuner->getDemuxCaps([&](HidlResult r, const HidlDemuxCapabilities& demuxCaps) {
+        caps = demuxCaps;
+        res = r;
+    });
+    if (res == HidlResult::SUCCESS) {
+        *_aidl_return = getAidlDemuxCaps(caps);
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    ALOGW("Get demux caps failed, res = %d", res);
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getFrontendIds(vector<int32_t>* ids) {
+    if (!hasITuner()) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    hidl_vec<HidlFrontendId> feIds;
+    HidlResult res = getHidlFrontendIds(feIds);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    ids->resize(feIds.size());
+    copy(feIds.begin(), feIds.end(), ids->begin());
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::getFrontendInfo(int32_t id, FrontendInfo* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("ITuner service is not init.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlFrontendInfo info;
+    HidlResult res = getHidlFrontendInfo(id, info);
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    HidlFrontendDtmbCapabilities dtmbCaps;
+    if (static_cast<HidlFrontendType>(info.type) == HidlFrontendType::DTMB) {
+        if (!hasITuner_1_1()) {
+            ALOGE("ITuner_1_1 service is not init.");
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                    static_cast<int32_t>(Result::UNAVAILABLE));
+        }
+
+        mTuner_1_1->getFrontendDtmbCapabilities(
+                id, [&](HidlResult r, const HidlFrontendDtmbCapabilities& caps) {
+                    dtmbCaps = caps;
+                    res = r;
+                });
+        if (res != HidlResult::SUCCESS) {
+            return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+        }
+    }
+
+    *_aidl_return = getAidlFrontendInfo(info, dtmbCaps);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openFrontend(int32_t frontendHandle,
+                                                    shared_ptr<ITunerFrontend>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("ITuner service is not init.");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    sp<HidlIFrontend> frontend;
+    int id = TunerHelper::getResourceIdFromHandle(frontendHandle, FRONTEND);
+    mTuner->openFrontendById(id, [&](HidlResult result, const sp<HidlIFrontend>& fe) {
+        frontend = fe;
+        status = result;
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    shared_ptr<TunerHidlFrontend> tunerFrontend =
+            ::ndk::SharedRefBase::make<TunerHidlFrontend>(frontend, id);
+    if (mLnaStatus != -1) {
+        tunerFrontend->setLna(mLnaStatus == 1);
+    }
+    {
+        Mutex::Autolock _l(mOpenedFrontendsLock);
+        mOpenedFrontends.insert(tunerFrontend);
+    }
+    *_aidl_return = tunerFrontend;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    sp<HidlILnb> lnb;
+    int id = TunerHelper::getResourceIdFromHandle(lnbHandle, LNB);
+    mTuner->openLnbById(id, [&](HidlResult result, const sp<HidlILnb>& lnbSp) {
+        lnb = lnbSp;
+        status = result;
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlLnb>(lnb, id);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openLnbByName(const string& lnbName,
+                                                     shared_ptr<ITunerLnb>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    int lnbId = -1;
+    HidlResult status = HidlResult::UNKNOWN_ERROR;
+    sp<HidlILnb> lnb;
+    mTuner->openLnbByName(lnbName, [&](HidlResult r, HidlLnbId id, const sp<HidlILnb>& lnbSp) {
+        status = r;
+        lnb = lnbSp;
+        lnbId = static_cast<int32_t>(id);
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlLnb>(lnb, lnbId);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openDescrambler(
+        int32_t /*descramblerHandle*/, shared_ptr<ITunerDescrambler>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGD("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    sp<HidlIDescrambler> descrambler;
+    //int id = TunerHelper::getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
+    mTuner->openDescrambler([&](HidlResult r, const sp<HidlIDescrambler>& descramblerSp) {
+        status = r;
+        descrambler = descramblerSp;
+    });
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+
+    *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDescrambler>(descrambler);
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::getTunerHalVersion(int* _aidl_return) {
+    hasITuner();
+    *_aidl_return = mTunerVersion;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openSharedFilter(
+        const string& in_filterToken, const shared_ptr<ITunerFilterCallback>& in_cb,
+        shared_ptr<ITunerFilter>* _aidl_return) {
+    if (!hasITuner()) {
+        ALOGE("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (!PermissionCache::checkCallingPermission(sSharedFilterPermission)) {
+        ALOGE("Request requires android.permission.ACCESS_TV_SHARED_FILTER");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    Mutex::Autolock _l(mSharedFiltersLock);
+    if (mSharedFilters.find(in_filterToken) == mSharedFilters.end()) {
+        *_aidl_return = nullptr;
+        ALOGD("fail to find %s", in_filterToken.c_str());
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    shared_ptr<TunerHidlFilter> filter = mSharedFilters.at(in_filterToken);
+    IPCThreadState* ipc = IPCThreadState::self();
+    const int pid = ipc->getCallingPid();
+    if (!filter->isSharedFilterAllowed(pid)) {
+        *_aidl_return = nullptr;
+        ALOGD("shared filter %s is opened in the same process", in_filterToken.c_str());
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    filter->attachSharedFilterCallback(in_cb);
+
+    *_aidl_return = filter;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::setLna(bool bEnable) {
+    if (!hasITuner()) {
+        ALOGE("get ITuner failed");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    mLnaStatus = bEnable ? 1 : 0;
+
+    {
+        Mutex::Autolock _l(mOpenedFrontendsLock);
+        for (auto it = mOpenedFrontends.begin(); it != mOpenedFrontends.end(); ++it) {
+            (*it)->setLna(mLnaStatus == 1);
+        }
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::setMaxNumberOfFrontends(FrontendType /* in_frontendType */,
+                                                               int32_t /* in_maxNumber */) {
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getMaxNumberOfFrontends(FrontendType /* in_frontendType */,
+                                                               int32_t* _aidl_return) {
+    *_aidl_return = -1;
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+string TunerHidlService::addFilterToShared(const shared_ptr<TunerHidlFilter>& sharedFilter) {
+    Mutex::Autolock _l(mSharedFiltersLock);
+
+    // Use sharedFilter address as token.
+    string token = to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get()));
+    mSharedFilters[token] = sharedFilter;
+
+    return token;
+}
+
+void TunerHidlService::removeSharedFilter(const shared_ptr<TunerHidlFilter>& sharedFilter) {
+    Mutex::Autolock _l(mSharedFiltersLock);
+
+    // Use sharedFilter address as token.
+    mSharedFilters.erase(to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get())));
+}
+
+void TunerHidlService::removeFrontend(const shared_ptr<TunerHidlFrontend>& frontend) {
+    Mutex::Autolock _l(mOpenedFrontendsLock);
+    for (auto it = mOpenedFrontends.begin(); it != mOpenedFrontends.end(); ++it) {
+        if (it->get() == frontend.get()) {
+            mOpenedFrontends.erase(it);
+            break;
+        }
+    }
+}
+
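+// Report the HAL's frontend and LNB inventory to the Tuner Resource Manager.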
+void TunerHidlService::updateTunerResources() {
+    if (!hasITuner()) {
+        ALOGE("Failed to updateTunerResources");
+        return;
+    }
+
+    TunerHelper::updateTunerResources(getTRMFrontendInfos(), getTRMLnbHandles());
+}
+
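+// Collect per-frontend handle, type, and exclusive group id entries for the resource manager.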
+vector<TunerFrontendInfo> TunerHidlService::getTRMFrontendInfos() {
+    vector<TunerFrontendInfo> infos;
+    hidl_vec<HidlFrontendId> ids;
+    HidlResult res = getHidlFrontendIds(ids);
+    if (res != HidlResult::SUCCESS) {
+        return infos;
+    }
+
+    for (int i = 0; i < ids.size(); i++) {
+        HidlFrontendInfo frontendInfo;
+        HidlResult res = getHidlFrontendInfo(static_cast<int32_t>(ids[i]), frontendInfo);
+        if (res != HidlResult::SUCCESS) {
+            continue;
+        }
+        TunerFrontendInfo tunerFrontendInfo{
+                .handle = TunerHelper::getResourceHandleFromId(static_cast<int32_t>(ids[i]),
+                                                               FRONTEND),
+                .type = static_cast<int32_t>(frontendInfo.type),
+                .exclusiveGroupId = static_cast<int32_t>(frontendInfo.exclusiveGroupId),
+        };
+        infos.push_back(tunerFrontendInfo);
+    }
+
+    return infos;
+}
+
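+// Collect resource handles for every LNB reported by the HAL.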
+vector<int32_t> TunerHidlService::getTRMLnbHandles() {
+    vector<int32_t> lnbHandles;
+    if (mTuner != nullptr) {
+        HidlResult res;
+        vector<HidlLnbId> lnbIds;
+        mTuner->getLnbIds([&](HidlResult r, const hidl_vec<HidlLnbId>& ids) {
+            lnbIds = ids;
+            res = r;
+        });
+        if (res == HidlResult::SUCCESS && lnbIds.size() > 0) {
+            for (int i = 0; i < lnbIds.size(); i++) {
+                lnbHandles.push_back(
+                        TunerHelper::getResourceHandleFromId(static_cast<int32_t>(lnbIds[i]), LNB));
+            }
+        }
+    }
+
+    return lnbHandles;
+}
+
+HidlResult TunerHidlService::getHidlFrontendIds(hidl_vec<HidlFrontendId>& ids) {
+    if (mTuner == nullptr) {
+        return HidlResult::NOT_INITIALIZED;
+    }
+    HidlResult res;
+    mTuner->getFrontendIds([&](HidlResult r, const hidl_vec<HidlFrontendId>& frontendIds) {
+        ids = frontendIds;
+        res = r;
+    });
+    return res;
+}
+
+HidlResult TunerHidlService::getHidlFrontendInfo(const int id, HidlFrontendInfo& info) {
+    if (mTuner == nullptr) {
+        return HidlResult::NOT_INITIALIZED;
+    }
+    HidlResult res;
+    mTuner->getFrontendInfo(id, [&](HidlResult r, const HidlFrontendInfo& feInfo) {
+        info = feInfo;
+        res = r;
+    });
+    return res;
+}
+
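+// Map HIDL demux capabilities onto the AIDL DemuxCapabilities parcelable.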
+DemuxCapabilities TunerHidlService::getAidlDemuxCaps(const HidlDemuxCapabilities& caps) {
+    DemuxCapabilities aidlCaps{
+            .numDemux = static_cast<int32_t>(caps.numDemux),
+            .numRecord = static_cast<int32_t>(caps.numRecord),
+            .numPlayback = static_cast<int32_t>(caps.numPlayback),
+            .numTsFilter = static_cast<int32_t>(caps.numTsFilter),
+            .numSectionFilter = static_cast<int32_t>(caps.numSectionFilter),
+            .numAudioFilter = static_cast<int32_t>(caps.numAudioFilter),
+            .numVideoFilter = static_cast<int32_t>(caps.numVideoFilter),
+            .numPesFilter = static_cast<int32_t>(caps.numPesFilter),
+            .numPcrFilter = static_cast<int32_t>(caps.numPcrFilter),
+            .numBytesInSectionFilter = static_cast<int64_t>(caps.numBytesInSectionFilter),
+            .filterCaps = static_cast<int32_t>(caps.filterCaps),
+            .bTimeFilter = caps.bTimeFilter,
+    };
+    aidlCaps.linkCaps.resize(caps.linkCaps.size());
+    copy(caps.linkCaps.begin(), caps.linkCaps.end(), aidlCaps.linkCaps.begin());
+    return aidlCaps;
+}
+
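+// Build an AIDL FrontendInfo from the HIDL info, choosing the capability union member by frontend type.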
+FrontendInfo TunerHidlService::getAidlFrontendInfo(
+        const HidlFrontendInfo& halInfo, const HidlFrontendDtmbCapabilities& halDtmbCaps) {
+    FrontendInfo info{
+            .type = static_cast<FrontendType>(halInfo.type),
+            .minFrequency = static_cast<int64_t>(halInfo.minFrequency),
+            .maxFrequency = static_cast<int64_t>(halInfo.maxFrequency),
+            .minSymbolRate = static_cast<int32_t>(halInfo.minSymbolRate),
+            .maxSymbolRate = static_cast<int32_t>(halInfo.maxSymbolRate),
+            .acquireRange = static_cast<int64_t>(halInfo.acquireRange),
+            .exclusiveGroupId = static_cast<int32_t>(halInfo.exclusiveGroupId),
+    };
+    for (int i = 0; i < halInfo.statusCaps.size(); i++) {
+        info.statusCaps.push_back(static_cast<FrontendStatusType>(halInfo.statusCaps[i]));
+    }
+
+    FrontendCapabilities caps;
+    switch (halInfo.type) {
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ANALOG: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendAnalogCapabilities analogCaps{
+                    .typeCap = static_cast<int32_t>(halInfo.frontendCaps.analogCaps().typeCap),
+                    .sifStandardCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.analogCaps().sifStandardCap),
+            };
+            caps.set<FrontendCapabilities::analogCaps>(analogCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ATSC: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendAtscCapabilities atscCaps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.atscCaps().modulationCap),
+            };
+            caps.set<FrontendCapabilities::atscCaps>(atscCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ATSC3: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendAtsc3Capabilities atsc3Caps{
+                    .bandwidthCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().bandwidthCap),
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().modulationCap),
+                    .timeInterleaveModeCap = static_cast<int32_t>(
+                            halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap),
+                    .codeRateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().codeRateCap),
+                    .demodOutputFormatCap = static_cast<int8_t>(
+                            halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap),
+                    .fecCap = static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().fecCap),
+            };
+            caps.set<FrontendCapabilities::atsc3Caps>(atsc3Caps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBC: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendDvbcCapabilities dvbcCaps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbcCaps().modulationCap),
+                    .fecCap = static_cast<int64_t>(halInfo.frontendCaps.dvbcCaps().fecCap),
+                    .annexCap = static_cast<int8_t>(halInfo.frontendCaps.dvbcCaps().annexCap),
+            };
+            caps.set<FrontendCapabilities::dvbcCaps>(dvbcCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBS: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendDvbsCapabilities dvbsCaps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbsCaps().modulationCap),
+                    .innerfecCap =
+                            static_cast<int64_t>(halInfo.frontendCaps.dvbsCaps().innerfecCap),
+                    .standard = static_cast<int8_t>(halInfo.frontendCaps.dvbsCaps().standard),
+            };
+            caps.set<FrontendCapabilities::dvbsCaps>(dvbsCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBT: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendDvbtCapabilities dvbtCaps{
+                    .transmissionModeCap = static_cast<int32_t>(
+                            halInfo.frontendCaps.dvbtCaps().transmissionModeCap),
+                    .bandwidthCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().bandwidthCap),
+                    .constellationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().constellationCap),
+                    .coderateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().coderateCap),
+                    .hierarchyCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().hierarchyCap),
+                    .guardIntervalCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().guardIntervalCap),
+                    .isT2Supported = halInfo.frontendCaps.dvbtCaps().isT2Supported,
+                    .isMisoSupported = halInfo.frontendCaps.dvbtCaps().isMisoSupported,
+            };
+            caps.set<FrontendCapabilities::dvbtCaps>(dvbtCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBS: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendIsdbsCapabilities isdbsCaps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbsCaps().modulationCap),
+                    .coderateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbsCaps().coderateCap),
+            };
+            caps.set<FrontendCapabilities::isdbsCaps>(isdbsCaps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBS3: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendIsdbs3Capabilities isdbs3Caps{
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbs3Caps().modulationCap),
+                    .coderateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbs3Caps().coderateCap),
+            };
+            caps.set<FrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+        }
+        break;
+    }
+    case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBT: {
+        if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps ==
+            halInfo.frontendCaps.getDiscriminator()) {
+            FrontendIsdbtCapabilities isdbtCaps{
+                    .modeCap = static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().modeCap),
+                    .bandwidthCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().bandwidthCap),
+                    .modulationCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().modulationCap),
+                    .coderateCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().coderateCap),
+                    .guardIntervalCap =
+                            static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().guardIntervalCap),
+                    .timeInterleaveCap =
+                            static_cast<int32_t>(FrontendIsdbtTimeInterleaveMode::UNDEFINED),
+                    .isSegmentAuto = false,
+                    .isFullSegment = false,
+            };
+            caps.set<FrontendCapabilities::isdbtCaps>(isdbtCaps);
+        }
+        break;
+    }
+    default: {
+        if (static_cast<HidlFrontendType>(info.type) == HidlFrontendType::DTMB) {
+            FrontendDtmbCapabilities dtmbCaps{
+                    .transmissionModeCap = static_cast<int32_t>(halDtmbCaps.transmissionModeCap),
+                    .bandwidthCap = static_cast<int32_t>(halDtmbCaps.bandwidthCap),
+                    .modulationCap = static_cast<int32_t>(halDtmbCaps.modulationCap),
+                    .codeRateCap = static_cast<int32_t>(halDtmbCaps.codeRateCap),
+                    .guardIntervalCap = static_cast<int32_t>(halDtmbCaps.guardIntervalCap),
+                    .interleaveModeCap = static_cast<int32_t>(halDtmbCaps.interleaveModeCap),
+            };
+            caps.set<FrontendCapabilities::dtmbCaps>(dtmbCaps);
+        }
+        break;
+    }
+    }
+
+    info.frontendCaps = caps;
+    return info;
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlService.h b/services/tuner/hidl/TunerHidlService.h
new file mode 100644
index 0000000..2252d35
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlService.h
@@ -0,0 +1,134 @@
+/**
+ * Copyright (c) 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLSERVICE_H
+#define ANDROID_MEDIA_TUNERHIDLSERVICE_H
+
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/media/tv/tuner/BnTunerService.h>
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <android/hardware/tv/tuner/1.1/ITuner.h>
+#include <utils/Mutex.h>
+
+#include <map>
+#include <unordered_set>
+
+#include "TunerHelper.h"
+#include "TunerHidlFilter.h"
+#include "TunerHidlFrontend.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxCapabilities;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendType;
+using ::aidl::android::media::tv::tuner::ITunerDemux;
+using ::aidl::android::media::tv::tuner::ITunerDescrambler;
+using ::aidl::android::media::tv::tuner::ITunerFrontend;
+using ::aidl::android::media::tv::tuner::ITunerLnb;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::Mutex;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::map;
+using ::std::shared_ptr;
+using ::std::string;
+using ::std::unordered_set;
+using ::std::vector;
+
+using HidlFrontendDtmbCapabilities = ::android::hardware::tv::tuner::V1_1::FrontendDtmbCapabilities;
+using HidlDemuxFilterEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
+using HidlDemuxFilterStatus = ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
+using HidlDemuxCapabilities = ::android::hardware::tv::tuner::V1_0::DemuxCapabilities;
+using HidlFrontendInfo = ::android::hardware::tv::tuner::V1_0::FrontendInfo;
+using HidlITuner = ::android::hardware::tv::tuner::V1_0::ITuner;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlService : public BnTunerService {
+public:
+    static char const* getServiceName() { return "media.tuner"; }
+    static binder_status_t instantiate();
+    TunerHidlService();
+    virtual ~TunerHidlService();
+
+    ::ndk::ScopedAStatus getFrontendIds(vector<int32_t>* out_ids) override;
+    ::ndk::ScopedAStatus getFrontendInfo(int32_t in_frontendHandle,
+                                         FrontendInfo* _aidl_return) override;
+    ::ndk::ScopedAStatus openFrontend(int32_t in_frontendHandle,
+                                      shared_ptr<ITunerFrontend>* _aidl_return) override;
+    ::ndk::ScopedAStatus openLnb(int32_t in_lnbHandle,
+                                 shared_ptr<ITunerLnb>* _aidl_return) override;
+    ::ndk::ScopedAStatus openLnbByName(const std::string& in_lnbName,
+                                       shared_ptr<ITunerLnb>* _aidl_return) override;
+    ::ndk::ScopedAStatus openDemux(int32_t in_demuxHandle,
+                                   shared_ptr<ITunerDemux>* _aidl_return) override;
+    ::ndk::ScopedAStatus getDemuxCaps(DemuxCapabilities* _aidl_return) override;
+    ::ndk::ScopedAStatus openDescrambler(int32_t in_descramblerHandle,
+                                         shared_ptr<ITunerDescrambler>* _aidl_return) override;
+    ::ndk::ScopedAStatus getTunerHalVersion(int32_t* _aidl_return) override;
+    ::ndk::ScopedAStatus openSharedFilter(const string& in_filterToken,
+                                          const shared_ptr<ITunerFilterCallback>& in_cb,
+                                          shared_ptr<ITunerFilter>* _aidl_return) override;
+    ::ndk::ScopedAStatus setLna(bool in_bEnable) override;
+    ::ndk::ScopedAStatus setMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                 int32_t in_maxNumber) override;
+    ::ndk::ScopedAStatus getMaxNumberOfFrontends(FrontendType in_frontendType,
+                                                 int32_t* _aidl_return) override;
+
+    string addFilterToShared(const shared_ptr<TunerHidlFilter>& sharedFilter);
+    void removeSharedFilter(const shared_ptr<TunerHidlFilter>& sharedFilter);
+    void removeFrontend(const shared_ptr<TunerHidlFrontend>& frontend);
+
+    static shared_ptr<TunerHidlService> getTunerService();
+
+private:
+    bool hasITuner();
+    bool hasITuner_1_1();
+    void updateTunerResources();
+    vector<TunerFrontendInfo> getTRMFrontendInfos();
+    vector<int32_t> getTRMLnbHandles();
+    HidlResult getHidlFrontendIds(hidl_vec<HidlFrontendId>& ids);
+    HidlResult getHidlFrontendInfo(const int id, HidlFrontendInfo& info);
+    DemuxCapabilities getAidlDemuxCaps(const HidlDemuxCapabilities& caps);
+    FrontendInfo getAidlFrontendInfo(const HidlFrontendInfo& halInfo,
+                                     const HidlFrontendDtmbCapabilities& dtmbCaps);
+
+    sp<HidlITuner> mTuner;
+    sp<::android::hardware::tv::tuner::V1_1::ITuner> mTuner_1_1;
+    int mTunerVersion = TUNER_HAL_VERSION_UNKNOWN;
+    Mutex mSharedFiltersLock;
+    map<string, shared_ptr<TunerHidlFilter>> mSharedFilters;
+    Mutex mOpenedFrontendsLock;
+    unordered_set<shared_ptr<TunerHidlFrontend>> mOpenedFrontends;
+    int mLnaStatus = -1;
+
+    static shared_ptr<TunerHidlService> sTunerService;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLSERVICE_H
diff --git a/services/tuner/hidl/TunerHidlTimeFilter.cpp b/services/tuner/hidl/TunerHidlTimeFilter.cpp
new file mode 100644
index 0000000..d0606d6
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlTimeFilter.cpp
@@ -0,0 +1,133 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlTimeFilter"
+
+#include "TunerHidlTimeFilter.h"
+
+#include <aidl/android/hardware/tv/tuner/Constant64Bit.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+using ::aidl::android::hardware::tv::tuner::Constant64Bit;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlTimeFilter::TunerHidlTimeFilter(sp<HidlITimeFilter> timeFilter) {
+    mTimeFilter = timeFilter;
+}
+
+TunerHidlTimeFilter::~TunerHidlTimeFilter() {
+    mTimeFilter = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::setTimeStamp(int64_t timeStamp) {
+    if (mTimeFilter == nullptr) {
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mTimeFilter->setTimeStamp(static_cast<uint64_t>(timeStamp));
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::clearTimeStamp() {
+    if (mTimeFilter == nullptr) {
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status = mTimeFilter->clearTimeStamp();
+    if (status != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::getSourceTime(int64_t* _aidl_return) {
+    if (mTimeFilter == nullptr) {
+        *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    mTimeFilter->getSourceTime([&](HidlResult r, uint64_t t) {
+        status = r;
+        *_aidl_return = static_cast<int64_t>(t);
+    });
+    if (status != HidlResult::SUCCESS) {
+        *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::getTimeStamp(int64_t* _aidl_return) {
+    if (mTimeFilter == nullptr) {
+        *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult status;
+    mTimeFilter->getTimeStamp([&](HidlResult r, uint64_t t) {
+        status = r;
+        *_aidl_return = static_cast<int64_t>(t);
+    });
+    if (status != HidlResult::SUCCESS) {
+        *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::close() {
+    if (mTimeFilter == nullptr) {
+        ALOGE("ITimeFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    HidlResult res = mTimeFilter->close();
+    mTimeFilter = nullptr;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+    return ::ndk::ScopedAStatus::ok();
+}
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlTimeFilter.h b/services/tuner/hidl/TunerHidlTimeFilter.h
new file mode 100644
index 0000000..78f9c5e
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlTimeFilter.h
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
+#define ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
+
+#include <aidl/android/media/tv/tuner/BnTunerTimeFilter.h>
+#include <android/hardware/tv/tuner/1.0/ITimeFilter.h>
+#include <utils/Log.h>
+
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+using HidlITimeFilter = ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlTimeFilter : public BnTunerTimeFilter {
+public:
+    TunerHidlTimeFilter(sp<HidlITimeFilter> timeFilter);
+    virtual ~TunerHidlTimeFilter();
+
+    ::ndk::ScopedAStatus setTimeStamp(int64_t in_timeStamp) override;
+    ::ndk::ScopedAStatus clearTimeStamp() override;
+    ::ndk::ScopedAStatus getSourceTime(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus getTimeStamp(int64_t* _aidl_return) override;
+    ::ndk::ScopedAStatus close() override;
+
+private:
+    sp<HidlITimeFilter> mTimeFilter;
+};
+
+}  // namespace tuner
+}  // namespace tv
+}  // namespace media
+}  // namespace android
+}  // namespace aidl
+
+#endif  // ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
diff --git a/services/tuner/main_tunerservice.cpp b/services/tuner/main_tunerservice.cpp
index 586a0e2..a014dea 100644
--- a/services/tuner/main_tunerservice.cpp
+++ b/services/tuner/main_tunerservice.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,30 +14,33 @@
  * limitations under the License.
  */
 
-#include <utils/Log.h>
+#include <android-base/logging.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
-#include <binder/ProcessState.h>
-#include <hidl/HidlTransportSupport.h>
+#include <utils/Log.h>
 
 #include "TunerService.h"
+#include "hidl/TunerHidlService.h"
+
+using ::aidl::android::media::tv::tuner::TunerHidlService;
+using ::aidl::android::media::tv::tuner::TunerService;
 
 using namespace android;
 
-int main(int argc __unused, char** argv) {
+int main() {
     ALOGD("Tuner service starting");
 
-    strcpy(argv[0], "media.tuner");
     sp<ProcessState> proc(ProcessState::self());
     sp<IServiceManager> sm = defaultServiceManager();
-    ALOGD("ServiceManager: %p", sm.get());
 
-    binder_status_t status = TunerService::instantiate();
+    // Check the legacy HIDL HAL first. If it does not exist, fall back to the AIDL HAL.
+    binder_status_t status = TunerHidlService::instantiate();
     if (status != STATUS_OK) {
-        ALOGD("Failed to add tuner service as AIDL interface");
-        return -1;
+        status = TunerService::instantiate();
+        CHECK(status == STATUS_OK);
     }
 
     ProcessState::self()->startThreadPool();
     IPCThreadState::self()->joinThreadPool();
+    return EXIT_FAILURE;  // should never be reached
 }
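
With the fallback above, whether the HIDL wrapper or the native AIDL implementation ends up behind the "media.tuner" name is only decided at runtime. A minimal client-side sketch, hypothetical and outside this patch, that resolves the service and queries getTunerHalVersion() (declared on ITunerService, see TunerHidlService.h above), assuming the libbinder_ndk service-manager helpers:

#include <aidl/android/media/tv/tuner/ITunerService.h>
#include <android/binder_manager.h>

#include <memory>

using ::aidl::android::media::tv::tuner::ITunerService;

// Resolves whichever backend (HIDL wrapper or AIDL) registered "media.tuner"
// and returns the Tuner HAL version it reports, or -1 on failure.
int32_t queryTunerHalVersion() {
    ::ndk::SpAIBinder binder(AServiceManager_waitForService("media.tuner"));
    std::shared_ptr<ITunerService> tuner = ITunerService::fromBinder(binder);
    int32_t version = -1;
    if (tuner == nullptr || !tuner->getTunerHalVersion(&version).isOk()) {
        return -1;
    }
    return version;
}
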
diff --git a/services/tuner/mediatuner.rc b/services/tuner/mediatuner.rc
index fd30618..6a3e199 100644
--- a/services/tuner/mediatuner.rc
+++ b/services/tuner/mediatuner.rc
@@ -2,4 +2,7 @@
     class main
     group media
     ioprio rt 4
+    onrestart restart vendor.tuner-hal-1-0
+    onrestart restart vendor.tuner-hal-1-1
+    onrestart restart vendor.tuner-default
     task_profiles ProcessCapacityHigh HighPerformance
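
The three onrestart lines assume matching init services are declared for the Tuner HAL; the exact entries are vendor- and version-specific. A rough, hypothetical sketch of what the default implementation's init entry might look like (binary path, user and groups are assumptions, not taken from this patch):

# Hypothetical vendor init entry; actual path, user and groups are device-specific.
service vendor.tuner-default /vendor/bin/hw/android.hardware.tv.tuner-service.example
    class hal
    user media
    group media
    ioprio rt 4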