Merge "Change docs to include aaudio/Aaudio.h"
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..a7614d2
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,13 @@
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Left
+TabWidth: 4
+UseTab: Never
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 03439fd..24c9108 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -68,6 +68,9 @@
         unavailablePhysicalIds16.push_back(String16(id8));
     }
     res = parcel->writeString16Vector(unavailablePhysicalIds16);
+    if (res != OK) return res;
+
+    res = parcel->writeString16(String16(clientPackage));
     return res;
 }
 
@@ -86,6 +89,12 @@
     for (auto& id16 : unavailablePhysicalIds16) {
         unavailablePhysicalIds.push_back(String8(id16));
     }
+
+    String16 tempClientPackage;
+    res = parcel->readString16(&tempClientPackage);
+    if (res != OK) return res;
+    clientPackage = String8(tempClientPackage);
+
     return res;
 }
 
diff --git a/camera/OWNERS b/camera/OWNERS
index d6b95da..a4ab616 100644
--- a/camera/OWNERS
+++ b/camera/OWNERS
@@ -1,8 +1,8 @@
+
+# Bug component: 41727
+arakesh@google.com
 epeev@google.com
 etalvala@google.com
 jchowdhary@google.com
 shuzhenwang@google.com
-yinchiayeh@google.com
-# backup owner
-cychen@google.com
-zhijunhe@google.com
+ruchamk@google.com
diff --git a/camera/include/camera/CameraBase.h b/camera/include/camera/CameraBase.h
index e156994..8e53968 100644
--- a/camera/include/camera/CameraBase.h
+++ b/camera/include/camera/CameraBase.h
@@ -85,11 +85,17 @@
      */
     std::vector<String8> unavailablePhysicalIds;
 
+    /**
+     * Client package name if the camera is open; otherwise not applicable.
+     */
+    String8 clientPackage;
+
     virtual status_t writeToParcel(android::Parcel* parcel) const;
     virtual status_t readFromParcel(const android::Parcel* parcel);
 
-    CameraStatus(String8 id, int32_t s, const std::vector<String8>& unavailSubIds) :
-            cameraId(id), status(s), unavailablePhysicalIds(unavailSubIds) {}
+    CameraStatus(String8 id, int32_t s, const std::vector<String8>& unavailSubIds,
+            const String8& clientPkg) : cameraId(id), status(s),
+            unavailablePhysicalIds(unavailSubIds), clientPackage(clientPkg) {}
     CameraStatus() : status(ICameraServiceListener::STATUS_PRESENT) {}
 };
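
A minimal usage sketch for the extended CameraStatus (not part of the change itself): it constructs a status with the new clientPackage argument and round-trips it through a Parcel. The camera id, status constant, and package name are placeholders, and the android / android::hardware namespaces are assumed from CameraBase.h.

    #include <camera/CameraBase.h>
    #include <binder/Parcel.h>

    using namespace android;
    using namespace android::hardware;

    // Hypothetical round trip exercising the new clientPackage field.
    static void cameraStatusParcelRoundTrip() {
        CameraStatus status(String8("0"), ICameraServiceListener::STATUS_PRESENT,
                            /*unavailSubIds=*/{}, String8("com.example.camera"));

        Parcel parcel;
        if (status.writeToParcel(&parcel) != OK) return;

        parcel.setDataPosition(0);
        CameraStatus restored;
        if (restored.readFromParcel(&parcel) == OK) {
            // restored.clientPackage now holds "com.example.camera".
        }
    }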
 
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index ccbfaa9..da887a2 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -139,6 +139,8 @@
             return !(*this == other);
         }
         bool operator < (const Callback& other) const {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wordered-compare-function-pointers"
             if (*this == other) return false;
             if (mContext != other.mContext) return mContext < other.mContext;
             if (mPhysicalCamAvailable != other.mPhysicalCamAvailable) {
@@ -152,6 +154,7 @@
             }
             if (mAvailable != other.mAvailable) return mAvailable < other.mAvailable;
             return mUnavailable < other.mUnavailable;
+#pragma GCC diagnostic pop
         }
         bool operator > (const Callback& other) const {
             return (*this != other && !(*this < other));
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 86781e5..c353b2d 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -4578,6 +4578,25 @@
      *
      * <p>Also defines the direction of rolling shutter readout, which is from top to bottom in
      * the sensor's coordinate system.</p>
+     * <p>Starting with Android API level 32, camera clients that query the orientation via
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#get">CameraCharacteristics#get</a> on foldable devices that
+     * include logical cameras may receive a value that changes dynamically with the
+     * device/fold state.
+     * Clients are advised not to cache or store the orientation value of such logical sensors.
+     * If repeated queries to CameraCharacteristics are undesirable, clients can
+     * also access the entire mapping from device state to sensor orientation in
+     * <a href="https://developer.android.com/reference/android/hardware/camera2/params/DeviceStateOrientationMap.html">DeviceStateOrientationMap</a>.
+     * Note that a dynamically changing sensor orientation value in the camera characteristics
+     * is not the best way to establish the orientation of a particular frame. Clients that
+     * want to know the sensor orientation of a captured frame should query
+     * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID from the corresponding capture result
+     * and check the orientation reported by that physical camera.</p>
+     * <p>Native camera clients must query ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS for the
+     * mapping between device state and camera sensor orientation. Dynamic updates to the
+     * sensor orientation are not supported in this code path.</p>
+     *
+     * @see ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS
+     * @see ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID
      */
     ACAMERA_SENSOR_ORIENTATION =                                // int32
             ACAMERA_SENSOR_START + 14,
@@ -6284,6 +6303,21 @@
      */
     ACAMERA_INFO_VERSION =                                      // byte
             ACAMERA_INFO_START + 1,
+    /**
+     *
+     * <p>Type: int64[2*n]</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>The HAL must populate the array with
+     * (hardware::camera::provider::V2_5::DeviceState, sensorOrientation) pairs for each
+     * supported device state bitwise combination.</p>
+     */
+    ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS =                    // int64[2*n]
+            ACAMERA_INFO_START + 3,
     ACAMERA_INFO_END,
 
     /**
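
As a client-side sketch of the ACAMERA_SENSOR_ORIENTATION and ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS documentation above (assumptions: the camera id comes from ACameraManager_getCameraIdList and error handling is kept minimal):

    #include <inttypes.h>
    #include <stdio.h>

    #include <camera/NdkCameraManager.h>
    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Sketch only: dump the orientation information for one camera id.
    static void dumpSensorOrientation(ACameraManager* manager, const char* cameraId) {
        ACameraMetadata* chars = nullptr;
        if (ACameraManager_getCameraCharacteristics(manager, cameraId, &chars) != ACAMERA_OK) {
            return;
        }

        ACameraMetadata_const_entry entry = {};
        if (ACameraMetadata_getConstEntry(chars, ACAMERA_SENSOR_ORIENTATION, &entry) == ACAMERA_OK) {
            printf("static sensor orientation: %d\n", entry.data.i32[0]);
        }

        // Pairs of (hardware::camera::provider::V2_5::DeviceState, sensorOrientation).
        if (ACameraMetadata_getConstEntry(chars, ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS,
                                          &entry) == ACAMERA_OK) {
            for (uint32_t i = 0; i + 1 < entry.count; i += 2) {
                printf("device state 0x%" PRIx64 " -> orientation %" PRId64 "\n",
                       (uint64_t)entry.data.i64[i], entry.data.i64[i + 1]);
            }
        }

        ACameraMetadata_free(chars);
    }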
diff --git a/camera/ndk/ndk_vendor/impl/ACameraManager.h b/camera/ndk/ndk_vendor/impl/ACameraManager.h
index 8359bb1..4663529 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraManager.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraManager.h
@@ -136,6 +136,8 @@
             return !(*this == other);
         }
         bool operator < (const Callback& other) const {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wordered-compare-function-pointers"
             if (*this == other) return false;
             if (mContext != other.mContext) return mContext < other.mContext;
             if (mAvailable != other.mAvailable) return mAvailable < other.mAvailable;
@@ -146,6 +148,7 @@
             if (mPhysicalCamUnavailable != other.mPhysicalCamUnavailable)
                     return mPhysicalCamUnavailable < other.mPhysicalCamUnavailable;
             return mUnavailable < other.mUnavailable;
+#pragma GCC diagnostic pop
         }
         bool operator > (const Callback& other) const {
             return (*this != other && !(*this < other));
diff --git a/cmds/screenrecord/Android.bp b/cmds/screenrecord/Android.bp
index 359a835..d0b3ce0 100644
--- a/cmds/screenrecord/Android.bp
+++ b/cmds/screenrecord/Android.bp
@@ -55,12 +55,6 @@
         "libGLESv2",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/av/media/libstagefright/include",
-        "frameworks/native/include/media/openmax",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index 87becb6..fd4ef95 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -1,19 +1,19 @@
 {
   "presubmit": [
     {
-      "name": "CtsMediaTestCases",
+      "name": "CtsMediaDrmTestCases",
       "options" : [
         {
           "include-annotation": "android.platform.test.annotations.Presubmit"
         },
         {
-          "include-filter": "android.media.cts.MediaDrmClearkeyTest"
+          "include-filter": "android.mediadrm.cts.MediaDrmClearkeyTest"
         },
         {
-          "include-filter": "android.media.cts.MediaDrmMetricsTest"
+          "include-filter": "android.mediadrm.cts.MediaDrmMetricsTest"
         },
         {
-          "include-filter": "android.media.cts.NativeMediaDrmClearkeyTest"
+          "include-filter": "android.mediadrm.cts.NativeMediaDrmClearkeyTest"
         }
       ]
     }
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 200e92d..bd6db55 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -53,6 +53,83 @@
 //EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
 #define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 // assuming up to a maximum of 20 seconds of offloaded
 
+// for audio_track_cblk_t::mState, to match TrackBase.h
+static inline constexpr int CBLK_STATE_IDLE = 0;
+static inline constexpr int CBLK_STATE_PAUSING = 7;
+
+/**
+ * MirroredVariable is a local variable that simultaneously updates
+ * a mirrored storage location.  This is useful for server-side variables
+ * where a local copy is kept but a client-visible copy is exposed through shared memory.
+ *
+ * We use std::atomic as the default container class to access this memory.
+ */
+template <typename T, template <typename> class Container = std::atomic>
+class MirroredVariable {
+    template <typename C>
+    struct Constraints {
+        // If setMirror is used with a different type U != T passed in,
+        // as a general rule, the Container must issue a memcpy to read or write
+        // (or its equivalent) to avoid possible strict aliasing issues.
+        // The memcpy also avoids gaps in structs and alignment issues with different types.
+        static constexpr bool ok_ = false;  // Containers must specify constraints.
+    };
+    template <typename X>
+    struct Constraints<std::atomic<X>> {
+        // Atomics force read and write to memory.
+        static constexpr bool ok = std::is_same_v<X, T> ||
+                (std::atomic<X>::is_always_lock_free                   // no additional locking
+                && sizeof(std::atomic<X>) == sizeof(X)                 // layout identical to X.
+                && (std::is_arithmetic_v<X> || std::is_enum_v<X>));    // No gaps in the layout.
+    };
+
+static_assert(Constraints<Container<T>>::ok);
+public:
+    explicit MirroredVariable(const T& t) : t_{t} {}
+
+    // implicit conversion operator
+    operator T() const {
+        return t_;
+    }
+
+    MirroredVariable& operator=(const T& t) {
+        t_ = t;
+        if (mirror_ != nullptr) {
+            *mirror_ = t;
+        }
+        return *this;
+    }
+
+    template <typename U>
+    void setMirror(Container<U> *other_mirror) {
+        // Much of the concern is with T != U, however there are additional concerns
+        // when storage uses shared memory between processes.  For atomics, it must be
+        // lock free.
+        static_assert(sizeof(U) == sizeof(T));
+        static_assert(alignof(U) == alignof(T));
+        static_assert(Constraints<Container<U>>::ok);
+        static_assert(sizeof(Container<U>) == sizeof(Container<T>));
+        static_assert(alignof(Container<U>) == alignof(Container<T>));
+        auto mirror = reinterpret_cast<Container<T>*>(other_mirror);
+        if (mirror_ != mirror) {
+            mirror_ = mirror;
+            if (mirror != nullptr) {
+                *mirror = t_;
+            }
+        }
+    }
+
+    void clear() {
+        mirror_ = nullptr;
+    }
+
+    MirroredVariable& operator&() const = delete;
+
+protected:
+    T t_{};
+    Container<T>* mirror_ = nullptr;
+};
+
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
     // in continuously incrementing frame units, take modulo buffer size, which must be a power of 2
@@ -188,6 +265,8 @@
 
     volatile    int32_t     mFlags;         // combinations of CBLK_*
 
+                std::atomic<int32_t>  mState; // current TrackBase state.
+
 public:
                 union {
                     AudioTrackSharedStreaming   mStreaming;
@@ -198,6 +277,9 @@
                 // Cache line boundary (32 bytes)
 };
 
+// TODO: ensure standard layout.
+// static_assert(std::is_standard_layout_v<audio_track_cblk_t>);
+
 // ----------------------------------------------------------------------------
 
 // Proxy for shared memory control block, to isolate callers from needing to know the details.
@@ -323,6 +405,7 @@
         return mEpoch;
     }
 
+    int32_t getState() const { return mCblk->mState; }
     uint32_t      getBufferSizeInFrames() const { return mBufferSizeInFrames; }
     // See documentation for AudioTrack::setBufferSizeInFrames()
     uint32_t      setBufferSizeInFrames(uint32_t requestedSize);
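
A short usage sketch for the MirroredVariable template added above (illustrative only, not AudioFlinger code): a server-side state value is mirrored into a client-visible std::atomic, the way mState in audio_track_cblk_t can be kept in sync. It assumes the header is on the include path and uses the android namespace it declares.

    #include <atomic>
    #include <private/media/AudioTrackShared.h>

    using namespace android;

    // Sketch only: keep a local track state mirrored into shared memory.
    static void mirroredStateExample(std::atomic<int32_t>* clientVisibleState) {
        MirroredVariable<int32_t> state{CBLK_STATE_IDLE};

        // Attaching the mirror copies the current value into it immediately.
        state.setMirror(clientVisibleState);

        // Assignment updates both the local copy and the mirrored storage.
        state = CBLK_STATE_PAUSING;

        // Reads go through the implicit conversion operator.
        int32_t local = state;
        (void)local;

        // Detach before the shared memory region goes away.
        state.clear();
    }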
diff --git a/media/audioserver/Android.bp b/media/audioserver/Android.bp
index be25ffb..e4fc4de 100644
--- a/media/audioserver/Android.bp
+++ b/media/audioserver/Android.bp
@@ -26,6 +26,7 @@
 
     shared_libs: [
         "libaaudioservice",
+        "libaudioclient",
         "libaudioflinger",
         "libaudiopolicyservice",
         "libaudioprocessing",
@@ -41,7 +42,6 @@
         "libpowermanager",
         "libutils",
         "libvibrator",
-
     ],
 
     // TODO check if we still need all of these include directories
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 8ee1efb..726a6be 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -17,11 +17,17 @@
 #define LOG_TAG "audioserver"
 //#define LOG_NDEBUG 0
 
+#include <algorithm>
+
 #include <fcntl.h>
 #include <sys/prctl.h>
 #include <sys/wait.h>
 #include <cutils/properties.h>
 
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <android/media/IAudioFlingerService.h>
 #include <binder/IPCThreadState.h>
 #include <binder/ProcessState.h>
 #include <binder/IServiceManager.h>
@@ -30,7 +36,6 @@
 #include <utils/Log.h>
 
 // from include_dirs
-#include "aaudio/AAudioTesting.h" // aaudio_policy_t, AAUDIO_PROP_MMAP_POLICY, AAUDIO_POLICY_*
 #include "AudioFlinger.h"
 #include "AudioPolicyService.h"
 #include "AAudioService.h"
@@ -39,6 +44,10 @@
 
 using namespace android;
 
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
 int main(int argc __unused, char **argv)
 {
     // TODO: update with refined parameters
@@ -146,10 +155,24 @@
         // AAudioService should only be used in OC-MR1 and later.
         // And only enable the AAudioService if the system MMAP policy explicitly allows it.
         // This prevents a client from misusing AAudioService when it is not supported.
-        aaudio_policy_t mmapPolicy = property_get_int32(AAUDIO_PROP_MMAP_POLICY,
-                                                        AAUDIO_POLICY_NEVER);
-        if (mmapPolicy == AAUDIO_POLICY_AUTO || mmapPolicy == AAUDIO_POLICY_ALWAYS) {
+        // If we cannot get audio flinger here, something is seriously wrong. In that case,
+        // calling audio flinger through a null pointer will crash the process and
+        // draw attention to the problem.
+        sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+        std::vector<AudioMMapPolicyInfo> policyInfos;
+        status_t status = af->getMmapPolicyInfos(
+                AudioMMapPolicyType::DEFAULT, &policyInfos);
+        // Initialize the aaudio service when querying the mmap policy succeeds and
+        // any of the returned policies supports MMAP.
+        if (status == NO_ERROR &&
+            std::any_of(policyInfos.begin(), policyInfos.end(), [](const auto& info) {
+                    return info.mmapPolicy == AudioMMapPolicy::AUTO ||
+                           info.mmapPolicy == AudioMMapPolicy::ALWAYS;
+            })) {
             AAudioService::instantiate();
+        } else {
+            ALOGD("Do not init aaudio service, status %d, policy info size %zu",
+                  status, policyInfos.size());
         }
 
         ProcessState::self()->startThreadPool();
diff --git a/media/codec2/components/aac/C2SoftAacDec.cpp b/media/codec2/components/aac/C2SoftAacDec.cpp
index 342d771..57cdcd0 100644
--- a/media/codec2/components/aac/C2SoftAacDec.cpp
+++ b/media/codec2/components/aac/C2SoftAacDec.cpp
@@ -287,6 +287,7 @@
     mOutputDelayRingBufferWritePos = 0;
     mOutputDelayRingBufferReadPos = 0;
     mOutputDelayRingBufferFilled = 0;
+    mOutputDelayRingBuffer.reset();
     mBuffersInfo.clear();
 
     status_t status = UNKNOWN_ERROR;
@@ -308,10 +309,7 @@
         aacDecoder_Close(mAACDecoder);
         mAACDecoder = nullptr;
     }
-    if (mOutputDelayRingBuffer) {
-        delete[] mOutputDelayRingBuffer;
-        mOutputDelayRingBuffer = nullptr;
-    }
+    mOutputDelayRingBuffer.reset();
 }
 
 status_t C2SoftAacDec::initDecoder() {
@@ -327,7 +325,7 @@
 
     mOutputDelayCompensated = 0;
     mOutputDelayRingBufferSize = 2048 * MAX_CHANNEL_COUNT * kNumDelayBlocksMax;
-    mOutputDelayRingBuffer = new short[mOutputDelayRingBufferSize];
+    mOutputDelayRingBuffer.reset(new short[mOutputDelayRingBufferSize]);
     mOutputDelayRingBufferWritePos = 0;
     mOutputDelayRingBufferReadPos = 0;
     mOutputDelayRingBufferFilled = 0;
diff --git a/media/codec2/components/aac/C2SoftAacDec.h b/media/codec2/components/aac/C2SoftAacDec.h
index 986187c..a03fc70 100644
--- a/media/codec2/components/aac/C2SoftAacDec.h
+++ b/media/codec2/components/aac/C2SoftAacDec.h
@@ -93,7 +93,7 @@
     bool mEndOfOutput;
     int32_t mOutputDelayCompensated;
     int32_t mOutputDelayRingBufferSize;
-    short *mOutputDelayRingBuffer;
+    std::unique_ptr<short[]> mOutputDelayRingBuffer;
     int32_t mOutputDelayRingBufferWritePos;
     int32_t mOutputDelayRingBufferReadPos;
     int32_t mOutputDelayRingBufferFilled;
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.cpp b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
index 50690af..149c6ee 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.cpp
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
@@ -413,7 +413,7 @@
         mConfig->inputBufferCurrentLength = (inSize - inPos);
         mConfig->inputBufferMaxLength = 0;
         mConfig->inputBufferUsedLength = 0;
-        mConfig->outputFrameSize = (calOutSize - outSize);
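+        // outputFrameSize is counted in 16-bit samples, so convert the remaining byte count.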
+        mConfig->outputFrameSize = (calOutSize - outSize) / sizeof(int16_t);
         mConfig->pOutputBuffer = reinterpret_cast<int16_t *> (wView.data() + outSize);
 
         ERROR_CODE decoderErr;
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 60d6b44..81f4679 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -228,7 +228,6 @@
         const std::shared_ptr<IntfImpl> &intfImpl)
     : SimpleC2Component(std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
       mIntf(intfImpl),
-      mDecHandle(nullptr),
       mOutputBuffer{},
       mInitialized(false) {
 }
@@ -243,13 +242,9 @@
 }
 
 c2_status_t C2SoftMpeg4Dec::onStop() {
-    if (mDecHandle) {
-        if (mInitialized) {
-            PVCleanUpVideoDecoder(mDecHandle);
-            mInitialized = false;
-        }
-        delete mDecHandle;
-        mDecHandle = nullptr;
+    if (mInitialized) {
+        PVCleanUpVideoDecoder(&mVideoDecControls);
+        mInitialized = false;
     }
     for (int32_t i = 0; i < kNumOutputBuffers; ++i) {
         if (mOutputBuffer[i]) {
@@ -279,7 +274,7 @@
 
 c2_status_t C2SoftMpeg4Dec::onFlush_sm() {
     if (mInitialized) {
-        if (PV_TRUE != PVResetVideoDecoder(mDecHandle)) {
+        if (PV_TRUE != PVResetVideoDecoder(&mVideoDecControls)) {
             return C2_CORRUPTED;
         }
     }
@@ -294,14 +289,8 @@
 #else
     mIsMpeg4 = false;
 #endif
-    if (!mDecHandle) {
-        mDecHandle = new tagvideoDecControls;
-    }
-    if (!mDecHandle) {
-        ALOGE("mDecHandle is null");
-        return NO_MEMORY;
-    }
-    memset(mDecHandle, 0, sizeof(tagvideoDecControls));
+
+    memset(&mVideoDecControls, 0, sizeof(tagvideoDecControls));
 
     /* TODO: bring these values to 352 and 288. It cannot be done as of now
      * because, h263 doesn't seem to allow port reconfiguration. In OMX, the
@@ -357,10 +346,6 @@
 }
 
 c2_status_t C2SoftMpeg4Dec::ensureDecoderState(const std::shared_ptr<C2BlockPool> &pool) {
-    if (!mDecHandle) {
-        ALOGE("not supposed to be here, invalid decoder context");
-        return C2_CORRUPTED;
-    }
 
     mOutputBufferSize = align(mIntf->getMaxWidth(), 16) * align(mIntf->getMaxHeight(), 16) * 3 / 2;
     for (int32_t i = 0; i < kNumOutputBuffers; ++i) {
@@ -391,10 +376,10 @@
 
 bool C2SoftMpeg4Dec::handleResChange(const std::unique_ptr<C2Work> &work) {
     uint32_t disp_width, disp_height;
-    PVGetVideoDimensions(mDecHandle, (int32 *)&disp_width, (int32 *)&disp_height);
+    PVGetVideoDimensions(&mVideoDecControls, (int32 *)&disp_width, (int32 *)&disp_height);
 
     uint32_t buf_width, buf_height;
-    PVGetBufferDimensions(mDecHandle, (int32 *)&buf_width, (int32 *)&buf_height);
+    PVGetBufferDimensions(&mVideoDecControls, (int32 *)&buf_width, (int32 *)&buf_height);
 
     CHECK_LE(disp_width, buf_width);
     CHECK_LE(disp_height, buf_height);
@@ -415,13 +400,14 @@
         }
 
         if (!mIsMpeg4) {
-            PVCleanUpVideoDecoder(mDecHandle);
+            PVCleanUpVideoDecoder(&mVideoDecControls);
 
             uint8_t *vol_data[1]{};
             int32_t vol_size = 0;
 
             if (!PVInitVideoDecoder(
-                    mDecHandle, vol_data, &vol_size, 1, mIntf->getMaxWidth(), mIntf->getMaxHeight(), H263_MODE)) {
+                    &mVideoDecControls, vol_data, &vol_size, 1, mIntf->getMaxWidth(),
+                                                        mIntf->getMaxHeight(), H263_MODE)) {
                 ALOGE("Error in PVInitVideoDecoder H263_MODE while resChanged was set to true");
                 mSignalledError = true;
                 work->result = C2_CORRUPTED;
@@ -511,7 +497,7 @@
     uint32_t *start_code = (uint32_t *)bitstream;
     bool volHeader = *start_code == 0xB0010000;
     if (volHeader) {
-        PVCleanUpVideoDecoder(mDecHandle);
+        PVCleanUpVideoDecoder(&mVideoDecControls);
         mInitialized = false;
     }
 
@@ -526,7 +512,7 @@
         }
         MP4DecodingMode mode = (mIsMpeg4) ? MPEG4_MODE : H263_MODE;
         if (!PVInitVideoDecoder(
-                mDecHandle, vol_data, &vol_size, 1,
+                &mVideoDecControls, vol_data, &vol_size, 1,
                 mIntf->getMaxWidth(), mIntf->getMaxHeight(), mode)) {
             ALOGE("PVInitVideoDecoder failed. Unsupported content?");
             mSignalledError = true;
@@ -534,7 +520,7 @@
             return;
         }
         mInitialized = true;
-        MP4DecodingMode actualMode = PVGetDecBitstreamMode(mDecHandle);
+        MP4DecodingMode actualMode = PVGetDecBitstreamMode(&mVideoDecControls);
         if (mode != actualMode) {
             ALOGE("Decoded mode not same as actual mode of the decoder");
             mSignalledError = true;
@@ -542,7 +528,7 @@
             return;
         }
 
-        PVSetPostProcType(mDecHandle, 0);
+        PVSetPostProcType(&mVideoDecControls, 0);
         if (handleResChange(work)) {
             ALOGI("Setting width and height");
             C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
@@ -579,7 +565,7 @@
             return;
         }
 
-        uint32_t yFrameSize = sizeof(uint8) * mDecHandle->size;
+        uint32_t yFrameSize = sizeof(uint8) * mVideoDecControls.size;
         if (mOutputBufferSize < yFrameSize * 3 / 2){
             ALOGE("Too small output buffer: %zu bytes", mOutputBufferSize);
             mSignalledError = true;
@@ -588,7 +574,7 @@
         }
 
         if (!mFramesConfigured) {
-            PVSetReferenceYUV(mDecHandle,mOutputBuffer[1]);
+            PVSetReferenceYUV(&mVideoDecControls,mOutputBuffer[1]);
             mFramesConfigured = true;
         }
 
@@ -599,7 +585,7 @@
         uint8_t *bitstreamTmp = bitstream;
         uint32_t timestamp = workIndex;
         if (PVDecodeVopHeader(
-                    mDecHandle, &bitstreamTmp, &timestamp, &tmpInSize,
+                    &mVideoDecControls, &bitstreamTmp, &timestamp, &tmpInSize,
                     &header_info, &useExtTimestamp,
                     mOutputBuffer[mNumSamplesOutput & 1]) != PV_TRUE) {
             ALOGE("failed to decode vop header.");
@@ -631,7 +617,7 @@
             continue;
         }
 
-        if (PVDecodeVopBody(mDecHandle, &tmpInSize) != PV_TRUE) {
+        if (PVDecodeVopBody(&mVideoDecControls, &tmpInSize) != PV_TRUE) {
             ALOGE("failed to decode video frame.");
             mSignalledError = true;
             work->result = C2_CORRUPTED;
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.h b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.h
index 716a095..fed04c9 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.h
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.h
@@ -19,8 +19,8 @@
 
 #include <SimpleC2Component.h>
 
+#include <mp4dec_api.h>
 
-struct tagvideoDecControls;
 
 namespace android {
 
@@ -54,7 +54,7 @@
     bool handleResChange(const std::unique_ptr<C2Work> &work);
 
     std::shared_ptr<IntfImpl> mIntf;
-    tagvideoDecControls *mDecHandle;
+    tagvideoDecControls mVideoDecControls;
     std::shared_ptr<C2GraphicBlock> mOutBlock;
     uint8_t *mOutputBuffer[kNumOutputBuffers];
     size_t  mOutputBufferSize;
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 2953d90..45e2ca8 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -149,8 +149,16 @@
 #else
         addParameter(
                 DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
-                .withConstValue(new C2StreamProfileLevelInfo::input(0u,
-                        C2Config::PROFILE_UNUSED, C2Config::LEVEL_UNUSED))
+                .withDefault(new C2StreamProfileLevelInfo::input(0u,
+                        C2Config::PROFILE_VP8_0, C2Config::LEVEL_UNUSED))
+                .withFields({
+                    C2F(mProfileLevel, profile).equalTo(
+                        PROFILE_VP8_0
+                    ),
+                    C2F(mProfileLevel, level).equalTo(
+                        LEVEL_UNUSED),
+                })
+                .withSetter(ProfileLevelSetter, mSize)
                 .build());
 #endif
 
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index c98b802..926b2e9 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -324,21 +324,35 @@
                 .withConstValue(new C2StreamIntraRefreshTuning::output(
                              0u, C2Config::INTRA_REFRESH_DISABLED, 0.))
                 .build());
-
+#ifdef VP9
         addParameter(
-        DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
-        .withDefault(new C2StreamProfileLevelInfo::output(
-                0u, PROFILE_VP9_0, LEVEL_VP9_4_1))
-        .withFields({
-            C2F(mProfileLevel, profile).equalTo(
-                PROFILE_VP9_0
-            ),
-            C2F(mProfileLevel, level).equalTo(
-                LEVEL_VP9_4_1),
-        })
-        .withSetter(ProfileLevelSetter)
-        .build());
-
+                DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                .withDefault(new C2StreamProfileLevelInfo::output(
+                        0u, PROFILE_VP9_0, LEVEL_VP9_4_1))
+                .withFields({
+                    C2F(mProfileLevel, profile).equalTo(
+                        PROFILE_VP9_0
+                    ),
+                    C2F(mProfileLevel, level).equalTo(
+                        LEVEL_VP9_4_1),
+                })
+                .withSetter(ProfileLevelSetter)
+                .build());
+#else
+        addParameter(
+                DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                .withDefault(new C2StreamProfileLevelInfo::output(
+                        0u, PROFILE_VP8_0, LEVEL_UNUSED))
+                .withFields({
+                    C2F(mProfileLevel, profile).equalTo(
+                        PROFILE_VP8_0
+                    ),
+                    C2F(mProfileLevel, level).equalTo(
+                        LEVEL_UNUSED),
+                })
+                .withSetter(ProfileLevelSetter)
+                .build());
+#endif
         addParameter(
                 DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
                 .withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
diff --git a/media/codec2/fuzzer/C2Fuzzer.cpp b/media/codec2/fuzzer/C2Fuzzer.cpp
index e35ee48..e469d8b 100644
--- a/media/codec2/fuzzer/C2Fuzzer.cpp
+++ b/media/codec2/fuzzer/C2Fuzzer.cpp
@@ -239,17 +239,17 @@
 }
 
 void Codec2Fuzzer::decodeFrames(const uint8_t* data, size_t size) {
-  mBufferSource = new BufferSource(data, size);
-  if (!mBufferSource) {
+  std::unique_ptr<BufferSource> bufferSource = std::make_unique<BufferSource>(data, size);
+  if (!bufferSource) {
     return;
   }
-  mBufferSource->parse();
+  bufferSource->parse();
   c2_status_t status = C2_OK;
   size_t numFrames = 0;
-  while (!mBufferSource->isEos()) {
+  while (!bufferSource->isEos()) {
     uint8_t* frame = nullptr;
     size_t frameSize = 0;
-    FrameData frameData = mBufferSource->getFrame();
+    FrameData frameData = bufferSource->getFrame();
     frame = std::get<0>(frameData);
     frameSize = std::get<1>(frameData);
 
@@ -298,7 +298,6 @@
   mConditionalVariable.wait_for(waitForDecodeComplete, kC2FuzzerTimeOut, [this] { return mEos; });
   std::list<std::unique_ptr<C2Work>> c2flushedWorks;
   mComponent->flush_sm(C2Component::FLUSH_COMPONENT, &c2flushedWorks);
-  delete mBufferSource;
 }
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
diff --git a/media/codec2/fuzzer/C2Fuzzer.h b/media/codec2/fuzzer/C2Fuzzer.h
index d5ac81a..da76885 100644
--- a/media/codec2/fuzzer/C2Fuzzer.h
+++ b/media/codec2/fuzzer/C2Fuzzer.h
@@ -104,7 +104,6 @@
     static constexpr size_t kMarkerSuffixSize = 3;
   };
 
-  BufferSource* mBufferSource;
   bool mEos = false;
   C2BlockPool::local_id_t mBlockPoolId;
 
diff --git a/media/codec2/hidl/1.0/vts/.clang-format b/media/codec2/hidl/1.0/vts/.clang-format
deleted file mode 120000
index 136279c..0000000
--- a/media/codec2/hidl/1.0/vts/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
index 74b099c..d47ef67 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
@@ -100,8 +100,10 @@
         ASSERT_NE(mLinearPool, nullptr);
 
         std::vector<std::unique_ptr<C2Param>> queried;
-        mComponent->query({}, {C2PortMediaTypeSetting::input::PARAM_TYPE}, C2_DONT_BLOCK, &queried);
-        ASSERT_GT(queried.size(), 0);
+        c2_status_t c2err = mComponent->query({}, {C2PortMediaTypeSetting::input::PARAM_TYPE},
+                                              C2_DONT_BLOCK, &queried);
+        ASSERT_EQ(c2err, C2_OK) << "Query media type failed";
+        ASSERT_EQ(queried.size(), 1) << "Size of the vector returned is invalid";
 
         mMime = ((C2PortMediaTypeSetting::input*)queried[0].get())->m.value;
 
@@ -277,24 +279,20 @@
     };
     std::vector<std::unique_ptr<C2Param>> inParams;
     c2_status_t status = component->query({}, indices, C2_DONT_BLOCK, &inParams);
-    if (status != C2_OK && inParams.size() == 0) {
-        ALOGE("Query media type failed => %d", status);
-        ASSERT_TRUE(false);
-    } else {
-        size_t offset = sizeof(C2Param);
-        for (size_t i = 0; i < inParams.size(); ++i) {
-            C2Param* param = inParams[i].get();
-            bitStreamInfo[i] = *(int32_t*)((uint8_t*)param + offset);
-        }
-        if (mime.find("3gpp") != std::string::npos) {
-            ASSERT_EQ(bitStreamInfo[0], 8000);
-            ASSERT_EQ(bitStreamInfo[1], 1);
-        } else if (mime.find("amr-wb") != std::string::npos) {
-            ASSERT_EQ(bitStreamInfo[0], 16000);
-            ASSERT_EQ(bitStreamInfo[1], 1);
-        } else if (mime.find("gsm") != std::string::npos) {
-            ASSERT_EQ(bitStreamInfo[0], 8000);
-        }
+    ASSERT_EQ(status, C2_OK) << "Query sample rate and channel count info failed";
+    ASSERT_EQ(inParams.size(), indices.size()) << "Size of the vector returned is invalid";
+
+    bitStreamInfo[0] = C2StreamSampleRateInfo::output::From(inParams[0].get())->value;
+    bitStreamInfo[1] = C2StreamChannelCountInfo::output::From(inParams[1].get())->value;
+    if (mime.find("3gpp") != std::string::npos) {
+        ASSERT_EQ(bitStreamInfo[0], 8000);
+        ASSERT_EQ(bitStreamInfo[1], 1);
+    } else if (mime.find("amr-wb") != std::string::npos) {
+        ASSERT_EQ(bitStreamInfo[0], 16000);
+        ASSERT_EQ(bitStreamInfo[1], 1);
+    } else if (mime.find("gsm") != std::string::npos) {
+        ASSERT_EQ(bitStreamInfo[0], 8000);
+        ASSERT_EQ(bitStreamInfo[1], 1);
     }
 }
 
@@ -467,6 +465,11 @@
     if (mMime.find("raw") != std::string::npos) {
         bitStreamInfo[0] = 8000;
         bitStreamInfo[1] = 1;
+    } else if ((mMime.find("g711-alaw") != std::string::npos) ||
+               (mMime.find("g711-mlaw") != std::string::npos)) {
+        // g711 test data is all 1-channel and has no embedded config info.
+        bitStreamInfo[0] = 8000;
+        bitStreamInfo[1] = 1;
     } else {
         ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
     }
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
index c487fa3..bd7ec0d 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
@@ -73,26 +73,22 @@
         ASSERT_NE(mLinearPool, nullptr);
 
         std::vector<std::unique_ptr<C2Param>> queried;
-        mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE}, C2_DONT_BLOCK,
-                          &queried);
-        ASSERT_GT(queried.size(), 0);
+        c2_status_t c2err = mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE},
+                                              C2_DONT_BLOCK, &queried);
+        ASSERT_EQ(c2err, C2_OK) << "Query media type failed";
+        ASSERT_EQ(queried.size(), 1) << "Size of the vector returned is invalid";
 
         mMime = ((C2PortMediaTypeSetting::output*)queried[0].get())->m.value;
         mEos = false;
         mCsd = false;
         mFramesReceived = 0;
+        mEncoderFrameSize = 0;
         mWorkResult = C2_OK;
         mOutputSize = 0u;
-        getInputMaxBufSize();
-
-        c2_status_t status = getChannelCount(&mNumChannels);
-        ASSERT_EQ(status, C2_OK) << "Unable to get supported channel count";
-
-        status = getSampleRate(&mSampleRate);
-        ASSERT_EQ(status, C2_OK) << "Unable to get supported sample rate";
-
-        status = getSamplesPerFrame(mNumChannels, &mSamplesPerFrame);
-        ASSERT_EQ(status, C2_OK) << "Unable to get supported number of samples per frame";
+        ASSERT_NO_FATAL_FAILURE(getInputMaxBufSize());
+        ASSERT_NO_FATAL_FAILURE(getChannelCount(&mNumChannels));
+        ASSERT_NO_FATAL_FAILURE(getSampleRate(&mSampleRate));
+        ASSERT_NO_FATAL_FAILURE(getSamplesPerFrame(mNumChannels, &mSamplesPerFrame));
 
         getFile(mNumChannels, mSampleRate);
     }
@@ -108,9 +104,9 @@
     // Get the test parameters from GetParam call.
     virtual void getParams() {}
 
-    c2_status_t getChannelCount(int32_t* nChannels);
-    c2_status_t getSampleRate(int32_t* nSampleRate);
-    c2_status_t getSamplesPerFrame(int32_t nChannels, int32_t* samplesPerFrame);
+    void getChannelCount(int32_t* nChannels);
+    void getSampleRate(int32_t* nSampleRate);
+    void getSamplesPerFrame(int32_t nChannels, int32_t* samplesPerFrame);
 
     void getFile(int32_t channelCount, int32_t sampleRate);
 
@@ -146,6 +142,7 @@
     uint32_t mFramesReceived;
     int32_t mInputMaxBufSize;
     uint64_t mOutputSize;
+    uint32_t mEncoderFrameSize;
     std::list<uint64_t> mFlushedIndices;
 
     C2BlockPool::local_id_t mBlockPoolId;
@@ -173,21 +170,13 @@
 
     // In encoder components, fetch the size of input buffer allocated
     void getInputMaxBufSize() {
-        int32_t bitStreamInfo[1] = {0};
         std::vector<std::unique_ptr<C2Param>> inParams;
         c2_status_t status = mComponent->query({}, {C2StreamMaxBufferSizeInfo::input::PARAM_TYPE},
                                                C2_DONT_BLOCK, &inParams);
-        if (status != C2_OK && inParams.size() == 0) {
-            ALOGE("Query MaxBufferSizeInfo failed => %d", status);
-            ASSERT_TRUE(false);
-        } else {
-            size_t offset = sizeof(C2Param);
-            for (size_t i = 0; i < inParams.size(); ++i) {
-                C2Param* param = inParams[i].get();
-                bitStreamInfo[i] = *(int32_t*)((uint8_t*)param + offset);
-            }
-        }
-        mInputMaxBufSize = bitStreamInfo[0];
+        ASSERT_EQ(status, C2_OK) << "Query max buffer size info failed";
+        ASSERT_EQ(inParams.size(), 1) << "Size of the vector returned is invalid";
+
+        mInputMaxBufSize = C2StreamMaxBufferSizeInfo::input::From(inParams[0].get())->value;
     }
 };
 
@@ -243,17 +232,15 @@
     return false;
 }
 
-c2_status_t Codec2AudioEncHidlTestBase::getChannelCount(int32_t* nChannels) {
+void Codec2AudioEncHidlTestBase::getChannelCount(int32_t* nChannels) {
     std::unique_ptr<C2StreamChannelCountInfo::input> channelCount =
             std::make_unique<C2StreamChannelCountInfo::input>();
     std::vector<C2FieldSupportedValuesQuery> validValueInfos = {
             C2FieldSupportedValuesQuery::Current(
                     C2ParamField(channelCount.get(), &C2StreamChannelCountInfo::value))};
     c2_status_t c2err = mComponent->querySupportedValues(validValueInfos, C2_DONT_BLOCK);
-    if (c2err != C2_OK || validValueInfos.size() != 1u) {
-        ALOGE("querySupportedValues_vb failed for channelCount");
-        return c2err;
-    }
+    ASSERT_EQ(c2err, C2_OK) << "Query channel count info failed";
+    ASSERT_EQ(validValueInfos.size(), 1) << "Size of the vector returned is invalid";
 
     // setting default value of channelCount
     *nChannels = 1;
@@ -280,47 +267,45 @@
             break;
         }
         default:
+            ASSERT_TRUE(false) << "Unsupported type: " << c2FSV.type;
             break;
     }
-    return C2_OK;
+    return;
 }
-c2_status_t Codec2AudioEncHidlTestBase::getSampleRate(int32_t* nSampleRate) {
+void Codec2AudioEncHidlTestBase::getSampleRate(int32_t* nSampleRate) {
     // Use the default sample rate for mComponents
     std::vector<std::unique_ptr<C2Param>> queried;
     c2_status_t c2err = mComponent->query({}, {C2StreamSampleRateInfo::input::PARAM_TYPE},
                                           C2_DONT_BLOCK, &queried);
-    if (c2err != C2_OK || queried.size() == 0) return c2err;
+    ASSERT_EQ(c2err, C2_OK) << "Query sample rate info failed";
+    ASSERT_EQ(queried.size(), 1) << "Size of the vector returned is invalid";
 
-    size_t offset = sizeof(C2Param);
-    C2Param* param = queried[0].get();
-    *nSampleRate = *(int32_t*)((uint8_t*)param + offset);
-
-    return C2_OK;
+    *nSampleRate = C2StreamSampleRateInfo::input::From(queried[0].get())->value;
+    return;
 }
 
-c2_status_t Codec2AudioEncHidlTestBase::getSamplesPerFrame(int32_t nChannels,
-                                                           int32_t* samplesPerFrame) {
+void Codec2AudioEncHidlTestBase::getSamplesPerFrame(int32_t nChannels, int32_t* samplesPerFrame) {
     std::vector<std::unique_ptr<C2Param>> queried;
     c2_status_t c2err = mComponent->query({}, {C2StreamAudioFrameSizeInfo::input::PARAM_TYPE},
                                           C2_DONT_BLOCK, &queried);
-    size_t offset = sizeof(C2Param);
-    uint32_t maxInputSize = 0;
-    if (c2err == C2_OK && queried.size()) {
-        C2Param* param = queried[0].get();
-        maxInputSize = *(uint32_t*)((uint8_t*)param + offset);
+
+    if (c2err == C2_OK && queried.size() == 1) {
+        mEncoderFrameSize = C2StreamAudioFrameSizeInfo::input::From(queried[0].get())->value;
+        if (mEncoderFrameSize) {
+            *samplesPerFrame = mEncoderFrameSize;
+            return;
+        }
     }
 
-    if (0 == maxInputSize) {
-        c2err = mComponent->query({}, {C2StreamMaxBufferSizeInfo::input::PARAM_TYPE}, C2_DONT_BLOCK,
-                                  &queried);
-        if (c2err != C2_OK || queried.size() == 0) return c2err;
+    c2err = mComponent->query({}, {C2StreamMaxBufferSizeInfo::input::PARAM_TYPE}, C2_DONT_BLOCK,
+                              &queried);
+    ASSERT_EQ(c2err, C2_OK) << "Query max buffer size info failed";
+    ASSERT_EQ(queried.size(), 1) << "Size of the vector returned is invalid";
 
-        C2Param* param = queried[0].get();
-        maxInputSize = *(uint32_t*)((uint8_t*)param + offset);
-    }
+    uint32_t maxInputSize = C2StreamMaxBufferSizeInfo::input::From(queried[0].get())->value;
     *samplesPerFrame = std::min((maxInputSize / (nChannels * 2)), kMaxSamplesPerFrame);
 
-    return C2_OK;
+    return;
 }
 
 // LookUpTable of clips and metadata for component testing
@@ -450,10 +435,13 @@
     ALOGV("EncodeTest");
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
     bool signalEOS = std::get<2>(GetParam());
-    // Ratio w.r.t to mInputMaxBufSize
-    int32_t inputMaxBufRatio = std::get<3>(GetParam());
-    mSamplesPerFrame = ((mInputMaxBufSize / inputMaxBufRatio) / (mNumChannels * 2));
-
+    // Set samples per frame based on inputMaxBufRatio if the component does not
+    // advertise a supported frame size.
+    if (!mEncoderFrameSize) {
+        // Ratio w.r.t. mInputMaxBufSize
+        int32_t inputMaxBufRatio = std::get<3>(GetParam());
+        mSamplesPerFrame = ((mInputMaxBufSize / inputMaxBufRatio) / (mNumChannels * 2));
+    }
     ALOGV("signalEOS %d mInputMaxBufSize %d mSamplesPerFrame %d", signalEOS, mInputMaxBufSize,
           mSamplesPerFrame);
 
@@ -613,12 +601,11 @@
         std::vector<std::unique_ptr<C2Param>> inParams;
         c2_status_t c2_status = mComponent->query({}, {C2StreamChannelCountInfo::input::PARAM_TYPE},
                                                   C2_DONT_BLOCK, &inParams);
-        ASSERT_TRUE(!c2_status && inParams.size())
-                << "Query configured channelCount failed => %d" << c2_status;
+        ASSERT_EQ(c2_status, C2_OK) << "Query channel count info failed";
+        ASSERT_EQ(inParams.size(), 1) << "Size of the vector returned is invalid";
 
-        size_t offset = sizeof(C2Param);
-        C2Param* param = inParams[0].get();
-        int32_t channelCount = *(int32_t*)((uint8_t*)param + offset);
+        int32_t channelCount = C2StreamChannelCountInfo::input::From(inParams[0].get())->value;
+
         if (channelCount != nChannels) {
             std::cout << "[   WARN   ] Test Skipped for ChannelCount " << nChannels << "\n";
             continue;
@@ -702,13 +689,11 @@
         std::vector<std::unique_ptr<C2Param>> inParams;
         c2_status_t c2_status = mComponent->query({}, {C2StreamSampleRateInfo::input::PARAM_TYPE},
                                                   C2_DONT_BLOCK, &inParams);
+        ASSERT_EQ(c2_status, C2_OK) << "Query sample rate info failed";
+        ASSERT_EQ(inParams.size(), 1) << "Size of the vector returned is invalid";
 
-        ASSERT_TRUE(!c2_status && inParams.size())
-                << "Query configured SampleRate failed => %d" << c2_status;
-        size_t offset = sizeof(C2Param);
-        C2Param* param = inParams[0].get();
-        int32_t configuredSampleRate = *(int32_t*)((uint8_t*)param + offset);
-
+        int32_t configuredSampleRate =
+                C2StreamSampleRateInfo::input::From(inParams[0].get())->value;
         if (configuredSampleRate != nSampleRate) {
             std::cout << "[   WARN   ] Test Skipped for SampleRate " << nSampleRate << "\n";
             continue;
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index ffec897..275a721 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -61,6 +61,7 @@
   public:
     virtual void SetUp() override {
         getParams();
+        mDisableTest = false;
         mEos = false;
         mClient = android::Codec2Client::CreateFromService(mInstanceName.c_str());
         ASSERT_NE(mClient, nullptr);
@@ -73,6 +74,14 @@
         for (int i = 0; i < MAX_INPUT_BUFFERS; ++i) {
             mWorkQueue.emplace_back(new C2Work);
         }
+
+        C2SecureModeTuning secureModeTuning{};
+        mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
+        if (secureModeTuning.value != C2Config::SM_UNPROTECTED) {
+            mDisableTest = true;
+        }
+
+        if (mDisableTest) std::cout << "[   WARN   ] Test Disabled \n";
     }
 
     virtual void TearDown() override {
@@ -105,6 +114,7 @@
     std::string mInstanceName;
     std::string mComponentName;
     bool mEos;
+    bool mDisableTest;
     std::mutex mQueueLock;
     std::condition_variable mQueueCondition;
     std::list<std::unique_ptr<C2Work>> mWorkQueue;
@@ -324,6 +334,7 @@
 };
 
 TEST_P(Codec2ComponentInputTests, InputBufferTest) {
+    if (mDisableTest) GTEST_SKIP() << "Test is disabled";
     description("Tests for different inputs");
 
     uint32_t flags = std::get<2>(GetParam());
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 95a4674..67873fa 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -123,8 +123,10 @@
         ASSERT_NE(mLinearPool, nullptr);
 
         std::vector<std::unique_ptr<C2Param>> queried;
-        mComponent->query({}, {C2PortMediaTypeSetting::input::PARAM_TYPE}, C2_DONT_BLOCK, &queried);
-        ASSERT_GT(queried.size(), 0);
+        c2_status_t c2err = mComponent->query({}, {C2PortMediaTypeSetting::input::PARAM_TYPE},
+                                              C2_DONT_BLOCK, &queried);
+        ASSERT_EQ(c2err, C2_OK) << "Query media type failed";
+        ASSERT_EQ(queried.size(), 1) << "Size of the vector returned is invalid";
 
         mMime = ((C2PortMediaTypeSetting::input*)queried[0].get())->m.value;
         mEos = false;
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index a6507e7..8305feb 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -74,9 +74,10 @@
         ASSERT_NE(mGraphicPool, nullptr);
 
         std::vector<std::unique_ptr<C2Param>> queried;
-        mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE}, C2_DONT_BLOCK,
-                          &queried);
-        ASSERT_GT(queried.size(), 0);
+        c2_status_t c2err = mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE},
+                                              C2_DONT_BLOCK, &queried);
+        ASSERT_EQ(c2err, C2_OK) << "Query media type failed";
+        ASSERT_EQ(queried.size(), 1) << "Size of the vector returned is invalid";
 
         mMime = ((C2PortMediaTypeSetting::output*)queried[0].get())->m.value;
         std::cout << "mime : " << mMime << "\n";
@@ -531,9 +532,17 @@
                   << " resetting num BFrames to 0\n";
         mConfigBPictures = false;
     } else {
-        size_t offset = sizeof(C2Param);
-        C2Param* param = inParams[0].get();
-        int32_t numBFrames = *(int32_t*)((uint8_t*)param + offset);
+        int32_t numBFrames = 0;
+        C2StreamGopTuning::output* gop = C2StreamGopTuning::output::From(inParams[0].get());
+        if (gop && gop->flexCount() >= 1) {
+            for (size_t i = 0; i < gop->flexCount(); ++i) {
+                const C2GopLayerStruct& layer = gop->m.values[i];
+                if (layer.type_ == C2Config::picture_type_t(P_FRAME | B_FRAME)) {
+                    numBFrames = layer.count;
+                    break;
+                }
+            }
+        }
 
         if (!numBFrames) {
             std::cout << "[   WARN   ] Bframe not supported for " << mComponentName
@@ -817,6 +826,10 @@
 TEST_P(Codec2VideoEncHidlTest, AdaptiveBitrateTest) {
     description("Encodes input file for different bitrates");
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
+    if (mMime != "video/avc" && mMime != "video/hevc" && mMime != "video/x-vnd.on2.vp8" &&
+        mMime != "video/x-vnd.on2.vp9") {
+        GTEST_SKIP() << "AdaptiveBitrateTest is enabled only for avc, hevc, vp8 and vp9 encoders";
+    }
 
     std::ifstream eleStream;
     eleStream.open(mInputFile, std::ifstream::binary);
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 82460c9..44a2c5b 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1332,8 +1332,8 @@
             }
         }
 
-        // set channel-mask
         if (config->mDomain & Config::IS_AUDIO) {
+            // set channel-mask
             int32_t mask;
             if (msg->findInt32(KEY_CHANNEL_MASK, &mask)) {
                 if (config->mDomain & Config::IS_ENCODER) {
@@ -1342,6 +1342,15 @@
                     config->mOutputFormat->setInt32(KEY_CHANNEL_MASK, mask);
                 }
             }
+
+            // set PCM encoding
+            int32_t pcmEncoding = kAudioEncodingPcm16bit;
+            msg->findInt32(KEY_PCM_ENCODING, &pcmEncoding);
+            if (encoder) {
+                config->mInputFormat->setInt32("android._config-pcm-encoding", pcmEncoding);
+            } else {
+                config->mOutputFormat->setInt32("android._config-pcm-encoding", pcmEncoding);
+            }
         }
 
         std::unique_ptr<C2Param> colorTransferRequestParam;
@@ -1421,6 +1430,10 @@
             }
         }
 
+        if (config->mTunneled) {
+            config->mOutputFormat->setInt32("android._tunneled", 1);
+        }
+
         ALOGD("setup formats input: %s",
                 config->mInputFormat->debugString().c_str());
         ALOGD("setup formats output: %s",
@@ -1896,9 +1909,11 @@
     {
         Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
         const std::unique_ptr<Config> &config = *configLocked;
+        sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
+        status_t err = OK;
+
         if (config->mTunneled && config->mSidebandHandle != nullptr) {
-            sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
-            status_t err = native_window_set_sideband_stream(
+            err = native_window_set_sideband_stream(
                     nativeWindow.get(),
                     const_cast<native_handle_t *>(config->mSidebandHandle->handle()));
             if (err != OK) {
@@ -1906,6 +1921,15 @@
                         nativeWindow.get(), config->mSidebandHandle->handle(), err);
                 return err;
             }
+        } else {
+            // Explicitly reset the sideband handle of the window for
+            // non-tunneled video in case the window was previously used
+            // for tunneled video playback.
+            err = native_window_set_sideband_stream(nativeWindow.get(), nullptr);
+            if (err != OK) {
+                ALOGE("native_window_set_sideband_stream(nullptr) failed! (err %d).", err);
+                return err;
+            }
         }
     }
     return mChannel->setSurface(surface);
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 8194bb8..23a326f 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include <algorithm>
+#include <atomic>
 #include <list>
 #include <numeric>
 
@@ -155,6 +156,7 @@
         input->pipelineDelay = 0u;
         input->numSlots = kSmoothnessFactor;
         input->numExtraSlots = 0u;
+        input->lastFlushIndex = 0u;
     }
     {
         Mutexed<Output>::Locked output(mOutput);
@@ -1122,6 +1124,7 @@
         input->numSlots = numInputSlots;
         input->extraBuffers.flush();
         input->numExtraSlots = 0u;
+        input->lastFlushIndex = mFrameIndex.load(std::memory_order_relaxed);
         if (audioEncoder && encoderFrameSize && sampleRate && channelCount) {
             input->frameReassembler.init(
                     pool,
@@ -1385,6 +1388,12 @@
                 }
             }
         }
+
+        int32_t tunneled = 0;
+        if (!outputFormat->findInt32("android._tunneled", &tunneled)) {
+            tunneled = 0;
+        }
+        mTunneled = (tunneled != 0);
     }
 
     // Set up pipeline control. This has to be done after mInputBuffers and
@@ -1529,6 +1538,7 @@
     ALOGV("[%s] flush", mName);
     std::vector<uint64_t> indices;
     std::list<std::unique_ptr<C2Work>> configs;
+    mInput.lock()->lastFlushIndex = mFrameIndex.load(std::memory_order_relaxed);
     for (const std::unique_ptr<C2Work> &work : flushedWork) {
         indices.push_back(work->input.ordinal.frameIndex.peeku());
         if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
@@ -1595,12 +1605,18 @@
     }
     std::shared_ptr<C2Buffer> buffer =
             mPipelineWatcher.lock()->onInputBufferReleased(frameIndex, arrayIndex);
-    bool newInputSlotAvailable;
+    bool newInputSlotAvailable = false;
     {
         Mutexed<Input>::Locked input(mInput);
-        newInputSlotAvailable = input->buffers->expireComponentBuffer(buffer);
-        if (!newInputSlotAvailable) {
-            (void)input->extraBuffers.expireComponentBuffer(buffer);
+        if (input->lastFlushIndex >= frameIndex) {
+            ALOGD("[%s] Ignoring stale input buffer done callback: "
+                  "last flush index = %lld, frameIndex = %lld",
+                  mName, input->lastFlushIndex.peekll(), (long long)frameIndex);
+        } else {
+            newInputSlotAvailable = input->buffers->expireComponentBuffer(buffer);
+            if (!newInputSlotAvailable) {
+                (void)input->extraBuffers.expireComponentBuffer(buffer);
+            }
         }
     }
     if (newInputSlotAvailable) {
@@ -1887,10 +1903,21 @@
         }
     }
 
+    bool drop = false;
+    if (worklet->output.flags & C2FrameData::FLAG_DROP_FRAME) {
+        ALOGV("[%s] onWorkDone: drop buffer but keep metadata", mName);
+        drop = true;
+    }
+
     if (notifyClient && !buffer && !flags) {
-        ALOGV("[%s] onWorkDone: Not reporting output buffer (%lld)",
-              mName, work->input.ordinal.frameIndex.peekull());
-        notifyClient = false;
+        if (mTunneled && drop && outputFormat) {
+            ALOGV("[%s] onWorkDone: Keep tunneled, drop frame with format change (%lld)",
+                  mName, work->input.ordinal.frameIndex.peekull());
+        } else {
+            ALOGV("[%s] onWorkDone: Not reporting output buffer (%lld)",
+                  mName, work->input.ordinal.frameIndex.peekull());
+            notifyClient = false;
+        }
     }
 
     if (buffer) {
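
The flush handling added above boils down to the following standalone sketch (illustrative names, not the actual CCodec classes): record the frame index current at flush time and ignore input-buffer-done callbacks for frames at or below it.

    #include <cstdint>
    #include <mutex>

    class InputFlushTracker {
    public:
        // Called from flush(): remember the frame index current at flush time.
        void onFlush(uint64_t currentFrameIndex) {
            std::lock_guard<std::mutex> lock(mLock);
            mLastFlushIndex = currentFrameIndex;
        }
        // Called from onInputBufferDone(): returns false for stale (pre-flush) frames.
        bool shouldProcess(uint64_t frameIndex) {
            std::lock_guard<std::mutex> lock(mLock);
            return frameIndex > mLastFlushIndex;
        }
    private:
        std::mutex mLock;
        uint64_t mLastFlushIndex = 0;
    };
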
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 5a2aca2..26eef30 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -273,6 +273,7 @@
         size_t numExtraSlots;
         uint32_t inputDelay;
         uint32_t pipelineDelay;
+        c2_cntr64_t lastFlushIndex;
 
         FrameReassembler frameReassembler;
     };
@@ -323,6 +324,8 @@
         return mCrypto != nullptr || mDescrambler != nullptr;
     }
     std::atomic_bool mSendEncryptedInfoBuffer;
+
+    std::atomic_bool mTunneled;
 };
 
 // Conversion of a c2_status_t value to a status_t value may depend on the
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index d94d0a2..20f2ecf 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -21,6 +21,7 @@
 #include <C2PlatformSupport.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/MediaDefs.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecConstants.h>
 #include <media/stagefright/SkipCutBuffer.h>
@@ -100,7 +101,10 @@
         if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
             int32_t stride = img->mPlane[0].mRowInc;
             mFormatWithImageData->setInt32(KEY_STRIDE, stride);
-            ALOGD("[%s] updating stride = %d", mName, stride);
+            mFormatWithImageData->setInt32(KEY_WIDTH, img->mWidth);
+            mFormatWithImageData->setInt32(KEY_HEIGHT, img->mHeight);
+            ALOGD("[%s] updating stride = %d, width: %d, height: %d",
+                  mName, stride, img->mWidth, img->mHeight);
             if (img->mNumPlanes > 1 && stride > 0) {
                 int64_t offsetDelta =
                     (int64_t)img->mPlane[1].mOffset - (int64_t)img->mPlane[0].mOffset;
@@ -201,6 +205,56 @@
     mSkipCutBuffer = new SkipCutBuffer(skip, cut, mChannelCount);
 }
 
+bool OutputBuffers::convert(
+        const std::shared_ptr<C2Buffer> &src, sp<Codec2Buffer> *dst) {
+    if (!src || src->data().type() != C2BufferData::LINEAR) {
+        return false;
+    }
+    int32_t configEncoding = kAudioEncodingPcm16bit;
+    int32_t codecEncoding = kAudioEncodingPcm16bit;
+    if (mFormat->findInt32("android._codec-pcm-encoding", &codecEncoding)
+            && mFormat->findInt32("android._config-pcm-encoding", &configEncoding)) {
+        if (mSrcEncoding != codecEncoding || mDstEncoding != configEncoding) {
+            if (codecEncoding != configEncoding) {
+                mDataConverter = AudioConverter::Create(
+                        (AudioEncoding)codecEncoding, (AudioEncoding)configEncoding);
+                ALOGD_IF(mDataConverter, "[%s] Converter created from %d to %d",
+                         mName, codecEncoding, configEncoding);
+                mFormatWithConverter = mFormat->dup();
+                mFormatWithConverter->setInt32(KEY_PCM_ENCODING, configEncoding);
+            } else {
+                mDataConverter = nullptr;
+                mFormatWithConverter = nullptr;
+            }
+            mSrcEncoding = codecEncoding;
+            mDstEncoding = configEncoding;
+        }
+        if (int encoding; !mFormat->findInt32(KEY_PCM_ENCODING, &encoding)
+                || encoding != mDstEncoding) {
+        }
+    }
+    if (!mDataConverter) {
+        return false;
+    }
+    sp<MediaCodecBuffer> srcBuffer = ConstLinearBlockBuffer::Allocate(mFormat, src);
+    if (!srcBuffer) {
+        return false;
+    }
+    if (!*dst) {
+        *dst = new Codec2Buffer(
+                mFormat,
+                new ABuffer(mDataConverter->targetSize(srcBuffer->size())));
+    }
+    sp<MediaCodecBuffer> dstBuffer = *dst;
+    status_t err = mDataConverter->convert(srcBuffer, dstBuffer);
+    if (err != OK) {
+        ALOGD("[%s] buffer conversion failed: %d", mName, err);
+        return false;
+    }
+    dstBuffer->setFormat(mFormatWithConverter);
+    return true;
+}
+
 void OutputBuffers::clearStash() {
     mPending.clear();
     mReorderStash.clear();
@@ -1082,7 +1136,7 @@
         return err;
     }
     c2Buffer->setFormat(mFormat);
-    if (!c2Buffer->copy(buffer)) {
+    if (!convert(buffer, &c2Buffer) && !c2Buffer->copy(buffer)) {
         ALOGD("[%s] copy buffer failed", mName);
         return WOULD_BLOCK;
     }
@@ -1198,9 +1252,12 @@
         const std::shared_ptr<C2Buffer> &buffer,
         size_t *index,
         sp<MediaCodecBuffer> *clientBuffer) {
-    sp<Codec2Buffer> newBuffer = wrap(buffer);
-    if (newBuffer == nullptr) {
-        return NO_MEMORY;
+    sp<Codec2Buffer> newBuffer;
+    if (!convert(buffer, &newBuffer)) {
+        newBuffer = wrap(buffer);
+        if (newBuffer == nullptr) {
+            return NO_MEMORY;
+        }
     }
     newBuffer->setFormat(mFormat);
     *index = mImpl.assignSlot(newBuffer);
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index 995d3a4..c8e9930 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -18,9 +18,11 @@
 
 #define CCODEC_BUFFERS_H_
 
+#include <optional>
 #include <string>
 
 #include <C2Config.h>
+#include <DataConverter.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/MediaCodecBuffer.h>
 
@@ -382,6 +384,14 @@
      */
     void submit(const sp<MediaCodecBuffer> &buffer);
 
+    /**
+     * Apply DataConverter from |src| to |*dst| if needed. If |*dst| is nullptr,
+     * a new buffer is allocated.
+     *
+     * Returns true if conversion was needed and executed; false otherwise.
+     */
+    bool convert(const std::shared_ptr<C2Buffer> &src, sp<Codec2Buffer> *dst);
+
 private:
     // SkipCutBuffer
     int32_t mDelay;
@@ -391,6 +401,12 @@
 
     void setSkipCutBuffer(int32_t skip, int32_t cut);
 
+    // DataConverter
+    sp<DataConverter> mDataConverter;
+    sp<AMessage> mFormatWithConverter;
+    std::optional<int32_t> mSrcEncoding;
+    std::optional<int32_t> mDstEncoding;
+
     // Output stash
 
     // Struct for an entry in the output stash (mPending and mReorderStash)
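
The new OutputBuffers::convert() path routes linear buffers through a DataConverter when "android._codec-pcm-encoding" and "android._config-pcm-encoding" disagree. As a rough illustration of what such a conversion does (a simplified float-to-16-bit case with saturation; the real AudioConverter supports more encodings):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Convert float PCM samples in [-1, 1] to 16-bit PCM.
    std::vector<int16_t> floatToPcm16(const std::vector<float> &in) {
        std::vector<int16_t> out;
        out.reserve(in.size());
        for (float s : in) {
            float clamped = std::clamp(s, -1.0f, 1.0f);  // saturate out-of-range samples
            out.push_back(static_cast<int16_t>(clamped * 32767.0f));
        }
        return out;
    }
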
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index c275187..03418d9 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -657,24 +657,29 @@
     add(ConfigMapper(KEY_SAMPLE_RATE,   C2_PARAMKEY_CODED_SAMPLE_RATE,  "value")
         .limitTo(D::AUDIO & D::CODED));
 
-    add(ConfigMapper(KEY_PCM_ENCODING,  C2_PARAMKEY_PCM_ENCODING,       "value")
+    auto pcmEncodingMapper = [](C2Value v) -> C2Value {
+        int32_t value;
+        C2Config::pcm_encoding_t to;
+        if (v.get(&value) && C2Mapper::map(value, &to)) {
+            return to;
+        }
+        return C2Value();
+    };
+    auto pcmEncodingReverse = [](C2Value v) -> C2Value {
+        C2Config::pcm_encoding_t value;
+        int32_t to;
+        using C2ValueType = typename _c2_reduce_enum_to_underlying_type<decltype(value)>::type;
+        if (v.get((C2ValueType*)&value) && C2Mapper::map(value, &to)) {
+            return to;
+        }
+        return C2Value();
+    };
+    add(ConfigMapper(KEY_PCM_ENCODING,              C2_PARAMKEY_PCM_ENCODING, "value")
         .limitTo(D::AUDIO)
-        .withMappers([](C2Value v) -> C2Value {
-            int32_t value;
-            C2Config::pcm_encoding_t to;
-            if (v.get(&value) && C2Mapper::map(value, &to)) {
-                return to;
-            }
-            return C2Value();
-        }, [](C2Value v) -> C2Value {
-            C2Config::pcm_encoding_t value;
-            int32_t to;
-            using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(value)>::type;
-            if (v.get((C2ValueType*)&value) && C2Mapper::map(value, &to)) {
-                return to;
-            }
-            return C2Value();
-        }));
+        .withMappers(pcmEncodingMapper, pcmEncodingReverse));
+    add(ConfigMapper("android._codec-pcm-encoding", C2_PARAMKEY_PCM_ENCODING, "value")
+        .limitTo(D::AUDIO & D::READ)
+        .withMappers(pcmEncodingMapper, pcmEncodingReverse));
 
     add(ConfigMapper(KEY_IS_ADTS, C2_PARAMKEY_AAC_PACKAGING, "value")
         .limitTo(D::AUDIO & D::CODED)
diff --git a/media/codec2/vndk/C2PlatformStorePluginLoader.cpp b/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
index bee028a..2a888a8 100644
--- a/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
+++ b/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
@@ -59,13 +59,14 @@
 
 c2_status_t C2PlatformStorePluginLoader::createBlockPool(
         ::C2Allocator::id_t allocatorId, ::C2BlockPool::local_id_t blockPoolId,
-        std::shared_ptr<C2BlockPool>* pool) {
+        std::shared_ptr<C2BlockPool>* pool,
+        std::function<void(C2BlockPool *)> deleter) {
     if (mCreateBlockPool == nullptr) {
         ALOGD("Handle or CreateBlockPool symbol is null");
         return C2_NOT_FOUND;
     }
 
-    std::shared_ptr<::C2BlockPool> ptr(mCreateBlockPool(allocatorId, blockPoolId));
+    std::shared_ptr<::C2BlockPool> ptr(mCreateBlockPool(allocatorId, blockPoolId), deleter);
     if (ptr) {
         *pool = ptr;
         return C2_OK;
@@ -75,14 +76,16 @@
 }
 
 c2_status_t C2PlatformStorePluginLoader::createAllocator(
-        ::C2Allocator::id_t allocatorId, std::shared_ptr<C2Allocator>* const allocator) {
+        ::C2Allocator::id_t allocatorId,
+        std::shared_ptr<C2Allocator>* const allocator,
+        std::function<void(C2Allocator *)> deleter) {
     if (mCreateAllocator == nullptr) {
         ALOGD("Handle or CreateAllocator symbol is null");
         return C2_NOT_FOUND;
     }
 
     c2_status_t res = C2_CORRUPTED;
-    allocator->reset(mCreateAllocator(allocatorId, &res));
+    allocator->reset(mCreateAllocator(allocatorId, &res), deleter);
     if (res != C2_OK) {
         ALOGD("Failed to CreateAllocator by id: %u, res: %d", allocatorId, res);
         allocator->reset();
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index c07c09e..1660c38 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -443,6 +443,7 @@
 public:
     _C2BlockPoolCache() : mBlockPoolSeqId(C2BlockPool::PLATFORM_START + 1) {}
 
+private:
     c2_status_t _createBlockPool(
             C2PlatformAllocatorStore::id_t allocatorId,
             std::vector<std::shared_ptr<const C2Component>> components,
@@ -456,14 +457,19 @@
         if (allocatorId == C2AllocatorStore::DEFAULT_LINEAR) {
             allocatorId = GetPreferredLinearAllocatorId(GetCodec2PoolMask());
         }
+        auto deleter = [this, poolId](C2BlockPool *pool) {
+            std::unique_lock lock(mMutex);
+            mBlockPools.erase(poolId);
+            mComponents.erase(poolId);
+            delete pool;
+        };
         switch(allocatorId) {
             case C2PlatformAllocatorStore::ION: /* also ::DMABUFHEAP */
                 res = allocatorStore->fetchAllocator(
                         C2PlatformAllocatorStore::ION, &allocator);
                 if (res == C2_OK) {
-                    std::shared_ptr<C2BlockPool> ptr =
-                            std::make_shared<C2PooledBlockPool>(
-                                    allocator, poolId);
+                    std::shared_ptr<C2BlockPool> ptr(
+                            new C2PooledBlockPool(allocator, poolId), deleter);
                     *pool = ptr;
                     mBlockPools[poolId] = ptr;
                     mComponents[poolId].insert(
@@ -475,9 +481,8 @@
                 res = allocatorStore->fetchAllocator(
                         C2PlatformAllocatorStore::BLOB, &allocator);
                 if (res == C2_OK) {
-                    std::shared_ptr<C2BlockPool> ptr =
-                            std::make_shared<C2PooledBlockPool>(
-                                    allocator, poolId);
+                    std::shared_ptr<C2BlockPool> ptr(
+                            new C2PooledBlockPool(allocator, poolId), deleter);
                     *pool = ptr;
                     mBlockPools[poolId] = ptr;
                     mComponents[poolId].insert(
@@ -490,8 +495,8 @@
                 res = allocatorStore->fetchAllocator(
                         C2AllocatorStore::DEFAULT_GRAPHIC, &allocator);
                 if (res == C2_OK) {
-                    std::shared_ptr<C2BlockPool> ptr =
-                        std::make_shared<C2PooledBlockPool>(allocator, poolId);
+                    std::shared_ptr<C2BlockPool> ptr(
+                        new C2PooledBlockPool(allocator, poolId), deleter);
                     *pool = ptr;
                     mBlockPools[poolId] = ptr;
                     mComponents[poolId].insert(
@@ -503,9 +508,8 @@
                 res = allocatorStore->fetchAllocator(
                         C2PlatformAllocatorStore::BUFFERQUEUE, &allocator);
                 if (res == C2_OK) {
-                    std::shared_ptr<C2BlockPool> ptr =
-                            std::make_shared<C2BufferQueueBlockPool>(
-                                    allocator, poolId);
+                    std::shared_ptr<C2BlockPool> ptr(
+                            new C2BufferQueueBlockPool(allocator, poolId), deleter);
                     *pool = ptr;
                     mBlockPools[poolId] = ptr;
                     mComponents[poolId].insert(
@@ -517,7 +521,7 @@
                 // Try to create block pool from platform store plugins.
                 std::shared_ptr<C2BlockPool> ptr;
                 res = C2PlatformStorePluginLoader::GetInstance()->createBlockPool(
-                        allocatorId, poolId, &ptr);
+                        allocatorId, poolId, &ptr, deleter);
                 if (res == C2_OK) {
                     *pool = ptr;
                     mBlockPools[poolId] = ptr;
@@ -530,17 +534,20 @@
         return res;
     }
 
+public:
     c2_status_t createBlockPool(
             C2PlatformAllocatorStore::id_t allocatorId,
             std::vector<std::shared_ptr<const C2Component>> components,
             std::shared_ptr<C2BlockPool> *pool) {
+        std::unique_lock lock(mMutex);
         return _createBlockPool(allocatorId, components, mBlockPoolSeqId++, pool);
     }
 
-    bool getBlockPool(
+    c2_status_t getBlockPool(
             C2BlockPool::local_id_t blockPoolId,
             std::shared_ptr<const C2Component> component,
             std::shared_ptr<C2BlockPool> *pool) {
+        std::unique_lock lock(mMutex);
         // TODO: use one iterator for multiple blockpool type scalability.
         std::shared_ptr<C2BlockPool> ptr;
         auto it = mBlockPools.find(blockPoolId);
@@ -558,14 +565,22 @@
                         });
                 if (found != mComponents[blockPoolId].end()) {
                     *pool = ptr;
-                    return true;
+                    return C2_OK;
                 }
             }
         }
-        return false;
+        // TODO: remove this. this is temporary
+        if (blockPoolId == C2BlockPool::PLATFORM_START) {
+            return _createBlockPool(
+                    C2PlatformAllocatorStore::BUFFERQUEUE, {component}, blockPoolId, pool);
+        }
+        return C2_NOT_FOUND;
     }
 
 private:
+    // The deleter needs to hold this mutex, and there is a small chance that the
+    // deleter is invoked while the mutex is already held, hence the recursive mutex.
+    std::recursive_mutex mMutex;
     C2BlockPool::local_id_t mBlockPoolSeqId;
 
     std::map<C2BlockPool::local_id_t, std::weak_ptr<C2BlockPool>> mBlockPools;
@@ -574,7 +589,6 @@
 
 static std::unique_ptr<_C2BlockPoolCache> sBlockPoolCache =
     std::make_unique<_C2BlockPoolCache>();
-static std::mutex sBlockPoolCacheMutex;
 
 } // anynymous namespace
 
@@ -582,15 +596,12 @@
         C2BlockPool::local_id_t id, std::shared_ptr<const C2Component> component,
         std::shared_ptr<C2BlockPool> *pool) {
     pool->reset();
-    std::lock_guard<std::mutex> lock(sBlockPoolCacheMutex);
     std::shared_ptr<C2AllocatorStore> allocatorStore = GetCodec2PlatformAllocatorStore();
     std::shared_ptr<C2Allocator> allocator;
     c2_status_t res = C2_NOT_FOUND;
 
     if (id >= C2BlockPool::PLATFORM_START) {
-        if (sBlockPoolCache->getBlockPool(id, component, pool)) {
-            return C2_OK;
-        }
+        return sBlockPoolCache->getBlockPool(id, component, pool);
     }
 
     switch (id) {
@@ -606,11 +617,6 @@
             *pool = std::make_shared<C2BasicGraphicBlockPool>(allocator);
         }
         break;
-    // TODO: remove this. this is temporary
-    case C2BlockPool::PLATFORM_START:
-        res = sBlockPoolCache->_createBlockPool(
-                C2PlatformAllocatorStore::BUFFERQUEUE, {component}, id, pool);
-        break;
     default:
         break;
     }
@@ -623,7 +629,6 @@
         std::shared_ptr<C2BlockPool> *pool) {
     pool->reset();
 
-    std::lock_guard<std::mutex> lock(sBlockPoolCacheMutex);
     return sBlockPoolCache->createBlockPool(allocatorId, components, pool);
 }
 
@@ -633,7 +638,6 @@
         std::shared_ptr<C2BlockPool> *pool) {
     pool->reset();
 
-    std::lock_guard<std::mutex> lock(sBlockPoolCacheMutex);
     return sBlockPoolCache->createBlockPool(allocatorId, {component}, pool);
 }
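
The C2Store changes above register each pool as a weak_ptr and rely on a custom shared_ptr deleter to erase the cache entry; the mutex is recursive because the deleter can run while a cache method already holds the lock. A standalone sketch of the same pattern (illustrative types, not the actual C2 classes):

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <mutex>

    struct Pool { explicit Pool(uint64_t id) : id(id) {} uint64_t id; };

    class PoolCache {
    public:
        std::shared_ptr<Pool> create(uint64_t id) {
            std::unique_lock<std::recursive_mutex> lock(mMutex);
            auto deleter = [this, id](Pool *p) {
                // May run while create()/get() already holds mMutex, hence recursive.
                std::unique_lock<std::recursive_mutex> lock(mMutex);
                mPools.erase(id);
                delete p;
            };
            std::shared_ptr<Pool> pool(new Pool(id), deleter);
            mPools[id] = pool;  // cache keeps only a weak reference
            return pool;
        }
        std::shared_ptr<Pool> get(uint64_t id) {
            std::unique_lock<std::recursive_mutex> lock(mMutex);
            auto it = mPools.find(id);
            return it == mPools.end() ? nullptr : it->second.lock();
        }
    private:
        std::recursive_mutex mMutex;
        std::map<uint64_t, std::weak_ptr<Pool>> mPools;
    };
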
 
diff --git a/media/codec2/vndk/include/C2PlatformStorePluginLoader.h b/media/codec2/vndk/include/C2PlatformStorePluginLoader.h
index 4c10643..73d1b5e 100644
--- a/media/codec2/vndk/include/C2PlatformStorePluginLoader.h
+++ b/media/codec2/vndk/include/C2PlatformStorePluginLoader.h
@@ -61,9 +61,11 @@
      * \retval C2_NOT_FOUND the extension symbol was not found.
      * \retval C2_BAD_INDEX the input allocatorId is not defined in platform store extension.
      */
-    c2_status_t createBlockPool(::C2Allocator::id_t allocatorId,
-                                ::C2BlockPool::local_id_t blockPoolId,
-                                std::shared_ptr<C2BlockPool>* pool);
+    c2_status_t createBlockPool(
+            ::C2Allocator::id_t allocatorId,
+            ::C2BlockPool::local_id_t blockPoolId,
+            std::shared_ptr<C2BlockPool>* pool,
+            std::function<void(C2BlockPool *)> deleter = std::default_delete<C2BlockPool>());
 
     /**
      * Creates allocator from platform store extension.
@@ -81,8 +83,10 @@
      * \retval C2_BAD_INDEX the input allocatorId is not defined in platform store extension.
      * \retval C2_NO_MEMORY not enough memory to create the allocator
      */
-    c2_status_t createAllocator(::C2Allocator::id_t allocatorId,
-                                std::shared_ptr<C2Allocator>* const allocator);
+    c2_status_t createAllocator(
+            ::C2Allocator::id_t allocatorId,
+            std::shared_ptr<C2Allocator>* const allocator,
+            std::function<void(C2Allocator *)> deleter = std::default_delete<C2Allocator>());
 
 private:
     explicit C2PlatformStorePluginLoader(const char *libPath);
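
The defaulted deleter parameter added above keeps existing callers unchanged (plain delete) while letting the block-pool cache pass a deleter that also unregisters the object. A minimal sketch of the same signature shape (hypothetical Widget type, not the C2 API):

    #include <functional>
    #include <memory>

    struct Widget {};

    void createWidget(std::shared_ptr<Widget> *out,
                      std::function<void(Widget *)> deleter = std::default_delete<Widget>()) {
        out->reset(new Widget(), deleter);
    }

    int main() {
        std::shared_ptr<Widget> w;
        createWidget(&w);                                 // default: plain delete
        createWidget(&w, [](Widget *p) { delete p; });    // custom cleanup hook
    }
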
diff --git a/media/codecs/amrwb/enc/Android.bp b/media/codecs/amrwb/enc/Android.bp
index cc72eb7..d945531 100644
--- a/media/codecs/amrwb/enc/Android.bp
+++ b/media/codecs/amrwb/enc/Android.bp
@@ -139,11 +139,6 @@
         },
     },
 
-    include_dirs: [
-        "frameworks/av/include",
-        "frameworks/av/media/libstagefright/include",
-    ],
-
     local_include_dirs: ["src"],
     export_include_dirs: ["inc"],
 
diff --git a/media/codecs/m4v_h263/enc/src/fastidct.cpp b/media/codecs/m4v_h263/enc/src/fastidct.cpp
index 688effc..ec1b28f 100644
--- a/media/codecs/m4v_h263/enc/src/fastidct.cpp
+++ b/media/codecs/m4v_h263/enc/src/fastidct.cpp
@@ -76,6 +76,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_col2(Short *blk)
 {
     int32 x0, x1, x3, x5, x7;//, x8;
@@ -102,6 +104,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_col3(Short *blk)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -137,6 +141,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_col4(Short *blk)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -180,6 +186,8 @@
 }
 
 #ifndef SMALL_DCT
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_col0x40(Short *blk)
 {
     int32 x1, x3, x5, x7;//, x8;
@@ -230,6 +238,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_col0x10(Short *blk)
 {
     int32 x1, x3, x5,  x7;
@@ -256,6 +266,8 @@
 
 #endif /* SMALL_DCT */
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_col(Short *blk)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -368,6 +380,8 @@
     return;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row2Inter(Short *blk, UChar *rec, Int lx)
 {
     int32 x0, x1, x2, x4, x5;
@@ -427,6 +441,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row3Inter(Short *blk, UChar *rec, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -497,6 +513,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row4Inter(Short *blk, UChar *rec, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -573,6 +591,8 @@
 }
 
 #ifndef SMALL_DCT
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row0x40Inter(Short *blk, UChar *rec, Int lx)
 {
     int32 x1, x2, x4, x5;
@@ -686,6 +706,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row0x10Inter(Short *blk, UChar *rec, Int lx)
 {
     int32 x1, x3, x5, x7;
@@ -741,6 +763,8 @@
 
 #endif /* SMALL_DCT */
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_rowInter(Short *blk, UChar *rec, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -864,6 +888,8 @@
     return;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row2Intra(Short *blk, UChar *rec, Int lx)
 {
     int32 x0, x1, x2, x4, x5;
@@ -919,6 +945,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row3Intra(Short *blk, UChar *rec, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -985,6 +1013,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row4Intra(Short *blk, UChar *rec, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -1058,6 +1088,8 @@
 }
 
 #ifndef SMALL_DCT
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row0x40Intra(Short *blk, UChar *rec, Int lx)
 {
     int32  x1, x2, x4, x5;
@@ -1166,6 +1198,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row0x10Intra(Short *blk, UChar *rec, Int lx)
 {
     int32 x1, x3, x5, x7;
@@ -1218,6 +1252,8 @@
 }
 
 #endif /* SMALL_DCT */
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_rowIntra(Short *blk, UChar *rec, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -1364,6 +1400,8 @@
     return;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row2zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
 {
     int32 x0, x1, x2, x4, x5;
@@ -1424,6 +1462,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row3zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -1495,6 +1535,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row4zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
@@ -1572,6 +1614,8 @@
 }
 
 #ifndef SMALL_DCT
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row0x40zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
 {
     int32 x1, x2, x4, x5;
@@ -1687,6 +1731,8 @@
     return ;
 }
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_row0x10zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
 {
     int32 x1, x3, x5, x7;
@@ -1743,6 +1789,8 @@
 
 #endif /* SMALL_DCT */
 
+/* Ignoring overflows as idct function expects and uses overflows */
+__attribute__((no_sanitize("signed-integer-overflow")))
 void idct_rowzmv(Short *blk, UChar *rec, UChar *pred, Int lx)
 {
     int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;
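
The attribute sprinkled through this file only suppresses the sanitizer report for the annotated function; the wraparound arithmetic the IDCT relies on is otherwise unchanged. A minimal sketch of the annotation (assuming a clang/gcc build with -fsanitize=signed-integer-overflow):

    #include <cstdint>

    // Overflow here is intentional; suppress the UBSan signed-integer-overflow
    // report for this one function rather than for the whole build target.
    __attribute__((no_sanitize("signed-integer-overflow")))
    int32_t wrappingAdd(int32_t a, int32_t b) {
        return a + b;
    }
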
diff --git a/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp b/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
index 912c821..5e613d9 100644
--- a/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
+++ b/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
@@ -50,7 +50,7 @@
 
  private:
   tagvideoDecControls *mDecHandle = nullptr;
-  uint8_t *mOutputBuffer[kNumOutputBuffers];
+  uint8_t *mOutputBuffer[kNumOutputBuffers] = {};
   bool mInitialized = false;
   bool mFramesConfigured = false;
 #ifdef MPEG4
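
The "= {}" initializer added above value-initializes the array, so the fuzzer's output pointers start as nullptr instead of indeterminate values. Equivalent standalone form (hypothetical struct, same idiom):

    #include <cstdint>

    struct Decoder {
        static constexpr int kNumOutputBuffers = 2;
        uint8_t *mOutputBuffer[kNumOutputBuffers] = {};  // every element becomes nullptr
    };
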
diff --git a/media/codecs/mp3dec/Android.bp b/media/codecs/mp3dec/Android.bp
index 015b8b6..1ab0511 100644
--- a/media/codecs/mp3dec/Android.bp
+++ b/media/codecs/mp3dec/Android.bp
@@ -108,8 +108,6 @@
         cfi: true,
     },
 
-    include_dirs: ["frameworks/av/media/libstagefright/include"],
-
     header_libs: ["libstagefright_mp3dec_headers"],
     export_header_lib_headers: ["libstagefright_mp3dec_headers"],
 
diff --git a/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp b/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
index c04f7f3..b3ecc77 100644
--- a/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
+++ b/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
@@ -219,6 +219,7 @@
 ; FUNCTION CODE
 ----------------------------------------------------------------------------*/
 
+// deliberately plays near overflow points of int32
 #if __has_attribute(no_sanitize)
 __attribute__((no_sanitize("integer")))
 #endif
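
Unlike the m4v_h263 change, this file guards the annotation with a feature test so it still compiles on toolchains that lack the attribute. A minimal sketch of that guard pattern:

    #include <cstdint>

    #if defined(__has_attribute)
    #if __has_attribute(no_sanitize)
    __attribute__((no_sanitize("integer")))
    #endif
    #endif
    int32_t accumulate(int32_t acc, int32_t term) {
        return acc + term;  // intentional wraparound in fixed-point math
    }
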
diff --git a/media/extractors/Android.bp b/media/extractors/Android.bp
index 7513cb1..66585da 100644
--- a/media/extractors/Android.bp
+++ b/media/extractors/Android.bp
@@ -24,10 +24,6 @@
 cc_defaults {
     name: "extractor-defaults",
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/include",
-    ],
-
     shared_libs: [
         "liblog",
     ],
diff --git a/media/extractors/aac/Android.bp b/media/extractors/aac/Android.bp
index 7bf3a13..a926422 100644
--- a/media/extractors/aac/Android.bp
+++ b/media/extractors/aac/Android.bp
@@ -21,6 +21,10 @@
 
     srcs: ["AACExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_foundation",
         "libstagefright_metadatautils",
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/include/AACExtractor.h
similarity index 100%
rename from media/extractors/aac/AACExtractor.h
rename to media/extractors/aac/include/AACExtractor.h
diff --git a/media/extractors/amr/Android.bp b/media/extractors/amr/Android.bp
index 712360d..121b7a3 100644
--- a/media/extractors/amr/Android.bp
+++ b/media/extractors/amr/Android.bp
@@ -21,6 +21,10 @@
 
     srcs: ["AMRExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_foundation",
     ],
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/include/AMRExtractor.h
similarity index 100%
rename from media/extractors/amr/AMRExtractor.h
rename to media/extractors/amr/include/AMRExtractor.h
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index 9a2a76b..fd51622 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -23,8 +23,8 @@
 
     srcs: ["FLACExtractor.cpp"],
 
-    include_dirs: [
-        "external/flac/include",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/include/FLACExtractor.h
similarity index 100%
rename from media/extractors/flac/FLACExtractor.h
rename to media/extractors/flac/include/FLACExtractor.h
diff --git a/media/extractors/fuzzers/Android.bp b/media/extractors/fuzzers/Android.bp
index 0e54b58..490e195 100644
--- a/media/extractors/fuzzers/Android.bp
+++ b/media/extractors/fuzzers/Android.bp
@@ -80,11 +80,6 @@
     defaults: ["extractor-fuzzer-defaults"],
     host_supported: true,
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mpeg2",
-        "frameworks/av/media/libstagefright",
-    ],
-
     static_libs: [
         "libstagefright_foundation_without_imemory",
         "libstagefright_mpeg2support",
@@ -124,14 +119,6 @@
         "mp4_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mp4",
-    ],
-
-    header_libs: [
-        "libaudioclient_headers",
-    ],
-
     static_libs: [
         "libstagefright_id3",
         "libstagefright_esds",
@@ -150,10 +137,6 @@
         "wav_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/wav",
-    ],
-
     static_libs: [
         "libfifo",
         "libwavextractor",
@@ -173,10 +156,6 @@
         "amr_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/amr",
-    ],
-
     static_libs: [
         "libamrextractor",
     ],
@@ -193,10 +172,6 @@
         "mkv_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mkv",
-    ],
-
     static_libs: [
         "libwebm",
         "libstagefright_flacdec",
@@ -217,9 +192,6 @@
         "ogg_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/ogg",
-    ],
 
     static_libs: [
         "libstagefright_metadatautils",
@@ -265,10 +237,6 @@
         "mp3_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/mp3",
-    ],
-
     static_libs: [
         "libfifo",
         "libmp3extractor",
@@ -285,10 +253,6 @@
         "aac_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/aac",
-    ],
-
     static_libs: [
         "libaacextractor",
         "libstagefright_metadatautils",
@@ -304,10 +268,6 @@
         "flac_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/flac",
-    ],
-
     static_libs: [
         "libstagefright_metadatautils",
         "libFLAC",
@@ -329,10 +289,6 @@
         "midi_extractor_fuzzer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/midi",
-    ],
-
     static_libs: [
         "libsonivox",
         "libmedia_midiiowrapper",
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index 08a6fa0..feabf9e 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -23,6 +23,10 @@
 
     srcs: ["MidiExtractor.cpp"],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     header_libs: [
         "libmedia_datasource_headers",
     ],
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/include/MidiExtractor.h
similarity index 100%
rename from media/extractors/midi/MidiExtractor.h
rename to media/extractors/midi/include/MidiExtractor.h
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index 54c5b27..98ce305 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -21,10 +21,8 @@
 
     srcs: ["MatroskaExtractor.cpp"],
 
-    include_dirs: [
-        "external/flac/include",
-        "external/libvpx/libwebm",
-        "frameworks/av/media/libstagefright/flac/dec",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/include/MatroskaExtractor.h
similarity index 100%
rename from media/extractors/mkv/MatroskaExtractor.h
rename to media/extractors/mkv/include/MatroskaExtractor.h
diff --git a/media/extractors/mp3/Android.bp b/media/extractors/mp3/Android.bp
index 75b9b7b..396a13a 100644
--- a/media/extractors/mp3/Android.bp
+++ b/media/extractors/mp3/Android.bp
@@ -16,6 +16,10 @@
             "XINGSeeker.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libutils",
         "libstagefright_id3",
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/include/MP3Extractor.h
similarity index 100%
rename from media/extractors/mp3/MP3Extractor.h
rename to media/extractors/mp3/include/MP3Extractor.h
diff --git a/media/extractors/mp3/MP3Seeker.h b/media/extractors/mp3/include/MP3Seeker.h
similarity index 100%
rename from media/extractors/mp3/MP3Seeker.h
rename to media/extractors/mp3/include/MP3Seeker.h
diff --git a/media/extractors/mp3/VBRISeeker.h b/media/extractors/mp3/include/VBRISeeker.h
similarity index 100%
rename from media/extractors/mp3/VBRISeeker.h
rename to media/extractors/mp3/include/VBRISeeker.h
diff --git a/media/extractors/mp3/XINGSeeker.h b/media/extractors/mp3/include/XINGSeeker.h
similarity index 100%
rename from media/extractors/mp3/XINGSeeker.h
rename to media/extractors/mp3/include/XINGSeeker.h
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index 7fa6bfd..540d75d 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -15,6 +15,15 @@
     ],
 }
 
+cc_library_headers {
+    name: "libmp4extractor_headers",
+    host_supported: true,
+
+    export_include_dirs: [
+        "include",
+    ],
+}
+
 cc_library {
     name: "libmp4extractor",
     defaults: ["extractor-defaults"],
@@ -27,6 +36,10 @@
         "SampleTable.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     static_libs: [
         "libstagefright_esds",
         "libstagefright_foundation",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index fbcd554..8836c47 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -157,6 +157,7 @@
 
     MediaBufferHelper *mBuffer;
 
+    size_t mSrcBufferSize;
     uint8_t *mSrcBuffer;
 
     bool mIsHeif;
@@ -5083,6 +5084,7 @@
       mNALLengthSize(0),
       mStarted(false),
       mBuffer(NULL),
+      mSrcBufferSize(0),
       mSrcBuffer(NULL),
       mItemTable(itemTable),
       mElstShiftStartTicks(elstShiftStartTicks),
@@ -5264,6 +5266,7 @@
         // file probably specified a bad max size
         return AMEDIA_ERROR_MALFORMED;
     }
+    mSrcBufferSize = max_size;
 
     mStarted = true;
 
@@ -5280,6 +5283,7 @@
         mBuffer = NULL;
     }
 
+    mSrcBufferSize = 0;
     delete[] mSrcBuffer;
     mSrcBuffer = NULL;
 
@@ -6467,13 +6471,19 @@
         // Whole NAL units are returned but each fragment is prefixed by
         // the start code (0x00 00 00 01).
         ssize_t num_bytes_read = 0;
-        num_bytes_read = mDataSource->readAt(offset, mSrcBuffer, size);
+        bool mSrcBufferFitsDataToRead = size <= mSrcBufferSize;
+        if (mSrcBufferFitsDataToRead) {
+          num_bytes_read = mDataSource->readAt(offset, mSrcBuffer, size);
+        } else {
+          // We are trying to read a sample larger than the expected max sample size.
+          // Fall through and let the failure be handled by the following if.
+          android_errorWriteLog(0x534e4554, "188893559");
+        }
 
         if (num_bytes_read < (ssize_t)size) {
             mBuffer->release();
             mBuffer = NULL;
-
-            return AMEDIA_ERROR_IO;
+            return mSrcBufferFitsDataToRead ? AMEDIA_ERROR_IO : AMEDIA_ERROR_MALFORMED;
         }
 
         uint8_t *dstData = (uint8_t *)mBuffer->data();
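
The mSrcBufferSize bookkeeping above turns an out-of-bounds read into a malformed-stream error. A standalone sketch of the same bounds check (illustrative types, not the extractor classes):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Source {
        std::vector<uint8_t> data;
        size_t readAt(size_t offset, uint8_t *out, size_t len) const {
            if (offset >= data.size()) return 0;
            size_t n = std::min(len, data.size() - offset);
            memcpy(out, data.data() + offset, n);
            return n;
        }
    };

    // Refuse to read a sample that claims to be larger than the scratch buffer
    // sized from the advertised max sample size.
    bool readSample(const Source &src, size_t offset, size_t sampleSize,
                    uint8_t *scratch, size_t scratchCapacity) {
        if (sampleSize > scratchCapacity) {
            return false;  // treat as malformed rather than reading past the buffer
        }
        return src.readAt(offset, scratch, sampleSize) == sampleSize;
    }
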
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/include/AC4Parser.h
similarity index 100%
rename from media/extractors/mp4/AC4Parser.h
rename to media/extractors/mp4/include/AC4Parser.h
diff --git a/media/extractors/mp4/ItemTable.h b/media/extractors/mp4/include/ItemTable.h
similarity index 100%
rename from media/extractors/mp4/ItemTable.h
rename to media/extractors/mp4/include/ItemTable.h
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/include/MPEG4Extractor.h
similarity index 100%
rename from media/extractors/mp4/MPEG4Extractor.h
rename to media/extractors/mp4/include/MPEG4Extractor.h
diff --git a/media/extractors/mp4/SampleIterator.h b/media/extractors/mp4/include/SampleIterator.h
similarity index 100%
rename from media/extractors/mp4/SampleIterator.h
rename to media/extractors/mp4/include/SampleIterator.h
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/include/SampleTable.h
similarity index 100%
rename from media/extractors/mp4/SampleTable.h
rename to media/extractors/mp4/include/SampleTable.h
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 7e6247b..8faecae 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -38,6 +38,10 @@
         "MPEG2TSExtractor.cpp",
     ],
 
+    export_include_dirs: [
+        "include",
+    ],
+
     shared_libs: [
         "libbase",
         "libcgrouprc#29",
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.h b/media/extractors/mpeg2/include/MPEG2PSExtractor.h
similarity index 100%
rename from media/extractors/mpeg2/MPEG2PSExtractor.h
rename to media/extractors/mpeg2/include/MPEG2PSExtractor.h
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/include/MPEG2TSExtractor.h
similarity index 100%
rename from media/extractors/mpeg2/MPEG2TSExtractor.h
rename to media/extractors/mpeg2/include/MPEG2TSExtractor.h
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index d7540c4..dc3c25c 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -22,8 +22,8 @@
 
     srcs: ["OggExtractor.cpp"],
 
-    include_dirs: [
-        "external/tremolo",
+    export_include_dirs: [
+        "include",
     ],
 
     header_libs: [
diff --git a/media/extractors/ogg/OggExtractor.h b/media/extractors/ogg/include/OggExtractor.h
similarity index 100%
rename from media/extractors/ogg/OggExtractor.h
rename to media/extractors/ogg/include/OggExtractor.h
diff --git a/media/extractors/tests/Android.bp b/media/extractors/tests/Android.bp
index 5d97d9a..3c3bbdc 100644
--- a/media/extractors/tests/Android.bp
+++ b/media/extractors/tests/Android.bp
@@ -45,14 +45,11 @@
         "libdatasource",
         "libwatchdog",
 
-        "libstagefright",
         "libstagefright_id3",
         "libstagefright_flacdec",
         "libstagefright_esds",
         "libstagefright_mpeg2support",
-        "libstagefright_mpeg2extractor",
         "libstagefright_foundation_colorutils_ndk",
-        "libstagefright_foundation",
         "libstagefright_metadatautils",
 
         "libmedia_midiiowrapper",
@@ -74,17 +71,14 @@
         "libcutils",
         "libmediandk",
         "libmedia",
+        "libstagefright",
+        "libstagefright_foundation",
         "libcrypto",
         "libhidlmemory",
         "libhidlbase",
         "libbase",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/",
-        "frameworks/av/media/libstagefright/",
-    ],
-
     compile_multilib: "first",
 
     cflags: [
diff --git a/media/extractors/tests/ExtractorUnitTest.cpp b/media/extractors/tests/ExtractorUnitTest.cpp
index 84ec1f2..2bd9c6a 100644
--- a/media/extractors/tests/ExtractorUnitTest.cpp
+++ b/media/extractors/tests/ExtractorUnitTest.cpp
@@ -27,18 +27,18 @@
 #include <media/stagefright/MetaDataUtils.h>
 #include <media/stagefright/foundation/OpusHeader.h>
 
-#include "aac/AACExtractor.h"
-#include "amr/AMRExtractor.h"
-#include "flac/FLACExtractor.h"
-#include "midi/MidiExtractor.h"
-#include "mkv/MatroskaExtractor.h"
-#include "mp3/MP3Extractor.h"
-#include "mp4/MPEG4Extractor.h"
-#include "mp4/SampleTable.h"
-#include "mpeg2/MPEG2PSExtractor.h"
-#include "mpeg2/MPEG2TSExtractor.h"
-#include "ogg/OggExtractor.h"
-#include "wav/WAVExtractor.h"
+#include <AACExtractor.h>
+#include <AMRExtractor.h>
+#include <FLACExtractor.h>
+#include <MidiExtractor.h>
+#include <MatroskaExtractor.h>
+#include <MP3Extractor.h>
+#include <MPEG4Extractor.h>
+#include <SampleTable.h>
+#include <MPEG2PSExtractor.h>
+#include <MPEG2TSExtractor.h>
+#include <OggExtractor.h>
+#include <WAVExtractor.h>
 
 #include "ExtractorUnitTestEnvironment.h"
 
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index cc5e1c7..b7e2af3 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -22,8 +22,8 @@
 
     srcs: ["WAVExtractor.cpp"],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/include",
+    export_include_dirs: [
+        "include",
     ],
 
     shared_libs: [
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/include/WAVExtractor.h
similarity index 100%
rename from media/extractors/wav/WAVExtractor.h
rename to media/extractors/wav/include/WAVExtractor.h
diff --git a/media/janitors/reliability_mainline_OWNERS b/media/janitors/reliability_mainline_OWNERS
new file mode 100644
index 0000000..e4c4fc2
--- /dev/null
+++ b/media/janitors/reliability_mainline_OWNERS
@@ -0,0 +1,5 @@
+# Bug component: 1051309
+# go/android-media-relaibility
+
+essick@google.com
+nchalko@google.com
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index ed3b986..efa9941 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -444,6 +444,22 @@
 };
 typedef int32_t aaudio_content_type_t;
 
+enum {
+
+    /**
+     * Constant indicating the audio content associated with these attributes will follow the
+     * default platform behavior with regard to which content will be spatialized or not.
+     */
+    AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO = 1,
+
+    /**
+     * Constant indicating the audio content associated with these attributes should never
+     * be spatialized.
+     */
+    AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER = 2,
+};
+typedef int32_t aaudio_spatialization_behavior_t;
+
 /**
  * Defines the audio source.
  * An audio source defines both a default physical source of audio signal, and a recording
@@ -985,6 +1001,37 @@
         aaudio_content_type_t contentType) __INTRODUCED_IN(28);
 
 /**
+ * Sets the behavior affecting whether spatialization will be used.
+ *
+ * The AAudio system will use this information to select whether the stream will go
+ * through a spatializer effect or not when the effect is supported and enabled.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param spatializationBehavior the desired behavior with regard to spatialization, e.g.
+ *     {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
+ */
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+        aaudio_spatialization_behavior_t spatializationBehavior) __INTRODUCED_IN(32);
+
+/**
+ * Specifies whether the audio data of this output stream has already been processed for
+ * spatialization.
+ *
+ * If the stream has been processed for spatialization, setting this to true will prevent
+ * issues such as double-processing on platforms that would otherwise spatialize the audio data.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param isSpatialized true if the content is already processed for binaural or transaural spatial
+ *     rendering, false otherwise.
+ */
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+        bool isSpatialized) __INTRODUCED_IN(32);
+
+/**
  * Set the input (capture) preset for the stream.
  *
  * The AAudio system will use this information to optimize the
@@ -1791,6 +1838,31 @@
         __INTRODUCED_IN(28);
 
 /**
+ * Return the spatialization behavior for the stream.
+ *
+ * If none was explicitly set, it will return the default
+ * {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO} behavior.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return spatialization behavior, for example {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
+ */
+AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
+        AAudioStream* stream) __INTRODUCED_IN(32);
+
+/**
+ * Return whether the content of the stream is spatialized.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return true if the content is spatialized
+ */
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream) __INTRODUCED_IN(32);
+
+/**
  * Return the input preset for the stream.
  *
  * Available since API level 28.
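
A possible client-side use of the spatialization entry points declared above (error handling trimmed; requires API level 32):

    #include <aaudio/AAudio.h>

    aaudio_result_t openSpatializableStream(AAudioStream **outStream) {
        AAudioStreamBuilder *builder = nullptr;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;

        // Let the platform decide whether to spatialize, and declare that the
        // content has not been pre-rendered for binaural/transaural output.
        AAudioStreamBuilder_setSpatializationBehavior(builder,
                AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO);
        AAudioStreamBuilder_setIsContentSpatialized(builder, false);

        result = AAudioStreamBuilder_openStream(builder, outStream);
        AAudioStreamBuilder_delete(builder);
        return result;
    }

    // After opening, AAudioStream_getSpatializationBehavior(stream) and
    // AAudioStream_isContentSpatialized(stream) report the effective values.
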
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index da59da6..01aaa2a 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -168,6 +168,7 @@
         "libbinder",
         "framework-permission-aidl-cpp",
         "aaudio-aidl-cpp",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
     ],
@@ -246,6 +247,7 @@
         "binding/aidl/aaudio/IAAudioService.aidl",
     ],
     imports: [
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
         "shared-file-region-aidl",
         "framework-permission-aidl",
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index cf76225..b60bac2 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -29,7 +29,7 @@
 
 using namespace aaudio;
 
-using android::media::AudioFormatDescription;
+using android::media::audio::common::AudioFormatDescription;
 
 AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
     setChannelMask(parcelable.channelMask);
@@ -46,6 +46,13 @@
     setUsage(parcelable.usage);
     static_assert(sizeof(aaudio_content_type_t) == sizeof(parcelable.contentType));
     setContentType(parcelable.contentType);
+
+    static_assert(sizeof(aaudio_spatialization_behavior_t) ==
+            sizeof(parcelable.spatializationBehavior));
+    setSpatializationBehavior(parcelable.spatializationBehavior);
+    setIsContentSpatialized(parcelable.isContentSpatialized);
+
     static_assert(sizeof(aaudio_input_preset_t) == sizeof(parcelable.inputPreset));
     setInputPreset(parcelable.inputPreset);
     setBufferCapacity(parcelable.bufferCapacity);
@@ -76,7 +83,8 @@
         result.audioFormat = convAudioFormat.value();
     } else {
         result.audioFormat = AudioFormatDescription{};
-        result.audioFormat.type = android::media::AudioFormatType::SYS_RESERVED_INVALID;
+        result.audioFormat.type =
+                android::media::audio::common::AudioFormatType::SYS_RESERVED_INVALID;
     }
     static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
     result.direction = getDirection();
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
index 3b274f2..983e193 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -16,7 +16,7 @@
 
 package aaudio;
 
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioFormatDescription;
 
 parcelable StreamParameters {
     int                                       channelMask;  //          = AAUDIO_UNSPECIFIED;
@@ -27,6 +27,8 @@
     int /* aaudio_direction_t */              direction;  //            = AAUDIO_DIRECTION_OUTPUT;
     int /* aaudio_usage_t */                  usage;  //                = AAUDIO_UNSPECIFIED;
     int /* aaudio_content_type_t */           contentType;  //          = AAUDIO_UNSPECIFIED;
+    int /* aaudio_spatialization_behavior_t */ spatializationBehavior;  // = AAUDIO_UNSPECIFIED;
+    boolean                                   isContentSpatialized;  // = false;
     int /* aaudio_input_preset_t */           inputPreset;  //          = AAUDIO_UNSPECIFIED;
     int                                       bufferCapacity;  //       = AAUDIO_UNSPECIFIED;
     int /* aaudio_allowed_capture_policy_t */ allowedCapturePolicy;  // = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 8284be5..89d42bf 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -27,6 +27,7 @@
 #include <aaudio/AAudio.h>
 #include <cutils/properties.h>
 
+#include <media/AudioSystem.h>
 #include <media/MediaMetricsItem.h>
 #include <utils/Trace.h>
 
@@ -95,7 +96,7 @@
         return result;
     }
 
-    const int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
+    const int32_t burstMinMicros = android::AudioSystem::getAAudioHardwareBurstMinUsec();
     int32_t burstMicros = 0;
 
     const audio_format_t requestedFormat = getFormat();
@@ -127,6 +128,8 @@
 
     request.getConfiguration().setUsage(getUsage());
     request.getConfiguration().setContentType(getContentType());
+    request.getConfiguration().setSpatializationBehavior(getSpatializationBehavior());
+    request.getConfiguration().setIsContentSpatialized(isContentSpatialized());
     request.getConfiguration().setInputPreset(getInputPreset());
     request.getConfiguration().setPrivacySensitive(isPrivacySensitive());
 
@@ -186,6 +189,8 @@
 
     setUsage(configurationOutput.getUsage());
     setContentType(configurationOutput.getContentType());
+    setSpatializationBehavior(configurationOutput.getSpatializationBehavior());
+    setIsContentSpatialized(configurationOutput.isContentSpatialized());
     setInputPreset(configurationOutput.getInputPreset());
 
     // Save device format so we can do format conversion and volume scaling together.
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 02e7f5f..90ff4a5 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -167,6 +167,18 @@
     streamBuilder->setContentType(contentType);
 }
 
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+        aaudio_spatialization_behavior_t spatializationBehavior) {
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setSpatializationBehavior(spatializationBehavior);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+                                                            bool isSpatialized) {
+    AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+    streamBuilder->setIsContentSpatialized(isSpatialized);
+}
+
 AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
                                                    aaudio_input_preset_t inputPreset) {
     AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
@@ -340,7 +352,8 @@
 {
 
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
+    android::sp<AudioStream> spAudioStream(audioStream);
+    return spAudioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
 }
 
 // ============================================================
@@ -503,6 +516,19 @@
     return audioStream->getContentType();
 }
 
+AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
+        AAudioStream* stream)
+{
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->getSpatializationBehavior();
+}
+
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream)
+{
+    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+    return audioStream->isContentSpatialized();
+}
+
 AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 84133f4..8b7b75e 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -41,6 +41,8 @@
     mBufferCapacity       = other.mBufferCapacity;
     mUsage                = other.mUsage;
     mContentType          = other.mContentType;
+    mSpatializationBehavior = other.mSpatializationBehavior;
+    mIsContentSpatialized = other.mIsContentSpatialized;
     mInputPreset          = other.mInputPreset;
     mAllowedCapturePolicy = other.mAllowedCapturePolicy;
     mIsPrivacySensitive   = other.mIsPrivacySensitive;
@@ -158,6 +160,19 @@
             // break;
     }
 
+    switch (mSpatializationBehavior) {
+        case AAUDIO_UNSPECIFIED:
+        case AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO:
+        case AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER:
+            break; // valid
+        default:
+            ALOGD("spatialization behavior not valid = %d", mSpatializationBehavior);
+            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            // break;
+    }
+
+    // no validation required for mIsContentSpatialized
+
     switch (mInputPreset) {
         case AAUDIO_UNSPECIFIED:
         case AAUDIO_INPUT_PRESET_GENERIC:
@@ -287,6 +302,8 @@
     ALOGD("mBufferCapacity       = %6d", mBufferCapacity);
     ALOGD("mUsage                = %6d", mUsage);
     ALOGD("mContentType          = %6d", mContentType);
+    ALOGD("mSpatializationBehavior = %6d", mSpatializationBehavior);
+    ALOGD("mIsContentSpatialized = %s", mIsContentSpatialized ? "true" : "false");
     ALOGD("mInputPreset          = %6d", mInputPreset);
     ALOGD("mAllowedCapturePolicy = %6d", mAllowedCapturePolicy);
     ALOGD("mIsPrivacySensitive   = %s", mIsPrivacySensitive ? "true" : "false");
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 7a6ed3e..cb998bf 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -97,6 +97,22 @@
         mContentType = contentType;
     }
 
+    aaudio_spatialization_behavior_t getSpatializationBehavior() const {
+        return mSpatializationBehavior;
+    }
+
+    void setSpatializationBehavior(aaudio_spatialization_behavior_t spatializationBehavior) {
+        mSpatializationBehavior = spatializationBehavior;
+    }
+
+    bool isContentSpatialized() const {
+        return mIsContentSpatialized;
+    }
+
+    void setIsContentSpatialized(bool isSpatialized) {
+        mIsContentSpatialized = isSpatialized;
+    }
+
     aaudio_input_preset_t getInputPreset() const {
         return mInputPreset;
     }
@@ -183,6 +199,9 @@
     aaudio_direction_t              mDirection            = AAUDIO_DIRECTION_OUTPUT;
     aaudio_usage_t                  mUsage                = AAUDIO_UNSPECIFIED;
     aaudio_content_type_t           mContentType          = AAUDIO_UNSPECIFIED;
+    aaudio_spatialization_behavior_t mSpatializationBehavior
+                                                          = AAUDIO_UNSPECIFIED;
+    bool                            mIsContentSpatialized = false;
     aaudio_input_preset_t           mInputPreset          = AAUDIO_UNSPECIFIED;
     int32_t                         mBufferCapacity       = AAUDIO_UNSPECIFIED;
     aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index ffc3b9d..73432af 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -92,6 +92,12 @@
     if (mContentType == AAUDIO_UNSPECIFIED) {
         mContentType = AAUDIO_CONTENT_TYPE_MUSIC;
     }
+    mSpatializationBehavior = builder.getSpatializationBehavior();
+    // For consistency with other properties, treat UNSPECIFIED the same as AUTO.
+    if (mSpatializationBehavior == AAUDIO_UNSPECIFIED) {
+        mSpatializationBehavior = AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO;
+    }
+    mIsContentSpatialized = builder.isContentSpatialized();
     mInputPreset = builder.getInputPreset();
     if (mInputPreset == AAUDIO_UNSPECIFIED) {
         mInputPreset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 8bb9b19..afb8551 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -253,6 +253,14 @@
         return mContentType;
     }
 
+    aaudio_spatialization_behavior_t getSpatializationBehavior() const {
+        return mSpatializationBehavior;
+    }
+
+    bool isContentSpatialized() const {
+        return mIsContentSpatialized;
+    }
+
     aaudio_input_preset_t getInputPreset() const {
         return mInputPreset;
     }
@@ -594,6 +602,14 @@
         mContentType = contentType;
     }
 
+    void setSpatializationBehavior(aaudio_spatialization_behavior_t spatializationBehavior) {
+        mSpatializationBehavior = spatializationBehavior;
+    }
+
+    void setIsContentSpatialized(bool isContentSpatialized) {
+        mIsContentSpatialized = isContentSpatialized;
+    }
+
     /**
      * This should not be called after the open() call.
      */
@@ -651,6 +667,8 @@
 
     aaudio_usage_t              mUsage           = AAUDIO_UNSPECIFIED;
     aaudio_content_type_t       mContentType     = AAUDIO_UNSPECIFIED;
+    aaudio_spatialization_behavior_t mSpatializationBehavior = AAUDIO_UNSPECIFIED;
+    bool                        mIsContentSpatialized = false;
     aaudio_input_preset_t       mInputPreset     = AAUDIO_UNSPECIFIED;
     aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
     bool                        mIsPrivacySensitive = false;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index a8fd0d9..2be3d65 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -20,9 +20,14 @@
 
 #include <new>
 #include <stdint.h>
+#include <vector>
 
 #include <aaudio/AAudio.h>
 #include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <media/AudioSystem.h>
 
 #include "binding/AAudioBinderClient.h"
 #include "client/AudioStreamInternalCapture.h"
@@ -35,6 +40,10 @@
 
 using namespace aaudio;
 
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
 #define AAUDIO_MMAP_POLICY_DEFAULT             AAUDIO_POLICY_NEVER
 #define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT   AAUDIO_POLICY_NEVER
 
@@ -86,6 +95,37 @@
     return result;
 }
 
+namespace {
+
+aaudio_policy_t aidl2legacy_aaudio_policy(AudioMMapPolicy aidl) {
+    switch (aidl) {
+        case AudioMMapPolicy::NEVER:
+            return AAUDIO_POLICY_NEVER;
+        case AudioMMapPolicy::AUTO:
+            return AAUDIO_POLICY_AUTO;
+        case AudioMMapPolicy::ALWAYS:
+            return AAUDIO_POLICY_ALWAYS;
+        case AudioMMapPolicy::UNSPECIFIED:
+        default:
+            return AAUDIO_UNSPECIFIED;
+    }
+}
+
+// The combined aaudio policy will be ALWAYS, NEVER or UNSPECIFIED only when all of the policy
+// infos report that same value. Otherwise, the aaudio policy will be AUTO.
+aaudio_policy_t getAAudioPolicy(
+        const std::vector<AudioMMapPolicyInfo>& policyInfos) {
+    if (policyInfos.empty()) return AAUDIO_POLICY_AUTO;
+    for (size_t i = 1; i < policyInfos.size(); ++i) {
+        if (policyInfos.at(i).mmapPolicy != policyInfos.at(0).mmapPolicy) {
+            return AAUDIO_POLICY_AUTO;
+        }
+    }
+    return aidl2legacy_aaudio_policy(policyInfos.at(0).mmapPolicy);
+}
+
+} // namespace
+
 // Try to open using MMAP path if that is allowed.
 // Fall back to Legacy path if MMAP not available.
 // Exact behavior is controlled by MMapPolicy.
@@ -104,25 +144,32 @@
         return result;
     }
 
+    std::vector<AudioMMapPolicyInfo> policyInfos;
     // The API setting is the highest priority.
     aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
     // If not specified then get from a system property.
-    if (mmapPolicy == AAUDIO_UNSPECIFIED) {
-        mmapPolicy = AAudioProperty_getMMapPolicy();
+    if (mmapPolicy == AAUDIO_UNSPECIFIED && android::AudioSystem::getMmapPolicyInfo(
+                AudioMMapPolicyType::DEFAULT, &policyInfos) == NO_ERROR) {
+        mmapPolicy = getAAudioPolicy(policyInfos);
     }
     // If still not specified then use the default.
     if (mmapPolicy == AAUDIO_UNSPECIFIED) {
         mmapPolicy = AAUDIO_MMAP_POLICY_DEFAULT;
     }
 
-    int32_t mapExclusivePolicy = AAudioProperty_getMMapExclusivePolicy();
-    if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
-        mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
+    policyInfos.clear();
+    aaudio_policy_t mmapExclusivePolicy = AAUDIO_UNSPECIFIED;
+    if (android::AudioSystem::getMmapPolicyInfo(
+            AudioMMapPolicyType::EXCLUSIVE, &policyInfos) == NO_ERROR) {
+        mmapExclusivePolicy = getAAudioPolicy(policyInfos);
+    }
+    if (mmapExclusivePolicy == AAUDIO_UNSPECIFIED) {
+        mmapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
     }
 
     aaudio_sharing_mode_t sharingMode = getSharingMode();
     if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
-        && (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
+        && (mmapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
         ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
         sharingMode = AAUDIO_SHARING_MODE_SHARED;
         setSharingMode(sharingMode);
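To make the aggregation rule in getAAudioPolicy() concrete, a standalone sketch with hypothetical names (it mirrors, but does not call, the anonymous-namespace helper above):

#include <vector>

enum class Policy { NEVER, AUTO, ALWAYS, UNSPECIFIED };

// A uniform set of reports maps straight through; any disagreement, or an
// empty set, collapses to AUTO (mirrors getAAudioPolicy() above).
static Policy combinePolicies(const std::vector<Policy>& reports) {
    if (reports.empty()) return Policy::AUTO;
    for (size_t i = 1; i < reports.size(); ++i) {
        if (reports[i] != reports[0]) return Policy::AUTO;
    }
    return reports[0];
}
// combinePolicies({Policy::NEVER, Policy::NEVER})  -> Policy::NEVER
// combinePolicies({Policy::NEVER, Policy::ALWAYS}) -> Policy::AUTO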
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 118c004..b512b48 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -135,7 +135,9 @@
     const audio_usage_t usage =
             AAudioConvert_usageToInternal(builder.getUsage());
     const audio_flags_mask_t attributesFlags =
-        AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy());
+        AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy(),
+                                                         builder.getSpatializationBehavior(),
+                                                         builder.isContentSpatialized());
 
     const audio_attributes_t attributes = {
             .content_type = contentType,
@@ -215,7 +217,6 @@
         mBlockAdapter = nullptr;
     }
 
-    setState(AAUDIO_STREAM_STATE_OPEN);
     setDeviceId(mAudioTrack->getRoutedDeviceId());
 
     aaudio_session_id_t actualSessionId =
@@ -248,6 +249,19 @@
              "open() perfMode changed from %d to %d",
              perfMode, actualPerformanceMode);
 
+    if (getState() != AAUDIO_STREAM_STATE_UNINITIALIZED) {
+        ALOGE("%s - Open canceled since state = %d", __func__, getState());
+        if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED)
+        {
+            ALOGE("%s - Opening while state is disconnected", __func__);
+            safeReleaseClose();
+            return AAUDIO_ERROR_DISCONNECTED;
+        }
+        safeReleaseClose();
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    setState(AAUDIO_STREAM_STATE_OPEN);
     return AAUDIO_OK;
 }
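Since open() can now fail with AAUDIO_ERROR_DISCONNECTED or AAUDIO_ERROR_INVALID_STATE when the stream was disconnected during setup, a caller might retry once. A hedged, illustrative sketch, not part of this change:

// Retry an open once if the routed device disconnected while opening.
static aaudio_result_t openWithOneRetry(AAudioStreamBuilder *builder,
                                        AAudioStream **streamOut) {
    aaudio_result_t result = AAudioStreamBuilder_openStream(builder, streamOut);
    if (result == AAUDIO_ERROR_DISCONNECTED) {
        // The framework should route to the new default device on the retry.
        result = AAudioStreamBuilder_openStream(builder, streamOut);
    }
    return result;
}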
 
diff --git a/media/libaaudio/src/libaaudio.map.txt b/media/libaaudio/src/libaaudio.map.txt
index 8fa9e38..f45b816 100644
--- a/media/libaaudio/src/libaaudio.map.txt
+++ b/media/libaaudio/src/libaaudio.map.txt
@@ -26,6 +26,8 @@
     AAudioStreamBuilder_setPackageName;   # introduced=31
     AAudioStreamBuilder_setAttributionTag;   # introduced=31
     AAudioStreamBuilder_setChannelMask;    # introduced=32
+    AAudioStreamBuilder_setSpatializationBehavior; # introduced=32
+    AAudioStreamBuilder_setIsContentSpatialized;   # introduced=32
     AAudioStreamBuilder_openStream;
     AAudioStreamBuilder_delete;
     AAudioStream_close;
@@ -63,6 +65,8 @@
     AAudioStream_isPrivacySensitive;   # introduced=30
     AAudioStream_release;        # introduced=30
     AAudioStream_getChannelMask;  # introduced=32
+    AAudioStream_getSpatializationBehavior;  # introduced=32
+    AAudioStream_isContentSpatialized;       # introduced=32
   local:
     *;
 };
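Because the symbols above are tagged introduced=32, an app that also targets older releases would typically probe for them at runtime. An illustrative dlsym sketch, not part of this change:

#include <dlfcn.h>
#include <aaudio/AAudio.h>

typedef aaudio_spatialization_behavior_t (*GetSpatializationBehaviorFn)(AAudioStream*);

// Returns nullptr on releases where the symbol is not exported.
static GetSpatializationBehaviorFn loadGetSpatializationBehavior() {
    // libaaudio.so is already loaded by any process linking against AAudio.
    void *lib = dlopen("libaaudio.so", RTLD_NOW | RTLD_NOLOAD);
    if (lib == nullptr) return nullptr;
    return reinterpret_cast<GetSpatializationBehaviorFn>(
            dlsym(lib, "AAudioStream_getSpatializationBehavior"));
}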
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index d829934..a0952fe 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -235,20 +235,46 @@
 }
 
 audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
-        aaudio_allowed_capture_policy_t policy) {
+        aaudio_allowed_capture_policy_t policy,
+        aaudio_spatialization_behavior_t spatializationBehavior,
+        bool isContentSpatialized) {
+    audio_flags_mask_t flagsMask = AUDIO_FLAG_NONE;
     switch (policy) {
         case AAUDIO_UNSPECIFIED:
         case AAUDIO_ALLOW_CAPTURE_BY_ALL:
-            return AUDIO_FLAG_NONE;
+            // flagsMask is not modified
+            break;
         case AAUDIO_ALLOW_CAPTURE_BY_SYSTEM:
-            return AUDIO_FLAG_NO_MEDIA_PROJECTION;
+            flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_NO_MEDIA_PROJECTION);
+            break;
         case AAUDIO_ALLOW_CAPTURE_BY_NONE:
-            return static_cast<audio_flags_mask_t>(
+            flagsMask = static_cast<audio_flags_mask_t>(flagsMask |
                     AUDIO_FLAG_NO_MEDIA_PROJECTION | AUDIO_FLAG_NO_SYSTEM_CAPTURE);
+            break;
         default:
-            ALOGE("%s() 0x%08X unrecognized", __func__, policy);
-            return AUDIO_FLAG_NONE; //
+            ALOGE("%s() 0x%08X unrecognized capture policy", __func__, policy);
+            // flagsMask is not modified
     }
+
+    switch (spatializationBehavior) {
+        case AAUDIO_UNSPECIFIED:
+        case AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO:
+            // flagsMask is not modified
+            break;
+        case AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER:
+            flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_NEVER_SPATIALIZE);
+            break;
+        default:
+            ALOGE("%s() 0x%08X unrecognized spatialization behavior",
+                  __func__, spatializationBehavior);
+            // flagsMask is not modified
+    }
+
+    if (isContentSpatialized) {
+        flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_CONTENT_SPATIALIZED);
+    }
+
+    return flagsMask;
 }
 
 audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
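As a quick sanity check on the combined mask above, a hypothetical gtest-style sketch of the expected results (include paths and test name are assumptions, not part of this change):

#include <gtest/gtest.h>
#include <system/audio.h>
#include "utility/AAudioUtilities.h"

TEST(AAudioUtilities, capturePolicyAndSpatializationToFlags) {
    // Default policy, AUTO behavior, not spatialized -> no flags.
    EXPECT_EQ(AUDIO_FLAG_NONE,
              AAudioConvert_allowCapturePolicyToAudioFlagsMask(
                      AAUDIO_ALLOW_CAPTURE_BY_ALL,
                      AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO,
                      false /*isContentSpatialized*/));
    // System-only capture plus NEVER spatialize combine into one mask.
    EXPECT_EQ(static_cast<audio_flags_mask_t>(
                      AUDIO_FLAG_NO_MEDIA_PROJECTION | AUDIO_FLAG_NEVER_SPATIALIZE),
              AAudioConvert_allowCapturePolicyToAudioFlagsMask(
                      AAUDIO_ALLOW_CAPTURE_BY_SYSTEM,
                      AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER,
                      false /*isContentSpatialized*/));
    // Pre-spatialized content only sets the content flag.
    EXPECT_EQ(AUDIO_FLAG_CONTENT_SPATIALIZED,
              AAudioConvert_allowCapturePolicyToAudioFlagsMask(
                      AAUDIO_UNSPECIFIED,
                      AAUDIO_UNSPECIFIED,
                      true /*isContentSpatialized*/));
}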
@@ -518,45 +544,6 @@
     return AAUDIO_OK;
 }
 
-static int32_t AAudioProperty_getMMapProperty(const char *propName,
-                                              int32_t defaultValue,
-                                              const char * caller) {
-    int32_t prop = property_get_int32(propName, defaultValue);
-    switch (prop) {
-        case AAUDIO_UNSPECIFIED:
-        case AAUDIO_POLICY_NEVER:
-        case AAUDIO_POLICY_ALWAYS:
-        case AAUDIO_POLICY_AUTO:
-            break;
-        default:
-            ALOGE("%s: invalid = %d", caller, prop);
-            prop = defaultValue;
-            break;
-    }
-    return prop;
-}
-
-int32_t AAudioProperty_getMMapPolicy() {
-    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_POLICY,
-                                          AAUDIO_UNSPECIFIED, __func__);
-}
-
-int32_t AAudioProperty_getMMapExclusivePolicy() {
-    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY,
-                                          AAUDIO_UNSPECIFIED, __func__);
-}
-
-int32_t AAudioProperty_getMixerBursts() {
-    const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
-    const int32_t maxBursts = 1024; // arbitrary
-    int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
-    if (prop < 1 || prop > maxBursts) {
-        ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
-        prop = defaultBursts;
-    }
-    return prop;
-}
-
 int32_t AAudioProperty_getWakeupDelayMicros() {
     const int32_t minMicros = 0; // arbitrary
     const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
@@ -587,18 +574,6 @@
     return prop;
 }
 
-int32_t AAudioProperty_getHardwareBurstMinMicros() {
-    const int32_t defaultMicros = 1000; // arbitrary
-    const int32_t maxMicros = 1000 * 1000; // arbitrary
-    int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
-    if (prop < 1 || prop > maxMicros) {
-        ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
-              prop, defaultMicros);
-        prop = defaultMicros;
-    }
-    return prop;
-}
-
 static int32_t AAudioProperty_getMMapOffsetMicros(const char *functionName,
         const char *propertyName) {
     const int32_t minMicros = -20000; // arbitrary
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 52e57da..b59ce1c 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -91,7 +91,9 @@
  * @return internal audio flags mask
  */
 audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
-        aaudio_allowed_capture_policy_t policy);
+        aaudio_allowed_capture_policy_t policy,
+        aaudio_spatialization_behavior_t spatializationBehavior,
+        bool isContentSpatialized);
 
 audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
         bool privacySensitive);
@@ -126,27 +128,6 @@
 // Note that this code may be replaced by Settings or by some other system configuration tool.
 
 /**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
-int32_t AAudioProperty_getMMapPolicy();
-#define AAUDIO_PROP_MMAP_POLICY           "aaudio.mmap_policy"
-
-/**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
-int32_t AAudioProperty_getMMapExclusivePolicy();
-#define AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY "aaudio.mmap_exclusive_policy"
-
-/**
- * Read system property.
- * @return number of bursts per AAudio service mixer cycle
- */
-int32_t AAudioProperty_getMixerBursts();
-#define AAUDIO_PROP_MIXER_BURSTS           "aaudio.mixer_bursts"
-
-/**
  * Read a system property that specifies the number of extra microseconds that a thread
  * should sleep when waiting for another thread to service a FIFO. This is used
  * to avoid the waking thread from being overly optimistic about the other threads
@@ -167,19 +148,6 @@
 #define AAUDIO_PROP_MINIMUM_SLEEP_USEC      "aaudio.minimum_sleep_usec"
 
 /**
- * Read system property.
- * This is handy in case the DMA is bursting too quickly for the CPU to keep up.
- * For example, there may be a DMA burst every 100 usec but you only
- * want to feed the MMAP buffer every 2000 usec.
- *
- * This will affect the framesPerBurst for an MMAP stream.
- *
- * @return minimum number of microseconds for a MMAP HW burst
- */
-int32_t AAudioProperty_getHardwareBurstMinMicros();
-#define AAUDIO_PROP_HW_BURST_MIN_USEC      "aaudio.hw_burst_min_usec"
-
-/**
  * Read a system property that specifies an offset that will be added to MMAP timestamps.
  * This can be used to correct bias in the timestamp.
  * It can also be used to analyze the time distribution of the timestamp
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 98e9727..ea00a5a 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -48,7 +48,7 @@
     shared_libs: ["libaaudio_internal"],
 }
 
-cc_test {
+cc_binary {
     name: "test_timestamps",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_timestamps.cpp"],
@@ -60,121 +60,71 @@
     name: "test_open_params",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_open_params.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_no_close",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_no_close.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_aaudio_recovery",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_recovery.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_n_streams",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_n_streams.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_bad_disconnect",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_bad_disconnect.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_various",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_various.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_session_id",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_session_id.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
-cc_test {
+cc_binary {
     name: "test_aaudio_monkey",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_aaudio_monkey.cpp"],
     header_libs: ["libaaudio_example_utils"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_attributes",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_attributes.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_interference",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_interference.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
@@ -196,28 +146,18 @@
     ],
 }
 
-cc_test {
+cc_binary {
     name: "test_return_stop",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_return_stop.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
     name: "test_callback_race",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_callback_race.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
 
 cc_test {
@@ -238,7 +178,7 @@
     ],
 }
 
-cc_test {
+cc_binary {
     name: "test_steal_exclusive",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_steal_exclusive.cpp"],
@@ -251,15 +191,9 @@
     ],
 }
 
-
-cc_test {
+cc_binary {
     name: "test_disconnect_race",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_disconnect_race.cpp"],
-    shared_libs: [
-        "libaaudio",
-        "libbinder",
-        "libcutils",
-        "libutils",
-    ],
+    shared_libs: ["libaaudio"],
 }
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
index d540866..b88d562 100644
--- a/media/libaaudio/tests/test_attributes.cpp
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -16,6 +16,10 @@
 
 // Test AAudio attributes such as Usage, ContentType and InputPreset.
 
+// TODO Many of these tests are duplicates of CTS tests in
+// "test_aaudio_attributes.cpp". That other file is more current.
+// So these tests could be deleted.
+
 #include <stdio.h>
 #include <unistd.h>
 
@@ -91,7 +95,7 @@
     aaudio_allowed_capture_policy_t expectedCapturePolicy =
             (capturePolicy == DONT_SET || capturePolicy == AAUDIO_UNSPECIFIED)
             ? AAUDIO_ALLOW_CAPTURE_BY_ALL // default
-            : preset;
+            : capturePolicy;
     EXPECT_EQ(expectedCapturePolicy, AAudioStream_getAllowedCapturePolicy(aaudioStream));
 
     bool expectedPrivacyMode =
@@ -132,10 +136,7 @@
     AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
     AAUDIO_USAGE_GAME,
     AAUDIO_USAGE_ASSISTANT,
-    AAUDIO_SYSTEM_USAGE_EMERGENCY,
-    AAUDIO_SYSTEM_USAGE_SAFETY,
-    AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS,
-    AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT
+    // Note that the AAUDIO_SYSTEM_USAGE_* values require special permission.
 };
 
 static const aaudio_content_type_t sContentypes[] = {
diff --git a/media/libaaudio/tests/test_steal_exclusive.cpp b/media/libaaudio/tests/test_steal_exclusive.cpp
index 05c560d..5cb005c 100644
--- a/media/libaaudio/tests/test_steal_exclusive.cpp
+++ b/media/libaaudio/tests/test_steal_exclusive.cpp
@@ -110,6 +110,10 @@
         mOpenDelayMillis = openDelayMillis;
     }
 
+    void setCloseEnabled(bool enabled) {
+        mCloseEnabled = enabled;
+    }
+
     void restartStream() {
         int retriesLeft = mMaxRetries;
         aaudio_result_t result;
@@ -189,10 +193,12 @@
         std::lock_guard<std::mutex> lock(mLock);
         aaudio_result_t result = AAUDIO_OK;
         if (mStream != nullptr) {
-            result = AAudioStream_close(mStream);
-            if (result != AAUDIO_OK) {
-                printf("AAudioStream_close returned %s\n",
-                       AAudio_convertResultToText(result));
+            if (mCloseEnabled) {
+                result = AAudioStream_close(mStream);
+                printf("AAudioStream_close() returned %s\n",
+                        AAudio_convertResultToText(result));
+            } else {
+                printf("AAudioStream_close() DISABLED!\n");
             }
             mStream = nullptr;
         }
@@ -287,6 +293,7 @@
     std::string         mName;
     int                 mMaxRetries = 1;
     int                 mOpenDelayMillis = 0;
+    bool                mCloseEnabled = true;
 };
 
 // Callback function that fills the audio output buffer.
@@ -319,11 +326,12 @@
 }
 
 static void s_usage() {
-    printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] -s\n");
+    printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] -s -c{flag}\n");
     printf("     -i direction INPUT, otherwise OUTPUT\n");
-    printf("     -d delay open by milliseconds, default = 0\n");
-    printf("     -r max retries in the error callback, default = 1\n");
+    printf("     -d Delay open by milliseconds, default = 0\n");
+    printf("     -r max Retries in the error callback, default = 1\n");
     printf("     -s try to open in SHARED mode\n");
+    printf("     -c enable or disabling Closing of the stream with 0/1, default = 1\n");
 }
 
 int main(int argc, char ** argv) {
@@ -334,6 +342,7 @@
     int errorCount = 0;
     int maxRetries = 1;
     int openDelayMillis = 0;
+    bool closeEnabled = true;
     aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
 
     // Make printf print immediately so that debug info is not stuck
@@ -348,6 +357,9 @@
         if (arg[0] == '-') {
             char option = arg[1];
             switch (option) {
+                case 'c':
+                    closeEnabled = atoi(&arg[2]) != 0;
+                    break;
                 case 'd':
                     openDelayMillis = atoi(&arg[2]);
                     break;
@@ -376,6 +388,8 @@
     thief.setOpenDelayMillis(openDelayMillis);
     victim.setMaxRetries(maxRetries);
     thief.setMaxRetries(maxRetries);
+    victim.setCloseEnabled(closeEnabled);
+    thief.setCloseEnabled(closeEnabled);
 
     result = victim.openAudioStream(direction, requestedSharingMode);
     if (result != AAUDIO_OK) {
@@ -442,7 +456,7 @@
     }
 
     LOGI("Both streams running. Ask user to plug in headset. ====");
-    printf("\n====\nPlease PLUG IN A HEADSET now!\n====\n\n");
+    printf("\n====\nPlease PLUG IN A HEADSET now! - OPTIONAL\n====\n\n");
 
     if (result == AAUDIO_OK) {
         const int watchLoops = DUET_DURATION_MSEC / SLEEP_DURATION_MSEC;
diff --git a/media/libaaudio/tests/test_various.cpp b/media/libaaudio/tests/test_various.cpp
index cbf863f..b68fc7b 100644
--- a/media/libaaudio/tests/test_various.cpp
+++ b/media/libaaudio/tests/test_various.cpp
@@ -25,6 +25,7 @@
 
 #include <gtest/gtest.h>
 #include <unistd.h>
+#include <thread>
 
 // Callback function that does nothing.
 aaudio_data_callback_result_t NoopDataCallbackProc(
@@ -51,6 +52,7 @@
 }
 
 constexpr int64_t NANOS_PER_MILLISECOND = 1000 * 1000;
+constexpr int64_t MICROS_PER_MILLISECOND = 1000;
 
 void checkReleaseThenClose(aaudio_performance_mode_t perfMode,
         aaudio_sharing_mode_t sharingMode,
@@ -762,6 +764,58 @@
     checkCallbackOnce(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
 }
 
+void waitForStateChangeToClosingOrClosed(AAudioStream **stream, std::atomic<bool>* isReady)
+{
+    *isReady = true;
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(*stream,
+                                                         AAUDIO_STREAM_STATE_OPEN, &state,
+                                                         10000 * NANOS_PER_MILLISECOND));
+    if ((state != AAUDIO_STREAM_STATE_CLOSING) && (state != AAUDIO_STREAM_STATE_CLOSED)) {
+        FAIL() << "ERROR - State not closing or closed. Current state: " <<
+                AAudio_convertStreamStateToText(state);
+    }
+}
+
+void testWaitForStateChangeClose(aaudio_performance_mode_t perfMode) {
+    AAudioStreamBuilder *aaudioBuilder = nullptr;
+    AAudioStream *aaudioStream = nullptr;
+
+    ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+    AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+    ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+    // Verify Open State
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+                                                         AAUDIO_STREAM_STATE_UNKNOWN, &state,
+                                                         1000 * NANOS_PER_MILLISECOND));
+    EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+    std::atomic<bool> isWaitThreadReady{false};
+
+    // Spawn a new thread to wait for the state change
+    std::thread waitThread(waitForStateChangeToClosingOrClosed, &aaudioStream,
+                            &isWaitThreadReady);
+
+    // Wait for worker thread to be ready
+    while (!isWaitThreadReady) {
+        usleep(MICROS_PER_MILLISECOND);
+    }
+    // Sleep an additional millisecond to ensure AAudioStream_waitForStateChange() has started
+    usleep(MICROS_PER_MILLISECOND);
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+    waitThread.join();
+}
+
+TEST(test_various, wait_for_state_change_close_none) {
+    testWaitForStateChangeClose(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_various, wait_for_state_change_close_lowlat) {
+    testWaitForStateChangeClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
 // ************************************************************
 struct WakeUpCallbackData {
     void wakeOther() {
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index b0eb7bd..5f63a69 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -34,6 +34,36 @@
 namespace android {
 
 using base::unexpected;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioContentType;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioEncapsulationMetadataType;
+using media::audio::common::AudioEncapsulationMode;
+using media::audio::common::AudioEncapsulationType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::AudioGain;
+using media::audio::common::AudioGainConfig;
+using media::audio::common::AudioGainMode;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioPortExt;
+using media::audio::common::AudioPortMixExt;
+using media::audio::common::AudioPortMixExtUseCase;
+using media::audio::common::AudioProfile;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStandard;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::ExtraAudioDescriptor;
+using media::audio::common::Int;
+using media::audio::common::PcmType;
 
 namespace {
 
@@ -225,60 +255,6 @@
     return std::string(legacy.c_str());
 }
 
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
-        media::AudioPortConfigType aidl) {
-    switch (aidl) {
-        case media::AudioPortConfigType::SAMPLE_RATE:
-            return AUDIO_PORT_CONFIG_SAMPLE_RATE;
-        case media::AudioPortConfigType::CHANNEL_MASK:
-            return AUDIO_PORT_CONFIG_CHANNEL_MASK;
-        case media::AudioPortConfigType::FORMAT:
-            return AUDIO_PORT_CONFIG_FORMAT;
-        case media::AudioPortConfigType::GAIN:
-            return AUDIO_PORT_CONFIG_GAIN;
-        case media::AudioPortConfigType::FLAGS:
-            return AUDIO_PORT_CONFIG_FLAGS;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
-        int32_t legacy) {
-    switch (legacy) {
-        case AUDIO_PORT_CONFIG_SAMPLE_RATE:
-            return media::AudioPortConfigType::SAMPLE_RATE;
-        case AUDIO_PORT_CONFIG_CHANNEL_MASK:
-            return media::AudioPortConfigType::CHANNEL_MASK;
-        case AUDIO_PORT_CONFIG_FORMAT:
-            return media::AudioPortConfigType::FORMAT;
-        case AUDIO_PORT_CONFIG_GAIN:
-            return media::AudioPortConfigType::GAIN;
-        case AUDIO_PORT_CONFIG_FLAGS:
-            return media::AudioPortConfigType::FLAGS;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl) {
-    return convertBitmask<unsigned int, int32_t, int, media::AudioPortConfigType>(
-            aidl, aidl2legacy_AudioPortConfigType_int32_t,
-            // AudioPortConfigType enum is index-based.
-            indexToEnum_index<media::AudioPortConfigType>,
-            // AUDIO_PORT_CONFIG_* flags are mask-based.
-            enumToMask_bitmask<unsigned int, int>);
-}
-
-ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy) {
-    return convertBitmask<int32_t, unsigned int, media::AudioPortConfigType, int>(
-            legacy, legacy2aidl_int32_t_AudioPortConfigType,
-            // AUDIO_PORT_CONFIG_* flags are mask-based.
-            indexToEnum_bitmask<unsigned>,
-            // AudioPortConfigType enum is index-based.
-            enumToMask_index<int32_t, media::AudioPortConfigType>);
-}
-
 ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
         media::AudioIoConfigEvent aidl) {
     switch (aidl) {
@@ -388,21 +364,21 @@
 namespace {
 
 namespace detail {
-using AudioChannelPair = std::pair<audio_channel_mask_t, media::AudioChannelLayout>;
+using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
 using AudioChannelPairs = std::vector<AudioChannelPair>;
-using AudioDevicePair = std::pair<audio_devices_t, media::AudioDeviceDescription>;
+using AudioDevicePair = std::pair<audio_devices_t, AudioDeviceDescription>;
 using AudioDevicePairs = std::vector<AudioDevicePair>;
-using AudioFormatPair = std::pair<audio_format_t, media::AudioFormatDescription>;
+using AudioFormatPair = std::pair<audio_format_t, AudioFormatDescription>;
 using AudioFormatPairs = std::vector<AudioFormatPair>;
 }
 
 const detail::AudioChannelPairs& getInAudioChannelPairs() {
     static const detail::AudioChannelPairs pairs = {
-#define DEFINE_INPUT_LAYOUT(n)                                                               \
-            {                                                                                \
-                AUDIO_CHANNEL_IN_##n,                                                        \
-                media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>( \
-                        media::AudioChannelLayout::LAYOUT_##n)                               \
+#define DEFINE_INPUT_LAYOUT(n)                                                 \
+            {                                                                  \
+                AUDIO_CHANNEL_IN_##n,                                          \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+                        AudioChannelLayout::LAYOUT_##n)                        \
             }
 
         DEFINE_INPUT_LAYOUT(MONO),
@@ -421,11 +397,11 @@
 
 const detail::AudioChannelPairs& getOutAudioChannelPairs() {
     static const detail::AudioChannelPairs pairs = {
-#define DEFINE_OUTPUT_LAYOUT(n)                                                              \
-            {                                                                                \
-                AUDIO_CHANNEL_OUT_##n,                                                       \
-                media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>( \
-                        media::AudioChannelLayout::LAYOUT_##n)                               \
+#define DEFINE_OUTPUT_LAYOUT(n)                                                \
+            {                                                                  \
+                AUDIO_CHANNEL_OUT_##n,                                         \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+                        AudioChannelLayout::LAYOUT_##n)                        \
             }
 
         DEFINE_OUTPUT_LAYOUT(MONO),
@@ -464,11 +440,11 @@
 
 const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
     static const detail::AudioChannelPairs pairs = {
-#define DEFINE_VOICE_LAYOUT(n)                                                               \
-            {                                                                                \
-                AUDIO_CHANNEL_IN_VOICE_##n,                                                  \
-                media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::voiceMask>(  \
-                        media::AudioChannelLayout::VOICE_##n)                                \
+#define DEFINE_VOICE_LAYOUT(n)                                                 \
+            {                                                                  \
+                AUDIO_CHANNEL_IN_VOICE_##n,                                    \
+                AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(  \
+                        AudioChannelLayout::VOICE_##n)                         \
             }
         DEFINE_VOICE_LAYOUT(UPLINK_MONO),
         DEFINE_VOICE_LAYOUT(DNLINK_MONO),
@@ -478,9 +454,9 @@
     return pairs;
 }
 
-media::AudioDeviceDescription make_AudioDeviceDescription(media::AudioDeviceType type,
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
         const std::string& connection = "") {
-    media::AudioDeviceDescription result;
+    AudioDeviceDescription result;
     result.type = type;
     result.connection = connection;
     return result;
@@ -488,7 +464,7 @@
 
 void append_AudioDeviceDescription(detail::AudioDevicePairs& pairs,
         audio_devices_t inputType, audio_devices_t outputType,
-        media::AudioDeviceType inType, media::AudioDeviceType outType,
+        AudioDeviceType inType, AudioDeviceType outType,
         const std::string& connection = "") {
     pairs.push_back(std::make_pair(inputType, make_AudioDeviceDescription(inType, connection)));
     pairs.push_back(std::make_pair(outputType, make_AudioDeviceDescription(outType, connection)));
@@ -498,198 +474,198 @@
     static const detail::AudioDevicePairs pairs = []() {
         detail::AudioDevicePairs pairs = {{
             {
-                AUDIO_DEVICE_NONE, media::AudioDeviceDescription{}
+                AUDIO_DEVICE_NONE, AudioDeviceDescription{}
             },
             {
                 AUDIO_DEVICE_OUT_EARPIECE, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_SPEAKER_EARPIECE)
+                        AudioDeviceType::OUT_SPEAKER_EARPIECE)
             },
             {
                 AUDIO_DEVICE_OUT_SPEAKER, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_SPEAKER)
+                        AudioDeviceType::OUT_SPEAKER)
             },
             {
                 AUDIO_DEVICE_OUT_WIRED_HEADPHONE, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_HEADPHONE,
-                        media::AudioDeviceDescription::CONNECTION_ANALOG())
+                        AudioDeviceType::OUT_HEADPHONE,
+                        AudioDeviceDescription::CONNECTION_ANALOG())
             },
             {
                 AUDIO_DEVICE_OUT_BLUETOOTH_SCO, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_DEVICE,
-                        media::AudioDeviceDescription::CONNECTION_BT_SCO())
+                        AudioDeviceType::OUT_DEVICE,
+                        AudioDeviceDescription::CONNECTION_BT_SCO())
             },
             {
                 AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_CARKIT,
-                        media::AudioDeviceDescription::CONNECTION_BT_SCO())
+                        AudioDeviceType::OUT_CARKIT,
+                        AudioDeviceDescription::CONNECTION_BT_SCO())
             },
             {
                 AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_HEADPHONE,
-                        media::AudioDeviceDescription::CONNECTION_BT_A2DP())
+                        AudioDeviceType::OUT_HEADPHONE,
+                        AudioDeviceDescription::CONNECTION_BT_A2DP())
             },
             {
                 AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_SPEAKER,
-                        media::AudioDeviceDescription::CONNECTION_BT_A2DP())
+                        AudioDeviceType::OUT_SPEAKER,
+                        AudioDeviceDescription::CONNECTION_BT_A2DP())
             },
             {
                 AUDIO_DEVICE_OUT_TELEPHONY_TX, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_TELEPHONY_TX)
+                        AudioDeviceType::OUT_TELEPHONY_TX)
             },
             {
                 AUDIO_DEVICE_OUT_AUX_LINE, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_LINE_AUX)
+                        AudioDeviceType::OUT_LINE_AUX)
             },
             {
                 AUDIO_DEVICE_OUT_SPEAKER_SAFE, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_SPEAKER_SAFE)
+                        AudioDeviceType::OUT_SPEAKER_SAFE)
             },
             {
                 AUDIO_DEVICE_OUT_HEARING_AID, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_HEARING_AID,
-                        media::AudioDeviceDescription::CONNECTION_WIRELESS())
+                        AudioDeviceType::OUT_HEARING_AID,
+                        AudioDeviceDescription::CONNECTION_WIRELESS())
             },
             {
                 AUDIO_DEVICE_OUT_ECHO_CANCELLER, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_ECHO_CANCELLER)
+                        AudioDeviceType::OUT_ECHO_CANCELLER)
             },
             {
                 AUDIO_DEVICE_OUT_BLE_SPEAKER, make_AudioDeviceDescription(
-                        media::AudioDeviceType::OUT_SPEAKER,
-                        media::AudioDeviceDescription::CONNECTION_BT_LE())
+                        AudioDeviceType::OUT_SPEAKER,
+                        AudioDeviceDescription::CONNECTION_BT_LE())
             },
             // AUDIO_DEVICE_IN_AMBIENT and IN_COMMUNICATION are removed since they were deprecated.
             {
                 AUDIO_DEVICE_IN_BUILTIN_MIC, make_AudioDeviceDescription(
-                        media::AudioDeviceType::IN_MICROPHONE)
+                        AudioDeviceType::IN_MICROPHONE)
             },
             {
                 AUDIO_DEVICE_IN_BACK_MIC, make_AudioDeviceDescription(
-                        media::AudioDeviceType::IN_MICROPHONE_BACK)
+                        AudioDeviceType::IN_MICROPHONE_BACK)
             },
             {
                 AUDIO_DEVICE_IN_TELEPHONY_RX, make_AudioDeviceDescription(
-                        media::AudioDeviceType::IN_TELEPHONY_RX)
+                        AudioDeviceType::IN_TELEPHONY_RX)
             },
             {
                 AUDIO_DEVICE_IN_TV_TUNER, make_AudioDeviceDescription(
-                        media::AudioDeviceType::IN_TV_TUNER)
+                        AudioDeviceType::IN_TV_TUNER)
             },
             {
                 AUDIO_DEVICE_IN_LOOPBACK, make_AudioDeviceDescription(
-                        media::AudioDeviceType::IN_LOOPBACK)
+                        AudioDeviceType::IN_LOOPBACK)
             },
             {
                 AUDIO_DEVICE_IN_BLUETOOTH_BLE, make_AudioDeviceDescription(
-                        media::AudioDeviceType::IN_DEVICE,
-                        media::AudioDeviceDescription::CONNECTION_BT_LE())
+                        AudioDeviceType::IN_DEVICE,
+                        AudioDeviceDescription::CONNECTION_BT_LE())
             },
             {
                 AUDIO_DEVICE_IN_ECHO_REFERENCE, make_AudioDeviceDescription(
-                        media::AudioDeviceType::IN_ECHO_REFERENCE)
+                        AudioDeviceType::IN_ECHO_REFERENCE)
             }
         }};
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_DEFAULT, AUDIO_DEVICE_OUT_DEFAULT,
-                media::AudioDeviceType::IN_DEFAULT, media::AudioDeviceType::OUT_DEFAULT);
+                AudioDeviceType::IN_DEFAULT, AudioDeviceType::OUT_DEFAULT);
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_OUT_WIRED_HEADSET,
-                media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
-                media::AudioDeviceDescription::CONNECTION_ANALOG());
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_ANALOG());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
-                media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
-                media::AudioDeviceDescription::CONNECTION_BT_SCO());
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_BT_SCO());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_HDMI, AUDIO_DEVICE_OUT_HDMI,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_HDMI());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                media::AudioDeviceType::IN_SUBMIX, media::AudioDeviceType::OUT_SUBMIX);
+                AudioDeviceType::IN_SUBMIX, AudioDeviceType::OUT_SUBMIX);
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
-                media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
-                media::AudioDeviceDescription::CONNECTION_ANALOG_DOCK());
+                AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+                AudioDeviceDescription::CONNECTION_ANALOG());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
-                media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
-                media::AudioDeviceDescription::CONNECTION_DIGITAL_DOCK());
+                AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+                AudioDeviceDescription::CONNECTION_USB());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_ACCESSORY,
-                media::AudioDeviceType::IN_ACCESSORY, media::AudioDeviceType::OUT_ACCESSORY,
-                media::AudioDeviceDescription::CONNECTION_USB());
+                AudioDeviceType::IN_ACCESSORY, AudioDeviceType::OUT_ACCESSORY,
+                AudioDeviceDescription::CONNECTION_USB());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_OUT_USB_DEVICE,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_USB());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_USB());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_FM_TUNER, AUDIO_DEVICE_OUT_FM,
-                media::AudioDeviceType::IN_FM_TUNER, media::AudioDeviceType::OUT_FM);
+                AudioDeviceType::IN_FM_TUNER, AudioDeviceType::OUT_FM);
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_LINE, AUDIO_DEVICE_OUT_LINE,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_ANALOG());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_ANALOG());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_SPDIF, AUDIO_DEVICE_OUT_SPDIF,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_SPDIF());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_SPDIF());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_BT_A2DP());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_BT_A2DP());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_IP, AUDIO_DEVICE_OUT_IP,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_IP_V4());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_IP_V4());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_OUT_BUS,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_BUS());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_BUS());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_PROXY, AUDIO_DEVICE_OUT_PROXY,
-                media::AudioDeviceType::IN_AFE_PROXY, media::AudioDeviceType::OUT_AFE_PROXY);
+                AudioDeviceType::IN_AFE_PROXY, AudioDeviceType::OUT_AFE_PROXY);
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
-                media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
-                media::AudioDeviceDescription::CONNECTION_USB());
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_USB());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_ARC,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_HDMI_ARC());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI_ARC());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_HDMI_EARC, AUDIO_DEVICE_OUT_HDMI_EARC,
-                media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
-                media::AudioDeviceDescription::CONNECTION_HDMI_EARC());
+                AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+                AudioDeviceDescription::CONNECTION_HDMI_EARC());
         append_AudioDeviceDescription(pairs,
                 AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_OUT_BLE_HEADSET,
-                media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
-                media::AudioDeviceDescription::CONNECTION_BT_LE());
+                AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+                AudioDeviceDescription::CONNECTION_BT_LE());
         return pairs;
     }();
     return pairs;
 }
 
-media::AudioFormatDescription make_AudioFormatDescription(media::AudioFormatType type) {
-    media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+    AudioFormatDescription result;
     result.type = type;
     return result;
 }
 
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType pcm) {
-    auto result = make_AudioFormatDescription(media::AudioFormatType::PCM);
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+    auto result = make_AudioFormatDescription(AudioFormatType::PCM);
     result.pcm = pcm;
     return result;
 }
 
-media::AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
-    media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+    AudioFormatDescription result;
     result.encoding = encoding;
     return result;
 }
 
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType transport,
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
         const std::string& encoding) {
     auto result = make_AudioFormatDescription(encoding);
     result.pcm = transport;
@@ -700,31 +676,30 @@
     static const detail::AudioFormatPairs pairs = {{
         {
             AUDIO_FORMAT_INVALID,
-            make_AudioFormatDescription(media::AudioFormatType::SYS_RESERVED_INVALID)
+            make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID)
         },
         {
-            AUDIO_FORMAT_DEFAULT, media::AudioFormatDescription{}
+            AUDIO_FORMAT_DEFAULT, AudioFormatDescription{}
         },
         {
-            AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(media::PcmType::INT_16_BIT)
+            AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(PcmType::INT_16_BIT)
         },
         {
-            AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(media::PcmType::UINT_8_BIT)
+            AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(PcmType::UINT_8_BIT)
         },
         {
-            AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(media::PcmType::INT_32_BIT)
+            AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(PcmType::INT_32_BIT)
         },
         {
-            AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(media::PcmType::FIXED_Q_8_24)
+            AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(PcmType::FIXED_Q_8_24)
         },
         {
-            AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(media::PcmType::FLOAT_32_BIT)
+            AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(PcmType::FLOAT_32_BIT)
         },
         {
-            AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(media::PcmType::INT_24_BIT)
+            AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(PcmType::INT_24_BIT)
         },
         {
-            // See the comment in MediaDefs.h.
             AUDIO_FORMAT_MP3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG)
         },
         {
@@ -734,52 +709,41 @@
             AUDIO_FORMAT_AMR_WB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB)
         },
         {
-            // Note: in MediaDefs.cpp MEDIA_MIMETYPE_AUDIO_AAC = "audio/mp4a-latm".
-            AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_FORMAT)
+            AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MP4)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription("audio/aac.main")
+            AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MAIN)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription("audio/aac.lc")
+            AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LC)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription("audio/aac.ssr")
+            AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SSR)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription("audio/aac.ltp")
+            AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LTP)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription("audio/aac.he.v1")
+            AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V1)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_SCALABLE, make_AudioFormatDescription("audio/aac.scalable")
+            AUDIO_FORMAT_AAC_SCALABLE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription("audio/aac.erlc")
+            AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ERLC)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription("audio/aac.ld")
+            AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LD)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription("audio/aac.he.v2")
+            AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V2)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription("audio/aac.eld")
+            AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ELD)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription("audio/aac.xhe")
+            AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_XHE)
         },
         // AUDIO_FORMAT_HE_AAC_V1 and HE_AAC_V2 are removed since they were deprecated a long
         // time ago.
@@ -796,7 +760,6 @@
             AUDIO_FORMAT_E_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3)
         },
         {
-            // Note: not in the IANA registry.
             AUDIO_FORMAT_E_AC3_JOC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3_JOC)
         },
         {
@@ -809,13 +772,12 @@
         // nested AudioFormatDescriptions. The legacy 'AUDIO_FORMAT_IEC61937' type doesn't
         // specify the format of the encapsulated bitstream.
         {
-            // Note: not in the IANA registry.
             AUDIO_FORMAT_IEC61937,
-            make_AudioFormatDescription(media::PcmType::INT_16_BIT, "audio/x-iec61937")
+            make_AudioFormatDescription(PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_DOLBY_TRUEHD, make_AudioFormatDescription("audio/vnd.dolby.truehd")
+            AUDIO_FORMAT_DOLBY_TRUEHD,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD)
         },
         {
             AUDIO_FORMAT_EVRC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRC)
@@ -830,11 +792,9 @@
             AUDIO_FORMAT_EVRCNW, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCNW)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription("audio/aac.adif")
+            AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADIF)
         },
         {
-            // Note: not in the IANA registry.
             AUDIO_FORMAT_WMA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_WMA)
         },
         {
@@ -845,7 +805,6 @@
             AUDIO_FORMAT_AMR_WB_PLUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)
         },
         {
-            // Note: not in the IANA registry.
             AUDIO_FORMAT_MP2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)
         },
         {
@@ -859,7 +818,6 @@
             AUDIO_FORMAT_FLAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_FLAC)
         },
         {
-            // Note: not in the IANA registry.
             AUDIO_FORMAT_ALAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_ALAC)
         },
         {
@@ -867,52 +825,49 @@
             AUDIO_FORMAT_APE, make_AudioFormatDescription("audio/x-ape")
         },
         {
-            // Note: not in the IANA registry.
             AUDIO_FORMAT_AAC_ADTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_MAIN, make_AudioFormatDescription("audio/aac-adts.main")
+            AUDIO_FORMAT_AAC_ADTS_MAIN,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription("audio/aac-adts.lc")
+            AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_SSR, make_AudioFormatDescription("audio/aac-adts.ssr")
+            AUDIO_FORMAT_AAC_ADTS_SSR,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_LTP, make_AudioFormatDescription("audio/aac-adts.ltp")
+            AUDIO_FORMAT_AAC_ADTS_LTP,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_HE_V1, make_AudioFormatDescription("audio/aac-adts.he.v1")
+            AUDIO_FORMAT_AAC_ADTS_HE_V1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_SCALABLE, make_AudioFormatDescription("audio/aac-adts.scalable")
+            AUDIO_FORMAT_AAC_ADTS_SCALABLE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_ERLC, make_AudioFormatDescription("audio/aac-adts.erlc")
+            AUDIO_FORMAT_AAC_ADTS_ERLC,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription("audio/aac-adts.ld")
+            AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_HE_V2, make_AudioFormatDescription("audio/aac-adts.he.v2")
+            AUDIO_FORMAT_AAC_ADTS_HE_V2,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_ELD, make_AudioFormatDescription("audio/aac-adts.eld")
+            AUDIO_FORMAT_AAC_ADTS_ELD,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_ADTS_XHE, make_AudioFormatDescription("audio/aac-adts.xhe")
+            AUDIO_FORMAT_AAC_ADTS_XHE,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE)
         },
         {
             // Note: not in the IANA registry. "vnd.octel.sbc" is not BT SBC.
@@ -926,7 +881,6 @@
             AUDIO_FORMAT_APTX_HD, make_AudioFormatDescription("audio/vnd.qcom.aptx.hd")
         },
         {
-            // Note: not in the IANA registry. Matches MediaDefs.cpp.
             AUDIO_FORMAT_AC4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC4)
         },
         {
@@ -934,35 +888,36 @@
             AUDIO_FORMAT_LDAC, make_AudioFormatDescription("audio/vnd.sony.ldac")
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_MAT, make_AudioFormatDescription("audio/vnd.dolby.mat")
+            AUDIO_FORMAT_MAT, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT)
         },
         {
             // Note: not in the IANA registry.
-            AUDIO_FORMAT_MAT_1_0, make_AudioFormatDescription("audio/vnd.dolby.mat.1.0")
+            AUDIO_FORMAT_MAT_1_0,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".1.0"))
         },
         {
             // Note: not in the IANA registry.
-            AUDIO_FORMAT_MAT_2_0, make_AudioFormatDescription("audio/vnd.dolby.mat.2.0")
+            AUDIO_FORMAT_MAT_2_0,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.0"))
         },
         {
             // Note: not in the IANA registry.
-            AUDIO_FORMAT_MAT_2_1, make_AudioFormatDescription("audio/vnd.dolby.mat.2.1")
+            AUDIO_FORMAT_MAT_2_1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.1"))
         },
         {
             AUDIO_FORMAT_AAC_LATM, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription("audio/mp4a-latm.lc")
+            AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_LATM_HE_V1, make_AudioFormatDescription("audio/mp4a-latm.he.v1")
+            AUDIO_FORMAT_AAC_LATM_HE_V1,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_AAC_LATM_HE_V2, make_AudioFormatDescription("audio/mp4a-latm.he.v2")
+            AUDIO_FORMAT_AAC_LATM_HE_V2,
+            make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2)
         },
         {
             // Note: not in the IANA registry.
@@ -989,29 +944,23 @@
             AUDIO_FORMAT_LC3, make_AudioFormatDescription("audio/x-lc3")
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_MPEGH, make_AudioFormatDescription("audio/x-mpegh")
+            AUDIO_FORMAT_MPEGH, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription("audio/x-mpegh.bl.l3")
+            AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription("audio/x-mpegh.bl.l4")
+            AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription("audio/x-mpegh.lc.l3")
+            AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3)
         },
         {
-            // Note: not in the IANA registry.
-            AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription("audio/x-mpegh.lc.l4")
+            AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4)
         },
         {
-            // Note: not in the IANA registry.
             AUDIO_FORMAT_IEC60958,
-            make_AudioFormatDescription(media::PcmType::INT_24_BIT, "audio/x-iec60958")
+            make_AudioFormatDescription(PcmType::INT_24_BIT, MEDIA_MIMETYPE_AUDIO_IEC60958)
         },
         {
             AUDIO_FORMAT_DTS_UHD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_UHD)
@@ -1055,14 +1004,14 @@
 }  // namespace
 
 ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
-        const media::AudioChannelLayout& aidl, bool isInput) {
-    using ReverseMap = std::unordered_map<media::AudioChannelLayout, audio_channel_mask_t>;
-    using Tag = media::AudioChannelLayout::Tag;
+        const AudioChannelLayout& aidl, bool isInput) {
+    using ReverseMap = std::unordered_map<AudioChannelLayout, audio_channel_mask_t>;
+    using Tag = AudioChannelLayout::Tag;
     static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
     static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
     static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
 
-    auto convert = [](const media::AudioChannelLayout& aidl, const ReverseMap& m,
+    auto convert = [](const AudioChannelLayout& aidl, const ReverseMap& m,
             const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
         if (auto it = m.find(aidl); it != m.end()) {
             return it->second;
@@ -1100,16 +1049,16 @@
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
         audio_channel_mask_t legacy, bool isInput) {
-    using DirectMap = std::unordered_map<audio_channel_mask_t, media::AudioChannelLayout>;
-    using Tag = media::AudioChannelLayout::Tag;
+    using DirectMap = std::unordered_map<audio_channel_mask_t, AudioChannelLayout>;
+    using Tag = AudioChannelLayout::Tag;
     static const DirectMap mInAndVoice = make_DirectMap(
             getInAudioChannelPairs(), getVoiceAudioChannelPairs());
     static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
 
     auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
-            const char* func, const char* type) -> ConversionResult<media::AudioChannelLayout> {
+            const char* func, const char* type) -> ConversionResult<AudioChannelLayout> {
         if (auto it = m.find(legacy); it != m.end()) {
             return it->second;
         } else {
@@ -1120,9 +1069,9 @@
     };
 
     if (legacy == AUDIO_CHANNEL_NONE) {
-        return media::AudioChannelLayout{};
+        return AudioChannelLayout{};
     } else if (legacy == AUDIO_CHANNEL_INVALID) {
-        return media::AudioChannelLayout::make<Tag::invalid>(0);
+        return AudioChannelLayout::make<Tag::invalid>(0);
     }
 
     const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
@@ -1130,7 +1079,7 @@
         if (audio_channel_mask_is_valid(legacy)) {
             const int indexMask = VALUE_OR_RETURN(
                     convertIntegral<int>(audio_channel_mask_get_bits(legacy)));
-            return media::AudioChannelLayout::make<Tag::indexMask>(indexMask);
+            return AudioChannelLayout::make<Tag::indexMask>(indexMask);
         } else {
             ALOGE("%s: legacy audio_channel_mask_t value 0x%x is invalid", __func__, legacy);
             return unexpected(BAD_VALUE);
@@ -1146,8 +1095,8 @@
 }
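For reference, a minimal sketch of round-tripping a channel mask through the two converters above, written in the file's own VALUE_OR_RETURN style; the helper name is hypothetical and not part of this change:

    ConversionResult<audio_channel_mask_t> channelMaskRoundTripSketch() {
        const bool isInput = false;  // output context, so the output lookup tables are used
        const AudioChannelLayout aidl = VALUE_OR_RETURN(
                legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
                        AUDIO_CHANNEL_OUT_STEREO, isInput));
        // Expected to map back to AUDIO_CHANNEL_OUT_STEREO when both lookup tables agree.
        return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl, isInput);
    }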
 
 ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
-        const media::AudioDeviceDescription& aidl) {
-    static const std::unordered_map<media::AudioDeviceDescription, audio_devices_t> m =
+        const AudioDeviceDescription& aidl) {
+    static const std::unordered_map<AudioDeviceDescription, audio_devices_t> m =
             make_ReverseMap(getAudioDevicePairs());
     if (auto it = m.find(aidl); it != m.end()) {
         return it->second;
@@ -1157,9 +1106,9 @@
     }
 }
 
-ConversionResult<media::AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
+ConversionResult<AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
         audio_devices_t legacy) {
-    static const std::unordered_map<audio_devices_t, media::AudioDeviceDescription> m =
+    static const std::unordered_map<audio_devices_t, AudioDeviceDescription> m =
             make_DirectMap(getAudioDevicePairs());
     if (auto it = m.find(legacy); it != m.end()) {
         return it->second;
@@ -1170,9 +1119,61 @@
     }
 }
 
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, char* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    return aidl2legacy_string(
+                    aidl.address.get<AudioDeviceAddress::id>(),
+                    legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN);
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, String8* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    *legacyAddress = VALUE_OR_RETURN_STATUS(aidl2legacy_string_view_String8(
+                    aidl.address.get<AudioDeviceAddress::id>()));
+    return OK;
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const AudioDevice& aidl,
+        audio_devices_t* legacyType, std::string* legacyAddress) {
+    *legacyType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+    *legacyAddress = aidl.address.get<AudioDeviceAddress::id>();
+    return OK;
+}
+
+ConversionResult<AudioDevice> legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const char* legacyAddress) {
+    AudioDevice aidl;
+    aidl.type = VALUE_OR_RETURN(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+    const std::string aidl_id = VALUE_OR_RETURN(
+            legacy2aidl_string(legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+    aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+    return aidl;
+}
+
+ConversionResult<AudioDevice> legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const String8& legacyAddress) {
+    AudioDevice aidl;
+    aidl.type = VALUE_OR_RETURN(
+            legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+    const std::string aidl_id = VALUE_OR_RETURN(
+            legacy2aidl_String8_string(legacyAddress));
+    aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+    return aidl;
+}
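A hedged usage sketch of the new AudioDevice overloads (the helper name and the address value are hypothetical; error handling follows the RETURN_IF_ERROR pattern already used in this file):

    ConversionResult<AudioDevice> deviceRoundTripSketch() {
        audio_devices_t type = AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
        std::string address;
        const AudioDevice aidl = VALUE_OR_RETURN(
                legacy2aidl_audio_device_AudioDevice(type, "00:11:22:33:44:55"));
        // Convert back; 'type' and 'address' now hold the legacy representation again.
        RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(aidl, &type, &address));
        return aidl;
    }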
+
 ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
-        const media::AudioFormatDescription& aidl) {
-    static const std::unordered_map<media::AudioFormatDescription, audio_format_t> m =
+        const AudioFormatDescription& aidl) {
+    static const std::unordered_map<AudioFormatDescription, audio_format_t> m =
             make_ReverseMap(getAudioFormatPairs());
     if (auto it = m.find(aidl); it != m.end()) {
         return it->second;
@@ -1182,9 +1183,9 @@
     }
 }
 
-ConversionResult<media::AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
+ConversionResult<AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
         audio_format_t legacy) {
-    static const std::unordered_map<audio_format_t, media::AudioFormatDescription> m =
+    static const std::unordered_map<audio_format_t, AudioFormatDescription> m =
             make_DirectMap(getAudioFormatPairs());
     if (auto it = m.find(legacy); it != m.end()) {
         return it->second;
@@ -1195,57 +1196,58 @@
     }
 }
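To make the table's intent concrete, a small sketch (hypothetical helper, not part of the change) showing that the direct and reverse maps built from getAudioFormatPairs() act as inverses:

    ConversionResult<audio_format_t> formatRoundTripSketch() {
        const AudioFormatDescription aidl = VALUE_OR_RETURN(
                legacy2aidl_audio_format_t_AudioFormatDescription(AUDIO_FORMAT_AAC_LC));
        // Per the pairs table, aidl.encoding is MEDIA_MIMETYPE_AUDIO_AAC_LC here.
        return aidl2legacy_AudioFormatDescription_audio_format_t(aidl);
    }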
 
-ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl) {
+ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(
+        AudioGainMode aidl) {
     switch (aidl) {
-        case media::AudioGainMode::JOINT:
+        case AudioGainMode::JOINT:
             return AUDIO_GAIN_MODE_JOINT;
-        case media::AudioGainMode::CHANNELS:
+        case AudioGainMode::CHANNELS:
             return AUDIO_GAIN_MODE_CHANNELS;
-        case media::AudioGainMode::RAMP:
+        case AudioGainMode::RAMP:
             return AUDIO_GAIN_MODE_RAMP;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy) {
+ConversionResult<AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(
+        audio_gain_mode_t legacy) {
     switch (legacy) {
         case AUDIO_GAIN_MODE_JOINT:
-            return media::AudioGainMode::JOINT;
+            return AudioGainMode::JOINT;
         case AUDIO_GAIN_MODE_CHANNELS:
-            return media::AudioGainMode::CHANNELS;
+            return AudioGainMode::CHANNELS;
         case AUDIO_GAIN_MODE_RAMP:
-            return media::AudioGainMode::RAMP;
+            return AudioGainMode::RAMP;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
-    return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, media::AudioGainMode>(
+    return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, AudioGainMode>(
             aidl, aidl2legacy_AudioGainMode_audio_gain_mode_t,
             // AudioGainMode is index-based.
-            indexToEnum_index<media::AudioGainMode>,
+            indexToEnum_index<AudioGainMode>,
             // AUDIO_GAIN_MODE_* constants are mask-based.
             enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
 }
 
 ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy) {
-    return convertBitmask<int32_t, audio_gain_mode_t, media::AudioGainMode, audio_gain_mode_t>(
+    return convertBitmask<int32_t, audio_gain_mode_t, AudioGainMode, audio_gain_mode_t>(
             legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
             // AUDIO_GAIN_MODE_* constants are mask-based.
             indexToEnum_bitmask<audio_gain_mode_t>,
             // AudioGainMode is index-based.
-            enumToMask_index<int32_t, media::AudioGainMode>);
+            enumToMask_index<int32_t, AudioGainMode>);
 }
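A short sketch of the index-based vs. mask-based distinction noted in the comments above (hypothetical helper; only the AUDIO_GAIN_MODE_* constants visible in this file are assumed):

    ConversionResult<int32_t> gainModeMaskSketch() {
        const audio_gain_mode_t legacyMask = static_cast<audio_gain_mode_t>(
                AUDIO_GAIN_MODE_JOINT | AUDIO_GAIN_MODE_RAMP);
        // Each legacy mask bit is translated to the bit position of the matching
        // AudioGainMode enumerator on the AIDL side.
        const int32_t aidlMask = VALUE_OR_RETURN(
                legacy2aidl_audio_gain_mode_t_int32_t_mask(legacyMask));
        // aidl2legacy_int32_t_audio_gain_mode_t_mask(aidlMask) is expected to restore
        // JOINT | RAMP.
        return aidlMask;
    }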
 
 ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
-        const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type) {
+        const AudioGainConfig& aidl, bool isInput) {
     audio_gain_config legacy;
     legacy.index = VALUE_OR_RETURN(convertIntegral<int>(aidl.index));
     legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
-    const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
     legacy.channel_mask = VALUE_OR_RETURN(
             aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
-    const bool isJoint = bitmaskIsSet(aidl.mode, media::AudioGainMode::JOINT);
+    const bool isJoint = bitmaskIsSet(aidl.mode, AudioGainMode::JOINT);
     size_t numValues = isJoint ? 1
                                : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
                                          : audio_channel_count_from_out_mask(legacy.channel_mask);
@@ -1259,12 +1261,11 @@
     return legacy;
 }
 
-ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
-        const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type) {
-    media::AudioGainConfig aidl;
+ConversionResult<AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
+        const audio_gain_config& legacy, bool isInput) {
+    AudioGainConfig aidl;
     aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
     aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
-    const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
     aidl.channelMask = VALUE_OR_RETURN(
             legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     const bool isJoint = (legacy.mode & AUDIO_GAIN_MODE_JOINT) != 0;
@@ -1496,175 +1497,175 @@
 }
 
 ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-        const media::AudioPortConfigDeviceExt& aidl) {
+aidl2legacy_AudioDevice_audio_port_config_device_ext(
+        const AudioDevice& aidl, const media::AudioPortDeviceExtSys& aidlDeviceExt) {
     audio_port_config_device_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
-    RETURN_IF_ERROR(aidl2legacy_string(aidl.address, legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlDeviceExt.hwModule));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(aidl, &legacy.type, legacy.address));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigDeviceExt>
-legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-        const audio_port_config_device_ext& legacy) {
-    media::AudioPortConfigDeviceExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_AudioDeviceDescription(legacy.type));
-    aidl.address = VALUE_OR_RETURN(
-            legacy2aidl_string(legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
-    return aidl;
+status_t legacy2aidl_audio_port_config_device_ext_AudioDevice(
+        const audio_port_config_device_ext& legacy,
+        AudioDevice* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+    aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    *aidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+    return OK;
 }
 
 ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
-        media::AudioStreamType aidl) {
+        AudioStreamType aidl) {
     switch (aidl) {
-        case media::AudioStreamType::DEFAULT:
+        case AudioStreamType::INVALID:
+            break;  // return error
+        case AudioStreamType::SYS_RESERVED_DEFAULT:
             return AUDIO_STREAM_DEFAULT;
-        case media::AudioStreamType::VOICE_CALL:
+        case AudioStreamType::VOICE_CALL:
             return AUDIO_STREAM_VOICE_CALL;
-        case media::AudioStreamType::SYSTEM:
+        case AudioStreamType::SYSTEM:
             return AUDIO_STREAM_SYSTEM;
-        case media::AudioStreamType::RING:
+        case AudioStreamType::RING:
             return AUDIO_STREAM_RING;
-        case media::AudioStreamType::MUSIC:
+        case AudioStreamType::MUSIC:
             return AUDIO_STREAM_MUSIC;
-        case media::AudioStreamType::ALARM:
+        case AudioStreamType::ALARM:
             return AUDIO_STREAM_ALARM;
-        case media::AudioStreamType::NOTIFICATION:
+        case AudioStreamType::NOTIFICATION:
             return AUDIO_STREAM_NOTIFICATION;
-        case media::AudioStreamType::BLUETOOTH_SCO:
+        case AudioStreamType::BLUETOOTH_SCO:
             return AUDIO_STREAM_BLUETOOTH_SCO;
-        case media::AudioStreamType::ENFORCED_AUDIBLE:
+        case AudioStreamType::ENFORCED_AUDIBLE:
             return AUDIO_STREAM_ENFORCED_AUDIBLE;
-        case media::AudioStreamType::DTMF:
+        case AudioStreamType::DTMF:
             return AUDIO_STREAM_DTMF;
-        case media::AudioStreamType::TTS:
+        case AudioStreamType::TTS:
             return AUDIO_STREAM_TTS;
-        case media::AudioStreamType::ACCESSIBILITY:
+        case AudioStreamType::ACCESSIBILITY:
             return AUDIO_STREAM_ACCESSIBILITY;
-        case media::AudioStreamType::ASSISTANT:
+        case AudioStreamType::ASSISTANT:
             return AUDIO_STREAM_ASSISTANT;
-        case media::AudioStreamType::REROUTING:
+        case AudioStreamType::SYS_RESERVED_REROUTING:
             return AUDIO_STREAM_REROUTING;
-        case media::AudioStreamType::PATCH:
+        case AudioStreamType::SYS_RESERVED_PATCH:
             return AUDIO_STREAM_PATCH;
-        case media::AudioStreamType::CALL_ASSISTANT:
+        case AudioStreamType::CALL_ASSISTANT:
             return AUDIO_STREAM_CALL_ASSISTANT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
+ConversionResult<AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
         audio_stream_type_t legacy) {
     switch (legacy) {
         case AUDIO_STREAM_DEFAULT:
-            return media::AudioStreamType::DEFAULT;
+            return AudioStreamType::SYS_RESERVED_DEFAULT;
         case AUDIO_STREAM_VOICE_CALL:
-            return media::AudioStreamType::VOICE_CALL;
+            return AudioStreamType::VOICE_CALL;
         case AUDIO_STREAM_SYSTEM:
-            return media::AudioStreamType::SYSTEM;
+            return AudioStreamType::SYSTEM;
         case AUDIO_STREAM_RING:
-            return media::AudioStreamType::RING;
+            return AudioStreamType::RING;
         case AUDIO_STREAM_MUSIC:
-            return media::AudioStreamType::MUSIC;
+            return AudioStreamType::MUSIC;
         case AUDIO_STREAM_ALARM:
-            return media::AudioStreamType::ALARM;
+            return AudioStreamType::ALARM;
         case AUDIO_STREAM_NOTIFICATION:
-            return media::AudioStreamType::NOTIFICATION;
+            return AudioStreamType::NOTIFICATION;
         case AUDIO_STREAM_BLUETOOTH_SCO:
-            return media::AudioStreamType::BLUETOOTH_SCO;
+            return AudioStreamType::BLUETOOTH_SCO;
         case AUDIO_STREAM_ENFORCED_AUDIBLE:
-            return media::AudioStreamType::ENFORCED_AUDIBLE;
+            return AudioStreamType::ENFORCED_AUDIBLE;
         case AUDIO_STREAM_DTMF:
-            return media::AudioStreamType::DTMF;
+            return AudioStreamType::DTMF;
         case AUDIO_STREAM_TTS:
-            return media::AudioStreamType::TTS;
+            return AudioStreamType::TTS;
         case AUDIO_STREAM_ACCESSIBILITY:
-            return media::AudioStreamType::ACCESSIBILITY;
+            return AudioStreamType::ACCESSIBILITY;
         case AUDIO_STREAM_ASSISTANT:
-            return media::AudioStreamType::ASSISTANT;
+            return AudioStreamType::ASSISTANT;
         case AUDIO_STREAM_REROUTING:
-            return media::AudioStreamType::REROUTING;
+            return AudioStreamType::SYS_RESERVED_REROUTING;
         case AUDIO_STREAM_PATCH:
-            return media::AudioStreamType::PATCH;
+            return AudioStreamType::SYS_RESERVED_PATCH;
         case AUDIO_STREAM_CALL_ASSISTANT:
-            return media::AudioStreamType::CALL_ASSISTANT;
+            return AudioStreamType::CALL_ASSISTANT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
-        media::AudioSourceType aidl) {
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+        AudioSource aidl) {
     switch (aidl) {
-        case media::AudioSourceType::INVALID:
-            // This value does not have an enum
+        case AudioSource::SYS_RESERVED_INVALID:
             return AUDIO_SOURCE_INVALID;
-        case media::AudioSourceType::DEFAULT:
+        case AudioSource::DEFAULT:
             return AUDIO_SOURCE_DEFAULT;
-        case media::AudioSourceType::MIC:
+        case AudioSource::MIC:
             return AUDIO_SOURCE_MIC;
-        case media::AudioSourceType::VOICE_UPLINK:
+        case AudioSource::VOICE_UPLINK:
             return AUDIO_SOURCE_VOICE_UPLINK;
-        case media::AudioSourceType::VOICE_DOWNLINK:
+        case AudioSource::VOICE_DOWNLINK:
             return AUDIO_SOURCE_VOICE_DOWNLINK;
-        case media::AudioSourceType::VOICE_CALL:
+        case AudioSource::VOICE_CALL:
             return AUDIO_SOURCE_VOICE_CALL;
-        case media::AudioSourceType::CAMCORDER:
+        case AudioSource::CAMCORDER:
             return AUDIO_SOURCE_CAMCORDER;
-        case media::AudioSourceType::VOICE_RECOGNITION:
+        case AudioSource::VOICE_RECOGNITION:
             return AUDIO_SOURCE_VOICE_RECOGNITION;
-        case media::AudioSourceType::VOICE_COMMUNICATION:
+        case AudioSource::VOICE_COMMUNICATION:
             return AUDIO_SOURCE_VOICE_COMMUNICATION;
-        case media::AudioSourceType::REMOTE_SUBMIX:
+        case AudioSource::REMOTE_SUBMIX:
             return AUDIO_SOURCE_REMOTE_SUBMIX;
-        case media::AudioSourceType::UNPROCESSED:
+        case AudioSource::UNPROCESSED:
             return AUDIO_SOURCE_UNPROCESSED;
-        case media::AudioSourceType::VOICE_PERFORMANCE:
+        case AudioSource::VOICE_PERFORMANCE:
             return AUDIO_SOURCE_VOICE_PERFORMANCE;
-        case media::AudioSourceType::ECHO_REFERENCE:
+        case AudioSource::ECHO_REFERENCE:
             return AUDIO_SOURCE_ECHO_REFERENCE;
-        case media::AudioSourceType::FM_TUNER:
+        case AudioSource::FM_TUNER:
             return AUDIO_SOURCE_FM_TUNER;
-        case media::AudioSourceType::HOTWORD:
+        case AudioSource::HOTWORD:
             return AUDIO_SOURCE_HOTWORD;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
+ConversionResult<AudioSource> legacy2aidl_audio_source_t_AudioSource(
         audio_source_t legacy) {
     switch (legacy) {
         case AUDIO_SOURCE_INVALID:
-            return media::AudioSourceType::INVALID;
+            return AudioSource::SYS_RESERVED_INVALID;
         case AUDIO_SOURCE_DEFAULT:
-            return media::AudioSourceType::DEFAULT;
+            return AudioSource::DEFAULT;
         case AUDIO_SOURCE_MIC:
-            return media::AudioSourceType::MIC;
+            return AudioSource::MIC;
         case AUDIO_SOURCE_VOICE_UPLINK:
-            return media::AudioSourceType::VOICE_UPLINK;
+            return AudioSource::VOICE_UPLINK;
         case AUDIO_SOURCE_VOICE_DOWNLINK:
-            return media::AudioSourceType::VOICE_DOWNLINK;
+            return AudioSource::VOICE_DOWNLINK;
         case AUDIO_SOURCE_VOICE_CALL:
-            return media::AudioSourceType::VOICE_CALL;
+            return AudioSource::VOICE_CALL;
         case AUDIO_SOURCE_CAMCORDER:
-            return media::AudioSourceType::CAMCORDER;
+            return AudioSource::CAMCORDER;
         case AUDIO_SOURCE_VOICE_RECOGNITION:
-            return media::AudioSourceType::VOICE_RECOGNITION;
+            return AudioSource::VOICE_RECOGNITION;
         case AUDIO_SOURCE_VOICE_COMMUNICATION:
-            return media::AudioSourceType::VOICE_COMMUNICATION;
+            return AudioSource::VOICE_COMMUNICATION;
         case AUDIO_SOURCE_REMOTE_SUBMIX:
-            return media::AudioSourceType::REMOTE_SUBMIX;
+            return AudioSource::REMOTE_SUBMIX;
         case AUDIO_SOURCE_UNPROCESSED:
-            return media::AudioSourceType::UNPROCESSED;
+            return AudioSource::UNPROCESSED;
         case AUDIO_SOURCE_VOICE_PERFORMANCE:
-            return media::AudioSourceType::VOICE_PERFORMANCE;
+            return AudioSource::VOICE_PERFORMANCE;
         case AUDIO_SOURCE_ECHO_REFERENCE:
-            return media::AudioSourceType::ECHO_REFERENCE;
+            return AudioSource::ECHO_REFERENCE;
         case AUDIO_SOURCE_FM_TUNER:
-            return media::AudioSourceType::FM_TUNER;
+            return AudioSource::FM_TUNER;
         case AUDIO_SOURCE_HOTWORD:
-            return media::AudioSourceType::HOTWORD;
+            return AudioSource::HOTWORD;
     }
     return unexpected(BAD_VALUE);
 }
@@ -1680,8 +1681,8 @@
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_config_mix_ext_usecase = decltype(audio_port_config_mix_ext::usecase);
 
-ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortConfigMixExtUseCase(
-        const media::AudioPortConfigMixExtUseCase& aidl, media::AudioPortRole role) {
+ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortMixExtUseCase(
+        const AudioPortMixExtUseCase& aidl, media::AudioPortRole role) {
     audio_port_config_mix_ext_usecase legacy;
 
     switch (role) {
@@ -1698,16 +1699,16 @@
 
         case media::AudioPortRole::SINK:
             // This is not a bug. A SINK role corresponds to the source field.
-            legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(
+            legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(
                     VALUE_OR_RETURN(UNION_GET(aidl, source))));
             return legacy;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<media::AudioPortConfigMixExtUseCase> legacy2aidl_AudioPortConfigMixExtUseCase(
+ConversionResult<AudioPortMixExtUseCase> legacy2aidl_AudioPortMixExtUseCase(
         const audio_port_config_mix_ext_usecase& legacy, audio_port_role_t role) {
-    media::AudioPortConfigMixExtUseCase aidl;
+    AudioPortMixExtUseCase aidl;
 
     switch (role) {
         case AUDIO_PORT_ROLE_NONE:
@@ -1721,52 +1722,53 @@
         case AUDIO_PORT_ROLE_SINK:
             // This is not a bug. A SINK role corresponds to the source field.
             UNION_SET(aidl, source,
-                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source)));
             return aidl;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
-        const media::AudioPortConfigMixExt& aidl, media::AudioPortRole role) {
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
+        const AudioPortMixExt& aidl, media::AudioPortRole role,
+        const media::AudioPortMixExtSys& aidlMixExt) {
     audio_port_config_mix_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlMixExt.hwModule));
     legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
-    legacy.usecase = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigMixExtUseCase(aidl.usecase, role));
+    legacy.usecase = VALUE_OR_RETURN(aidl2legacy_AudioPortMixExtUseCase(aidl.usecase, role));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
-        const audio_port_config_mix_ext& legacy, audio_port_role_t role) {
-    media::AudioPortConfigMixExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
-    aidl.usecase = VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExtUseCase(legacy.usecase, role));
-    return aidl;
+status_t legacy2aidl_AudioPortMixExt(
+        const audio_port_config_mix_ext& legacy, audio_port_role_t role,
+        AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+    aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+    aidl->usecase = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_AudioPortMixExtUseCase(legacy.usecase, role));
+    return OK;
 }
 
 ConversionResult<audio_port_config_session_ext>
-aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
-        const media::AudioPortConfigSessionExt& aidl) {
+aidl2legacy_int32_t_audio_port_config_session_ext(int32_t aidl) {
     audio_port_config_session_ext legacy;
-    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigSessionExt>
-legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+ConversionResult<int32_t>
+legacy2aidl_audio_port_config_session_ext_int32_t(
         const audio_port_config_session_ext& legacy) {
-    media::AudioPortConfigSessionExt aidl;
-    aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    return aidl;
+    return legacy2aidl_audio_session_t_int32_t(legacy.session);
 }
 
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_config_ext = decltype(audio_port_config::ext);
 
-ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortConfigExt(
-        const media::AudioPortConfigExt& aidl, media::AudioPortType type,
-        media::AudioPortRole role) {
+ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortExt_audio_port_config_ext(
+        const AudioPortExt& aidl, media::AudioPortType type,
+        media::AudioPortRole role, const media::AudioPortExtSys& aidlSys) {
     audio_port_config_ext legacy;
     switch (type) {
         case media::AudioPortType::NONE:
@@ -1775,16 +1777,19 @@
             return legacy;
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, device))));
+                    aidl2legacy_AudioDevice_audio_port_config_device_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, device)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, device))));
             return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigMixExt(VALUE_OR_RETURN(UNION_GET(aidl, mix)), role));
+                    aidl2legacy_AudioPortMixExt(
+                            VALUE_OR_RETURN(UNION_GET(aidl, mix)), role,
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, mix))));
             return legacy;
         case media::AudioPortType::SESSION:
             legacy.session = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
+                    aidl2legacy_int32_t_audio_port_config_session_ext(
                             VALUE_OR_RETURN(UNION_GET(aidl, session))));
             return legacy;
 
@@ -1792,97 +1797,114 @@
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<media::AudioPortConfigExt> legacy2aidl_AudioPortConfigExt(
-        const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role) {
-    media::AudioPortConfigExt aidl;
-
+status_t legacy2aidl_AudioPortExt(
+        const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role,
+        AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
-            UNION_SET(aidl, unspecified, false);
-            return aidl;
-        case AUDIO_PORT_TYPE_DEVICE:
-            UNION_SET(aidl, device,
-                      VALUE_OR_RETURN(
-                        legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-                          legacy.device)));
-            return aidl;
-        case AUDIO_PORT_TYPE_MIX:
-            UNION_SET(aidl, mix,
-                      VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExt(legacy.mix, role)));
-            return aidl;
+            UNION_SET(*aidl, unspecified, false);
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
+        case AUDIO_PORT_TYPE_DEVICE: {
+            AudioDevice device;
+            media::AudioPortDeviceExtSys deviceSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_config_device_ext_AudioDevice(
+                            legacy.device, &device, &deviceSys));
+            UNION_SET(*aidl, device, device);
+            UNION_SET(*aidlSys, device, deviceSys);
+            return OK;
+        }
+        case AUDIO_PORT_TYPE_MIX: {
+            AudioPortMixExt mix;
+            media::AudioPortMixExtSys mixSys;
+            RETURN_STATUS_IF_ERROR(legacy2aidl_AudioPortMixExt(legacy.mix, role, &mix, &mixSys));
+            UNION_SET(*aidl, mix, mix);
+            UNION_SET(*aidlSys, mix, mixSys);
+            return OK;
+        }
         case AUDIO_PORT_TYPE_SESSION:
-            UNION_SET(aidl, session,
-                      VALUE_OR_RETURN(
-                        legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
-                          legacy.session)));
-            return aidl;
+            UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+                            legacy2aidl_audio_port_config_session_ext_int32_t(legacy.session)));
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
         const media::AudioPortConfig& aidl) {
-    audio_port_config legacy;
-    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
-    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
-    legacy.config_mask = VALUE_OR_RETURN(aidl2legacy_int32_t_config_mask(aidl.configMask));
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::SAMPLE_RATE)) {
-        legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.sampleRate));
+    audio_port_config legacy{};
+    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
+    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
+    const bool isInput =
+            VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
+    if (aidl.hal.sampleRate.has_value()) {
+        legacy.sample_rate = VALUE_OR_RETURN(
+                convertIntegral<unsigned int>(aidl.hal.sampleRate.value().value));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::CHANNEL_MASK)) {
-        const bool isInput = VALUE_OR_RETURN(direction(aidl.role, aidl.type)) == Direction::INPUT;
+    if (aidl.hal.channelMask.has_value()) {
         legacy.channel_mask =
                 VALUE_OR_RETURN(
                         aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
-                                aidl.channelMask, isInput));
+                                aidl.hal.channelMask.value(), isInput));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FORMAT)) {
+    if (aidl.hal.format.has_value()) {
         legacy.format = VALUE_OR_RETURN(
-                aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+                aidl2legacy_AudioFormatDescription_audio_format_t(aidl.hal.format.value()));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_FORMAT;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::GAIN)) {
-        legacy.gain = VALUE_OR_RETURN(
-                aidl2legacy_AudioGainConfig_audio_gain_config(aidl.gain, aidl.role, aidl.type));
+    if (aidl.hal.gain.has_value()) {
+        legacy.gain = VALUE_OR_RETURN(aidl2legacy_AudioGainConfig_audio_gain_config(
+                        aidl.hal.gain.value(), isInput));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_GAIN;
     }
-    if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FLAGS)) {
+    if (aidl.sys.flags.has_value()) {
         legacy.flags = VALUE_OR_RETURN(
-                aidl2legacy_AudioIoFlags_audio_io_flags(aidl.flags, aidl.role, aidl.type));
+                aidl2legacy_AudioIoFlags_audio_io_flags(
+                        aidl.sys.flags.value(), aidl.sys.role, aidl.sys.type));
+        legacy.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
     }
-    legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigExt(aidl.ext, aidl.type, aidl.role));
+    legacy.ext = VALUE_OR_RETURN(
+            aidl2legacy_AudioPortExt_audio_port_config_ext(
+                    aidl.hal.ext, aidl.sys.type, aidl.sys.role, aidl.sys.ext));
     return legacy;
 }
 
 ConversionResult<media::AudioPortConfig> legacy2aidl_audio_port_config_AudioPortConfig(
         const audio_port_config& legacy) {
     media::AudioPortConfig aidl;
-    aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
-    aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
-    aidl.configMask = VALUE_OR_RETURN(legacy2aidl_config_mask_int32_t(legacy.config_mask));
+    aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+    aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+    aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+    const bool isInput = VALUE_OR_RETURN(
+            direction(legacy.role, legacy.type)) == Direction::INPUT;
     if (legacy.config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+        Int aidl_sampleRate;
+        aidl_sampleRate.value = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+        aidl.hal.sampleRate = aidl_sampleRate;
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        const bool isInput = VALUE_OR_RETURN(
-                direction(legacy.role, legacy.type)) == Direction::INPUT;
-        aidl.channelMask = VALUE_OR_RETURN(
+        aidl.hal.channelMask = VALUE_OR_RETURN(
                 legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        aidl.format = VALUE_OR_RETURN(
+        aidl.hal.format = VALUE_OR_RETURN(
                 legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_GAIN) {
-        aidl.gain = VALUE_OR_RETURN(legacy2aidl_audio_gain_config_AudioGainConfig(
-                legacy.gain, legacy.role, legacy.type));
+        aidl.hal.gain = VALUE_OR_RETURN(
+                legacy2aidl_audio_gain_config_AudioGainConfig(legacy.gain, isInput));
     }
     if (legacy.config_mask & AUDIO_PORT_CONFIG_FLAGS) {
-        aidl.flags = VALUE_OR_RETURN(
+        aidl.sys.flags = VALUE_OR_RETURN(
                 legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, legacy.role, legacy.type));
     }
-    aidl.ext =
-            VALUE_OR_RETURN(legacy2aidl_AudioPortConfigExt(legacy.ext, legacy.type, legacy.role));
+    RETURN_IF_ERROR(legacy2aidl_AudioPortExt(legacy.ext, legacy.type, legacy.role,
+                    &aidl.hal.ext, &aidl.sys.ext));
     return aidl;
 }
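A hedged sketch of the reworked flow: with the split hal/sys parcelable, setting an optional field on the AIDL side is what turns on the corresponding AUDIO_PORT_CONFIG_* bit in the legacy struct. The helper and its values are hypothetical (a real config is produced by the framework, not hand-built), and default-constructed AudioPortDeviceExtSys/empty device address are assumed to be acceptable:

    ConversionResult<audio_port_config> portConfigSketch() {
        media::AudioPortConfig aidl;
        aidl.sys.role = media::AudioPortRole::SINK;
        aidl.sys.type = media::AudioPortType::DEVICE;
        const AudioDevice speaker = VALUE_OR_RETURN(
                legacy2aidl_audio_device_AudioDevice(AUDIO_DEVICE_OUT_SPEAKER, ""));
        UNION_SET(aidl.hal.ext, device, speaker);
        UNION_SET(aidl.sys.ext, device, media::AudioPortDeviceExtSys{});
        Int sampleRate;
        sampleRate.value = 48000;
        aidl.hal.sampleRate = sampleRate;  // the only optional field that is set
        // Expected result: legacy.config_mask == AUDIO_PORT_CONFIG_SAMPLE_RATE and nothing else.
        return aidl2legacy_AudioPortConfig_audio_port_config(aidl);
    }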
 
@@ -1987,137 +2009,139 @@
 }
 
 ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl) {
+aidl2legacy_AudioContentType_audio_content_type_t(AudioContentType aidl) {
     switch (aidl) {
-        case media::AudioContentType::UNKNOWN:
+        case AudioContentType::UNKNOWN:
             return AUDIO_CONTENT_TYPE_UNKNOWN;
-        case media::AudioContentType::SPEECH:
+        case AudioContentType::SPEECH:
             return AUDIO_CONTENT_TYPE_SPEECH;
-        case media::AudioContentType::MUSIC:
+        case AudioContentType::MUSIC:
             return AUDIO_CONTENT_TYPE_MUSIC;
-        case media::AudioContentType::MOVIE:
+        case AudioContentType::MOVIE:
             return AUDIO_CONTENT_TYPE_MOVIE;
-        case media::AudioContentType::SONIFICATION:
+        case AudioContentType::SONIFICATION:
             return AUDIO_CONTENT_TYPE_SONIFICATION;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioContentType>
+ConversionResult<AudioContentType>
 legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy) {
     switch (legacy) {
         case AUDIO_CONTENT_TYPE_UNKNOWN:
-            return media::AudioContentType::UNKNOWN;
+            return AudioContentType::UNKNOWN;
         case AUDIO_CONTENT_TYPE_SPEECH:
-            return media::AudioContentType::SPEECH;
+            return AudioContentType::SPEECH;
         case AUDIO_CONTENT_TYPE_MUSIC:
-            return media::AudioContentType::MUSIC;
+            return AudioContentType::MUSIC;
         case AUDIO_CONTENT_TYPE_MOVIE:
-            return media::AudioContentType::MOVIE;
+            return AudioContentType::MOVIE;
         case AUDIO_CONTENT_TYPE_SONIFICATION:
-            return media::AudioContentType::SONIFICATION;
+            return AudioContentType::SONIFICATION;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::AudioUsage aidl) {
+aidl2legacy_AudioUsage_audio_usage_t(AudioUsage aidl) {
     switch (aidl) {
-        case media::AudioUsage::UNKNOWN:
+        case AudioUsage::INVALID:
+            break;  // return error
+        case AudioUsage::UNKNOWN:
             return AUDIO_USAGE_UNKNOWN;
-        case media::AudioUsage::MEDIA:
+        case AudioUsage::MEDIA:
             return AUDIO_USAGE_MEDIA;
-        case media::AudioUsage::VOICE_COMMUNICATION:
+        case AudioUsage::VOICE_COMMUNICATION:
             return AUDIO_USAGE_VOICE_COMMUNICATION;
-        case media::AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
+        case AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
             return AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
-        case media::AudioUsage::ALARM:
+        case AudioUsage::ALARM:
             return AUDIO_USAGE_ALARM;
-        case media::AudioUsage::NOTIFICATION:
+        case AudioUsage::NOTIFICATION:
             return AUDIO_USAGE_NOTIFICATION;
-        case media::AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
+        case AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
             return AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_REQUEST:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_INSTANT:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT;
-        case media::AudioUsage::NOTIFICATION_COMMUNICATION_DELAYED:
+        case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED:
             return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED;
-        case media::AudioUsage::NOTIFICATION_EVENT:
+        case AudioUsage::NOTIFICATION_EVENT:
             return AUDIO_USAGE_NOTIFICATION_EVENT;
-        case media::AudioUsage::ASSISTANCE_ACCESSIBILITY:
+        case AudioUsage::ASSISTANCE_ACCESSIBILITY:
             return AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
-        case media::AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
+        case AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
             return AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
-        case media::AudioUsage::ASSISTANCE_SONIFICATION:
+        case AudioUsage::ASSISTANCE_SONIFICATION:
             return AUDIO_USAGE_ASSISTANCE_SONIFICATION;
-        case media::AudioUsage::GAME:
+        case AudioUsage::GAME:
             return AUDIO_USAGE_GAME;
-        case media::AudioUsage::VIRTUAL_SOURCE:
+        case AudioUsage::VIRTUAL_SOURCE:
             return AUDIO_USAGE_VIRTUAL_SOURCE;
-        case media::AudioUsage::ASSISTANT:
+        case AudioUsage::ASSISTANT:
             return AUDIO_USAGE_ASSISTANT;
-        case media::AudioUsage::CALL_ASSISTANT:
+        case AudioUsage::CALL_ASSISTANT:
             return AUDIO_USAGE_CALL_ASSISTANT;
-        case media::AudioUsage::EMERGENCY:
+        case AudioUsage::EMERGENCY:
             return AUDIO_USAGE_EMERGENCY;
-        case media::AudioUsage::SAFETY:
+        case AudioUsage::SAFETY:
             return AUDIO_USAGE_SAFETY;
-        case media::AudioUsage::VEHICLE_STATUS:
+        case AudioUsage::VEHICLE_STATUS:
             return AUDIO_USAGE_VEHICLE_STATUS;
-        case media::AudioUsage::ANNOUNCEMENT:
+        case AudioUsage::ANNOUNCEMENT:
             return AUDIO_USAGE_ANNOUNCEMENT;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioUsage>
+ConversionResult<AudioUsage>
 legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy) {
     switch (legacy) {
         case AUDIO_USAGE_UNKNOWN:
-            return media::AudioUsage::UNKNOWN;
+            return AudioUsage::UNKNOWN;
         case AUDIO_USAGE_MEDIA:
-            return media::AudioUsage::MEDIA;
+            return AudioUsage::MEDIA;
         case AUDIO_USAGE_VOICE_COMMUNICATION:
-            return media::AudioUsage::VOICE_COMMUNICATION;
+            return AudioUsage::VOICE_COMMUNICATION;
         case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-            return media::AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
+            return AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
         case AUDIO_USAGE_ALARM:
-            return media::AudioUsage::ALARM;
+            return AudioUsage::ALARM;
         case AUDIO_USAGE_NOTIFICATION:
-            return media::AudioUsage::NOTIFICATION;
+            return AudioUsage::NOTIFICATION;
         case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-            return media::AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
+            return AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_REQUEST;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_INSTANT;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT;
         case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-            return media::AudioUsage::NOTIFICATION_COMMUNICATION_DELAYED;
+            return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED;
         case AUDIO_USAGE_NOTIFICATION_EVENT:
-            return media::AudioUsage::NOTIFICATION_EVENT;
+            return AudioUsage::NOTIFICATION_EVENT;
         case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-            return media::AudioUsage::ASSISTANCE_ACCESSIBILITY;
+            return AudioUsage::ASSISTANCE_ACCESSIBILITY;
         case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-            return media::AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
+            return AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
         case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-            return media::AudioUsage::ASSISTANCE_SONIFICATION;
+            return AudioUsage::ASSISTANCE_SONIFICATION;
         case AUDIO_USAGE_GAME:
-            return media::AudioUsage::GAME;
+            return AudioUsage::GAME;
         case AUDIO_USAGE_VIRTUAL_SOURCE:
-            return media::AudioUsage::VIRTUAL_SOURCE;
+            return AudioUsage::VIRTUAL_SOURCE;
         case AUDIO_USAGE_ASSISTANT:
-            return media::AudioUsage::ASSISTANT;
+            return AudioUsage::ASSISTANT;
         case AUDIO_USAGE_CALL_ASSISTANT:
-            return media::AudioUsage::CALL_ASSISTANT;
+            return AudioUsage::CALL_ASSISTANT;
         case AUDIO_USAGE_EMERGENCY:
-            return media::AudioUsage::EMERGENCY;
+            return AudioUsage::EMERGENCY;
         case AUDIO_USAGE_SAFETY:
-            return media::AudioUsage::SAFETY;
+            return AudioUsage::SAFETY;
         case AUDIO_USAGE_VEHICLE_STATUS:
-            return media::AudioUsage::VEHICLE_STATUS;
+            return AudioUsage::VEHICLE_STATUS;
         case AUDIO_USAGE_ANNOUNCEMENT:
-            return media::AudioUsage::ANNOUNCEMENT;
+            return AudioUsage::ANNOUNCEMENT;
     }
     return unexpected(BAD_VALUE);
 }
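// --- Editor's illustrative sketch (not part of the patch) ---
// The converters above all follow one idiom: an exhaustive switch over the source enum,
// a ConversionResult<T> carrying either the converted value or an error, and
// unexpected(BAD_VALUE) for values that must not cross the boundary (e.g.
// AudioUsage::INVALID, or the SYS_RESERVED_* aliases). The snippet below is a minimal,
// self-contained sketch of that shape; FakeAidlUsage, FakeLegacyUsage and SimpleResult
// are hypothetical stand-ins, not the real Android types.
#include <cassert>
#include <optional>

enum class FakeAidlUsage { INVALID, UNKNOWN, MEDIA };                   // stand-in AIDL enum
enum FakeLegacyUsage { FAKE_USAGE_UNKNOWN = 0, FAKE_USAGE_MEDIA = 1 };  // stand-in legacy enum

template <typename T>
using SimpleResult = std::optional<T>;  // stand-in for ConversionResult<T>

SimpleResult<FakeLegacyUsage> aidl2legacy_usage(FakeAidlUsage aidl) {
    switch (aidl) {
        case FakeAidlUsage::INVALID:
            break;  // falls through to the error return, like AudioUsage::INVALID above
        case FakeAidlUsage::UNKNOWN:
            return FAKE_USAGE_UNKNOWN;
        case FakeAidlUsage::MEDIA:
            return FAKE_USAGE_MEDIA;
    }
    return std::nullopt;  // stand-in for unexpected(BAD_VALUE)
}

SimpleResult<FakeAidlUsage> legacy2aidl_usage(FakeLegacyUsage legacy) {
    switch (legacy) {
        case FAKE_USAGE_UNKNOWN:
            return FakeAidlUsage::UNKNOWN;
        case FAKE_USAGE_MEDIA:
            return FakeAidlUsage::MEDIA;
    }
    return std::nullopt;
}

int main() {
    // Every convertible value round-trips to itself.
    assert(legacy2aidl_usage(*aidl2legacy_usage(FakeAidlUsage::MEDIA)) == FakeAidlUsage::MEDIA);
    // INVALID never reaches the legacy layer.
    assert(!aidl2legacy_usage(FakeAidlUsage::INVALID).has_value());
    return 0;
}
// --- end sketch ---
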
@@ -2153,6 +2177,10 @@
             return AUDIO_FLAG_NO_SYSTEM_CAPTURE;
         case media::AudioFlag::CAPTURE_PRIVATE:
             return AUDIO_FLAG_CAPTURE_PRIVATE;
+        case media::AudioFlag::CONTENT_SPATIALIZED:
+            return AUDIO_FLAG_CONTENT_SPATIALIZED;
+        case media::AudioFlag::NEVER_SPATIALIZE:
+            return AUDIO_FLAG_NEVER_SPATIALIZE;
     }
     return unexpected(BAD_VALUE);
 }
@@ -2190,6 +2218,10 @@
             return media::AudioFlag::NO_SYSTEM_CAPTURE;
         case AUDIO_FLAG_CAPTURE_PRIVATE:
             return media::AudioFlag::CAPTURE_PRIVATE;
+        case AUDIO_FLAG_CONTENT_SPATIALIZED:
+            return media::AudioFlag::CONTENT_SPATIALIZED;
+        case AUDIO_FLAG_NEVER_SPATIALIZE:
+            return media::AudioFlag::NEVER_SPATIALIZE;
     }
     return unexpected(BAD_VALUE);
 }
@@ -2215,7 +2247,7 @@
     legacy.content_type = VALUE_OR_RETURN(
             aidl2legacy_AudioContentType_audio_content_type_t(aidl.contentType));
     legacy.usage = VALUE_OR_RETURN(aidl2legacy_AudioUsage_audio_usage_t(aidl.usage));
-    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(aidl.source));
+    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
     legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_flags_mask_t_mask(aidl.flags));
     RETURN_IF_ERROR(aidl2legacy_string(aidl.tags, legacy.tags, sizeof(legacy.tags)));
     return legacy;
@@ -2227,51 +2259,51 @@
     aidl.contentType = VALUE_OR_RETURN(
             legacy2aidl_audio_content_type_t_AudioContentType(legacy.content_type));
     aidl.usage = VALUE_OR_RETURN(legacy2aidl_audio_usage_t_AudioUsage(legacy.usage));
-    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source));
+    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source));
     aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_flags_mask_t_int32_t_mask(legacy.flags));
     aidl.tags = VALUE_OR_RETURN(legacy2aidl_string(legacy.tags, sizeof(legacy.tags)));
     return aidl;
 }
 
 ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(media::AudioEncapsulationMode aidl) {
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(AudioEncapsulationMode aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationMode::NONE:
+        case AudioEncapsulationMode::INVALID:
+            break;  // return error
+        case AudioEncapsulationMode::NONE:
             return AUDIO_ENCAPSULATION_MODE_NONE;
-        case media::AudioEncapsulationMode::ELEMENTARY_STREAM:
+        case AudioEncapsulationMode::ELEMENTARY_STREAM:
             return AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM;
-        case media::AudioEncapsulationMode::HANDLE:
+        case AudioEncapsulationMode::HANDLE:
             return AUDIO_ENCAPSULATION_MODE_HANDLE;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationMode>
+ConversionResult<AudioEncapsulationMode>
 legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_MODE_NONE:
-            return media::AudioEncapsulationMode::NONE;
+            return AudioEncapsulationMode::NONE;
         case AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM:
-            return media::AudioEncapsulationMode::ELEMENTARY_STREAM;
+            return AudioEncapsulationMode::ELEMENTARY_STREAM;
         case AUDIO_ENCAPSULATION_MODE_HANDLE:
-            return media::AudioEncapsulationMode::HANDLE;
+            return AudioEncapsulationMode::HANDLE;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const media::AudioOffloadInfo& aidl) {
-    audio_offload_info_t legacy;
-    legacy.version = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.version));
-    legacy.size = sizeof(audio_offload_info_t);
-    audio_config_base_t config = VALUE_OR_RETURN(
-            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config, false /*isInput*/));
-    legacy.sample_rate = config.sample_rate;
-    legacy.channel_mask = config.channel_mask;
-    legacy.format = config.format;
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const AudioOffloadInfo& aidl) {
+    audio_offload_info_t legacy = AUDIO_INFO_INITIALIZER;
+    audio_config_base_t base = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, false /*isInput*/));
+    legacy.sample_rate = base.sample_rate;
+    legacy.channel_mask = base.channel_mask;
+    legacy.format = base.format;
     legacy.stream_type = VALUE_OR_RETURN(
             aidl2legacy_AudioStreamType_audio_stream_type_t(aidl.streamType));
-    legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRate));
+    legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRatePerSecond));
     legacy.duration_us = VALUE_OR_RETURN(convertIntegral<int64_t>(aidl.durationUs));
     legacy.has_video = aidl.hasVideo;
     legacy.is_streaming = aidl.isStreaming;
@@ -2285,22 +2317,20 @@
     return legacy;
 }
 
-ConversionResult<media::AudioOffloadInfo>
+ConversionResult<AudioOffloadInfo>
 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy) {
-    media::AudioOffloadInfo aidl;
+    AudioOffloadInfo aidl;
     // Version 0.1 fields.
     if (legacy.size < offsetof(audio_offload_info_t, usage) + sizeof(audio_offload_info_t::usage)) {
         return unexpected(BAD_VALUE);
     }
-    aidl.version = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.version));
-    aidl.config.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
-    aidl.config.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
-                    legacy.channel_mask, false /*isInput*/));
-    aidl.config.format = VALUE_OR_RETURN(
-            legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
+    const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+        .channel_mask = legacy.channel_mask, .format = legacy.format };
+    aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(
+                    base, false /*isInput*/));
     aidl.streamType = VALUE_OR_RETURN(
             legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream_type));
-    aidl.bitRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
+    aidl.bitRatePerSecond = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
     aidl.durationUs = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.duration_us));
     aidl.hasVideo = legacy.has_video;
     aidl.isStreaming = legacy.is_streaming;
@@ -2324,25 +2354,25 @@
 }
 
 ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl, bool isInput) {
-    audio_config_t legacy;
-    legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
-    legacy.channel_mask = VALUE_OR_RETURN(
-            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
-    legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+aidl2legacy_AudioConfig_audio_config_t(const AudioConfig& aidl, bool isInput) {
+    const audio_config_base_t legacyBase = VALUE_OR_RETURN(
+            aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, isInput));
+    audio_config_t legacy = AUDIO_CONFIG_INITIALIZER;
+    legacy.sample_rate = legacyBase.sample_rate;
+    legacy.channel_mask = legacyBase.channel_mask;
+    legacy.format = legacyBase.format;
     legacy.offload_info = VALUE_OR_RETURN(
             aidl2legacy_AudioOffloadInfo_audio_offload_info_t(aidl.offloadInfo));
     legacy.frame_count = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.frameCount));
     return legacy;
 }
 
-ConversionResult<media::AudioConfig>
+ConversionResult<AudioConfig>
 legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput) {
-    media::AudioConfig aidl;
-    aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
-    aidl.channelMask = VALUE_OR_RETURN(
-            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
-    aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
+    const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+        .channel_mask = legacy.channel_mask, .format = legacy.format };
+    AudioConfig aidl;
+    aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(base, isInput));
     aidl.offloadInfo = VALUE_OR_RETURN(
             legacy2aidl_audio_offload_info_t_AudioOffloadInfo(legacy.offload_info));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.frame_count));
@@ -2350,7 +2380,7 @@
 }
 
 ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl, bool isInput) {
+aidl2legacy_AudioConfigBase_audio_config_base_t(const AudioConfigBase& aidl, bool isInput) {
     audio_config_base_t legacy;
     legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
     legacy.channel_mask = VALUE_OR_RETURN(
@@ -2359,9 +2389,9 @@
     return legacy;
 }
 
-ConversionResult<media::AudioConfigBase>
+ConversionResult<AudioConfigBase>
 legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput) {
-    media::AudioConfigBase aidl;
+    AudioConfigBase aidl;
     aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
     aidl.channelMask = VALUE_OR_RETURN(
             legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
@@ -2424,7 +2454,7 @@
 }
 
 ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl) {
+aidl2legacy_AudioUuid_audio_uuid_t(const AudioUuid& aidl) {
     audio_uuid_t legacy;
     legacy.timeLow = VALUE_OR_RETURN(convertReinterpret<uint32_t>(aidl.timeLow));
     legacy.timeMid = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.timeMid));
@@ -2437,9 +2467,9 @@
     return legacy;
 }
 
-ConversionResult<media::AudioUuid>
+ConversionResult<AudioUuid>
 legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy) {
-    media::AudioUuid aidl;
+    AudioUuid aidl;
     aidl.timeLow = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.timeLow));
     aidl.timeMid = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeMid));
     aidl.timeHiAndVersion = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeHiAndVersion));
@@ -2480,28 +2510,28 @@
 
 ConversionResult<audio_encapsulation_metadata_type_t>
 aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
-        media::AudioEncapsulationMetadataType aidl) {
+        AudioEncapsulationMetadataType aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationMetadataType::NONE:
+        case AudioEncapsulationMetadataType::NONE:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_NONE;
-        case media::AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
+        case AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER;
-        case media::AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
+        case AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
             return AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationMetadataType>
+ConversionResult<AudioEncapsulationMetadataType>
 legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
         audio_encapsulation_metadata_type_t legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_METADATA_TYPE_NONE:
-            return media::AudioEncapsulationMetadataType::NONE;
+            return AudioEncapsulationMetadataType::NONE;
         case AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER:
-            return media::AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
+            return AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
         case AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR:
-            return media::AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
+            return AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
     }
     return unexpected(BAD_VALUE);
 }
@@ -2511,9 +2541,9 @@
     return convertBitmask<uint32_t,
             int32_t,
             audio_encapsulation_mode_t,
-            media::AudioEncapsulationMode>(
+            AudioEncapsulationMode>(
             aidl, aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t,
-            indexToEnum_index<media::AudioEncapsulationMode>,
+            indexToEnum_index<AudioEncapsulationMode>,
             enumToMask_index<uint32_t, audio_encapsulation_mode_t>);
 }
 
@@ -2521,11 +2551,11 @@
 legacy2aidl_AudioEncapsulationMode_mask(uint32_t legacy) {
     return convertBitmask<int32_t,
             uint32_t,
-            media::AudioEncapsulationMode,
+            AudioEncapsulationMode,
             audio_encapsulation_mode_t>(
             legacy, legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode,
             indexToEnum_index<audio_encapsulation_mode_t>,
-            enumToMask_index<int32_t, media::AudioEncapsulationMode>);
+            enumToMask_index<int32_t, AudioEncapsulationMode>);
 }
 
 ConversionResult<uint32_t>
@@ -2533,9 +2563,9 @@
     return convertBitmask<uint32_t,
             int32_t,
             audio_encapsulation_metadata_type_t,
-            media::AudioEncapsulationMetadataType>(
+            AudioEncapsulationMetadataType>(
             aidl, aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t,
-            indexToEnum_index<media::AudioEncapsulationMetadataType>,
+            indexToEnum_index<AudioEncapsulationMetadataType>,
             enumToMask_index<uint32_t, audio_encapsulation_metadata_type_t>);
 }
 
@@ -2543,106 +2573,78 @@
 legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy) {
     return convertBitmask<int32_t,
             uint32_t,
-            media::AudioEncapsulationMetadataType,
+            AudioEncapsulationMetadataType,
             audio_encapsulation_metadata_type_t>(
             legacy, legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType,
             indexToEnum_index<audio_encapsulation_metadata_type_t>,
-            enumToMask_index<int32_t, media::AudioEncapsulationMetadataType>);
-}
-
-ConversionResult<audio_mix_latency_class_t>
-aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
-        media::AudioMixLatencyClass aidl) {
-    switch (aidl) {
-        case media::AudioMixLatencyClass::LOW:
-            return AUDIO_LATENCY_LOW;
-        case media::AudioMixLatencyClass::NORMAL:
-            return AUDIO_LATENCY_NORMAL;
-    }
-    return unexpected(BAD_VALUE);
-}
-
-ConversionResult<media::AudioMixLatencyClass>
-legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
-        audio_mix_latency_class_t legacy) {
-    switch (legacy) {
-        case AUDIO_LATENCY_LOW:
-            return media::AudioMixLatencyClass::LOW;
-        case AUDIO_LATENCY_NORMAL:
-            return media::AudioMixLatencyClass::NORMAL;
-    }
-    return unexpected(BAD_VALUE);
+            enumToMask_index<int32_t, AudioEncapsulationMetadataType>);
 }
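// --- Editor's illustrative sketch (not part of the patch) ---
// The *_mask helpers above convert a whole bitmask by reusing the per-value enum
// converters: each set bit in the source mask is mapped to its enum value, converted,
// and re-encoded as a bit in the destination mask (the AIDL bit position being the
// enumerator's ordinal). Below is a simplified, self-contained stand-in for that
// convertBitmask idiom; the Fake* enums and helper names are hypothetical, not the
// real AidlConversionUtil templates.
#include <cassert>
#include <cstdint>
#include <optional>

// Hypothetical legacy flags with arbitrary bit values.
enum FakeLegacyFlag : uint32_t {
    FAKE_LEGACY_TUNER = 0x1u,
    FAKE_LEGACY_DVB   = 0x4u,
};

// Hypothetical AIDL-style enumerators: plain ordinals, so the AIDL mask sets bit (1 << ordinal).
enum class FakeAidlFlag : int32_t { TUNER = 0, DVB = 1 };

std::optional<FakeAidlFlag> legacyFlagToAidl(FakeLegacyFlag legacy) {
    switch (legacy) {
        case FAKE_LEGACY_TUNER: return FakeAidlFlag::TUNER;
        case FAKE_LEGACY_DVB:   return FakeAidlFlag::DVB;
    }
    return std::nullopt;
}

// Stand-in for convertBitmask: check each known legacy flag, convert the ones that are
// set, and re-encode them as bits derived from the AIDL enumerator's ordinal.
std::optional<int32_t> legacyMaskToAidlMask(uint32_t legacyMask) {
    int32_t aidlMask = 0;
    const FakeLegacyFlag kAllFlags[] = {FAKE_LEGACY_TUNER, FAKE_LEGACY_DVB};
    for (FakeLegacyFlag flag : kAllFlags) {
        if ((legacyMask & flag) != 0) {
            const auto aidl = legacyFlagToAidl(flag);
            if (!aidl.has_value()) return std::nullopt;  // stand-in for unexpected(BAD_VALUE)
            aidlMask |= 1 << static_cast<int32_t>(*aidl);
        }
    }
    return aidlMask;
}

int main() {
    // 0x1 | 0x4 in the legacy mask becomes bits 0 and 1 (0x3) in the AIDL mask.
    assert(legacyMaskToAidlMask(FAKE_LEGACY_TUNER | FAKE_LEGACY_DVB) == 0x3);
    assert(legacyMaskToAidlMask(0) == 0);
    return 0;
}
// --- end sketch ---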
 
 ConversionResult<audio_port_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl) {
+aidl2legacy_AudioDevice_audio_port_device_ext(
+        const AudioDevice& aidl, const media::AudioPortDeviceExtSys& aidlSys) {
     audio_port_device_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
-    legacy.type = VALUE_OR_RETURN(
-            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.device.type));
-    RETURN_IF_ERROR(
-            aidl2legacy_string(aidl.device.address, legacy.address, sizeof(legacy.address)));
+    legacy.hw_module = VALUE_OR_RETURN(
+            aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(aidl, &legacy.type, legacy.address));
     legacy.encapsulation_modes = VALUE_OR_RETURN(
-            aidl2legacy_AudioEncapsulationMode_mask(aidl.encapsulationModes));
+            aidl2legacy_AudioEncapsulationMode_mask(aidlSys.encapsulationModes));
     legacy.encapsulation_metadata_types = VALUE_OR_RETURN(
-            aidl2legacy_AudioEncapsulationMetadataType_mask(aidl.encapsulationMetadataTypes));
+            aidl2legacy_AudioEncapsulationMetadataType_mask(
+                    aidlSys.encapsulationMetadataTypes));
     return legacy;
 }
 
-ConversionResult<media::AudioPortDeviceExt>
-legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy) {
-    media::AudioPortDeviceExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.device.type = VALUE_OR_RETURN(
-            legacy2aidl_audio_devices_t_AudioDeviceDescription(legacy.type));
-    aidl.device.address = VALUE_OR_RETURN(
-            legacy2aidl_string(legacy.address, sizeof(legacy.address)));
-    aidl.encapsulationModes = VALUE_OR_RETURN(
+status_t legacy2aidl_audio_port_device_ext_AudioDevice(
+        const audio_port_device_ext& legacy,
+        AudioDevice* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+    aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    *aidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+    aidlDeviceExt->encapsulationModes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMode_mask(legacy.encapsulation_modes));
-    aidl.encapsulationMetadataTypes = VALUE_OR_RETURN(
+    aidlDeviceExt->encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMetadataType_mask(legacy.encapsulation_metadata_types));
-    return aidl;
+    return OK;
 }
 
 ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl) {
-    audio_port_mix_ext legacy;
-    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+        const AudioPortMixExt& aidl, const media::AudioPortMixExtSys& aidlSys) {
+    audio_port_mix_ext legacy{};
+    legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
     legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
-    legacy.latency_class = VALUE_OR_RETURN(
-            aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(aidl.latencyClass));
     return legacy;
 }
 
-ConversionResult<media::AudioPortMixExt>
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy) {
-    media::AudioPortMixExt aidl;
-    aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
-    aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
-    aidl.latencyClass = VALUE_OR_RETURN(
-            legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(legacy.latency_class));
-    return aidl;
+status_t
+legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy,
+        AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+    aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+    aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+    return OK;
 }
 
 ConversionResult<audio_port_session_ext>
-aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl) {
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl) {
     audio_port_session_ext legacy;
-    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+    legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
     return legacy;
 }
 
-ConversionResult<media::AudioPortSessionExt>
-legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy) {
-    media::AudioPortSessionExt aidl;
-    aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    return aidl;
+ConversionResult<int32_t>
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy) {
+    return legacy2aidl_audio_session_t_int32_t(legacy.session);
 }
 
 // This type is unnamed in the original definition, thus we name it here.
 using audio_port_v7_ext = decltype(audio_port_v7::ext);
 
-ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt(
-        const media::AudioPortExt& aidl, media::AudioPortType type) {
+ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt_audio_port_v7_ext(
+        const AudioPortExt& aidl, media::AudioPortType type,
+        const media::AudioPortExtSys& aidlSys) {
     audio_port_v7_ext legacy;
     switch (type) {
         case media::AudioPortType::NONE:
@@ -2651,67 +2653,82 @@
             return legacy;
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, device))));
+                    aidl2legacy_AudioDevice_audio_port_device_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, device)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, device))));
             return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
-                            VALUE_OR_RETURN(UNION_GET(aidl, mix))));
+                            VALUE_OR_RETURN(UNION_GET(aidl, mix)),
+                            VALUE_OR_RETURN(UNION_GET(aidlSys, mix))));
             return legacy;
         case media::AudioPortType::SESSION:
-            legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortSessionExt_audio_port_session_ext(
-                    VALUE_OR_RETURN(UNION_GET(aidl, session))));
+            legacy.session = VALUE_OR_RETURN(
+                    aidl2legacy_int32_t_audio_port_session_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, session))));
             return legacy;
 
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
-ConversionResult<media::AudioPortExt> legacy2aidl_AudioPortExt(
-        const audio_port_v7_ext& legacy, audio_port_type_t type) {
-    media::AudioPortExt aidl;
+status_t legacy2aidl_AudioPortExt(
+        const audio_port_v7_ext& legacy, audio_port_type_t type,
+        AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
-            UNION_SET(aidl, unspecified, false);
-            return aidl;
-        case AUDIO_PORT_TYPE_DEVICE:
-            UNION_SET(aidl, device,
-                      VALUE_OR_RETURN(
-                              legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy.device)));
-            return aidl;
-        case AUDIO_PORT_TYPE_MIX:
-            UNION_SET(aidl, mix,
-                      VALUE_OR_RETURN(legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy.mix)));
-            return aidl;
+            UNION_SET(*aidl, unspecified, false);
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
+        case AUDIO_PORT_TYPE_DEVICE: {
+            AudioDevice device;
+            media::AudioPortDeviceExtSys deviceSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_device_ext_AudioDevice(
+                            legacy.device, &device, &deviceSys));
+            UNION_SET(*aidl, device, device);
+            UNION_SET(*aidlSys, device, deviceSys);
+            return OK;
+        }
+        case AUDIO_PORT_TYPE_MIX: {
+            AudioPortMixExt mix;
+            media::AudioPortMixExtSys mixSys;
+            RETURN_STATUS_IF_ERROR(
+                    legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+                            legacy.mix, &mix, &mixSys));
+            UNION_SET(*aidl, mix, mix);
+            UNION_SET(*aidlSys, mix, mixSys);
+            return OK;
+        }
         case AUDIO_PORT_TYPE_SESSION:
-            UNION_SET(aidl, session,
-                      VALUE_OR_RETURN(legacy2aidl_audio_port_session_ext_AudioPortSessionExt(
-                              legacy.session)));
-            return aidl;
+            UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+                            legacy2aidl_audio_port_session_ext_int32_t(legacy.session)));
+            UNION_SET(*aidlSys, unspecified, false);
+            return OK;
     }
     LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
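// --- Editor's illustrative sketch (not part of the patch) ---
// legacy2aidl_AudioPortExt above now returns a status_t and fills two out-parameters,
// the stable AIDL payload (AudioPortExt) and a framework-private companion
// (media::AudioPortExtSys), instead of returning a single ConversionResult. Judging
// from the hunks above, the *Sys side keeps framework-only fields such as hwModule and
// the encapsulation masks, while the stable android.media.audio.common types carry the
// rest. The sketch below shows that calling convention only; SketchStatus, SketchHalExt,
// SketchSysExt and RETURN_IF_BAD are hypothetical stand-ins, not the real types or the
// real RETURN_STATUS_IF_ERROR macro.
#include <cstdio>

using SketchStatus = int;
constexpr SketchStatus SKETCH_OK = 0;
constexpr SketchStatus SKETCH_BAD_VALUE = -22;

// Stand-in for RETURN_STATUS_IF_ERROR: bail out of the caller on the first failure.
#define RETURN_IF_BAD(expr)                  \
    do {                                     \
        const SketchStatus _st = (expr);     \
        if (_st != SKETCH_OK) return _st;    \
    } while (0)

struct SketchHalExt { int handle = 0; };    // stand-in for the stable AIDL half
struct SketchSysExt { int hwModule = 0; };  // stand-in for the framework-only *Sys half

// Converts one legacy struct into both AIDL halves, mirroring the split signature.
SketchStatus convertPortExt(int legacyHandle, int legacyHwModule,
                            SketchHalExt* hal, SketchSysExt* sys) {
    if (hal == nullptr || sys == nullptr) return SKETCH_BAD_VALUE;
    hal->handle = legacyHandle;
    sys->hwModule = legacyHwModule;
    return SKETCH_OK;
}

SketchStatus convertAndPrint() {
    SketchHalExt hal;
    SketchSysExt sys;
    RETURN_IF_BAD(convertPortExt(/*legacyHandle=*/7, /*legacyHwModule=*/1, &hal, &sys));
    std::printf("handle=%d hwModule=%d\n", hal.handle, sys.hwModule);
    return SKETCH_OK;
}

int main() { return convertAndPrint() == SKETCH_OK ? 0 : 1; }
// --- end sketch ---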
 
 ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl, bool isInput) {
+aidl2legacy_AudioProfile_audio_profile(const AudioProfile& aidl, bool isInput) {
     audio_profile legacy;
     legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
 
-    if (aidl.samplingRates.size() > std::size(legacy.sample_rates)) {
+    if (aidl.sampleRates.size() > std::size(legacy.sample_rates)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
-            convertRange(aidl.samplingRates.begin(), aidl.samplingRates.end(), legacy.sample_rates,
+            convertRange(aidl.sampleRates.begin(), aidl.sampleRates.end(), legacy.sample_rates,
                          convertIntegral<int32_t, unsigned int>));
-    legacy.num_sample_rates = aidl.samplingRates.size();
+    legacy.num_sample_rates = aidl.sampleRates.size();
 
     if (aidl.channelMasks.size() > std::size(legacy.channel_masks)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
             convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
-                    [isInput](const media::AudioChannelLayout& l) {
+                    [isInput](const AudioChannelLayout& l) {
                         return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
                     }));
     legacy.num_channel_masks = aidl.channelMasks.size();
@@ -2721,9 +2738,9 @@
     return legacy;
 }
 
-ConversionResult<media::AudioProfile>
+ConversionResult<AudioProfile>
 legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput) {
-    media::AudioProfile aidl;
+    AudioProfile aidl;
     aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
 
     if (legacy.num_sample_rates > std::size(legacy.sample_rates)) {
@@ -2731,7 +2748,7 @@
     }
     RETURN_IF_ERROR(
             convertRange(legacy.sample_rates, legacy.sample_rates + legacy.num_sample_rates,
-                         std::back_inserter(aidl.samplingRates),
+                         std::back_inserter(aidl.sampleRates),
                          convertIntegral<unsigned int, int32_t>));
 
     if (legacy.num_channel_masks > std::size(legacy.channel_masks)) {
@@ -2751,11 +2768,11 @@
 }
 
 ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl) {
+aidl2legacy_AudioGain_audio_gain(const AudioGain& aidl, bool isInput) {
     audio_gain legacy;
     legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
     legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
-                    aidl.channelMask, aidl.isInput));
+                    aidl.channelMask, isInput));
     legacy.min_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.minValue));
     legacy.max_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.maxValue));
     legacy.default_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.defaultValue));
@@ -2765,11 +2782,10 @@
     return legacy;
 }
 
-ConversionResult<media::AudioGain>
+ConversionResult<AudioGain>
 legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput) {
-    media::AudioGain aidl;
+    AudioGain aidl;
     aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
-    aidl.isInput = isInput;
     aidl.channelMask = VALUE_OR_RETURN(
             legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
     aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
@@ -2784,50 +2800,59 @@
 ConversionResult<audio_port_v7>
 aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl) {
     audio_port_v7 legacy;
-    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
-    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
-    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
-    RETURN_IF_ERROR(aidl2legacy_string(aidl.name, legacy.name, sizeof(legacy.name)));
+    legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+    legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
+    legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
+    RETURN_IF_ERROR(aidl2legacy_string(aidl.hal.name, legacy.name, sizeof(legacy.name)));
 
-    if (aidl.profiles.size() > std::size(legacy.audio_profiles)) {
+    if (aidl.hal.profiles.size() > std::size(legacy.audio_profiles)) {
         return unexpected(BAD_VALUE);
     }
-    const bool isInput = VALUE_OR_RETURN(direction(aidl.role, aidl.type)) == Direction::INPUT;
-    RETURN_IF_ERROR(convertRange(aidl.profiles.begin(), aidl.profiles.end(), legacy.audio_profiles,
-                                 [isInput](const media::AudioProfile& p) {
-                                     return aidl2legacy_AudioProfile_audio_profile(p, isInput);
-                                 }));
-    legacy.num_audio_profiles = aidl.profiles.size();
+    const bool isInput =
+            VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
+    RETURN_IF_ERROR(convertRange(
+                    aidl.hal.profiles.begin(), aidl.hal.profiles.end(), legacy.audio_profiles,
+                    [isInput](const AudioProfile& p) {
+                        return aidl2legacy_AudioProfile_audio_profile(p, isInput);
+                    }));
+    legacy.num_audio_profiles = aidl.hal.profiles.size();
 
-    if (aidl.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
+    if (aidl.hal.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
         return unexpected(BAD_VALUE);
     }
     RETURN_IF_ERROR(
-            convertRange(aidl.extraAudioDescriptors.begin(), aidl.extraAudioDescriptors.end(),
-                         legacy.extra_audio_descriptors,
-                         aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
-    legacy.num_extra_audio_descriptors = aidl.extraAudioDescriptors.size();
+            convertRange(
+                    aidl.hal.extraAudioDescriptors.begin(), aidl.hal.extraAudioDescriptors.end(),
+                    legacy.extra_audio_descriptors,
+                    aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
+    legacy.num_extra_audio_descriptors = aidl.hal.extraAudioDescriptors.size();
 
-    if (aidl.gains.size() > std::size(legacy.gains)) {
+    if (aidl.hal.gains.size() > std::size(legacy.gains)) {
         return unexpected(BAD_VALUE);
     }
-    RETURN_IF_ERROR(convertRange(aidl.gains.begin(), aidl.gains.end(), legacy.gains,
-                                 aidl2legacy_AudioGain_audio_gain));
-    legacy.num_gains = aidl.gains.size();
+    RETURN_IF_ERROR(convertRange(aidl.hal.gains.begin(), aidl.hal.gains.end(), legacy.gains,
+                                 [isInput](const AudioGain& g) {
+                                     return aidl2legacy_AudioGain_audio_gain(g, isInput);
+                                 }));
+    legacy.num_gains = aidl.hal.gains.size();
 
+    media::AudioPortConfig aidlPortConfig;
+    aidlPortConfig.hal = aidl.hal.activeConfig;
+    aidlPortConfig.sys = aidl.sys.activeConfig;
     legacy.active_config = VALUE_OR_RETURN(
-            aidl2legacy_AudioPortConfig_audio_port_config(aidl.activeConfig));
-    legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortExt(aidl.ext, aidl.type));
+            aidl2legacy_AudioPortConfig_audio_port_config(aidlPortConfig));
+    legacy.ext = VALUE_OR_RETURN(
+            aidl2legacy_AudioPortExt_audio_port_v7_ext(aidl.hal.ext, aidl.sys.type, aidl.sys.ext));
     return legacy;
 }
 
 ConversionResult<media::AudioPort>
 legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy) {
     media::AudioPort aidl;
-    aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
-    aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
-    aidl.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
+    aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+    aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+    aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+    aidl.hal.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
 
     if (legacy.num_audio_profiles > std::size(legacy.audio_profiles)) {
         return unexpected(BAD_VALUE);
@@ -2835,7 +2860,7 @@
     const bool isInput = VALUE_OR_RETURN(direction(legacy.role, legacy.type)) == Direction::INPUT;
     RETURN_IF_ERROR(
             convertRange(legacy.audio_profiles, legacy.audio_profiles + legacy.num_audio_profiles,
-                         std::back_inserter(aidl.profiles),
+                         std::back_inserter(aidl.hal.profiles),
                          [isInput](const audio_profile& p) {
                              return legacy2aidl_audio_profile_AudioProfile(p, isInput);
                          }));
@@ -2843,10 +2868,11 @@
     if (legacy.num_extra_audio_descriptors > std::size(legacy.extra_audio_descriptors)) {
         return unexpected(BAD_VALUE);
     }
+    aidl.sys.profiles.resize(legacy.num_audio_profiles);
     RETURN_IF_ERROR(
             convertRange(legacy.extra_audio_descriptors,
                     legacy.extra_audio_descriptors + legacy.num_extra_audio_descriptors,
-                    std::back_inserter(aidl.extraAudioDescriptors),
+                    std::back_inserter(aidl.hal.extraAudioDescriptors),
                     legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor));
 
     if (legacy.num_gains > std::size(legacy.gains)) {
@@ -2854,55 +2880,59 @@
     }
     RETURN_IF_ERROR(
             convertRange(legacy.gains, legacy.gains + legacy.num_gains,
-                         std::back_inserter(aidl.gains),
+                         std::back_inserter(aidl.hal.gains),
                          [isInput](const audio_gain& g) {
                              return legacy2aidl_audio_gain_AudioGain(g, isInput);
                          }));
+    aidl.sys.gains.resize(legacy.num_gains);
 
-    aidl.activeConfig = VALUE_OR_RETURN(
+    media::AudioPortConfig aidlPortConfig = VALUE_OR_RETURN(
             legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
-    aidl.ext = VALUE_OR_RETURN(legacy2aidl_AudioPortExt(legacy.ext, legacy.type));
+    aidl.hal.activeConfig = aidlPortConfig.hal;
+    aidl.sys.activeConfig = aidlPortConfig.sys;
+    RETURN_IF_ERROR(
+            legacy2aidl_AudioPortExt(legacy.ext, legacy.type, &aidl.hal.ext, &aidl.sys.ext));
     return aidl;
 }
 
 ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl) {
+aidl2legacy_AudioMode_audio_mode_t(AudioMode aidl) {
     switch (aidl) {
-        case media::AudioMode::INVALID:
+        case AudioMode::SYS_RESERVED_INVALID:
             return AUDIO_MODE_INVALID;
-        case media::AudioMode::CURRENT:
+        case AudioMode::SYS_RESERVED_CURRENT:
             return AUDIO_MODE_CURRENT;
-        case media::AudioMode::NORMAL:
+        case AudioMode::NORMAL:
             return AUDIO_MODE_NORMAL;
-        case media::AudioMode::RINGTONE:
+        case AudioMode::RINGTONE:
             return AUDIO_MODE_RINGTONE;
-        case media::AudioMode::IN_CALL:
+        case AudioMode::IN_CALL:
             return AUDIO_MODE_IN_CALL;
-        case media::AudioMode::IN_COMMUNICATION:
+        case AudioMode::IN_COMMUNICATION:
             return AUDIO_MODE_IN_COMMUNICATION;
-        case media::AudioMode::CALL_SCREEN:
+        case AudioMode::CALL_SCREEN:
             return AUDIO_MODE_CALL_SCREEN;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioMode>
+ConversionResult<AudioMode>
 legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy) {
     switch (legacy) {
         case AUDIO_MODE_INVALID:
-            return media::AudioMode::INVALID;
+            return AudioMode::SYS_RESERVED_INVALID;
         case AUDIO_MODE_CURRENT:
-            return media::AudioMode::CURRENT;
+            return AudioMode::SYS_RESERVED_CURRENT;
         case AUDIO_MODE_NORMAL:
-            return media::AudioMode::NORMAL;
+            return AudioMode::NORMAL;
         case AUDIO_MODE_RINGTONE:
-            return media::AudioMode::RINGTONE;
+            return AudioMode::RINGTONE;
         case AUDIO_MODE_IN_CALL:
-            return media::AudioMode::IN_CALL;
+            return AudioMode::IN_CALL;
         case AUDIO_MODE_IN_COMMUNICATION:
-            return media::AudioMode::IN_COMMUNICATION;
+            return AudioMode::IN_COMMUNICATION;
         case AUDIO_MODE_CALL_SCREEN:
-            return media::AudioMode::CALL_SCREEN;
+            return AudioMode::CALL_SCREEN;
         case AUDIO_MODE_CNT:
             break;
     }
@@ -3052,30 +3082,30 @@
 }
 
 ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::AudioStandard aidl) {
+aidl2legacy_AudioStandard_audio_standard_t(AudioStandard aidl) {
     switch (aidl) {
-        case media::AudioStandard::NONE:
+        case AudioStandard::NONE:
             return AUDIO_STANDARD_NONE;
-        case media::AudioStandard::EDID:
+        case AudioStandard::EDID:
             return AUDIO_STANDARD_EDID;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioStandard>
+ConversionResult<AudioStandard>
 legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy) {
     switch (legacy) {
         case AUDIO_STANDARD_NONE:
-            return media::AudioStandard::NONE;
+            return AudioStandard::NONE;
         case AUDIO_STANDARD_EDID:
-            return media::AudioStandard::EDID;
+            return AudioStandard::EDID;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_extra_audio_descriptor>
 aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
-        const media::ExtraAudioDescriptor& aidl) {
+        const ExtraAudioDescriptor& aidl) {
     audio_extra_audio_descriptor legacy;
     legacy.standard = VALUE_OR_RETURN(aidl2legacy_AudioStandard_audio_standard_t(aidl.standard));
     if (aidl.audioDescriptor.size() > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
@@ -3090,10 +3120,10 @@
     return legacy;
 }
 
-ConversionResult<media::ExtraAudioDescriptor>
+ConversionResult<ExtraAudioDescriptor>
 legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
         const audio_extra_audio_descriptor& legacy) {
-    media::ExtraAudioDescriptor aidl;
+    ExtraAudioDescriptor aidl;
     aidl.standard = VALUE_OR_RETURN(legacy2aidl_audio_standard_t_AudioStandard(legacy.standard));
     if (legacy.descriptor_length > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
         return unexpected(BAD_VALUE);
@@ -3109,24 +3139,24 @@
 
 ConversionResult<audio_encapsulation_type_t>
 aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
-        const media::AudioEncapsulationType& aidl) {
+        const AudioEncapsulationType& aidl) {
     switch (aidl) {
-        case media::AudioEncapsulationType::NONE:
+        case AudioEncapsulationType::NONE:
             return AUDIO_ENCAPSULATION_TYPE_NONE;
-        case media::AudioEncapsulationType::IEC61937:
+        case AudioEncapsulationType::IEC61937:
             return AUDIO_ENCAPSULATION_TYPE_IEC61937;
     }
     return unexpected(BAD_VALUE);
 }
 
-ConversionResult<media::AudioEncapsulationType>
+ConversionResult<AudioEncapsulationType>
 legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
         const audio_encapsulation_type_t & legacy) {
     switch (legacy) {
         case AUDIO_ENCAPSULATION_TYPE_NONE:
-            return media::AudioEncapsulationType::NONE;
+            return AudioEncapsulationType::NONE;
         case AUDIO_ENCAPSULATION_TYPE_IEC61937:
-            return media::AudioEncapsulationType::IEC61937;
+            return AudioEncapsulationType::IEC61937;
     }
     return unexpected(BAD_VALUE);
 }
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 9b9ceea..f6446bb 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -25,11 +25,13 @@
     static_libs: [
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
+        "spatializer-aidl-cpp",
         "av-types-aidl-cpp",
     ],
     export_static_lib_headers: [
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
+        "spatializer-aidl-cpp",
         "av-types-aidl-cpp",
     ],
     target: {
@@ -49,6 +51,7 @@
         "PolicyAidlConversion.cpp"
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
@@ -69,6 +72,7 @@
     include_dirs: ["system/media/audio_utils/include"],
     export_include_dirs: ["include"],
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
@@ -109,9 +113,11 @@
         "TrackPlayerBase.cpp",
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
+        "spatializer-aidl-cpp",
         "audiopolicy-types-aidl-cpp",
         "av-types-aidl-cpp",
         "capture_state_listener-aidl-cpp",
@@ -131,12 +137,12 @@
         "libprocessgroup",
         "libshmemcompat",
         "libutils",
-        "libvibrator",
         "framework-permission-aidl-cpp",
     ],
     export_shared_lib_headers: [
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
+        "spatializer-aidl-cpp",
         "framework-permission-aidl-cpp",
         "libbinder",
     ],
@@ -224,6 +230,7 @@
         "libaudioclient_aidl_conversion_util",
     ],
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libbase",
         "libbinder",
@@ -235,6 +242,7 @@
         "framework-permission-aidl-cpp",
     ],
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libbase",
         "shared-file-region-aidl-cpp",
@@ -303,60 +311,31 @@
     local_include_dir: "aidl",
     srcs: [
         "aidl/android/media/AudioAttributesInternal.aidl",
-        "aidl/android/media/AudioChannelLayout.aidl",
         "aidl/android/media/AudioClient.aidl",
-        "aidl/android/media/AudioConfig.aidl",
-        "aidl/android/media/AudioConfigBase.aidl",
-        "aidl/android/media/AudioContentType.aidl",
-        "aidl/android/media/AudioDevice.aidl",
-        "aidl/android/media/AudioDeviceDescription.aidl",
-        "aidl/android/media/AudioDeviceType.aidl",
         "aidl/android/media/AudioDualMonoMode.aidl",
-        "aidl/android/media/AudioEncapsulationMode.aidl",
-        "aidl/android/media/AudioEncapsulationMetadataType.aidl",
-        "aidl/android/media/AudioEncapsulationType.aidl",
-        "aidl/android/media/AudioFormatDescription.aidl",
-        "aidl/android/media/AudioFormatType.aidl",
         "aidl/android/media/AudioFlag.aidl",
-        "aidl/android/media/AudioGain.aidl",
-        "aidl/android/media/AudioGainConfig.aidl",
-        "aidl/android/media/AudioGainMode.aidl",
+        "aidl/android/media/AudioGainSys.aidl",
         "aidl/android/media/AudioInputFlags.aidl",
         "aidl/android/media/AudioIoConfigEvent.aidl",
         "aidl/android/media/AudioIoDescriptor.aidl",
         "aidl/android/media/AudioIoFlags.aidl",
-        "aidl/android/media/AudioMixLatencyClass.aidl",
-        "aidl/android/media/AudioMode.aidl",
-        "aidl/android/media/AudioOffloadInfo.aidl",
         "aidl/android/media/AudioOutputFlags.aidl",
         "aidl/android/media/AudioPatch.aidl",
         "aidl/android/media/AudioPlaybackRate.aidl",
         "aidl/android/media/AudioPort.aidl",
+        "aidl/android/media/AudioPortSys.aidl",
         "aidl/android/media/AudioPortConfig.aidl",
-        "aidl/android/media/AudioPortConfigType.aidl",
-        "aidl/android/media/AudioPortConfigDeviceExt.aidl",
-        "aidl/android/media/AudioPortConfigExt.aidl",
-        "aidl/android/media/AudioPortConfigMixExt.aidl",
-        "aidl/android/media/AudioPortConfigMixExtUseCase.aidl",
-        "aidl/android/media/AudioPortConfigSessionExt.aidl",
-        "aidl/android/media/AudioPortDeviceExt.aidl",
-        "aidl/android/media/AudioPortExt.aidl",
-        "aidl/android/media/AudioPortMixExt.aidl",
+        "aidl/android/media/AudioPortConfigSys.aidl",
+        "aidl/android/media/AudioPortDeviceExtSys.aidl",
+        "aidl/android/media/AudioPortExtSys.aidl",
+        "aidl/android/media/AudioPortMixExtSys.aidl",
         "aidl/android/media/AudioPortRole.aidl",
-        "aidl/android/media/AudioPortSessionExt.aidl",
         "aidl/android/media/AudioPortType.aidl",
-        "aidl/android/media/AudioProfile.aidl",
-        "aidl/android/media/AudioSourceType.aidl",
-        "aidl/android/media/AudioStandard.aidl",
-        "aidl/android/media/AudioStreamType.aidl",
+        "aidl/android/media/AudioProfileSys.aidl",
         "aidl/android/media/AudioTimestampInternal.aidl",
         "aidl/android/media/AudioUniqueIdUse.aidl",
-        "aidl/android/media/AudioUsage.aidl",
-        "aidl/android/media/AudioUuid.aidl",
         "aidl/android/media/AudioVibratorInfo.aidl",
         "aidl/android/media/EffectDescriptor.aidl",
-        "aidl/android/media/ExtraAudioDescriptor.aidl",
-        "aidl/android/media/PcmType.aidl",
         "aidl/android/media/TrackSecondaryOutputInfo.aidl",
     ],
     imports: [
@@ -398,11 +377,13 @@
         "aidl/android/media/AudioProductStrategy.aidl",
         "aidl/android/media/AudioVolumeGroup.aidl",
         "aidl/android/media/DeviceRole.aidl",
-        "aidl/android/media/HeadTrackingMode.aidl",
         "aidl/android/media/SoundTriggerSession.aidl",
         "aidl/android/media/SpatializationLevel.aidl",
+        "aidl/android/media/SpatializationMode.aidl",
+        "aidl/android/media/SpatializerHeadTrackingMode.aidl",
     ],
     imports: [
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
     ],
     backend: {
@@ -445,6 +426,7 @@
         "aidl/android/media/IAudioTrackCallback.aidl",
     ],
     imports: [
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
         "av-types-aidl",
         "effect-aidl",
@@ -476,18 +458,47 @@
         "aidl/android/media/GetInputForAttrResponse.aidl",
         "aidl/android/media/GetOutputForAttrResponse.aidl",
         "aidl/android/media/GetSpatializerResponse.aidl",
-        "aidl/android/media/Int.aidl",
         "aidl/android/media/RecordClientInfo.aidl",
         "aidl/android/media/IAudioPolicyService.aidl",
         "aidl/android/media/IAudioPolicyServiceClient.aidl",
-        "aidl/android/media/INativeSpatializerCallback.aidl",
-        "aidl/android/media/ISpatializer.aidl",
     ],
     imports: [
+        "android.media.audio.common.types",
         "audioclient-types-aidl",
         "audiopolicy-types-aidl",
         "capture_state_listener-aidl",
         "framework-permission-aidl",
+        "spatializer-aidl",
+    ],
+
+    double_loadable: true,
+    backend: {
+        cpp: {
+            min_sdk_version: "29",
+            apex_available: [
+                "//apex_available:platform",
+                "com.android.media",
+            ],
+        },
+        java: {
+            sdk_version: "module_current",
+        },
+    },
+}
+
+aidl_interface {
+    name: "spatializer-aidl",
+    unstable: true,
+    local_include_dir: "aidl",
+    host_supported: true,
+    vendor_available: true,
+    srcs: [
+        "aidl/android/media/INativeSpatializerCallback.aidl",
+        "aidl/android/media/ISpatializer.aidl",
+        "aidl/android/media/ISpatializerHeadTrackingCallback.aidl",
+    ],
+    imports: [
+        "audiopolicy-types-aidl",
     ],
 
     double_loadable: true,
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/AudioAttributes.cpp
index 83bf5a7..260c06c 100644
--- a/media/libaudioclient/AudioAttributes.cpp
+++ b/media/libaudioclient/AudioAttributes.cpp
@@ -24,9 +24,6 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
 status_t AudioAttributes::readFromParcel(const Parcel* parcel) {
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 9091599..62f863d 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -32,16 +32,12 @@
 #include <private/media/AudioEffectShared.h>
 #include <utils/Log.h>
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-        auto _tmp = (x);             \
-        if (_tmp != OK) return _tmp; \
-    }
-
 namespace android {
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
 using media::IAudioPolicyService;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioUuid;
 
 namespace {
 
@@ -571,7 +567,7 @@
 
     int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_session_t_int32_t(audioSession));
-    media::Int countAidl;
+    media::audio::common::Int countAidl;
     countAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*count));
     std::vector<media::EffectDescriptor> retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -619,12 +615,12 @@
         uuid = *EFFECT_UUID_NULL;
     }
 
-    media::AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
-    media::AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
+    AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
+    AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
     std::string opPackageNameAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_String16_string(opPackageName));
-    media::AudioSourceType sourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(source));
+    AudioSource sourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(source));
     int32_t retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->addSourceDefaultEffect(typeAidl, opPackageNameAidl, uuidAidl, priority, sourceAidl,
@@ -662,11 +658,11 @@
         uuid = *EFFECT_UUID_NULL;
     }
 
-    media::AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
-    media::AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
+    AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
+    AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
     std::string opPackageNameAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_String16_string(opPackageName));
-    media::AudioUsage usageAidl = VALUE_OR_RETURN_STATUS(
+    media::audio::common::AudioUsage usageAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_usage_t_AudioUsage(usage));
     int32_t retAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
index f98027a..ecd423a 100644
--- a/media/libaudioclient/AudioProductStrategy.cpp
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -21,9 +21,6 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
 status_t AudioProductStrategy::readFromParcel(const Parcel* parcel) {
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 65bf97d..edad6d1 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -40,19 +40,25 @@
        if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-        auto _tmp = (x);             \
-        if (_tmp != OK) return _tmp; \
-    }
-
 // ----------------------------------------------------------------------------
 
 namespace android {
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
+using content::AttributionSourceState;
 using media::IAudioPolicyService;
-using android::content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::Int;
 
 // client singleton for AudioFlinger binder interface
 Mutex AudioSystem::gLock;
@@ -846,10 +852,8 @@
         name = device_name;
     }
 
-    media::AudioDevice deviceAidl;
-    deviceAidl.type = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
-    deviceAidl.address = address;
+    AudioDevice deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(device, address));
 
     return statusTFromBinderStatus(
             aps->setDeviceConnectionState(
@@ -867,10 +871,8 @@
     if (aps == 0) return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
 
     auto result = [&]() -> ConversionResult<audio_policy_dev_state_t> {
-        media::AudioDevice deviceAidl;
-        deviceAidl.type = VALUE_OR_RETURN(
-                legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
-        deviceAidl.address = device_address;
+        AudioDevice deviceAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_device_AudioDevice(device, device_address));
 
         media::AudioPolicyDeviceState result;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -898,10 +900,8 @@
         name = device_name;
     }
 
-    media::AudioDevice deviceAidl;
-    deviceAidl.type = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
-    deviceAidl.address = address;
+    AudioDevice deviceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_device_AudioDevice(device, address));
 
     return statusTFromBinderStatus(
             aps->handleDeviceConfigChange(deviceAidl, name, VALUE_OR_RETURN_STATUS(
@@ -954,7 +954,7 @@
     if (aps == 0) return AUDIO_IO_HANDLE_NONE;
 
     auto result = [&]() -> ConversionResult<audio_io_handle_t> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t outputAidl;
         RETURN_IF_ERROR(
@@ -1002,7 +1002,7 @@
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
     int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
-    media::AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
+    AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
     int32_t flagsAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
@@ -1096,7 +1096,7 @@
     int32_t inputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
     int32_t riidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_unique_id_t_int32_t(riid));
     int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
-    media::AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
+    AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_config_base_t_AudioConfigBase(*config, true /*isInput*/));
     int32_t flagsAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
     int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
@@ -1153,7 +1153,7 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t indexMinAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(indexMin));
     int32_t indexMaxAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(indexMax));
@@ -1167,10 +1167,10 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
-    media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     return statusTFromBinderStatus(
             aps->setStreamVolumeIndex(streamAidl, deviceAidl, indexAidl));
@@ -1182,9 +1182,9 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
-    media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     int32_t indexAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1204,7 +1204,7 @@
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
     int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
-    media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     return statusTFromBinderStatus(
             aps->setVolumeIndexForAttributes(attrAidl, deviceAidl, indexAidl));
@@ -1218,7 +1218,7 @@
 
     media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
-    media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+    AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
     int32_t indexAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1258,7 +1258,7 @@
     if (aps == 0) return PRODUCT_STRATEGY_NONE;
 
     auto result = [&]() -> ConversionResult<product_strategy_t> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t resultAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -1273,9 +1273,9 @@
     if (aps == 0) return DeviceTypeSet{};
 
     auto result = [&]() -> ConversionResult<DeviceTypeSet> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
-        std::vector<media::AudioDeviceDescription> resultAidl;
+        std::vector<AudioDeviceDescription> resultAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
                 aps->getDevicesForStream(streamAidl, &resultAidl)));
         return convertContainer<DeviceTypeSet>(resultAidl,
@@ -1294,7 +1294,7 @@
 
     media::AudioAttributesEx aaAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioAttributes_AudioAttributesEx(aa));
-    std::vector<media::AudioDevice> retAidl;
+    std::vector<AudioDevice> retAidl;
     RETURN_STATUS_IF_ERROR(
             statusTFromBinderStatus(aps->getDevicesForAttributes(aaAidl, &retAidl)));
     *devices = VALUE_OR_RETURN_STATUS(
@@ -1372,7 +1372,7 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t inPastMsAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(inPastMs));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1386,7 +1386,7 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t inPastMsAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(inPastMs));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1399,8 +1399,8 @@
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
-    media::AudioSourceType streamAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(stream));
+    AudioSource streamAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(stream));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->isSourceActive(streamAidl, state)));
     return OK;
@@ -1444,9 +1444,9 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == nullptr) return PERMISSION_DENIED;
 
-    std::vector<media::AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioUsage>>(systemUsages,
-                                                             legacy2aidl_audio_usage_t_AudioUsage));
+    std::vector<AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioUsage>>(systemUsages,
+                                                      legacy2aidl_audio_usage_t_AudioUsage));
     return statusTFromBinderStatus(aps->setSupportedSystemUsages(systemUsagesAidl));
 }
 
@@ -1466,7 +1466,7 @@
     if (aps == 0) return AUDIO_OFFLOAD_NOT_SUPPORTED;
 
     auto result = [&]() -> ConversionResult<audio_offload_mode_t> {
-        media::AudioOffloadInfo infoAidl = VALUE_OR_RETURN(
+        AudioOffloadInfo infoAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(info));
         media::AudioOffloadMode retAidl;
         RETURN_IF_ERROR(
@@ -1494,7 +1494,7 @@
             legacy2aidl_audio_port_role_t_AudioPortRole(role));
     media::AudioPortType typeAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_port_type_t_AudioPortType(type));
-    media::Int numPortsAidl;
+    Int numPortsAidl;
     numPortsAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_ports));
     std::vector<media::AudioPort> portsAidl;
     int32_t generationAidl;
@@ -1561,7 +1561,7 @@
     if (aps == 0) return PERMISSION_DENIED;
 
 
-    media::Int numPatchesAidl;
+    Int numPatchesAidl;
     numPatchesAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
     std::vector<media::AudioPatch> patchesAidl;
     int32_t generationAidl;
@@ -1718,7 +1718,7 @@
     if (aps == 0) return AUDIO_MODE_INVALID;
 
     auto result = [&]() -> ConversionResult<audio_mode_t> {
-        media::AudioMode retAidl;
+        media::audio::common::AudioMode retAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(aps->getPhoneState(&retAidl)));
         return aidl2legacy_AudioMode_audio_mode_t(retAidl);
     }();
@@ -1743,8 +1743,8 @@
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
                                                               legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(aps->setUidDeviceAffinities(uidAidl, devicesAidl));
 }
@@ -1763,9 +1763,9 @@
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t userIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(userId));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setUserIdDeviceAffinities(userIdAidl, devicesAidl));
 }
@@ -1838,10 +1838,10 @@
     if (aps == 0) return NAN;
 
     auto result = [&]() -> ConversionResult<float> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+        AudioStreamType streamAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t indexAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(index));
-        media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN(
+        AudioDeviceDescription deviceAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
         float retAidl;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -1874,10 +1874,10 @@
 
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    media::Int numSurroundFormatsAidl;
+    Int numSurroundFormatsAidl;
     numSurroundFormatsAidl.value =
             VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
-    std::vector<media::AudioFormatDescription> surroundFormatsAidl;
+    std::vector<AudioFormatDescription> surroundFormatsAidl;
     std::vector<bool> surroundFormatsEnabledAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl,
@@ -1901,10 +1901,10 @@
 
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    media::Int numSurroundFormatsAidl;
+    Int numSurroundFormatsAidl;
     numSurroundFormatsAidl.value =
             VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
-    std::vector<media::AudioFormatDescription> surroundFormatsAidl;
+    std::vector<AudioFormatDescription> surroundFormatsAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getReportedSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl)));
 
@@ -1920,7 +1920,7 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    media::AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
+    AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_format_t_AudioFormatDescription(audioFormat));
     return statusTFromBinderStatus(
             aps->setSurroundFormatEnabled(audioFormatAidl, enabled));
@@ -1982,7 +1982,7 @@
             & aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
-    std::vector<media::AudioFormatDescription> formatsAidl;
+    std::vector<AudioFormatDescription> formatsAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getHwOffloadEncodingFormatsSupportedForA2DP(&formatsAidl)));
     *formats = VALUE_OR_RETURN_STATUS(
@@ -2127,9 +2127,9 @@
 
     int32_t strategyAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_product_strategy_t_int32_t(strategy));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setDevicesRoleForStrategy(strategyAidl, roleAidl, devicesAidl));
 }
@@ -2155,7 +2155,7 @@
     }
     int32_t strategyAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_product_strategy_t_int32_t(strategy));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl;
+    std::vector<AudioDevice> devicesAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getDevicesForRoleAndStrategy(strategyAidl, roleAidl, &devicesAidl)));
     devices = VALUE_OR_RETURN_STATUS(
@@ -2172,12 +2172,12 @@
         return PERMISSION_DENIED;
     }
 
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->setDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2189,12 +2189,12 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->addDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2205,12 +2205,12 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return statusTFromBinderStatus(
             aps->removeDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
 }
@@ -2221,8 +2221,8 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
     return statusTFromBinderStatus(
             aps->clearDevicesRoleForCapturePreset(audioSourceAidl, roleAidl));
@@ -2235,10 +2235,10 @@
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
-    media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+    AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_source_t_AudioSource(audioSource));
     media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
-    std::vector<media::AudioDevice> devicesAidl;
+    std::vector<AudioDevice> devicesAidl;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->getDevicesForRoleAndCapturePreset(audioSourceAidl, roleAidl, &devicesAidl)));
     devices = VALUE_OR_RETURN_STATUS(
@@ -2277,11 +2277,11 @@
 
     std::optional<media::AudioAttributesInternal> attrAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
-    std::optional<media::AudioConfig> configAidl = VALUE_OR_RETURN_STATUS(
+    std::optional<AudioConfig> configAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_config_t_AudioConfig(configuration, false /*isInput*/));
-    std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                   legacy2aidl_AudioDeviceTypeAddress));
+    std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             aps->canBeSpatialized(attrAidl, configAidl, devicesAidl, canBeSpatialized)));
     return OK;
@@ -2352,6 +2352,31 @@
     return af->setVibratorInfos(vibratorInfos);
 }
 
+status_t AudioSystem::getMmapPolicyInfo(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getMmapPolicyInfos(policyType, policyInfos);
+}
+
+int32_t AudioSystem::getAAudioMixerBurstCount() {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getAAudioMixerBurstCount();
+}
+
+int32_t AudioSystem::getAAudioHardwareBurstMinUsec() {
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == nullptr) {
+        return PERMISSION_DENIED;
+    }
+    return af->getAAudioHardwareBurstMinUsec();
+}
+
 // ---------------------------------------------------------------------------
 
 int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
@@ -2463,12 +2488,12 @@
 Status AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate(
         int32_t event,
         const media::RecordClientInfo& clientInfo,
-        const media::AudioConfigBase& clientConfig,
+        const AudioConfigBase& clientConfig,
         const std::vector<media::EffectDescriptor>& clientEffects,
-        const media::AudioConfigBase& deviceConfig,
+        const AudioConfigBase& deviceConfig,
         const std::vector<media::EffectDescriptor>& effects,
         int32_t patchHandle,
-        media::AudioSourceType source) {
+        AudioSource source) {
     record_config_callback cb = NULL;
     {
         Mutex::Autolock _l(AudioSystem::gLock);
@@ -2494,7 +2519,7 @@
         audio_patch_handle_t patchHandleLegacy = VALUE_OR_RETURN_BINDER_STATUS(
                 aidl2legacy_int32_t_audio_patch_handle_t(patchHandle));
         audio_source_t sourceLegacy = VALUE_OR_RETURN_BINDER_STATUS(
-                aidl2legacy_AudioSourceType_audio_source_t(source));
+                aidl2legacy_AudioSource_audio_source_t(source));
         cb(eventLegacy, &clientInfoLegacy, &clientConfigLegacy, clientEffectsLegacy,
            &deviceConfigLegacy, effectsLegacy, patchHandleLegacy, sourceLegacy);
     }
@@ -2538,7 +2563,7 @@
     legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
     legacy.uid = VALUE_OR_RETURN(aidl2legacy_int32_t_uid_t(aidl.uid));
     legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
-    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(aidl.source));
+    legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
     legacy.port_id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
     legacy.silenced = aidl.silenced;
     return legacy;
@@ -2550,7 +2575,7 @@
     aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(legacy.riid));
     aidl.uid = VALUE_OR_RETURN(legacy2aidl_uid_t_int32_t(legacy.uid));
     aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
-    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source));
+    aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source));
     aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.port_id));
     aidl.silenced = legacy.silenced;
     return aidl;
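
The three new AudioSystem entry points above are thin forwards to IAudioFlinger and fail only when audioflinger cannot be reached. A caller-side sketch, assuming the usual libaudioclient header layout and that AudioMMapPolicyType::DEFAULT is the policy of interest (both assumptions, not taken from this patch):

    #include <vector>

    #include <media/AudioSystem.h>   // assumed header for the AudioSystem facade

    using ::android::media::audio::common::AudioMMapPolicyInfo;
    using ::android::media::audio::common::AudioMMapPolicyType;

    // Returns true when audioflinger reports at least one MMAP policy entry.
    bool aaudioMmapLooksUsable() {
        std::vector<AudioMMapPolicyInfo> policies;
        const android::status_t res = android::AudioSystem::getMmapPolicyInfo(
                AudioMMapPolicyType::DEFAULT /* assumed enumerator */, &policies);
        if (res != android::OK) {
            // Typically PERMISSION_DENIED when the audioflinger service is down.
            return false;
        }
        // The burst queries report 0 when the value is unknown (see the client
        // adapter in IAudioFlinger.cpp below).
        const int32_t mixerBursts = android::AudioSystem::getAAudioMixerBurstCount();
        const int32_t minBurstUsec = android::AudioSystem::getAAudioHardwareBurstMinUsec();
        return !policies.empty() && mixerBursts >= 0 && minBurstUsec >= 0;
    }
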
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 5b7760b..8cf22c4 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -21,6 +21,7 @@
 #include <inttypes.h>
 #include <math.h>
 #include <sys/resource.h>
+#include <thread>
 
 #include <android/media/IAudioPolicyService.h>
 #include <android-base/macros.h>
@@ -170,7 +171,7 @@
     if (aps == 0) return false;
 
     auto result = [&]() -> ConversionResult<bool> {
-        media::AudioConfigBase configAidl = VALUE_OR_RETURN(
+        media::audio::common::AudioConfigBase configAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_config_base_t_AudioConfigBase(config, false /*isInput*/));
         media::AudioAttributesInternal attributesAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
@@ -947,6 +948,44 @@
     mAudioTrack->flush();
 }
 
+bool AudioTrack::pauseAndWait(const std::chrono::milliseconds& timeout)
+{
+    using namespace std::chrono_literals;
+
+    pause();
+
+    AutoMutex lock(mLock);
+    // offload and direct tracks do not wait because pause volume ramp is handled by hardware.
+    if (isOffloadedOrDirect_l()) return true;
+
+    // Wait for the track state to be anything besides pausing.
+    // This ensures that the volume has ramped down.
+    constexpr auto SLEEP_INTERVAL_MS = 10ms;
+    auto begin = std::chrono::steady_clock::now();
+    while (true) {
+        // wait for state to change
+        const int state = mProxy->getState();
+
+        mLock.unlock(); // only local variables are accessed until the lock is retaken below.
+        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
+                std::chrono::steady_clock::now() - begin);
+        if (state != CBLK_STATE_PAUSING) {
+            ALOGV("%s: success state:%d after %lld ms", __func__, state, elapsed.count());
+            return true;
+        }
+        std::chrono::milliseconds remaining = timeout - elapsed;
+        if (remaining.count() <= 0) {
+            ALOGW("%s: timeout expired state:%d still pausing:%d after %lld ms",
+                    __func__, state, CBLK_STATE_PAUSING, elapsed.count());
+            return false;
+        }
+        // It is conceivable that the track is restored while sleeping;
+        // as this logic is advisory, we allow that.
+        std::this_thread::sleep_for(std::min(remaining, SLEEP_INTERVAL_MS));
+        mLock.lock();
+    }
+}
+
 void AudioTrack::pause()
 {
     const int64_t beginNs = systemTime();
@@ -1797,7 +1836,7 @@
                 mAwaitBoost = true;
             }
         } else {
-            ALOGD("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
+            ALOGV("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
                   __func__, mPortId, mReqFrameCount, mFrameCount);
         }
     }
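
AudioTrack::pauseAndWait() above pauses the track and then polls the shared control block until its state leaves CBLK_STATE_PAUSING or the caller's timeout expires, giving the pause volume ramp time to complete before the caller tears the output down. A caller-side sketch; the 100 ms budget is an arbitrary example value, not a constant from this patch:

    #include <chrono>

    #include <media/AudioTrack.h>
    #include <utils/StrongPointer.h>

    // Pause and give the volume ramp up to 100 ms to finish before continuing.
    void pauseBeforeRouteChange(const android::sp<android::AudioTrack>& track) {
        using namespace std::chrono_literals;
        if (!track->pauseAndWait(100ms)) {
            // The track was still in CBLK_STATE_PAUSING when the budget expired.
            // The wait is advisory, so the caller may simply proceed.
        }
    }
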
diff --git a/media/libaudioclient/AudioVolumeGroup.cpp b/media/libaudioclient/AudioVolumeGroup.cpp
index 361f7b8..ab95246 100644
--- a/media/libaudioclient/AudioVolumeGroup.cpp
+++ b/media/libaudioclient/AudioVolumeGroup.cpp
@@ -26,11 +26,10 @@
 #include <media/AudioAttributes.h>
 #include <media/PolicyAidlConversion.h>
 
-#define RETURN_STATUS_IF_ERROR(x) \
-    { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
 namespace android {
 
+using media::audio::common::AudioStreamType;
+
 status_t AudioVolumeGroup::readFromParcel(const Parcel *parcel)
 {
     media::AudioVolumeGroup aidl;
@@ -55,7 +54,7 @@
                     legacy.getAudioAttributes(),
                     legacy2aidl_audio_attributes_t_AudioAttributesInternal));
     aidl.streams = VALUE_OR_RETURN(
-            convertContainer<std::vector<media::AudioStreamType>>(legacy.getStreamTypes(),
+            convertContainer<std::vector<AudioStreamType>>(legacy.getStreamTypes(),
             legacy2aidl_audio_stream_type_t_AudioStreamType));
     return aidl;
 }
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 6d2ec93..88e7396 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -17,6 +17,7 @@
 
 #define LOG_TAG "IAudioFlinger"
 //#define LOG_NDEBUG 0
+
 #include <utils/Log.h>
 
 #include <stdint.h>
@@ -30,6 +31,13 @@
 
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUuid;
 
 #define MAX_ITEMS_PER_LIST 1024
 
@@ -40,12 +48,6 @@
        std::move(_tmp.value()); \
      })
 
-#define RETURN_STATUS_IF_ERROR(x)    \
-    {                                \
-       auto _tmp = (x);              \
-       if (_tmp != OK) return _tmp;  \
-    }
-
 #define RETURN_BINDER_IF_ERROR(x)                         \
     {                                                     \
        auto _tmp = (x);                                   \
@@ -252,7 +254,7 @@
 audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
     auto result = [&]() -> ConversionResult<audio_format_t> {
         int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
-        media::AudioFormatDescription aidlRet;
+        AudioFormatDescription aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
         return aidl2legacy_AudioFormatDescription_audio_format_t(aidlRet);
     }();
@@ -319,14 +321,14 @@
 
 status_t AudioFlingerClientAdapter::setStreamVolume(audio_stream_type_t stream, float value,
                                                     audio_io_handle_t output) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
     return statusTFromBinderStatus(mDelegate->setStreamVolume(streamAidl, value, outputAidl));
 }
 
 status_t AudioFlingerClientAdapter::setStreamMute(audio_stream_type_t stream, bool muted) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     return statusTFromBinderStatus(mDelegate->setStreamMute(streamAidl, muted));
 }
@@ -334,7 +336,7 @@
 float AudioFlingerClientAdapter::streamVolume(audio_stream_type_t stream,
                                               audio_io_handle_t output) const {
     auto result = [&]() -> ConversionResult<float> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+        AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
         float aidlRet;
@@ -348,7 +350,7 @@
 
 bool AudioFlingerClientAdapter::streamMute(audio_stream_type_t stream) const {
     auto result = [&]() -> ConversionResult<bool> {
-        media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+        AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         bool aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -360,7 +362,7 @@
 }
 
 status_t AudioFlingerClientAdapter::setMode(audio_mode_t mode) {
-    media::AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
+    AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
     return statusTFromBinderStatus(mDelegate->setMode(modeAidl));
 }
 
@@ -420,9 +422,9 @@
                                                      audio_channel_mask_t channelMask) const {
     auto result = [&]() -> ConversionResult<size_t> {
         int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
-        media::AudioFormatDescription formatAidl = VALUE_OR_RETURN(
+        AudioFormatDescription formatAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_format_t_AudioFormatDescription(format));
-        media::AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
+        AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_channel_mask_t_AudioChannelLayout(channelMask, true /*isInput*/));
         int64_t aidlRet;
         RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -479,7 +481,7 @@
 }
 
 status_t AudioFlingerClientAdapter::invalidateStream(audio_stream_type_t stream) {
-    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+    AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     return statusTFromBinderStatus(mDelegate->invalidateStream(streamAidl));
 }
@@ -578,9 +580,9 @@
                                                         const effect_uuid_t* pTypeUUID,
                                                         uint32_t preferredTypeFlag,
                                                         effect_descriptor_t* pDescriptor) const {
-    media::AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
+    AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_uuid_t_AudioUuid(*pEffectUUID));
-    media::AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
+    AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_uuid_t_AudioUuid(*pTypeUUID));
     int32_t preferredTypeFlagAidl = VALUE_OR_RETURN_STATUS(
             convertReinterpret<int32_t>(preferredTypeFlag));
@@ -775,6 +777,32 @@
     return statusTFromBinderStatus(mDelegate->updateSecondaryOutputs(trackSecondaryOutputInfos));
 }
 
+status_t AudioFlingerClientAdapter::getMmapPolicyInfos(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    return statusTFromBinderStatus(mDelegate->getMmapPolicyInfos(policyType, policyInfos));
+}
+
+int32_t AudioFlingerClientAdapter::getAAudioMixerBurstCount() {
+    auto result = [&]() -> ConversionResult<int32_t> {
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->getAAudioMixerBurstCount(&aidlRet)));
+        return convertIntegral<int32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+int32_t AudioFlingerClientAdapter::getAAudioHardwareBurstMinUsec() {
+    auto result = [&]() -> ConversionResult<int32_t> {
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getAAudioHardwareBurstMinUsec(&aidlRet)));
+        return convertIntegral<int32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // AudioFlingerServerAdapter
@@ -820,7 +848,7 @@
 }
 
 Status AudioFlingerServerAdapter::format(int32_t output,
-                                         media::AudioFormatDescription* _aidl_return) {
+                                         AudioFormatDescription* _aidl_return) {
     audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_int32_t_audio_io_handle_t(output));
     *_aidl_return = VALUE_OR_RETURN_BINDER(
@@ -870,7 +898,7 @@
     return Status::fromStatusT(mDelegate->getMasterBalance(_aidl_return));
 }
 
-Status AudioFlingerServerAdapter::setStreamVolume(media::AudioStreamType stream, float value,
+Status AudioFlingerServerAdapter::setStreamVolume(AudioStreamType stream, float value,
                                                   int32_t output) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
@@ -879,13 +907,13 @@
     return Status::fromStatusT(mDelegate->setStreamVolume(streamLegacy, value, outputLegacy));
 }
 
-Status AudioFlingerServerAdapter::setStreamMute(media::AudioStreamType stream, bool muted) {
+Status AudioFlingerServerAdapter::setStreamMute(AudioStreamType stream, bool muted) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     return Status::fromStatusT(mDelegate->setStreamMute(streamLegacy, muted));
 }
 
-Status AudioFlingerServerAdapter::streamVolume(media::AudioStreamType stream, int32_t output,
+Status AudioFlingerServerAdapter::streamVolume(AudioStreamType stream, int32_t output,
                                                float* _aidl_return) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
@@ -895,14 +923,14 @@
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::streamMute(media::AudioStreamType stream, bool* _aidl_return) {
+Status AudioFlingerServerAdapter::streamMute(AudioStreamType stream, bool* _aidl_return) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     *_aidl_return = mDelegate->streamMute(streamLegacy);
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::setMode(media::AudioMode mode) {
+Status AudioFlingerServerAdapter::setMode(AudioMode mode) {
     audio_mode_t modeLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioMode_audio_mode_t(mode));
     return Status::fromStatusT(mDelegate->setMode(modeLegacy));
 }
@@ -948,8 +976,8 @@
 }
 
 Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
-                                                     const media::AudioFormatDescription& format,
-                                                     const media::AudioChannelLayout& channelMask,
+                                                     const AudioFormatDescription& format,
+                                                     const AudioChannelLayout& channelMask,
                                                      int64_t* _aidl_return) {
     uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
     audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
@@ -1006,7 +1034,7 @@
     return Status::fromStatusT(mDelegate->closeInput(inputLegacy));
 }
 
-Status AudioFlingerServerAdapter::invalidateStream(media::AudioStreamType stream) {
+Status AudioFlingerServerAdapter::invalidateStream(AudioStreamType stream) {
     audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
             aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
     return Status::fromStatusT(mDelegate->invalidateStream(streamLegacy));
@@ -1081,8 +1109,8 @@
     return Status::ok();
 }
 
-Status AudioFlingerServerAdapter::getEffectDescriptor(const media::AudioUuid& effectUUID,
-                                                      const media::AudioUuid& typeUUID,
+Status AudioFlingerServerAdapter::getEffectDescriptor(const AudioUuid& effectUUID,
+                                                      const AudioUuid& typeUUID,
                                                       int32_t preferredTypeFlag,
                                                       media::EffectDescriptor* _aidl_return) {
     effect_uuid_t effectUuidLegacy = VALUE_OR_RETURN_BINDER(
@@ -1247,4 +1275,21 @@
     return Status::fromStatusT(mDelegate->updateSecondaryOutputs(trackSecondaryOutputs));
 }
 
+Status AudioFlingerServerAdapter::getMmapPolicyInfos(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *_aidl_return) {
+    return Status::fromStatusT(mDelegate->getMmapPolicyInfos(policyType, _aidl_return));
+}
+
+Status AudioFlingerServerAdapter::getAAudioMixerBurstCount(int32_t* _aidl_return) {
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->getAAudioMixerBurstCount()));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) {
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->getAAudioHardwareBurstMinUsec()));
+    return Status::ok();
+}
+
 } // namespace android
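
The client adapter above maps the two new AAudio queries onto AIDL out-parameters and deliberately collapses binder failures to 0, while the server adapter funnels the delegate's values back out. The IAudioFlinger implementation behind the server adapter therefore has to provide three new methods; a do-nothing stub using the signatures visible in the adapters (shown as free functions for brevity, and presumably backed by the audio HAL in the real AudioFlinger):

    #include <cstdint>
    #include <vector>

    #include <android/media/audio/common/AudioMMapPolicyInfo.h>
    #include <android/media/audio/common/AudioMMapPolicyType.h>
    #include <utils/Errors.h>

    using ::android::media::audio::common::AudioMMapPolicyInfo;
    using ::android::media::audio::common::AudioMMapPolicyType;

    // "Nothing available" stub: no MMAP policies, burst values unknown (0).
    android::status_t getMmapPolicyInfos(AudioMMapPolicyType /*policyType*/,
                                         std::vector<AudioMMapPolicyInfo>* policyInfos) {
        policyInfos->clear();
        return android::OK;
    }

    int32_t getAAudioMixerBurstCount() { return 0; }
    int32_t getAAudioHardwareBurstMinUsec() { return 0; }
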
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index 676bb37..fd94568 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -25,6 +25,7 @@
 namespace android {
 
 using base::unexpected;
+using media::audio::common::AudioDeviceAddress;
 
 ConversionResult<volume_group_t>
 aidl2legacy_int32_t_volume_group_t(int32_t aidl) {
@@ -152,7 +153,7 @@
 
         case media::AudioMixMatchCriterionValue::source:
             legacy.mSource = VALUE_OR_RETURN(
-                    aidl2legacy_AudioSourceType_audio_source_t(UNION_GET(aidl, source).value()));
+                    aidl2legacy_AudioSource_audio_source_t(UNION_GET(aidl, source).value()));
             *rule |= RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET;
             return legacy;
 
@@ -184,7 +185,7 @@
 
         case RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET:
             UNION_SET(aidl, source,
-                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.mSource)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.mSource)));
             break;
 
         case RULE_MATCH_UID:
@@ -238,9 +239,8 @@
                     aidl.format, false /*isInput*/));
     legacy.mRouteFlags = VALUE_OR_RETURN(
             aidl2legacy_AudioMixRouteFlag_uint32_t_mask(aidl.routeFlags));
-    legacy.mDeviceType = VALUE_OR_RETURN(
-            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.device.type));
-    legacy.mDeviceAddress = VALUE_OR_RETURN(aidl2legacy_string_view_String8(aidl.device.address));
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+                    aidl.device, &legacy.mDeviceType, &legacy.mDeviceAddress));
     legacy.mCbFlags = VALUE_OR_RETURN(aidl2legacy_AudioMixCallbackFlag_uint32_t_mask(aidl.cbFlags));
     legacy.mAllowPrivilegedMediaPlaybackCapture = aidl.allowPrivilegedMediaPlaybackCapture;
     legacy.mVoiceCommunicationCaptureAllowed = aidl.voiceCommunicationCaptureAllowed;
@@ -261,9 +261,9 @@
                     legacy.mFormat, false /*isInput*/));
     aidl.routeFlags = VALUE_OR_RETURN(
             legacy2aidl_uint32_t_AudioMixRouteFlag_mask(legacy.mRouteFlags));
-    aidl.device.type = VALUE_OR_RETURN(
-            legacy2aidl_audio_devices_t_AudioDeviceDescription(legacy.mDeviceType));
-    aidl.device.address = VALUE_OR_RETURN(legacy2aidl_String8_string(legacy.mDeviceAddress));
+    aidl.device = VALUE_OR_RETURN(
+            legacy2aidl_audio_device_AudioDevice(
+                    legacy.mDeviceType, legacy.mDeviceAddress));
     aidl.cbFlags = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixCallbackFlag_mask(legacy.mCbFlags));
     aidl.allowPrivilegedMediaPlaybackCapture = legacy.mAllowPrivilegedMediaPlaybackCapture;
     aidl.voiceCommunicationCaptureAllowed = legacy.mVoiceCommunicationCaptureAllowed;
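
The PolicyAidlConversion hunks above replace the separate device-type and address conversions with the combined AudioDevice helpers. A round-trip sketch with signatures inferred from the call sites in this patch, not copied from the conversion headers:

    #include <media/AidlConversion.h>   // assumed home of the device conversion helpers
    #include <system/audio.h>
    #include <utils/Errors.h>
    #include <utils/String8.h>

    namespace android {

    status_t exampleDeviceRoundTrip(audio_devices_t type, const String8& address) {
        // Pack the legacy (type, address) pair into a single AIDL AudioDevice.
        auto packed = legacy2aidl_audio_device_AudioDevice(type, address);
        if (!packed.ok()) return packed.error();

        // Unpack it again into the two legacy out-parameters.
        audio_devices_t typeBack;
        String8 addressBack;
        return aidl2legacy_AudioDevice_audio_device(packed.value(), &typeBack, &addressBack);
    }

    }  // namespace android
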
diff --git a/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl b/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
index 04a02c7..335866f 100644
--- a/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
@@ -17,7 +17,7 @@
 package android.media;
 
 import android.media.AudioAttributesInternal;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * This is the equivalent of the android::AudioAttributes C++ type.
diff --git a/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl b/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
index 699df0a..2e74206 100644
--- a/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioContentType;
-import android.media.AudioSourceType;
-import android.media.AudioUsage;
+import android.media.audio.common.AudioContentType;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioUsage;
 
 /**
  * The "Internal" suffix of this type name is to disambiguate it from the
@@ -28,7 +28,7 @@
 parcelable AudioAttributesInternal {
     AudioContentType contentType;
     AudioUsage usage;
-    AudioSourceType source;
+    AudioSource source;
     // Bitmask, indexed by AudioFlag.
     int flags;
     @utf8InCpp String tags; /* UTF8 */
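
With this change AudioAttributesInternal references the enums from android.media.audio.common directly. A sketch of building the parcelable from C++; the header paths and the MUSIC/MEDIA/DEFAULT enumerators are assumed members of the common types package rather than taken from this patch:

    #include <android/media/AudioAttributesInternal.h>
    #include <android/media/audio/common/AudioContentType.h>
    #include <android/media/audio/common/AudioSource.h>
    #include <android/media/audio/common/AudioUsage.h>

    // Hypothetical helper: populates the parcelable for an ordinary music stream.
    android::media::AudioAttributesInternal makeMusicAttributes() {
        android::media::AudioAttributesInternal attr;
        attr.contentType = android::media::audio::common::AudioContentType::MUSIC;
        attr.usage = android::media::audio::common::AudioUsage::MEDIA;
        attr.source = android::media::audio::common::AudioSource::DEFAULT;
        attr.flags = 0;    // bitmask indexed by AudioFlag
        attr.tags = "";    // UTF-8 tag string
        return attr;
    }
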
diff --git a/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl b/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl
deleted file mode 100644
index 3259105..0000000
--- a/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * This structure describes a layout of a multi-channel stream.
- * There are two possible ways for representing a layout:
- *
- * - indexed mask, which tells what channels of an audio frame are used, but
- *   doesn't label them in any way, thus a correspondence between channels in
- *   the same position of frames originating from different streams must be
- *   established externally;
- *
- * - layout mask, which gives a label to each channel, thus allowing to
- *   match channels between streams of different layouts.
- *
- * Both representations are agnostic of the direction of audio transfer. Also,
- * by construction, the number of bits set to '1' in the mask indicates the
- * number of channels in the audio frame. A channel mask per se only defines the
- * presence or absence of a channel, not the order. Please see 'INTERLEAVE_*'
- * constants for the platform convention of order.
- *
- * The structure also defines a "voice mask" which is a special case of
- * layout mask, intended for processing voice audio from telecommunication
- * use cases.
- */
-union AudioChannelLayout {
-    /**
-     * This variant is used for representing the "null" ("none") value
-     * for the channel layout. The field value must always be '0'.
-     */
-    int none = 0;
-    /**
-     * This variant is used for indicating an "invalid" layout for use by the
-     * framework only. HAL implementations must not accept or emit
-     * AudioChannelLayout values for this variant. The field value must always
-     * be '0'.
-     */
-    int invalid = 0;
-    /**
-     * This variant is used for representing indexed masks. The mask indicates
-     * what channels are used. For example, the mask that specifies to use only
-     * channels 1 and 3 when interacting with a multi-channel device is defined
-     * as a combination of the 1st and the 3rd bits and thus is equal to 5. See
-     * also the 'INDEX_MASK_*' constants. The 'indexMask' field must have at
-     * least one bit set.
-     */
-    int indexMask;
-    /**
-     * This variant is used for representing layout masks.
-     * It is recommended to use one of 'LAYOUT_*' values. The 'layoutMask' field
-     * must have at least one bit set.
-     */
-    int layoutMask;
-    /**
-     * This variant is used for processing of voice audio input and output.
-     * It is recommended to use one of 'VOICE_*' values. The 'voiceMask' field
-     * must have at least one bit set.
-     */
-    int voiceMask;
-
-    /**
-     * 'INDEX_MASK_*' constants define how many channels are used.
-     * The mask constants below are 'canonical' masks. Each 'INDEX_MASK_N'
-     * constant declares that all N channels are used and arranges
-     * them starting from the LSB.
-     */
-    const int INDEX_MASK_1 = (1 << 1) - 1;
-    const int INDEX_MASK_2 = (1 << 2) - 1;
-    const int INDEX_MASK_3 = (1 << 3) - 1;
-    const int INDEX_MASK_4 = (1 << 4) - 1;
-    const int INDEX_MASK_5 = (1 << 5) - 1;
-    const int INDEX_MASK_6 = (1 << 6) - 1;
-    const int INDEX_MASK_7 = (1 << 7) - 1;
-    const int INDEX_MASK_8 = (1 << 8) - 1;
-    const int INDEX_MASK_9 = (1 << 9) - 1;
-    const int INDEX_MASK_10 = (1 << 10) - 1;
-    const int INDEX_MASK_11 = (1 << 11) - 1;
-    const int INDEX_MASK_12 = (1 << 12) - 1;
-    const int INDEX_MASK_13 = (1 << 13) - 1;
-    const int INDEX_MASK_14 = (1 << 14) - 1;
-    const int INDEX_MASK_15 = (1 << 15) - 1;
-    const int INDEX_MASK_16 = (1 << 16) - 1;
-    const int INDEX_MASK_17 = (1 << 17) - 1;
-    const int INDEX_MASK_18 = (1 << 18) - 1;
-    const int INDEX_MASK_19 = (1 << 19) - 1;
-    const int INDEX_MASK_20 = (1 << 20) - 1;
-    const int INDEX_MASK_21 = (1 << 21) - 1;
-    const int INDEX_MASK_22 = (1 << 22) - 1;
-    const int INDEX_MASK_23 = (1 << 23) - 1;
-    const int INDEX_MASK_24 = (1 << 24) - 1;
-
-    /**
-     * 'LAYOUT_*' constants define channel layouts recognized by
-     * the audio system. The order of the channels in the frame is assumed
-     * to be from the LSB to MSB for all the bits set to '1'.
-     */
-    const int LAYOUT_MONO = CHANNEL_FRONT_LEFT;
-    const int LAYOUT_STEREO =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT;
-    const int LAYOUT_2POINT1 =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_LOW_FREQUENCY;
-    const int LAYOUT_TRI =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER;
-    const int LAYOUT_TRI_BACK =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_BACK_CENTER;
-    const int LAYOUT_3POINT1 =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER |
-            CHANNEL_LOW_FREQUENCY;
-    const int LAYOUT_2POINT0POINT2 =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
-    const int LAYOUT_2POINT1POINT2 =
-            LAYOUT_2POINT0POINT2 | CHANNEL_LOW_FREQUENCY;
-    const int LAYOUT_3POINT0POINT2 =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_FRONT_CENTER |
-            CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
-    const int LAYOUT_3POINT1POINT2 =
-            LAYOUT_3POINT0POINT2 | CHANNEL_LOW_FREQUENCY;
-    const int LAYOUT_QUAD =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT;
-    const int LAYOUT_QUAD_SIDE =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
-    const int LAYOUT_SURROUND =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_FRONT_CENTER | CHANNEL_BACK_CENTER;
-    const int LAYOUT_PENTA = LAYOUT_QUAD | CHANNEL_FRONT_CENTER;
-    const int LAYOUT_5POINT1 =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
-            CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT;
-    const int LAYOUT_5POINT1_SIDE =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
-            CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
-    const int LAYOUT_5POINT1POINT2 = LAYOUT_5POINT1 |
-            CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
-    const int LAYOUT_5POINT1POINT4 = LAYOUT_5POINT1 |
-            CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
-            CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT;
-    const int LAYOUT_6POINT1 =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
-            CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT | CHANNEL_BACK_CENTER;
-    const int LAYOUT_7POINT1 = LAYOUT_5POINT1 |
-            CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
-    const int LAYOUT_7POINT1POINT2 = LAYOUT_7POINT1 |
-            CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
-    const int LAYOUT_7POINT1POINT4 = LAYOUT_7POINT1 |
-            CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
-            CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT;
-    const int LAYOUT_9POINT1POINT4 = LAYOUT_7POINT1POINT4 |
-            CHANNEL_FRONT_WIDE_LEFT | CHANNEL_FRONT_WIDE_RIGHT;
-    const int LAYOUT_9POINT1POINT6 = LAYOUT_9POINT1POINT4 |
-            CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
-    const int LAYOUT_13POINT_360RA =
-            CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
-            CHANNEL_FRONT_CENTER |
-            CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT |
-            CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
-            CHANNEL_TOP_FRONT_CENTER |
-            CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT |
-            CHANNEL_BOTTOM_FRONT_LEFT | CHANNEL_BOTTOM_FRONT_RIGHT |
-            CHANNEL_BOTTOM_FRONT_CENTER;
-    const int LAYOUT_22POINT2 = LAYOUT_7POINT1POINT4 |
-            CHANNEL_FRONT_LEFT_OF_CENTER | CHANNEL_FRONT_RIGHT_OF_CENTER |
-            CHANNEL_BACK_CENTER | CHANNEL_TOP_CENTER |
-            CHANNEL_TOP_FRONT_CENTER | CHANNEL_TOP_BACK_CENTER |
-            CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT |
-            CHANNEL_BOTTOM_FRONT_LEFT | CHANNEL_BOTTOM_FRONT_RIGHT |
-            CHANNEL_BOTTOM_FRONT_CENTER |
-            CHANNEL_LOW_FREQUENCY_2;
-    const int LAYOUT_MONO_HAPTIC_A =
-            LAYOUT_MONO | CHANNEL_HAPTIC_A;
-    const int LAYOUT_STEREO_HAPTIC_A =
-            LAYOUT_STEREO | CHANNEL_HAPTIC_A;
-    const int LAYOUT_HAPTIC_AB =
-            CHANNEL_HAPTIC_A | CHANNEL_HAPTIC_B;
-    const int LAYOUT_MONO_HAPTIC_AB =
-            LAYOUT_MONO | LAYOUT_HAPTIC_AB;
-    const int LAYOUT_STEREO_HAPTIC_AB =
-            LAYOUT_STEREO | LAYOUT_HAPTIC_AB;
-    const int LAYOUT_FRONT_BACK =
-            CHANNEL_FRONT_CENTER | CHANNEL_BACK_CENTER;
-
-    /**
-     * Expresses the convention when stereo audio samples are stored interleaved
-     * in an array.  This should improve readability by allowing code to use
-     * symbolic indices instead of hard-coded [0] and [1].
-     *
-     * For multi-channel beyond stereo, the platform convention is that channels
-     * are interleaved in order from least significant channel mask bit to most
-     * significant channel mask bit, with unused bits skipped. Any exceptions
-     * to this convention will be noted at the appropriate API.
-     */
-    const int INTERLEAVE_LEFT = 0;
-    const int INTERLEAVE_RIGHT = 1;
-
-    /**
-     * 'CHANNEL_*' constants are used to build 'LAYOUT_*' masks. Each constant
-     * must have exactly one bit set. The values intentionally do not match the
-     * 'android.media.AudioFormat.CHANNEL_OUT_*' constants from the SDK,
-     * for better efficiency in mask processing.
-     */
-    const int CHANNEL_FRONT_LEFT = 1 << 0;
-    const int CHANNEL_FRONT_RIGHT = 1 << 1;
-    const int CHANNEL_FRONT_CENTER = 1 << 2;
-    const int CHANNEL_LOW_FREQUENCY = 1 << 3;
-    const int CHANNEL_BACK_LEFT = 1 << 4;
-    const int CHANNEL_BACK_RIGHT = 1 << 5;
-    const int CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6;
-    const int CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7;
-    const int CHANNEL_BACK_CENTER = 1 << 8;
-    const int CHANNEL_SIDE_LEFT = 1 << 9;
-    const int CHANNEL_SIDE_RIGHT = 1 << 10;
-    const int CHANNEL_TOP_CENTER = 1 << 11;
-    const int CHANNEL_TOP_FRONT_LEFT = 1 << 12;
-    const int CHANNEL_TOP_FRONT_CENTER = 1 << 13;
-    const int CHANNEL_TOP_FRONT_RIGHT = 1 << 14;
-    const int CHANNEL_TOP_BACK_LEFT = 1 << 15;
-    const int CHANNEL_TOP_BACK_CENTER = 1 << 16;
-    const int CHANNEL_TOP_BACK_RIGHT = 1 << 17;
-    const int CHANNEL_TOP_SIDE_LEFT = 1 << 18;
-    const int CHANNEL_TOP_SIDE_RIGHT = 1 << 19;
-    const int CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20;
-    const int CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21;
-    const int CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22;
-    const int CHANNEL_LOW_FREQUENCY_2 = 1 << 23;
-    const int CHANNEL_FRONT_WIDE_LEFT = 1 << 24;
-    const int CHANNEL_FRONT_WIDE_RIGHT = 1 << 25;
-    /**
-     * Haptic channels are not part of multichannel standards; however, they
-     * enhance the user experience during playback, so they are packed together
-     * with the channels of the program. To avoid collision with positional
-     * channels, the values for haptic channels start at the MSB of an integer
-     * (after the sign bit) and move down toward the LSB.
-     */
-    const int CHANNEL_HAPTIC_B = 1 << 29;
-    const int CHANNEL_HAPTIC_A = 1 << 30;
-
-    /**
-     * 'VOICE_*' constants define layouts for voice audio. The order of the
-     * channels in the frame is assumed to be from the LSB to MSB for all the
-     * bits set to '1'.
-     */
-    const int VOICE_UPLINK_MONO = CHANNEL_VOICE_UPLINK;
-    const int VOICE_DNLINK_MONO = CHANNEL_VOICE_DNLINK;
-    const int VOICE_CALL_MONO = CHANNEL_VOICE_UPLINK | CHANNEL_VOICE_DNLINK;
-
-    /**
-     * 'CHANNEL_VOICE_*' constants are used to build 'VOICE_*' masks. Each
-     * constant must have exactly one bit set. Use the same values as
-     * 'android.media.AudioFormat.CHANNEL_IN_VOICE_*' constants from the SDK.
-     */
-    const int CHANNEL_VOICE_UPLINK = 0x4000;
-    const int CHANNEL_VOICE_DNLINK = 0x8000;
-}
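
The layout constants removed above all follow one bit-mask convention: a layout is the OR of single-bit 'CHANNEL_*' values, the channel count equals the number of set bits, and samples are interleaved from the least significant set bit to the most significant one (INDEX_MASK_N is simply the N lowest bits). A self-contained C++ sketch of that convention, using local copies of a few constants rather than any framework helper:

#include <bitset>
#include <cstdint>
#include <cstdio>

namespace {
// Local copies of a few constants from the removed AudioChannelLayout.aidl.
constexpr int32_t CHANNEL_FRONT_LEFT = 1 << 0;
constexpr int32_t CHANNEL_FRONT_RIGHT = 1 << 1;
constexpr int32_t CHANNEL_FRONT_CENTER = 1 << 2;
constexpr int32_t CHANNEL_LOW_FREQUENCY = 1 << 3;
constexpr int32_t LAYOUT_3POINT1 =
        CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY;

// Number of channels in a layout mask: one per set bit.
int channelCount(int32_t layout) {
    return static_cast<int>(std::bitset<32>(static_cast<uint32_t>(layout)).count());
}

// Position of 'channel' within an interleaved frame: the number of set bits below it.
// Returns -1 if the channel is not part of the layout.
int interleavePosition(int32_t layout, int32_t channel) {
    if ((layout & channel) == 0) return -1;
    const uint32_t lowerBits = static_cast<uint32_t>(layout) & (static_cast<uint32_t>(channel) - 1);
    return static_cast<int>(std::bitset<32>(lowerBits).count());
}
}  // namespace

int main() {
    // LAYOUT_3POINT1 has four channels; LFE is the last one in the frame (index 3).
    std::printf("channels=%d lfeIndex=%d\n",
                channelCount(LAYOUT_3POINT1),
                interleavePosition(LAYOUT_3POINT1, CHANNEL_LOW_FREQUENCY));
    return 0;
}
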
diff --git a/media/libaudioclient/aidl/android/media/AudioConfig.aidl b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
deleted file mode 100644
index 6996d42..0000000
--- a/media/libaudioclient/aidl/android/media/AudioConfig.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
-import android.media.AudioOffloadInfo;
-
-/**
- * {@hide}
- */
-parcelable AudioConfig {
-    int sampleRate;
-    AudioChannelLayout channelMask;
-    AudioFormatDescription format;
-    AudioOffloadInfo offloadInfo;
-    long frameCount;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
deleted file mode 100644
index e84161b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
-
-/**
- * {@hide}
- */
-parcelable AudioConfigBase {
-    int sampleRate;
-    AudioChannelLayout channelMask;
-    AudioFormatDescription format;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioContentType.aidl b/media/libaudioclient/aidl/android/media/AudioContentType.aidl
deleted file mode 100644
index f734fba..0000000
--- a/media/libaudioclient/aidl/android/media/AudioContentType.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-@Backing(type="int")
-enum AudioContentType {
-    UNKNOWN = 0,
-    SPEECH = 1,
-    MUSIC = 2,
-    MOVIE = 3,
-    SONIFICATION = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioDevice.aidl b/media/libaudioclient/aidl/android/media/AudioDevice.aidl
deleted file mode 100644
index a815874..0000000
--- a/media/libaudioclient/aidl/android/media/AudioDevice.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioDeviceDescription;
-
-/**
- * {@hide}
- */
-parcelable AudioDevice {
-    AudioDeviceDescription type;
-    @utf8InCpp String address;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioDeviceDescription.aidl b/media/libaudioclient/aidl/android/media/AudioDeviceDescription.aidl
deleted file mode 100644
index f7548b9..0000000
--- a/media/libaudioclient/aidl/android/media/AudioDeviceDescription.aidl
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioDeviceType;
-
-parcelable AudioDeviceDescription {
-    /**
-     * Type and directionality of the device. For bidirectional audio devices
-     * two descriptions need to be created, having the same value for
-     * the 'connection' field.
-     *
-     * See 'AudioDeviceType' for the list of supported values.
-     */
-    AudioDeviceType type = AudioDeviceType.NONE;
-    /**
-     * Specifies the type of the connection of the device to the audio system.
-     * Usually it's some kind of a communication protocol, e.g. Bluetooth SCO or
-     * USB. There is a list of connection types recognized by the framework,
-     * defined using 'CONNECTION_' constants. Vendors can add their own
-     * connection types with "vx.<vendor>." prefix.
-     *
-     * When the 'connection' field is left empty and 'type != NONE | DEFAULT',
-     * it is assumed that the device is permanently attached to the audio
-     * system, e.g. a built-in speaker or microphone.
-     *
-     * The 'connection' field must be left empty if 'type' is 'NONE' or
-     * '{IN|OUT}_DEFAULT'.
-     */
-    @utf8InCpp String connection;
-    /**
-     * Analog connection, for example, via 3.5 mm analog jack.
-     */
-    const @utf8InCpp String CONNECTION_ANALOG = "analog";
-    /**
-     * Low-End (Analog) Desk Dock.
-     */
-    const @utf8InCpp String CONNECTION_ANALOG_DOCK = "analog-dock";
-    /**
-     * Bluetooth A2DP connection.
-     */
-    const @utf8InCpp String CONNECTION_BT_A2DP = "bt-a2dp";
-    /**
-     * Bluetooth Low Energy (LE) connection.
-     */
-    const @utf8InCpp String CONNECTION_BT_LE = "bt-le";
-    /**
-     * Bluetooth SCO connection.
-     */
-    const @utf8InCpp String CONNECTION_BT_SCO = "bt-sco";
-    /**
-     * Bus connection. Mostly used in automotive scenarios.
-     */
-    const @utf8InCpp String CONNECTION_BUS = "bus";
-    /**
-     * High-End (Digital) Desk Dock.
-     */
-    const @utf8InCpp String CONNECTION_DIGITAL_DOCK = "digital-dock";
-    /**
-     * HDMI connection.
-     */
-    const @utf8InCpp String CONNECTION_HDMI = "hdmi";
-    /**
-     * HDMI ARC connection.
-     */
-    const @utf8InCpp String CONNECTION_HDMI_ARC = "hdmi-arc";
-    /**
-     * HDMI eARC connection.
-     */
-    const @utf8InCpp String CONNECTION_HDMI_EARC = "hdmi-earc";
-    /**
-     * IP v4 connection.
-     */
-    const @utf8InCpp String CONNECTION_IP_V4 = "ip-v4";
-    /**
-     * SPDIF connection.
-     */
-    const @utf8InCpp String CONNECTION_SPDIF = "spdif";
-    /**
-     * A wireless connection when the actual protocol is unspecified.
-     */
-    const @utf8InCpp String CONNECTION_WIRELESS = "wireless";
-    /**
-     * USB connection.
-     */
-    const @utf8InCpp String CONNECTION_USB = "usb";
-}
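
The 'type' + 'connection' convention documented above is easy to get wrong, so here is a minimal, self-contained C++ illustration: an empty 'connection' with a non-default 'type' means a permanently attached device, while detachable devices name their transport (and bidirectional devices such as headsets get one input and one output description sharing the same 'connection' value). The structs below are simplified stand-ins, not the generated AIDL classes, and the enum values are copied from the AudioDeviceType.aidl removal further down in this change:

#include <iostream>
#include <string>

namespace {
// Simplified stand-ins; enum values copied from the removed AudioDeviceType.aidl.
enum class AudioDeviceType { NONE = 0, OUT_HEADPHONE = 136, OUT_SPEAKER = 140 };

struct AudioDeviceDescription {
    AudioDeviceType type = AudioDeviceType::NONE;
    std::string connection;  // Empty means "permanently attached" for non-default types.
};

// Connection constant copied from the removed file.
const char* const kConnectionBtA2dp = "bt-a2dp";
}  // namespace

int main() {
    // Built-in speaker: permanently attached, so 'connection' stays empty.
    const AudioDeviceDescription speaker{AudioDeviceType::OUT_SPEAKER, {}};

    // Bluetooth A2DP headphones: detachable, so the transport is named.
    const AudioDeviceDescription btHeadphones{AudioDeviceType::OUT_HEADPHONE, kConnectionBtA2dp};

    std::cout << "speaker attached: " << std::boolalpha << speaker.connection.empty() << '\n'
              << "headphones connection: " << btHeadphones.connection << '\n';
    return 0;
}
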
diff --git a/media/libaudioclient/aidl/android/media/AudioDeviceType.aidl b/media/libaudioclient/aidl/android/media/AudioDeviceType.aidl
deleted file mode 100644
index 4da9fd6..0000000
--- a/media/libaudioclient/aidl/android/media/AudioDeviceType.aidl
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The type of the audio device. Only used as part of 'AudioDeviceDescription'
- * structure.
- *
- * Types are divided into "input" and "output" categories. Audio devices that
- * have both audio input and output, for example, headsets, are represented by a
- * pair of input and output device types.
- *
- * The 'AudioDeviceType' intentionally binds together directionality and 'kind'
- * of the device to avoid making them fully orthogonal. This is because not all
- * types of devices are bidirectional, for example, speakers can only be used
- * for output and microphones can only be used for input (at least, in the
- * context of the audio framework).
- */
-@Backing(type="int")
-enum AudioDeviceType {
-    /**
-     * "None" type is a "null" value. All fields of 'AudioDeviceDescription'
-     * must have default / empty / null values.
-     */
-    NONE = 0,
-    /**
-     * The "default" device is used when the client does not have any
-     * preference for a particular device.
-     */
-    IN_DEFAULT = 1,
-    /**
-     * A device implementing Android Open Accessory protocol.
-     */
-    IN_ACCESSORY = 2,
-    /**
-     * Input from a DSP front-end proxy device.
-     */
-    IN_AFE_PROXY = 3,
-    /**
-     * Used when only the connection protocol is known, e.g. an "HDMI Device."
-     */
-    IN_DEVICE = 4,
-    /**
-     * A device providing reference input for echo canceller.
-     */
-    IN_ECHO_REFERENCE = 5,
-    /**
-     * FM Tuner input.
-     */
-    IN_FM_TUNER = 6,
-    /**
-     * A microphone of a headset.
-     */
-    IN_HEADSET = 7,
-    /**
-     * Loopback input.
-     */
-    IN_LOOPBACK = 8,
-    /**
-     * The main microphone (the frontal mic on mobile devices).
-     */
-    IN_MICROPHONE = 9,
-    /**
-     * The secondary microphone (the back mic on mobile devices).
-     */
-    IN_MICROPHONE_BACK = 10,
-    /**
-     * Input from a submix of other streams.
-     */
-    IN_SUBMIX = 11,
-    /**
-     * Audio received via the telephone line.
-     */
-    IN_TELEPHONY_RX = 12,
-    /**
-     * TV Tuner audio input.
-     */
-    IN_TV_TUNER = 13,
-    /**
-     * The "default" device is used when the client does not have any
-     * preference for a particular device.
-     */
-    OUT_DEFAULT = 129,
-    /**
-     * A device implementing Android Open Accessory protocol.
-     */
-    OUT_ACCESSORY = 130,
-    /**
-     * Output from a DSP front-end proxy device.
-     */
-    OUT_AFE_PROXY = 131,
-    /**
-     * Car audio system.
-     */
-    OUT_CARKIT = 132,
-    /**
-     * Used when only the connection protocol is known, e.g. an "HDMI Device."
-     */
-    OUT_DEVICE = 133,
-    /**
-     * The echo canceller device.
-     */
-    OUT_ECHO_CANCELLER = 134,
-    /**
-     * The FM Tuner device.
-     */
-    OUT_FM = 135,
-    /**
-     * Headphones.
-     */
-    OUT_HEADPHONE = 136,
-    /**
-     * Headphones of a headset.
-     */
-    OUT_HEADSET = 137,
-    /**
-     * Hearing aid.
-     */
-    OUT_HEARING_AID = 138,
-    /**
-     * Secondary line level output.
-     */
-    OUT_LINE_AUX = 139,
-    /**
-     * The main speaker.
-     */
-    OUT_SPEAKER = 140,
-    /**
-     * The speaker of a mobile device in the case when it is close to the ear.
-     */
-    OUT_SPEAKER_EARPIECE = 141,
-    /**
-     * The main speaker with overload / overheating protection.
-     */
-    OUT_SPEAKER_SAFE = 142,
-    /**
-     * Output into a submix.
-     */
-    OUT_SUBMIX = 143,
-    /**
-     * Output into a telephone line.
-     */
-    OUT_TELEPHONY_TX = 144,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl b/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl
deleted file mode 100644
index b03adfe..0000000
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationMetadataType {
-    NONE = 0,
-    FRAMEWORK_TUNER = 1,
-    DVB_AD_DESCRIPTOR = 2,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl b/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
deleted file mode 100644
index 9e04e82..0000000
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationMode {
-     NONE = 0,
-     ELEMENTARY_STREAM = 1,
-     HANDLE = 2,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioFlag.aidl b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
index 58b493b..91361fb 100644
--- a/media/libaudioclient/aidl/android/media/AudioFlag.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
@@ -34,4 +34,6 @@
     MUTE_HAPTIC = 11,
     NO_SYSTEM_CAPTURE = 12,
     CAPTURE_PRIVATE = 13,
+    CONTENT_SPATIALIZED = 14,
+    NEVER_SPATIALIZE = 15,
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl b/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl
deleted file mode 100644
index a656348..0000000
--- a/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioFormatType;
-import android.media.PcmType;
-
-/**
- * An extensible type for specifying audio formats. All formats are largely
- * divided into two classes: PCM and non-PCM (bitstreams). Bitstreams can
- * be encapsulated into PCM streams.
- *
- * The type is defined in a way that makes each format uniquely identifiable, so
- * that if the framework and the HAL construct a value for the same type
- * (e.g. PCM 16 bit), they will produce identical parcelables with identical
- * hashes. This makes it possible for the framework to deduplicate type
- * descriptions received from different HAL modules without relying on a
- * centralized registry of enumeration values.
- *
- * {@hide}
- */
-parcelable AudioFormatDescription {
-    /**
-     * The type of the audio format. See the 'AudioFormatType' for the
-     * list of supported values.
-     */
-    AudioFormatType type = AudioFormatType.DEFAULT;
-    /**
-     * The type of the PCM stream or the transport stream for PCM
-     * encapsulations.  See 'PcmType' for the list of supported values.
-     */
-    PcmType pcm = PcmType.DEFAULT;
-    /**
-     * Optional encoding specification. Must be left empty when:
-     *
-     *  - 'type == DEFAULT && pcm == DEFAULT' -- that means "default" type;
-     *  - 'type == PCM' -- that means a regular PCM stream (not an encapsulation
-     *    of an encoded bitstream).
-     *
-     * For PCM encapsulations of encoded bitstreams (e.g. an encapsulation
-     * according to IEC-61937 standard), the value of the 'pcm' field must
-     * be set accordingly, as an example, PCM_INT_16_BIT must be used for
-     * IEC-61937. Note that 'type == NON_PCM' in this case.
-     *
-     * Encoding names mostly follow IANA standards for media types (MIME), and
-     * frameworks/av/media/libstagefright/foundation/MediaDefs.cpp with the
-     * latter having priority.  Since there are still many audio types not found
-     * in any of these lists, the following rules are applied:
-     *
-     *   - If there is a direct MIME type for the encoding, the MIME type name
-     *     is used as is, e.g. "audio/eac3" for the EAC-3 format.
-     *   - If the encoding is a "subformat" of a MIME-registered format,
-     *     the latter is augmented with a suffix, e.g. "audio/eac3-joc" for the
-     *     JOC extension of EAC-3.
-     *   - If it's a proprietary format, a "vnd." prefix is added, similar to
-     *     IANA rules, e.g. "audio/vnd.dolby.truehd".
-     *   - Otherwise, "x-" prefix is added, e.g. "audio/x-iec61937".
-     *   - All MIME types not found in the IANA formats list have an associated
-     *     comment.
-     *
-     * For PCM encapsulations with a known bitstream format, the latter
-     * is added to the encapsulation encoding as a suffix, after a "+" char.
-     * For example, an IEC61937 encapsulation of AC3 has the following
-     * representation:
-     *   type = NON_PCM,
-     *   pcm = PcmType.INT_16_BIT,
-     *   encoding = "audio/x-iec61937+audio/ac3"
-     */
-    @utf8InCpp String encoding;
-}
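
The encoding rules above include a concrete IEC61937 example; the following self-contained C++ sketch builds that same description. The types are simplified stand-ins rather than the generated AIDL classes, and the numeric value given to PcmType::INT_16_BIT is illustrative only (the enumerator name comes from the removed comment):

#include <iostream>
#include <string>

namespace {
// Simplified stand-ins mirroring the removed AudioFormatDescription.aidl.
enum class AudioFormatType { DEFAULT = 0, NON_PCM = DEFAULT, PCM = 1 };
enum class PcmType { DEFAULT = 0, INT_16_BIT = 1 };  // INT_16_BIT value is illustrative.

struct AudioFormatDescription {
    AudioFormatType type = AudioFormatType::DEFAULT;
    PcmType pcm = PcmType::DEFAULT;
    std::string encoding;
};

// PCM encapsulation of a known bitstream: "<encapsulation>+<bitstream>".
AudioFormatDescription makeIec61937(const std::string& bitstreamMime) {
    AudioFormatDescription desc;
    desc.type = AudioFormatType::NON_PCM;   // still a bitstream, despite the PCM transport
    desc.pcm = PcmType::INT_16_BIT;         // IEC61937 is carried over 16-bit PCM
    desc.encoding = "audio/x-iec61937+" + bitstreamMime;
    return desc;
}
}  // namespace

int main() {
    const AudioFormatDescription ac3OverIec = makeIec61937("audio/ac3");
    std::cout << ac3OverIec.encoding << '\n';  // audio/x-iec61937+audio/ac3
    return 0;
}
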
diff --git a/media/libaudioclient/aidl/android/media/AudioFormatType.aidl b/media/libaudioclient/aidl/android/media/AudioFormatType.aidl
deleted file mode 100644
index 31ed2be..0000000
--- a/media/libaudioclient/aidl/android/media/AudioFormatType.aidl
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The type of the audio format. Only used as part of 'AudioFormatDescription'
- * structure.
- */
-@Backing(type="byte")
-enum AudioFormatType {
-    /**
-     * "Default" type is used when the client does not care about the actual
-     * format. All fields of 'AudioFormatDescription' must have default / empty
-     * / null values.
-     */
-    DEFAULT = 0,
-    /**
-     * When the 'encoding' field of 'AudioFormatDescription' is not empty, it
-     * specifies the codec used for bitstream (non-PCM) data. It is also used
-     * in the case when the bitstream data is encapsulated into a PCM stream,
-     * see the documentation for 'AudioFormatDescription'.
-     */
-    NON_PCM = DEFAULT,
-    /**
-     * PCM type. The 'pcm' field of 'AudioFormatDescription' is used to specify
-     * the actual sample size and representation.
-     */
-    PCM = 1,
-    /**
-     * Value reserved for system use only. HALs must never return this value to
-     * the system or accept it from the system.
-     */
-    SYS_RESERVED_INVALID = -1,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGain.aidl b/media/libaudioclient/aidl/android/media/AudioGain.aidl
deleted file mode 100644
index 4cfa96e..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGain.aidl
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioChannelLayout;
-
-/**
- * {@hide}
- */
-parcelable AudioGain {
-    int index;
-    boolean isInput;
-    boolean useForVolume;
-    /** Bitmask, indexed by AudioGainMode. */
-    int mode;
-    AudioChannelLayout channelMask;
-    int minValue;
-    int maxValue;
-    int defaultValue;
-    int stepValue;
-    int minRampMs;
-    int maxRampMs;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
deleted file mode 100644
index afa3aca..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioChannelLayout;
-
-/**
- * {@hide}
- */
-parcelable AudioGainConfig {
-    /** Index of the corresponding audio_gain in the audio_port gains[] table. */
-    int index;
-
-    /** Mode requested for this command. Bitfield indexed by AudioGainMode. */
-    int mode;
-
-    /**
-     * Channels which gain value follows. N/A in joint mode.
-     */
-    AudioChannelLayout channelMask;
-
-    /**
-     * Gain values in millibels.
-     * For each channel ordered from LSb to MSb in channel mask. The number of values is 1 in joint
-     * mode, otherwise equals the number of bits implied by channelMask.
-     */
-    int[]  values;
-
-    /** Ramp duration in ms. */
-    int rampDurationMs;
-}
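
The removed comments state an invariant that is worth spelling out: 'mode' is a bitfield indexed by AudioGainMode (whose JOINT/CHANNELS/RAMP values appear in the next hunk), and 'values' carries one entry in joint mode, otherwise one entry per bit set in 'channelMask'. A self-contained C++ sketch of that check, using simplified stand-in types rather than the generated AIDL classes:

#include <bitset>
#include <cstdint>
#include <vector>

namespace {
// Values from the AudioGainMode enum that the next hunk renames away.
enum AudioGainMode : int32_t { JOINT = 0, CHANNELS = 1, RAMP = 2 };

struct AudioGainConfig {
    int32_t mode = 0;             // Bitfield indexed by AudioGainMode.
    int32_t channelMask = 0;      // Channels which the gain values follow; N/A in joint mode.
    std::vector<int32_t> values;  // Gain values in millibels.
};

bool hasMode(const AudioGainConfig& config, AudioGainMode mode) {
    return (config.mode & (1 << mode)) != 0;
}

// One value in joint mode, otherwise one value per channel-mask bit.
bool hasExpectedValueCount(const AudioGainConfig& config) {
    const size_t expected = hasMode(config, JOINT)
            ? 1u
            : std::bitset<32>(static_cast<uint32_t>(config.channelMask)).count();
    return config.values.size() == expected;
}
}  // namespace
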
diff --git a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl b/media/libaudioclient/aidl/android/media/AudioGainSys.aidl
similarity index 79%
rename from media/libaudioclient/aidl/android/media/AudioGainMode.aidl
rename to media/libaudioclient/aidl/android/media/AudioGainSys.aidl
index e1b9f0b..9ec8390 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGainSys.aidl
@@ -13,14 +13,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
+ * Provides additional runtime information for AudioGain, used by the framework.
+ *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioGainMode {
-    JOINT    = 0,
-    CHANNELS = 1,
-    RAMP     = 2,
+parcelable AudioGainSys {
+    int index;
+    boolean isInput;
+    boolean useForVolume;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
index efdf99b..b01f902 100644
--- a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
 import android.media.AudioPatch;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioMix.aidl b/media/libaudioclient/aidl/android/media/AudioMix.aidl
index 7473372..88b0450 100644
--- a/media/libaudioclient/aidl/android/media/AudioMix.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMix.aidl
@@ -16,12 +16,12 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
 import android.media.AudioMixCallbackFlag;
 import android.media.AudioMixMatchCriterion;
 import android.media.AudioMixRouteFlag;
 import android.media.AudioMixType;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl b/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
deleted file mode 100644
index d70b364..0000000
--- a/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioMixLatencyClass {
-    LOW = 0,
-    NORMAL = 1,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
index e26a9e1..921a93a 100644
--- a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
@@ -16,15 +16,15 @@
 
 package android.media;
 
-import android.media.AudioSourceType;
-import android.media.AudioUsage;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioUsage;
 
 /**
  * {@hide}
  */
 union AudioMixMatchCriterionValue {
     AudioUsage usage = AudioUsage.UNKNOWN;
-    AudioSourceType source;
+    AudioSource source;
     /** Interpreted as uid_t. */
     int uid;
     int userId;
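
For readers less familiar with AIDL unions: AudioMixMatchCriterionValue holds exactly one of the listed criteria at a time, with 'usage' as the default alternative. A rough C++ analogue using std::variant (the stand-in enums and their values are illustrative; the real generated class exposes its own tag and accessors):

#include <variant>

namespace {
// Minimal stand-ins for the imported enum types; values are illustrative only.
enum class AudioUsage { UNKNOWN = 0, MEDIA = 1 };
enum class AudioSource { DEFAULT = 0, MIC = 1 };

struct Uid { int value; };      // Interpreted as uid_t.
struct UserId { int value; };

// One alternative at a time, defaulting to AudioUsage::UNKNOWN like the AIDL union.
using AudioMixMatchCriterionValue = std::variant<AudioUsage, AudioSource, Uid, UserId>;

bool matchesUsage(const AudioMixMatchCriterionValue& value, AudioUsage usage) {
    const AudioUsage* held = std::get_if<AudioUsage>(&value);
    return held != nullptr && *held == usage;
}
}  // namespace
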
diff --git a/media/libaudioclient/aidl/android/media/AudioMode.aidl b/media/libaudioclient/aidl/android/media/AudioMode.aidl
deleted file mode 100644
index 7067dd3..0000000
--- a/media/libaudioclient/aidl/android/media/AudioMode.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioMode {
-    INVALID = -2,
-    CURRENT = -1,
-    NORMAL = 0,
-    RINGTONE = 1,
-    IN_CALL = 2,
-    IN_COMMUNICATION = 3,
-    CALL_SCREEN = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl b/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
deleted file mode 100644
index f326305..0000000
--- a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioConfigBase;
-import android.media.AudioEncapsulationMode;
-import android.media.AudioStreamType;
-import android.media.AudioUsage;
-
-/**
- * {@hide}
- */
-parcelable AudioOffloadInfo {
-    /** Version of the info structure. Interpreted as a uint16_t version constant. */
-    int version;
-    /** Audio configuration. */
-    AudioConfigBase config;
-    /** Stream type. */
-    AudioStreamType streamType;
-    /** Bit rate in bits per second. */
-    int bitRate;
-    /** Duration in microseconds, -1 if unknown. */
-    long durationUs;
-    /** true if stream is tied to a video stream. */
-    boolean hasVideo;
-    /** true if streaming, false if local playback. */
-    boolean isStreaming;
-    int bitWidth;
-    /** Offload fragment size. */
-    int offloadBufferSize;
-    AudioUsage usage;
-    AudioEncapsulationMode encapsulationMode;
-    /** Content id from tuner HAL (0 if none). */
-    int contentId;
-    /** Sync id from tuner HAL (0 if none). */
-    int syncId;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPort.aidl b/media/libaudioclient/aidl/android/media/AudioPort.aidl
index bf0e5b7..ff177c0 100644
--- a/media/libaudioclient/aidl/android/media/AudioPort.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPort.aidl
@@ -16,35 +16,13 @@
 
 package android.media;
 
-import android.media.AudioGain;
-import android.media.AudioPortConfig;
-import android.media.AudioPortExt;
-import android.media.AudioPortRole;
-import android.media.AudioPortType;
-import android.media.AudioProfile;
-import android.media.ExtraAudioDescriptor;
+import android.media.AudioPortSys;
+import android.media.audio.common.AudioPort;
 
 /**
  * {@hide}
  */
 parcelable AudioPort {
-    /** Port unique ID. Interpreted as audio_port_handle_t. */
-    int id;
-    /** Sink or source. */
-    AudioPortRole role;
-    /** Device, mix ... */
-    AudioPortType type;
-    @utf8InCpp String name;
-    /** AudioProfiles supported by this port (format, Rates, Channels). */
-    AudioProfile[] profiles;
-    /**
-     * ExtraAudioDescriptors supported by this port. The format is not recognized by the
-     * platform; the audio capability is described by a hardware descriptor.
-     */
-    ExtraAudioDescriptor[] extraAudioDescriptors;
-    /** Gain controllers. */
-    AudioGain[] gains;
-    /** Current audio port configuration. */
-    AudioPortConfig activeConfig;
-    AudioPortExt ext;
+    AudioPort hal;
+    AudioPortSys sys;
 }
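
The refactoring pattern here recurs throughout this change: the legacy parcelable is split into a stable 'hal' half (android.media.audio.common.AudioPort) and a framework-only 'sys' half (AudioPortSys). A schematic C++ sketch of that composition, with deliberately pared-down stand-in structs whose field lists are illustrative rather than the real generated classes:

#include <cstdint>
#include <string>

namespace {
// Pared-down stand-ins; the field lists are illustrative only.
struct CommonAudioPort {            // stands in for android.media.audio.common.AudioPort
    int32_t id = 0;
    std::string name;
};

enum class AudioPortRole { NONE = 0, SOURCE = 1, SINK = 2 };  // values illustrative

struct AudioPortSys {               // framework-only companion data
    AudioPortRole role = AudioPortRole::NONE;
};

// The refactored android.media.AudioPort is the pairing of the two halves.
struct FrameworkAudioPort {
    CommonAudioPort hal;
    AudioPortSys sys;
};

bool isSink(const FrameworkAudioPort& port) { return port.sys.role == AudioPortRole::SINK; }
}  // namespace
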
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
index be32a69..3a4ca31 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
@@ -16,43 +16,13 @@
 
 package android.media;
 
-import android.media.AudioChannelLayout;
-import android.media.AudioGainConfig;
-import android.media.AudioIoFlags;
-import android.media.AudioPortConfigExt;
-import android.media.AudioPortConfigType;
-import android.media.AudioPortRole;
-import android.media.AudioPortType;
-import android.media.AudioFormatDescription;
+import android.media.AudioPortConfigSys;
+import android.media.audio.common.AudioPortConfig;
 
 /**
  * {@hide}
  */
 parcelable AudioPortConfig {
-    /**
-     * Port unique ID.
-     * Interpreted as audio_port_handle_t.
-     */
-    int id;
-    /** Sink or source. */
-    AudioPortRole role;
-    /** Device, mix ... */
-    AudioPortType type;
-    /** Bitmask, indexed by AudioPortConfigType. */
-    int configMask;
-    /** Sampling rate in Hz. */
-    int sampleRate;
-    /**
-     * Channel mask, if applicable.
-     */
-    AudioChannelLayout channelMask;
-    /**
-     * Format, if applicable.
-     */
-    AudioFormatDescription format;
-    /** Gain to apply, if applicable. */
-    AudioGainConfig gain;
-    /** Framework only: HW_AV_SYNC, DIRECT, ... */
-    AudioIoFlags flags;
-    AudioPortConfigExt ext;
+    AudioPortConfig hal;
+    AudioPortConfigSys sys;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
deleted file mode 100644
index 31e5330..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioDeviceDescription;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigDeviceExt {
-    /**
-     * Module the device is attached to.
-     * Interpreted as audio_module_handle_t.
-     */
-    int hwModule;
-    /**
-     * Device type.
-     */
-    AudioDeviceDescription type;
-    /** Device address. "" if N/A. */
-    @utf8InCpp String address;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
deleted file mode 100644
index 5d635b6..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortConfigDeviceExt;
-import android.media.AudioPortConfigMixExt;
-import android.media.AudioPortConfigSessionExt;
-
-/**
- * {@hide}
- */
-union AudioPortConfigExt {
-    /**
-     * This represents an empty union. Value is ignored.
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** Device specific info. */
-    AudioPortConfigDeviceExt device;
-    /** Mix specific info. */
-    AudioPortConfigMixExt mix;
-    /** Session specific info. */
-    AudioPortConfigSessionExt session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl
deleted file mode 100644
index d3226f2..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortConfigMixExtUseCase;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigMixExt {
-    /**
-     * Module the stream is attached to.
-     * Interpreted as audio_module_handle_t.
-     */
-    int hwModule;
-    /**
-     * I/O handle of the input/output stream.
-     * Interpreted as audio_io_handle_t.
-     */
-    int handle;
-    AudioPortConfigMixExtUseCase usecase;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
deleted file mode 100644
index c61f044..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioSourceType;
-import android.media.AudioStreamType;
-
-/**
- * {@hide}
- */
-union AudioPortConfigMixExtUseCase {
-    /**
-     * This to be set if the containing config has the AudioPortRole::NONE role.
-     * This represents an empty value (value is ignored).
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** This to be set if the containing config has the AudioPortRole::SOURCE role. */
-    AudioStreamType stream;
-    /** This to be set if the containing config has the AudioPortRole::SINK role. */
-    AudioSourceType source;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl
deleted file mode 100644
index a2cbf62..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigSessionExt {
-    int session;
-}
diff --git a/media/libaudioclient/aidl/android/media/Int.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
similarity index 62%
rename from media/libaudioclient/aidl/android/media/Int.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
index 24f4d62..6a615cd 100644
--- a/media/libaudioclient/aidl/android/media/Int.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
@@ -16,12 +16,20 @@
 
 package android.media;
 
+import android.media.AudioIoFlags;
+import android.media.AudioPortExtSys;
+import android.media.AudioPortRole;
+import android.media.AudioPortType;
+
 /**
- * This is a simple wrapper around an 'int', putting it in a parcelable, so it can be used as an
- * inout parameter, be made @nullable, etc.
- *
  * {@hide}
  */
-parcelable Int {
-    int value;
+parcelable AudioPortConfigSys {
+    /** Sink or source. */
+    AudioPortRole role;
+    /** Device, mix ... */
+    AudioPortType type;
+    /** Flags: HW_AV_SYNC, DIRECT, ... Can be left unspecified. */
+    @nullable AudioIoFlags flags;
+    AudioPortExtSys ext;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl
deleted file mode 100644
index 6e22b8d..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioPortConfigType {
-    SAMPLE_RATE  = 0,
-    CHANNEL_MASK = 1,
-    FORMAT       = 2,
-    GAIN         = 3,
-    FLAGS        = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
similarity index 85%
rename from media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
index b758f23..0f5a9b6 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,15 +16,12 @@
 
 package android.media;
 
-import android.media.AudioDevice;
-
 /**
  * {@hide}
  */
-parcelable AudioPortDeviceExt {
+parcelable AudioPortDeviceExtSys {
     /** Module the device is attached to. Interpreted as audio_module_handle_t. */
     int hwModule;
-    AudioDevice device;
     /** Bitmask, indexed by AudioEncapsulationMode. */
     int encapsulationModes;
     /** Bitmask, indexed by AudioEncapsulationMetadataType. */
diff --git a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
deleted file mode 100644
index 453784b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortDeviceExt;
-import android.media.AudioPortMixExt;
-import android.media.AudioPortSessionExt;
-
-/**
- * {@hide}
- */
-union AudioPortExt {
-    /**
-     * This represents an empty union. Value is ignored.
-     * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
-     *             established.
-     */
-    boolean unspecified;
-    /** Device specific info. */
-    AudioPortDeviceExt device;
-    /** Mix specific info. */
-    AudioPortMixExt mix;
-    /** Session specific info. */
-    AudioPortSessionExt session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
similarity index 62%
rename from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
index b08a604..2cdf4f6 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
@@ -16,14 +16,19 @@
 
 package android.media;
 
+import android.media.AudioPortDeviceExtSys;
+import android.media.AudioPortMixExtSys;
+
 /**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioEncapsulationType {
-    NONE     = 0,
-    IEC61937 = 1,
-}
\ No newline at end of file
+union AudioPortExtSys {
+    /**
+     * This represents an empty union. Value is ignored.
+     */
+    boolean unspecified;
+    /** System-only parameters when the port is an audio device. */
+    AudioPortDeviceExtSys device;
+    /** System-only parameters when the port is an audio mix. */
+    AudioPortMixExtSys mix;
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
deleted file mode 100644
index 62cdb8e..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioMixLatencyClass;
-
-/**
- * {@hide}
- */
-parcelable AudioPortMixExt {
-    /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
-    int hwModule;
-    /** I/O handle of the input/output stream. Interpreted as audio_io_handle_t. */
-    int handle;
-    /** Latency class */
-    AudioMixLatencyClass latencyClass;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStandard.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
similarity index 81%
rename from media/libaudioclient/aidl/android/media/AudioStandard.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
index e131d0d..5999885 100644
--- a/media/libaudioclient/aidl/android/media/AudioStandard.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
@@ -13,15 +13,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
- * The audio standard that describes audio playback/capture capabilities.
- *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioStandard {
-    NONE = 0,
-    EDID = 1,
+parcelable AudioPortMixExtSys {
+    /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
+    int hwModule;
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
deleted file mode 100644
index dbca168..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortSessionExt {
-    /** Audio session. Interpreted as audio_session_t. */
-    int session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
new file mode 100644
index 0000000..27c0fe5
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioGainSys;
+import android.media.AudioPortConfigSys;
+import android.media.AudioPortExtSys;
+import android.media.AudioPortRole;
+import android.media.AudioPortType;
+import android.media.AudioProfileSys;
+
+/**
+ * {@hide}
+ */
+parcelable AudioPortSys {
+    /** Sink or source. */
+    AudioPortRole role;
+    /** Device, mix ... */
+    AudioPortType type;
+    /** System-only parameters for each AudioProfile from 'port.profiles'. */
+    AudioProfileSys[] profiles;
+    /** System-only parameters for each AudioGain from 'port.gains'. */
+    AudioGainSys[] gains;
+    /** System-only parameters for 'port.activeConfig'. */
+    AudioPortConfigSys activeConfig;
+    /** System-only extra parameters for 'port.ext'. */
+    AudioPortExtSys ext;
+}
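
A short C++ sketch of the parallel-array contract the comments above describe, assuming the common android.media.audio.common.AudioPort exposes the 'profiles' and 'gains' vectors those comments reference; the function name is illustrative.

#include <android/media/AudioPortSys.h>
#include <android/media/audio/common/AudioPort.h>

namespace common = android::media::audio::common;

// AudioPortSys is kept parallel to the common AudioPort it was split from:
// sys.profiles[i] and sys.gains[i] describe port.profiles[i] and port.gains[i].
// A receiver can sanity-check that contract before zipping the two together.
bool isConsistentWith(const android::media::AudioPortSys& sys,
                      const common::AudioPort& port) {
    return sys.profiles.size() == port.profiles.size() &&
           sys.gains.size() == port.gains.size();
}
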
diff --git a/media/libaudioclient/aidl/android/media/AudioProfile.aidl b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
deleted file mode 100644
index 9fb8d49..0000000
--- a/media/libaudioclient/aidl/android/media/AudioProfile.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioChannelLayout;
-import android.media.AudioEncapsulationType;
-import android.media.AudioFormatDescription;
-
-/**
- * {@hide}
- */
-parcelable AudioProfile {
-    @utf8InCpp String name;
-    /** The format for an audio profile should only be set when initialized. */
-    AudioFormatDescription format;
-    AudioChannelLayout[] channelMasks;
-    int[] samplingRates;
-    boolean isDynamicFormat;
-    boolean isDynamicChannels;
-    boolean isDynamicRate;
-    AudioEncapsulationType encapsulationType;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl b/media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
similarity index 68%
copy from media/libaudioclient/aidl/android/media/AudioGainMode.aidl
copy to media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
index e1b9f0b..329c9d5 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
@@ -13,14 +13,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.media;
 
 /**
+ * Indicates whether the parameters of the AudioProfiles in the
+ * AudioPort are dynamic. Each instance of AudioProfileSys corresponds
+ * to an instance of AudioProfile.
+ *
  * {@hide}
  */
-@Backing(type="int")
-enum AudioGainMode {
-    JOINT    = 0,
-    CHANNELS = 1,
-    RAMP     = 2,
+parcelable AudioProfileSys {
+    boolean isDynamicFormat;
+    boolean isDynamicChannels;
+    boolean isDynamicRate;
 }
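
For example, the three flags together tell a consumer whether a profile carries any fixed capabilities at all; a small hypothetical helper:

#include <android/media/AudioProfileSys.h>

// A profile whose format, channel masks and sample rates are all dynamic has
// no fixed capabilities of its own; they are discovered at runtime.
bool isFullyDynamic(const android::media::AudioProfileSys& sys) {
    return sys.isDynamicFormat && sys.isDynamicChannels && sys.isDynamicRate;
}
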
diff --git a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl b/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
deleted file mode 100644
index 8673b92..0000000
--- a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioSourceType {
-    INVALID = -1,
-    DEFAULT = 0,
-    MIC = 1,
-    VOICE_UPLINK = 2,
-    VOICE_DOWNLINK = 3,
-    VOICE_CALL = 4,
-    CAMCORDER = 5,
-    VOICE_RECOGNITION = 6,
-    VOICE_COMMUNICATION = 7,
-    REMOTE_SUBMIX = 8,
-    UNPROCESSED = 9,
-    VOICE_PERFORMANCE = 10,
-    ECHO_REFERENCE = 1997,
-    FM_TUNER = 1998,
-    /**
-     * A low-priority, preemptible audio source for for background software
-     * hotword detection. Same tuning as VOICE_RECOGNITION.
-     * Used only internally by the framework.
-     */
-    HOTWORD = 1999,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStreamType.aidl b/media/libaudioclient/aidl/android/media/AudioStreamType.aidl
deleted file mode 100644
index d777882..0000000
--- a/media/libaudioclient/aidl/android/media/AudioStreamType.aidl
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioStreamType {
-    DEFAULT = -1,
-    VOICE_CALL = 0,
-    SYSTEM = 1,
-    RING = 2,
-    MUSIC = 3,
-    ALARM = 4,
-    NOTIFICATION = 5,
-    BLUETOOTH_SCO = 6,
-    ENFORCED_AUDIBLE = 7,
-    DTMF = 8,
-    TTS = 9,
-    ACCESSIBILITY = 10,
-    ASSISTANT = 11,
-    /** For dynamic policy output mixes. Only used by the audio policy */
-    REROUTING = 12,
-    /** For audio flinger tracks volume. Only used by the audioflinger */
-    PATCH = 13,
-    /** stream for corresponding to AUDIO_USAGE_CALL_ASSISTANT */
-    CALL_ASSISTANT = 14,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioUsage.aidl b/media/libaudioclient/aidl/android/media/AudioUsage.aidl
deleted file mode 100644
index 66c5c30..0000000
--- a/media/libaudioclient/aidl/android/media/AudioUsage.aidl
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioUsage {
-    UNKNOWN = 0,
-    MEDIA = 1,
-    VOICE_COMMUNICATION = 2,
-    VOICE_COMMUNICATION_SIGNALLING = 3,
-    ALARM = 4,
-    NOTIFICATION = 5,
-    NOTIFICATION_TELEPHONY_RINGTONE = 6,
-    NOTIFICATION_COMMUNICATION_REQUEST = 7,
-    NOTIFICATION_COMMUNICATION_INSTANT = 8,
-    NOTIFICATION_COMMUNICATION_DELAYED = 9,
-    NOTIFICATION_EVENT = 10,
-    ASSISTANCE_ACCESSIBILITY = 11,
-    ASSISTANCE_NAVIGATION_GUIDANCE = 12,
-    ASSISTANCE_SONIFICATION = 13,
-    GAME = 14,
-    VIRTUAL_SOURCE = 15,
-    ASSISTANT = 16,
-    CALL_ASSISTANT = 17,
-    EMERGENCY = 1000,
-    SAFETY = 1001,
-    VEHICLE_STATUS = 1002,
-    ANNOUNCEMENT = 1003,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioUuid.aidl b/media/libaudioclient/aidl/android/media/AudioUuid.aidl
deleted file mode 100644
index bba9039..0000000
--- a/media/libaudioclient/aidl/android/media/AudioUuid.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioUuid {
-    int timeLow;
-    int timeMid;
-    int timeHiAndVersion;
-    int clockSeq;
-    byte[] node;  // Length = 6
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl b/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
index 3a29a08..b95a1d3 100644
--- a/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
@@ -17,7 +17,7 @@
 package android.media;
 
 import android.media.AudioAttributesInternal;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
index 35a56eb..bcca04a 100644
--- a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
@@ -16,10 +16,10 @@
 
 package android.media;
 
-import android.media.AudioDevice;
+import android.content.AttributionSourceState;
 import android.media.EffectDescriptor;
 import android.media.IEffectClient;
-import android.content.AttributionSourceState;
+import android.media.audio.common.AudioDevice;
 
 /**
  * Input arguments of the createEffect() method.
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
index 7e3c240..b938a3e 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
@@ -18,7 +18,7 @@
 
 import android.media.AudioAttributesInternal;
 import android.media.AudioClient;
-import android.media.AudioConfigBase;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * CreateRecordRequest contains all input arguments sent by AudioRecord to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl b/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
index 61195d0..7d159d0 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioConfigBase;
 import android.media.IAudioRecord;
 import android.media.SharedFileRegion;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * CreateRecordResponse contains all output arguments returned by AudioFlinger to AudioRecord
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl b/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
index 014b3ca..212221e 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
@@ -18,9 +18,9 @@
 
 import android.media.AudioAttributesInternal;
 import android.media.AudioClient;
-import android.media.AudioConfig;
 import android.media.IAudioTrackCallback;
 import android.media.SharedFileRegion;
+import android.media.audio.common.AudioConfig;
 
 /**
  * CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
index 40473fa..da6f454 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 import android.media.IAudioTrack;
 
 /**
diff --git a/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl b/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
index 35a3d74..e5b5158 100644
--- a/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioUuid;
+import android.media.audio.common.AudioUuid;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl b/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl
deleted file mode 100644
index ec5b67a..0000000
--- a/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioEncapsulationType;
-import android.media.AudioStandard;
-
-/**
- * The audio descriptor that descibes playback/capture capabilities according to
- * a particular standard.
- *
- * {@hide}
- */
-parcelable ExtraAudioDescriptor {
-    AudioStandard standard;
-    byte[] audioDescriptor;
-    AudioEncapsulationType encapsulationType;
-}
diff --git a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
index 164fb9d..963877a 100644
--- a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index 16f70c1..c55c66e 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -16,14 +16,10 @@
 
 package android.media;
 
-import android.media.AudioChannelLayout;
-import android.media.AudioMode;
 import android.media.AudioPatch;
 import android.media.AudioPort;
 import android.media.AudioPortConfig;
-import android.media.AudioStreamType;
 import android.media.AudioUniqueIdUse;
-import android.media.AudioUuid;
 import android.media.AudioVibratorInfo;
 import android.media.CreateEffectRequest;
 import android.media.CreateEffectResponse;
@@ -42,7 +38,13 @@
 import android.media.MicrophoneInfoData;
 import android.media.RenderPosition;
 import android.media.TrackSecondaryOutputInfo;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioMMapPolicyInfo;
+import android.media.audio.common.AudioMMapPolicyType;
+import android.media.audio.common.AudioMode;
+import android.media.audio.common.AudioStreamType;
+import android.media.audio.common.AudioUuid;
 
 /**
  * {@hide}
@@ -217,4 +219,10 @@
     // This usually happens when there is a dynamic policy registered.
     void updateSecondaryOutputs(
             in TrackSecondaryOutputInfo[] trackSecondaryOutputInfos);
+
+    AudioMMapPolicyInfo[] getMmapPolicyInfos(AudioMMapPolicyType policyType);
+
+    int getAAudioMixerBurstCount();
+
+    int getAAudioHardwareBurstMinUsec();
 }
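
A client-side sketch of the three new methods, assuming the usual cpp-backend proxy shape (binder::Status return value, AIDL return values as out-parameters); the wrapper function and its error handling are illustrative only.

#include <android/media/IAudioFlingerService.h>
#include <android/media/audio/common/AudioMMapPolicyInfo.h>
#include <android/media/audio/common/AudioMMapPolicyType.h>
#include <binder/Status.h>
#include <utils/StrongPointer.h>

#include <cstdint>
#include <vector>

using android::binder::Status;
using android::media::IAudioFlingerService;
namespace common = android::media::audio::common;

// Fetch the MMAP policies advertised for one policy type, plus the AAudio
// burst figures that complement them for MMAP stream sizing.
Status queryMmapSupport(const android::sp<IAudioFlingerService>& af,
                        common::AudioMMapPolicyType type,
                        std::vector<common::AudioMMapPolicyInfo>* policies,
                        int32_t* mixerBurstCount,
                        int32_t* hwBurstMinUsec) {
    Status status = af->getMmapPolicyInfos(type, policies);
    if (!status.isOk()) return status;
    status = af->getAAudioMixerBurstCount(mixerBurstCount);
    if (!status.isOk()) return status;
    return af->getAAudioHardwareBurstMinUsec(hwBurstMinUsec);
}
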
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 7022b9d..c59655d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -16,18 +16,11 @@
 
 package android.media;
 
-import android.media.AudioFormatDescription;
 import android.content.AttributionSourceState;
 
 import android.media.AudioAttributesEx;
 import android.media.AudioAttributesInternal;
-import android.media.AudioConfig;
-import android.media.AudioConfigBase;
-import android.media.AudioDevice;
-import android.media.AudioDeviceDescription;
 import android.media.AudioMix;
-import android.media.AudioMode;
-import android.media.AudioOffloadInfo;
 import android.media.AudioOffloadMode;
 import android.media.AudioPatch;
 import android.media.AudioPolicyDeviceState;
@@ -38,10 +31,6 @@
 import android.media.AudioPortRole;
 import android.media.AudioPortType;
 import android.media.AudioProductStrategy;
-import android.media.AudioSourceType;
-import android.media.AudioStreamType;
-import android.media.AudioUsage;
-import android.media.AudioUuid;
 import android.media.AudioVolumeGroup;
 import android.media.DeviceRole;
 import android.media.EffectDescriptor;
@@ -51,8 +40,19 @@
 import android.media.IAudioPolicyServiceClient;
 import android.media.ICaptureStateListener;
 import android.media.INativeSpatializerCallback;
-import android.media.Int;
 import android.media.SoundTriggerSession;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioConfigBase;
+import android.media.audio.common.AudioDevice;
+import android.media.audio.common.AudioDeviceDescription;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioMode;
+import android.media.audio.common.AudioOffloadInfo;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioStreamType;
+import android.media.audio.common.AudioUsage;
+import android.media.audio.common.AudioUuid;
+import android.media.audio.common.Int;
 
 /**
  * IAudioPolicyService interface (see AudioPolicyInterface for method descriptions).
@@ -157,7 +157,7 @@
 
     boolean isStreamActiveRemotely(AudioStreamType stream, int inPastMs);
 
-    boolean isSourceActive(AudioSourceType source);
+    boolean isSourceActive(AudioSource source);
 
     /**
      * On input, count represents the maximum length of the returned array.
@@ -172,7 +172,7 @@
                                                        @utf8InCpp String opPackageName,
                                                        in AudioUuid uuid,
                                                        int priority,
-                                                       AudioSourceType source);
+                                                       AudioSource source);
 
     int /* audio_unique_id_t */ addStreamDefaultEffect(in AudioUuid type,
                                                        @utf8InCpp String opPackageName,
@@ -331,22 +331,22 @@
     AudioDevice[] getDevicesForRoleAndStrategy(int /* product_strategy_t */ strategy,
                                                DeviceRole role);
 
-    void setDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void setDevicesRoleForCapturePreset(AudioSource audioSource,
                                         DeviceRole role,
                                         in AudioDevice[] devices);
 
-    void addDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void addDevicesRoleForCapturePreset(AudioSource audioSource,
                                         DeviceRole role,
                                         in AudioDevice[] devices);
 
-    void removeDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void removeDevicesRoleForCapturePreset(AudioSource audioSource,
                                            DeviceRole role,
                                            in AudioDevice[] devices);
 
-    void clearDevicesRoleForCapturePreset(AudioSourceType audioSource,
+    void clearDevicesRoleForCapturePreset(AudioSource audioSource,
                                           DeviceRole role);
 
-    AudioDevice[] getDevicesForRoleAndCapturePreset(AudioSourceType audioSource,
+    AudioDevice[] getDevicesForRoleAndCapturePreset(AudioSource audioSource,
                                                     DeviceRole role);
 
     boolean registerSoundTriggerCaptureStateListener(ICaptureStateListener listener);
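
A sketch of what a caller of the retyped capture-preset routing looks like after the switch to the common AudioSource. AudioSource::MIC and DeviceRole::PREFERRED are assumed enumerator names from the companion types, which are not shown in this change.

#include <android/media/DeviceRole.h>
#include <android/media/IAudioPolicyService.h>
#include <android/media/audio/common/AudioDevice.h>
#include <android/media/audio/common/AudioSource.h>
#include <binder/Status.h>
#include <utils/StrongPointer.h>

#include <vector>

using android::binder::Status;
namespace common = android::media::audio::common;

// Prefer one capture device for the MIC preset. The enumerator names below
// are assumptions about the companion AIDL types, not taken from this change.
Status preferDeviceForMic(const android::sp<android::media::IAudioPolicyService>& aps,
                          const common::AudioDevice& device) {
    std::vector<common::AudioDevice> devices = {device};
    return aps->setDevicesRoleForCapturePreset(common::AudioSource::MIC,
                                               android::media::DeviceRole::PREFERRED,
                                               devices);
}
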
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
index a7782b8..d93a59d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
@@ -16,10 +16,10 @@
 
 package android.media;
 
-import android.media.AudioConfigBase;
-import android.media.AudioSourceType;
 import android.media.EffectDescriptor;
 import android.media.RecordClientInfo;
+import android.media.audio.common.AudioConfigBase;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -43,7 +43,7 @@
                                         in AudioConfigBase deviceConfig,
                                         in EffectDescriptor[] effects,
                                         int /* audio_patch_handle_t */ patchHandle,
-                                        AudioSourceType source);
+                                        AudioSource source);
      /** Notifies a change of audio routing */
      void onRoutingUpdated();
 }
diff --git a/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl b/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
index f34df05..0e9634c 100644
--- a/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
+++ b/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
@@ -17,17 +17,18 @@
 package android.media;
 
 import android.media.SpatializationLevel;
+import android.media.SpatializerHeadTrackingMode;
 
 /**
  * The INativeSpatializerCallback interface is a callback associated to the
- * ISpatializer interface. The callback is used by the spatializer stage
- * implementation in native audio server to communicate stage changes to the
+ * ISpatializer interface. The callback is used by the spatializer
+ * implementation in the native audio server to communicate state changes to the
  * client controlling the spatializer with the ISpatializer interface.
  * {@hide}
  */
-interface INativeSpatializerCallback {
-    /** Called when the spatialization level applied by the vitualizer stage changes
+oneway interface INativeSpatializerCallback {
+    /** Called when the spatialization level applied by the spatializer changes
      * (e.g. when the spatializer is enabled or disabled)
      */
-    oneway void onLevelChanged(SpatializationLevel level);
+    void onLevelChanged(SpatializationLevel level);
 }
diff --git a/media/libaudioclient/aidl/android/media/ISpatializer.aidl b/media/libaudioclient/aidl/android/media/ISpatializer.aidl
index 212d8fe..b871238 100644
--- a/media/libaudioclient/aidl/android/media/ISpatializer.aidl
+++ b/media/libaudioclient/aidl/android/media/ISpatializer.aidl
@@ -16,8 +16,11 @@
 
 package android.media;
 
-import android.media.HeadTrackingMode;
+import android.media.ISpatializerHeadTrackingCallback;
 import android.media.SpatializationLevel;
+import android.media.SpatializationMode;
+import android.media.SpatializerHeadTrackingMode;
+
 
 /**
  * The ISpatializer interface is used to control the native audio service implementation
@@ -47,22 +50,89 @@
     /** Gets the selected spatialization level (see SpatializationLevel.aidl) */
     SpatializationLevel getLevel();
 
-    /** Reports the list of supported head tracking modes (see HeadTrackingMode.aidl). The list can
-     * be empty if the spatializer implementation does not support head tracking or if no
-     * head tracking device is connected.
+    /** Reports whether the spatializer engine supports head tracking.
+     * This is a precondition that holds independently of whether a head tracking
+     * sensor is registered.
      */
-    HeadTrackingMode[] getSupportedHeadTrackingModes();
+    boolean isHeadTrackingSupported();
 
-    /** Selects the desired head tracking mode (see HeadTrackingMode.aidl) */
-    void setDesiredHeadTrackingMode(HeadTrackingMode mode);
+    /** Reports the list of supported head tracking modes (see SpatializerHeadTrackingMode.aidl).
+     * The list can be empty if the spatializer implementation does not support head tracking or if
+     * no head tracking sensor is registered (see setHeadSensor() and setScreenSensor()).
+     */
+    SpatializerHeadTrackingMode[] getSupportedHeadTrackingModes();
+
+    /** Selects the desired head tracking mode (see SpatializerHeadTrackingMode.aidl) */
+    void setDesiredHeadTrackingMode(SpatializerHeadTrackingMode mode);
 
     /** Gets the actual head tracking mode. Can be different from the desired mode if conditions to
      * enable the desired mode are not met (e.g if the head tracking device was removed)
      */
-    HeadTrackingMode getActualHeadTrackingMode();
+    SpatializerHeadTrackingMode getActualHeadTrackingMode();
 
     /** Reset the head tracking algorithm to consider current head pose as neutral */
-    void recenterHeadtracker();
-    /** Set the screen to stage transform to use by the head tracking algorithm */
+    void recenterHeadTracker();
+
+    /** Set the screen to stage transform to be used by the head tracking algorithm.
+     * The screen to stage transform is conveyed as a vector of 6 elements,
+     * where the first three are a translation vector and
+     * the last three are a rotation vector.
+     */
     void setGlobalTransform(in float[] screenToStage);
+
+    /**
+     * Set the sensor that is to be used for head-tracking.
+     * -1 can be used to disable head-tracking.
+     */
+    void setHeadSensor(int sensorHandle);
+
+    /**
+     * Set the sensor that is to be used for screen-tracking.
+     * -1 can be used to disable screen-tracking.
+     */
+    void setScreenSensor(int sensorHandle);
+
+    /**
+     * Sets the display orientation.
+     * Orientation is expressed as the angle of rotation from the physical "up" side of the screen
+     * to the logical "up" side of the content displayed on the screen. Counterclockwise angles,
+     * as viewed while facing the screen, are positive.
+     */
+    void setDisplayOrientation(float physicalToLogicalAngle);
+
+    /**
+     * Sets the hinge angle for foldable devices.
+     */
+    void setHingeAngle(float hingeAngle);
+
+    /** Reports the list of supported spatialization modes (see SpatializationMode.aidl).
+     * The list should never be empty if an ISpatializer interface was successfully
+     * retrieved with IAudioPolicyService.getSpatializer().
+     */
+    SpatializationMode[] getSupportedModes();
+
+    /**
+     * Registers a callback to monitor head tracking functions.
+     * Only one callback can be registered on a Spatializer.
+     * The last callback registered wins, and passing a nullptr unregisters
+     * the last registered callback.
+     */
+    void registerHeadTrackingCallback(@nullable ISpatializerHeadTrackingCallback callback);
+
+    /**
+     * Sets a parameter on the spatializer engine. Used by the effect implementor for
+     * vendor-specific configuration.
+     */
+     void setParameter(int key, in byte[] value);
+
+    /**
+     * Gets a parameter from the spatializer engine. Used by the effect implementor for
+     * vendor-specific configuration.
+     */
+     void getParameter(int key, inout byte[] value);
+
+    /**
+     * Gets the I/O handle of the output stream the spatializer is connected to.
+     */
+     int getOutput();
 }
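
A brief C++ usage sketch of the head-tracking additions, showing the 6-element screen-to-stage layout described above (translation first, then rotation) and the -1 convention for detaching the head sensor; the values are placeholders.

#include <android/media/ISpatializer.h>
#include <binder/Status.h>
#include <utils/StrongPointer.h>

#include <vector>

using android::binder::Status;
using android::media::ISpatializer;

// Push a screen-to-stage transform ([tx, ty, tz, rx, ry, rz]) and then
// disable head tracking by detaching the head sensor.
Status configureStage(const android::sp<ISpatializer>& spatializer) {
    std::vector<float> screenToStage = {0.f, 0.f, 0.f,   // translation (placeholder)
                                        0.f, 0.f, 0.f};  // rotation (placeholder)
    Status status = spatializer->setGlobalTransform(screenToStage);
    if (!status.isOk()) return status;
    return spatializer->setHeadSensor(-1);  // -1 disables head tracking
}
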
diff --git a/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl b/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl
new file mode 100644
index 0000000..23d5e13
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.SpatializationLevel;
+import android.media.SpatializerHeadTrackingMode;
+
+/**
+ * The ISpatializerHeadTrackingCallback interface is a callback associated to the
+ * Spatializer head tracking function. It can be registered via the ISpatializer
+ * interface to monitor head tracking related states.
+ * {@hide}
+ */
+oneway interface ISpatializerHeadTrackingCallback {
+    /** Called when the head tracking mode has changed
+     */
+    void onHeadTrackingModeChanged(SpatializerHeadTrackingMode mode);
+
+    /** Called when the head to stage pose has been updated.
+     * The head to stage pose is conveyed as a vector of 6 elements,
+     * where the first three are a translation vector and
+     * the last three are a rotation vector.
+     */
+    void onHeadToSoundStagePoseUpdated(in float[] headToStage);
+}
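
A minimal native implementation sketch of this callback using the Bn class the cpp backend generates; the listener class and its bookkeeping are illustrative only.

#include <android/media/BnSpatializerHeadTrackingCallback.h>
#include <android/media/SpatializerHeadTrackingMode.h>

#include <vector>

using android::binder::Status;
using android::media::BnSpatializerHeadTrackingCallback;
using android::media::SpatializerHeadTrackingMode;

// Both callbacks are oneway: they must not block and cannot report errors
// back to the audio server.
class HeadTrackingListener : public BnSpatializerHeadTrackingCallback {
  public:
    Status onHeadTrackingModeChanged(SpatializerHeadTrackingMode mode) override {
        mLastMode = mode;
        return Status::ok();
    }
    Status onHeadToSoundStagePoseUpdated(const std::vector<float>& headToStage) override {
        if (headToStage.size() == 6) {  // [tx, ty, tz, rx, ry, rz]
            mLastPose = headToStage;
        }
        return Status::ok();
    }

  private:
    SpatializerHeadTrackingMode mLastMode = SpatializerHeadTrackingMode::OTHER;
    std::vector<float> mLastPose;
};
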
diff --git a/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
index 2e55526..75ff8e9 100644
--- a/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
-import android.media.AudioSourceType;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -30,7 +30,7 @@
     int input;
     AudioConfig config;
     AudioDevice device;
-    AudioSourceType source;
+    AudioSource source;
     /** Bitmask, indexed by AudioInputFlag. */
     int flags;
 }
diff --git a/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl b/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
index b613ba5..41bc38a 100644
--- a/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
@@ -16,8 +16,8 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioDevice;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
index 1541948..90e7ea6 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
@@ -16,9 +16,9 @@
 
 package android.media;
 
-import android.media.AudioConfig;
-import android.media.AudioConfigBase;
 import android.media.AudioPort;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioConfigBase;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl b/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
index a051969..451a0bf 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioConfig;
+import android.media.audio.common.AudioConfig;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/PcmType.aidl b/media/libaudioclient/aidl/android/media/PcmType.aidl
deleted file mode 100644
index c9e327c..0000000
--- a/media/libaudioclient/aidl/android/media/PcmType.aidl
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The type of the encoding used for representing PCM samples. Only used as
- * part of 'AudioFormatDescription' structure.
- */
-@Backing(type="byte")
-enum PcmType {
-    /**
-     * "Default" value used when the type 'AudioFormatDescription' is "default".
-     */
-    DEFAULT = 0,
-    /**
-     * Unsigned 8-bit integer.
-     */
-    UINT_8_BIT = DEFAULT,
-    /**
-     * Signed 16-bit integer.
-     */
-    INT_16_BIT = 1,
-    /**
-     * Signed 32-bit integer.
-     */
-    INT_32_BIT = 2,
-    /**
-     * Q8.24 fixed point format.
-     */
-    FIXED_Q_8_24 = 3,
-    /**
-     * IEEE 754 32-bit floating point format.
-     */
-    FLOAT_32_BIT = 4,
-    /**
-     * Signed 24-bit integer.
-     */
-    INT_24_BIT = 5,
-}
diff --git a/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl b/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
index 3280460..7dad58d 100644
--- a/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioSourceType;
+import android.media.audio.common.AudioSource;
 
 /**
  * {@hide}
@@ -28,7 +28,7 @@
     int uid;
     /** Interpreted as audio_session_t. */
     int session;
-    AudioSourceType source;
+    AudioSource source;
     /** Interpreted as audio_port_handle_t. */
     int portId;
     boolean silenced;
diff --git a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
index b9708b2..4b540a9 100644
--- a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
+++ b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
@@ -16,7 +16,7 @@
 
 package android.media;
 
-import android.media.AudioDeviceDescription;
+import android.media.audio.common.AudioDeviceDescription;
 
 /**
  * {@hide}
diff --git a/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl b/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
index cef42bb..961c5a1 100644
--- a/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
+++ b/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
@@ -17,7 +17,7 @@
 package android.media;
 
 /**
- * The spatialization level or mode supported by the spatializer stage effect implementation.
+ * The spatialization level supported by the spatializer stage effect implementation.
  * Used by methods of the ISpatializer interface.
  * {@hide}
  */
diff --git a/media/libaudioclient/aidl/android/media/HeadTrackingMode.aidl b/media/libaudioclient/aidl/android/media/SpatializationMode.aidl
similarity index 61%
copy from media/libaudioclient/aidl/android/media/HeadTrackingMode.aidl
copy to media/libaudioclient/aidl/android/media/SpatializationMode.aidl
index d6cf410..5d8fd93 100644
--- a/media/libaudioclient/aidl/android/media/HeadTrackingMode.aidl
+++ b/media/libaudioclient/aidl/android/media/SpatializationMode.aidl
@@ -16,20 +16,15 @@
 
 package android.media;
 
-
 /**
- * The head tracking mode supported by the spatializer stage effect implementation.
+ * The spatialization mode supported by the spatializer stage effect implementation.
  * Used by methods of the ISpatializer interface.
  * {@hide}
  */
 @Backing(type="byte")
-enum HeadTrackingMode {
-    /** Head tracking is active in a mode not listed below (forward compatibility) */
-    OTHER = 0,
-    /** Head tracking is disabled */
-    DISABLED = 1,
-    /** Head tracking is performed relative to the real work environment */
-    RELATIVE_WORLD = 2,
-    /** Head tracking is performed relative to the device's screen */
-    RELATIVE_SCREEN = 3,
+enum SpatializationMode {
+    /** The spatializer supports binaural mode (over headphones type devices). */
+    SPATIALIZATER_BINAURAL = 0,
+    /** The spatializer supports transaural mode (over speaker type devices). */
+    SPATIALIZATER_TRANSAURAL = 1,
 }
diff --git a/media/libaudioclient/aidl/android/media/HeadTrackingMode.aidl b/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
similarity index 90%
rename from media/libaudioclient/aidl/android/media/HeadTrackingMode.aidl
rename to media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
index d6cf410..58e0f61 100644
--- a/media/libaudioclient/aidl/android/media/HeadTrackingMode.aidl
+++ b/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
@@ -18,12 +18,12 @@
 
 
 /**
- * The head tracking mode supported by the spatializer stage effect implementation.
+ * The head tracking mode supported by the spatializer effect implementation.
  * Used by methods of the ISpatializer interface.
  * {@hide}
  */
 @Backing(type="byte")
-enum HeadTrackingMode {
+enum SpatializerHeadTrackingMode {
     /** Head tracking is active in a mode not listed below (forward compatibility) */
     OTHER = 0,
     /** Head tracking is disabled */
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index b290aa8..969e3e6 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -46,6 +46,7 @@
     ],
     shared_libs: [
         "android.hardware.audio.common-util",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index 52868d9..80fe1ba 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -604,7 +604,7 @@
     request.config = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
-    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSource(source));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
 
     media::OpenInputResponse response{};
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 2cf127c..52ea91b 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -22,38 +22,43 @@
 #include <system/audio.h>
 
 #include <android/media/AudioAttributesInternal.h>
-#include <android/media/AudioChannelLayout.h>
 #include <android/media/AudioClient.h>
-#include <android/media/AudioConfig.h>
-#include <android/media/AudioConfigBase.h>
-#include <android/media/AudioDeviceDescription.h>
 #include <android/media/AudioDualMonoMode.h>
-#include <android/media/AudioEncapsulationMode.h>
-#include <android/media/AudioEncapsulationMetadataType.h>
-#include <android/media/AudioEncapsulationType.h>
 #include <android/media/AudioFlag.h>
-#include <android/media/AudioFormatDescription.h>
-#include <android/media/AudioGain.h>
-#include <android/media/AudioGainMode.h>
 #include <android/media/AudioInputFlags.h>
 #include <android/media/AudioIoConfigEvent.h>
 #include <android/media/AudioIoDescriptor.h>
-#include <android/media/AudioMixLatencyClass.h>
-#include <android/media/AudioMode.h>
 #include <android/media/AudioOutputFlags.h>
 #include <android/media/AudioPlaybackRate.h>
 #include <android/media/AudioPort.h>
-#include <android/media/AudioPortConfigType.h>
-#include <android/media/AudioPortDeviceExt.h>
-#include <android/media/AudioPortExt.h>
-#include <android/media/AudioPortMixExt.h>
-#include <android/media/AudioPortSessionExt.h>
-#include <android/media/AudioProfile.h>
+#include <android/media/AudioPortConfig.h>
+#include <android/media/AudioPortDeviceExtSys.h>
 #include <android/media/AudioTimestampInternal.h>
 #include <android/media/AudioUniqueIdUse.h>
 #include <android/media/EffectDescriptor.h>
-#include <android/media/ExtraAudioDescriptor.h>
 #include <android/media/TrackSecondaryOutputInfo.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioConfig.h>
+#include <android/media/audio/common/AudioConfigBase.h>
+#include <android/media/audio/common/AudioContentType.h>
+#include <android/media/audio/common/AudioDeviceDescription.h>
+#include <android/media/audio/common/AudioEncapsulationMetadataType.h>
+#include <android/media/audio/common/AudioEncapsulationMode.h>
+#include <android/media/audio/common/AudioEncapsulationType.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
+#include <android/media/audio/common/AudioGain.h>
+#include <android/media/audio/common/AudioGainConfig.h>
+#include <android/media/audio/common/AudioGainMode.h>
+#include <android/media/audio/common/AudioMode.h>
+#include <android/media/audio/common/AudioOffloadInfo.h>
+#include <android/media/audio/common/AudioPortExt.h>
+#include <android/media/audio/common/AudioPortMixExt.h>
+#include <android/media/audio/common/AudioProfile.h>
+#include <android/media/audio/common/AudioSource.h>
+#include <android/media/audio/common/AudioStandard.h>
+#include <android/media/audio/common/AudioUsage.h>
+#include <android/media/audio/common/AudioUuid.h>
+#include <android/media/audio/common/ExtraAudioDescriptor.h>
 
 #include <android/media/SharedFileRegion.h>
 #include <binder/IMemory.h>
@@ -89,13 +94,6 @@
 ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
 
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
-        media::AudioPortConfigType aidl);
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
-        int32_t legacy);
-
 ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
 
@@ -132,32 +130,48 @@
         audio_port_type_t legacy);
 
 ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
-        const media::AudioChannelLayout& aidl, bool isInput);
-ConversionResult<media::AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
-        audio_channel_mask_t legacy, bool isInput);
+        const media::audio::common::AudioChannelLayout& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioChannelLayout>
+legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
 
 ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
-        const media::AudioDeviceDescription& aidl);
-ConversionResult<media::AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
-        audio_devices_t legacy);
+        const media::audio::common::AudioDeviceDescription& aidl);
+ConversionResult<media::audio::common::AudioDeviceDescription>
+legacy2aidl_audio_devices_t_AudioDeviceDescription(audio_devices_t legacy);
+
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, char* legacyAddress);
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, String8* legacyAddress);
+status_t aidl2legacy_AudioDevice_audio_device(
+        const media::audio::common::AudioDevice& aidl,
+        audio_devices_t* legacyType, std::string* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const char* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+        audio_devices_t legacyType, const String8& legacyAddress);
 
 ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
-        const media::AudioFormatDescription& aidl);
-ConversionResult<media::AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
-        audio_format_t legacy);
+        const media::audio::common::AudioFormatDescription& aidl);
+ConversionResult<media::audio::common::AudioFormatDescription>
+legacy2aidl_audio_format_t_AudioFormatDescription(audio_format_t legacy);
 
 ConversionResult<audio_gain_mode_t>
-aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl);
-ConversionResult<media::AudioGainMode>
+aidl2legacy_AudioGainMode_audio_gain_mode_t(media::audio::common::AudioGainMode aidl);
+ConversionResult<media::audio::common::AudioGainMode>
 legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
 
 ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
-        const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type);
-ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
-        const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type);
+        const media::audio::common::AudioGainConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGainConfig>
+legacy2aidl_audio_gain_config_AudioGainConfig(const audio_gain_config& legacy, bool isInput);
 
 ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
         media::AudioInputFlags aidl);
@@ -185,35 +199,37 @@
         const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type);
 
 ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
-        const media::AudioPortConfigDeviceExt& aidl);
-ConversionResult<media::AudioPortConfigDeviceExt>
-legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
-        const audio_port_config_device_ext& legacy);
+aidl2legacy_AudioDevice_audio_port_config_device_ext(
+        const media::audio::common::AudioDevice& aidl,
+        const media::AudioPortDeviceExtSys& aidlDeviceExt);
+status_t legacy2aidl_audio_port_config_device_ext_AudioDevice(
+        const audio_port_config_device_ext& legacy,
+        media::audio::common::AudioDevice* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt);
 
 ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
-        media::AudioStreamType aidl);
-ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
-        audio_stream_type_t legacy);
+        media::audio::common::AudioStreamType aidl);
+ConversionResult<media::audio::common::AudioStreamType>
+legacy2aidl_audio_stream_type_t_AudioStreamType(audio_stream_type_t legacy);
 
-ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
-        media::AudioSourceType aidl);
-ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+        media::audio::common::AudioSource aidl);
+ConversionResult<media::audio::common::AudioSource>
+        legacy2aidl_audio_source_t_AudioSource(
         audio_source_t legacy);
 
 ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_session_t_int32_t(audio_session_t legacy);
 
-ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
-        const media::AudioPortConfigMixExt& aidl, media::AudioPortRole role);
-ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
-        const audio_port_config_mix_ext& legacy, audio_port_role_t role);
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
+        const media::audio::common::AudioPortMixExt& aidl, media::AudioPortRole role,
+        const media::AudioPortMixExtSys& aidlMixExt);
+status_t legacy2aidl_AudioPortMixExt(
+        const audio_port_config_mix_ext& legacy, audio_port_role_t role,
+        media::audio::common::AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt);
 
 ConversionResult<audio_port_config_session_ext>
-aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
-        const media::AudioPortConfigSessionExt& aidl);
-ConversionResult<media::AudioPortConfigSessionExt>
-legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+aidl2legacy_int32_t_audio_port_config_session_ext(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
         const audio_port_config_session_ext& legacy);
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
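
The device-extension conversion now fans out into two AIDL pieces instead of one; a hedged sketch of the new call shape declared above (the wrapper name is illustrative):

#include <media/AidlConversion.h>

// audio_port_config_device_ext now maps onto the common AudioDevice (shared
// with the HAL) plus the framework-only AudioPortDeviceExtSys, both filled by
// a single status_t-returning helper.
android::status_t splitDeviceExt(const audio_port_config_device_ext& legacy,
                                 android::media::audio::common::AudioDevice* device,
                                 android::media::AudioPortDeviceExtSys* deviceExtSys) {
    return android::legacy2aidl_audio_port_config_device_ext_AudioDevice(
            legacy, device, deviceExtSys);
}
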
@@ -237,13 +253,14 @@
         const AudioClient& legacy);
 
 ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl);
-ConversionResult<media::AudioContentType>
+aidl2legacy_AudioContentType_audio_content_type_t(
+        media::audio::common::AudioContentType aidl);
+ConversionResult<media::audio::common::AudioContentType>
 legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy);
 
 ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::AudioUsage aidl);
-ConversionResult<media::AudioUsage>
+aidl2legacy_AudioUsage_audio_usage_t(media::audio::common::AudioUsage aidl);
+ConversionResult<media::audio::common::AudioUsage>
 legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy);
 
 ConversionResult<audio_flags_mask_t>
@@ -262,23 +279,26 @@
 legacy2aidl_audio_attributes_t_AudioAttributesInternal(const audio_attributes_t& legacy);
 
 ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(media::AudioEncapsulationMode aidl);
-ConversionResult<media::AudioEncapsulationMode>
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(
+        media::audio::common::AudioEncapsulationMode aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMode>
 legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy);
 
 ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const media::AudioOffloadInfo& aidl);
-ConversionResult<media::AudioOffloadInfo>
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(
+        const media::audio::common::AudioOffloadInfo& aidl);
+ConversionResult<media::audio::common::AudioOffloadInfo>
 legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy);
 
 ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl, bool isInput);
-ConversionResult<media::AudioConfig>
+aidl2legacy_AudioConfig_audio_config_t(const media::audio::common::AudioConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfig>
 legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput);
 
 ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl, bool isInput);
-ConversionResult<media::AudioConfigBase>
+aidl2legacy_AudioConfigBase_audio_config_base_t(
+        const media::audio::common::AudioConfigBase& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfigBase>
 legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput);
 
 ConversionResult<sp<IMemory>>
@@ -297,8 +317,8 @@
 legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy);
 
 ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl);
-ConversionResult<media::AudioUuid>
+aidl2legacy_AudioUuid_audio_uuid_t(const media::audio::common::AudioUuid& aidl);
+ConversionResult<media::audio::common::AudioUuid>
 legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy);
 
 ConversionResult<effect_descriptor_t>
@@ -308,8 +328,8 @@
 
 ConversionResult<audio_encapsulation_metadata_type_t>
 aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
-        media::AudioEncapsulationMetadataType aidl);
-ConversionResult<media::AudioEncapsulationMetadataType>
+        media::audio::common::AudioEncapsulationMetadataType aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMetadataType>
 legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
         audio_encapsulation_metadata_type_t legacy);
 
@@ -323,37 +343,38 @@
 ConversionResult<int32_t>
 legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy);
 
-ConversionResult<audio_mix_latency_class_t>
-aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
-        media::AudioMixLatencyClass aidl);
-ConversionResult<media::AudioMixLatencyClass>
-legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
-        audio_mix_latency_class_t legacy);
-
 ConversionResult<audio_port_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl);
-ConversionResult<media::AudioPortDeviceExt>
-legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy);
+aidl2legacy_AudioDevice_audio_port_device_ext(
+        const media::audio::common::AudioDevice& aidl,
+        const media::AudioPortDeviceExtSys& aidlDeviceExt);
+status_t legacy2aidl_audio_port_device_ext_AudioDevice(
+        const audio_port_device_ext& legacy,
+        media::audio::common::AudioDevice* aidl,
+        media::AudioPortDeviceExtSys* aidlDeviceExt);
 
 ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl);
-ConversionResult<media::AudioPortMixExt>
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy);
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+        const media::audio::common::AudioPortMixExt& aidl,
+        const media::AudioPortMixExtSys& aidlMixExt);
+status_t legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+        const audio_port_mix_ext& legacy,
+        media::audio::common::AudioPortMixExt* aidl,
+        media::AudioPortMixExtSys* aidlMixExt);
 
 ConversionResult<audio_port_session_ext>
-aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl);
-ConversionResult<media::AudioPortSessionExt>
-legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy);
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl);
+ConversionResult<int32_t>
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy);
 
 ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl, bool isInput);
-ConversionResult<media::AudioProfile>
+aidl2legacy_AudioProfile_audio_profile(
+        const media::audio::common::AudioProfile& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioProfile>
 legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput);
 
 ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl);
-// The AIDL structure provides a flag for direction indication while the legacy type doesn't.
-ConversionResult<media::AudioGain>
+aidl2legacy_AudioGain_audio_gain(const media::audio::common::AudioGain& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGain>
 legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput);
 
 ConversionResult<audio_port_v7>
@@ -362,8 +383,8 @@
 legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
 
 ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl);
-ConversionResult<media::AudioMode>
+aidl2legacy_AudioMode_audio_mode_t(media::audio::common::AudioMode aidl);
+ConversionResult<media::audio::common::AudioMode>
 legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy);
 
 ConversionResult<audio_unique_id_use_t>
@@ -397,21 +418,21 @@
 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(const audio_playback_rate_t& legacy);
 
 ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::AudioStandard aidl);
-ConversionResult<media::AudioStandard>
+aidl2legacy_AudioStandard_audio_standard_t(media::audio::common::AudioStandard aidl);
+ConversionResult<media::audio::common::AudioStandard>
 legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy);
 
 ConversionResult<audio_extra_audio_descriptor>
 aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
-        const media::ExtraAudioDescriptor& aidl);
-ConversionResult<media::ExtraAudioDescriptor>
+        const media::audio::common::ExtraAudioDescriptor& aidl);
+ConversionResult<media::audio::common::ExtraAudioDescriptor>
 legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
         const audio_extra_audio_descriptor& legacy);
 
 ConversionResult<audio_encapsulation_type_t>
 aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
-        const media::AudioEncapsulationType& aidl);
-ConversionResult<media::AudioEncapsulationType>
+        const media::audio::common::AudioEncapsulationType& aidl);
+ConversionResult<media::audio::common::AudioEncapsulationType>
 legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
         const audio_encapsulation_type_t & legacy);
 
diff --git a/media/libaudioclient/include/media/AidlConversionUtil.h b/media/libaudioclient/include/media/AidlConversionUtil.h
index c1a2be3..dfabd55 100644
--- a/media/libaudioclient/include/media/AidlConversionUtil.h
+++ b/media/libaudioclient/include/media/AidlConversionUtil.h
@@ -41,6 +41,9 @@
 #define RETURN_IF_ERROR(result) \
     if (status_t _tmp = (result); _tmp != OK) return base::unexpected(_tmp);
 
+#define RETURN_STATUS_IF_ERROR(result) \
+    if (status_t _tmp = (result); _tmp != OK) return _tmp;
+
 #define VALUE_OR_RETURN_STATUS(x)           \
     ({                                      \
        auto _tmp = (x);                     \
@@ -119,6 +122,47 @@
     return output;
 }
 
+/**
+ * A generic template that helps to "zip" two input containers of the same size
+ * into a single output container of converted elements. The conversion function
+ * must therefore accept two arguments.
+ */
+template<typename OutputContainer, typename InputContainer1,
+        typename InputContainer2, typename Func>
+ConversionResult<OutputContainer>
+convertContainers(const InputContainer1& input1, const InputContainer2& input2,
+        const Func& itemConversion) {
+    auto iter2 = input2.begin();
+    OutputContainer output;
+    auto ins = std::inserter(output, output.begin());
+    for (const auto& item1 : input1) {
+        RETURN_IF_ERROR(iter2 != input2.end() ? OK : BAD_VALUE);
+        *ins = VALUE_OR_RETURN(itemConversion(item1, *iter2++));
+    }
+    return output;
+}
+
+/**
+ * A generic template that helps to "unzip" an input container into a pair of
+ * output containers by applying a per-element conversion. The conversion
+ * function must emit a pair of elements.
+ */
+template<typename OutputContainer1, typename OutputContainer2,
+        typename InputContainer, typename Func>
+ConversionResult<std::pair<OutputContainer1, OutputContainer2>>
+convertContainerSplit(const InputContainer& input, const Func& itemConversion) {
+    OutputContainer1 output1;
+    OutputContainer2 output2;
+    auto ins1 = std::inserter(output1, output1.begin());
+    auto ins2 = std::inserter(output2, output2.begin());
+    for (const auto& item : input) {
+        auto out_pair = VALUE_OR_RETURN(itemConversion(item));
+        *ins1 = out_pair.first;
+        *ins2 = out_pair.second;
+    }
+    return std::make_pair(output1, output2);
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // The code below establishes:
 // IntegralTypeOf<T>, which works for either integral types (in which case it evaluates to T), or
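For orientation, a minimal usage sketch (not part of the patch) of the two container helpers added above. The int/char/string element types and the zipItem/unzipItem conversions are hypothetical; only convertContainers and convertContainerSplit come from this change.

#include <string>
#include <utility>
#include <vector>

#include <media/AidlConversionUtil.h>  // convertContainers, convertContainerSplit
#include <utils/Errors.h>

namespace android {

// Hypothetical per-element conversions, used only to show the calling convention.
ConversionResult<std::string> zipItem(int value, char tag) {
    return std::to_string(value) + tag;               // "zip": (int, char) -> std::string
}

ConversionResult<std::pair<int, char>> unzipItem(const std::string& s) {
    if (s.empty()) return base::unexpected(BAD_VALUE);
    return std::make_pair(static_cast<int>(s.size()), s.front());  // "unzip": string -> (int, char)
}

// Zips two parallel containers into one; fails with BAD_VALUE if 'tags' runs out first.
ConversionResult<std::vector<std::string>> zipExample(
        const std::vector<int>& values, const std::vector<char>& tags) {
    return convertContainers<std::vector<std::string>>(values, tags, zipItem);
}

// Unzips one container into a pair of containers.
ConversionResult<std::pair<std::vector<int>, std::vector<char>>> unzipExample(
        const std::vector<std::string>& items) {
    return convertContainerSplit<std::vector<int>, std::vector<char>>(items, unzipItem);
}

}  // namespace android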
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index bd341e3..862a0f9 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -19,9 +19,9 @@
 
 #include <functional>
 
-#include <android/media/AudioChannelLayout.h>
-#include <android/media/AudioDeviceDescription.h>
-#include <android/media/AudioFormatDescription.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioDeviceDescription.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
 #include <binder/Parcelable.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
@@ -42,10 +42,11 @@
 // possibility of processing types belonging to different versions of the type,
 // e.g. a HAL may be using a previous version of the AIDL interface.
 
-template<> struct hash<android::media::AudioChannelLayout>
+template<> struct hash<android::media::audio::common::AudioChannelLayout>
 {
-    std::size_t operator()(const android::media::AudioChannelLayout& acl) const noexcept {
-        using Tag = android::media::AudioChannelLayout::Tag;
+    std::size_t operator()(
+            const android::media::audio::common::AudioChannelLayout& acl) const noexcept {
+        using Tag = android::media::audio::common::AudioChannelLayout::Tag;
         const size_t seed = std::hash<Tag>{}(acl.getTag());
         switch (acl.getTag()) {
             case Tag::none:
@@ -63,22 +64,24 @@
     }
 };
 
-template<> struct hash<android::media::AudioDeviceDescription>
+template<> struct hash<android::media::audio::common::AudioDeviceDescription>
 {
-    std::size_t operator()(const android::media::AudioDeviceDescription& add) const noexcept {
+    std::size_t operator()(
+            const android::media::audio::common::AudioDeviceDescription& add) const noexcept {
         return hash_combine(
-                std::hash<android::media::AudioDeviceType>{}(add.type),
+                std::hash<android::media::audio::common::AudioDeviceType>{}(add.type),
                 std::hash<std::string>{}(add.connection));
     }
 };
 
-template<> struct hash<android::media::AudioFormatDescription>
+template<> struct hash<android::media::audio::common::AudioFormatDescription>
 {
-    std::size_t operator()(const android::media::AudioFormatDescription& afd) const noexcept {
+    std::size_t operator()(
+            const android::media::audio::common::AudioFormatDescription& afd) const noexcept {
         return hash_combine(
-                std::hash<android::media::AudioFormatType>{}(afd.type),
+                std::hash<android::media::audio::common::AudioFormatType>{}(afd.type),
                 hash_combine(
-                        std::hash<android::media::PcmType>{}(afd.pcm),
+                        std::hash<android::media::audio::common::PcmType>{}(afd.pcm),
                         std::hash<std::string>{}(afd.encoding)));
     }
 };
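As an aside (not part of the patch), the std::hash specializations above are what let the relocated AIDL types serve as keys of unordered containers. A minimal sketch, assuming the AIDL-generated types provide operator== as usual; the isDefaultOutputDevice helper is hypothetical.

#include <unordered_set>

#include <media/AudioCommonTypes.h>  // brings in the std::hash specializations above

using android::media::audio::common::AudioDeviceDescription;
using android::media::audio::common::AudioDeviceType;

// Returns true if 'candidate' matches one of the listed device descriptions.
bool isDefaultOutputDevice(const AudioDeviceDescription& candidate) {
    AudioDeviceDescription defaultOut;
    defaultOut.type = AudioDeviceType::OUT_DEFAULT;
    // std::hash<AudioDeviceDescription> (specialized above) combines 'type' and 'connection'.
    const std::unordered_set<AudioDeviceDescription> defaults = {
            AudioDeviceDescription{}, defaultOut};
    return defaults.count(candidate) > 0;
}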
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 50ab42a..45ee8bd 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -19,12 +19,17 @@
 
 #include <sys/types.h>
 
+#include <set>
+#include <vector>
+
+#include <android/content/AttributionSourceState.h>
 #include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerClient.h>
 #include <android/media/BnAudioPolicyServiceClient.h>
 #include <android/media/INativeSpatializerCallback.h>
 #include <android/media/ISpatializer.h>
-#include <android/content/AttributionSourceState.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/AidlConversionUtil.h>
 #include <media/AudioContainers.h>
 #include <media/AudioDeviceTypeAddr.h>
@@ -33,13 +38,11 @@
 #include <media/AudioVolumeGroup.h>
 #include <media/AudioIoDescriptor.h>
 #include <media/MicrophoneInfo.h>
-#include <set>
 #include <system/audio.h>
 #include <system/audio_effect.h>
 #include <system/audio_policy.h>
 #include <utils/Errors.h>
 #include <utils/Mutex.h>
-#include <vector>
 
 using android::content::AttributionSourceState;
 
@@ -613,6 +616,14 @@
 
     static status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
 
+    static status_t getMmapPolicyInfo(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+    static int32_t getAAudioMixerBurstCount();
+
+    static int32_t getAAudioHardwareBurstMinUsec();
+
 private:
 
     class AudioFlingerClient: public IBinder::DeathRecipient, public media::BnAudioFlingerClient
@@ -689,12 +700,12 @@
         binder::Status onRecordingConfigurationUpdate(
                 int32_t event,
                 const media::RecordClientInfo& clientInfo,
-                const media::AudioConfigBase& clientConfig,
+                const media::audio::common::AudioConfigBase& clientConfig,
                 const std::vector<media::EffectDescriptor>& clientEffects,
-                const media::AudioConfigBase& deviceConfig,
+                const media::audio::common::AudioConfigBase& deviceConfig,
                 const std::vector<media::EffectDescriptor>& effects,
                 int32_t patchHandle,
-                media::AudioSourceType source) override;
+                media::audio::common::AudioSource source) override;
         binder::Status onRoutingUpdated();
 
     private:
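A minimal caller-side sketch (not part of the patch) of the new MMAP policy query; the mmapPolicyAdvertised helper is hypothetical and the use of AudioMMapPolicyType::DEFAULT as the queried policy type is an assumption for illustration.

#include <vector>

#include <media/AudioSystem.h>

using android::media::audio::common::AudioMMapPolicyInfo;
using android::media::audio::common::AudioMMapPolicyType;

// Returns true if audioflinger reports at least one MMAP policy entry.
bool mmapPolicyAdvertised() {
    std::vector<AudioMMapPolicyInfo> policyInfos;
    const android::status_t status = android::AudioSystem::getMmapPolicyInfo(
            AudioMMapPolicyType::DEFAULT /* assumed enumerator */, &policyInfos);
    return status == android::OK && !policyInfos.empty();
}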
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index a2cd12f..5ba9f91 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -28,6 +28,7 @@
 #include <utils/threads.h>
 #include <android/content/AttributionSourceState.h>
 
+#include <chrono>
 #include <string>
 
 #include "android/media/BnAudioTrackCallback.h"
@@ -510,6 +511,14 @@
      */
             void        pause();
 
+    /* Pause and wait (with timeout) for the audio track to ramp to silence.
+     *
+     * \param timeout is the time limit to wait before returning.
+     *                A negative number is treated as 0.
+     * \return true if the track is ramped to silence, false if the timeout occurred.
+     */
+            bool        pauseAndWait(const std::chrono::milliseconds& timeout);
+
     /* Set volume for this track, mostly used for games' sound effects
      * left and right volumes. Levels must be >= 0.0 and <= 1.0.
      * This is the older API.  New applications should use setVolume(float) when possible.
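A short usage sketch (not part of the patch) for the new pauseAndWait(); the 50 ms budget, the log tag, and the pauseQuietly helper are arbitrary examples.

#define LOG_TAG "PauseAndWaitSketch"

#include <chrono>

#include <media/AudioTrack.h>
#include <utils/Log.h>

// Pauses the track and gives it up to 50 ms to ramp to silence.
void pauseQuietly(const android::sp<android::AudioTrack>& track) {
    using namespace std::chrono_literals;
    if (!track->pauseAndWait(50ms)) {
        ALOGW("track did not reach silence before the 50 ms timeout");
    }
}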
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index a2ce145..485648d 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -37,10 +37,12 @@
 #include <string>
 #include <vector>
 
+#include <android/content/AttributionSourceState.h>
 #include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerService.h>
 #include <android/media/BpAudioFlingerService.h>
-#include <android/content/AttributionSourceState.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include "android/media/CreateEffectRequest.h"
 #include "android/media/CreateEffectResponse.h"
 #include "android/media/CreateRecordRequest.h"
@@ -348,6 +350,14 @@
 
     virtual status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
+
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
+
+    virtual int32_t getAAudioMixerBurstCount() = 0;
+
+    virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
 };
 
 /**
@@ -445,6 +455,14 @@
     status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs) override;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) override;
+
+    int32_t getAAudioMixerBurstCount() override;
+
+    int32_t getAAudioHardwareBurstMinUsec() override;
+
 private:
     const sp<media::IAudioFlingerService> mDelegate;
 };
@@ -529,6 +547,9 @@
             SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
             SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
             UPDATE_SECONDARY_OUTPUTS = media::BnAudioFlingerService::TRANSACTION_updateSecondaryOutputs,
+            GET_MMAP_POLICY_INFOS = media::BnAudioFlingerService::TRANSACTION_getMmapPolicyInfos,
+            GET_AAUDIO_MIXER_BURST_COUNT = media::BnAudioFlingerService::TRANSACTION_getAAudioMixerBurstCount,
+            GET_AAUDIO_HARDWARE_BURST_MIN_USEC = media::BnAudioFlingerService::TRANSACTION_getAAudioHardwareBurstMinUsec,
         };
 
         /**
@@ -570,7 +591,8 @@
     Status createRecord(const media::CreateRecordRequest& request,
                         media::CreateRecordResponse* _aidl_return) override;
     Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
-    Status format(int32_t output, media::AudioFormatDescription* _aidl_return) override;
+    Status format(int32_t output,
+                  media::audio::common::AudioFormatDescription* _aidl_return) override;
     Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
     Status latency(int32_t output, int32_t* _aidl_return) override;
     Status setMasterVolume(float value) override;
@@ -579,12 +601,13 @@
     Status masterMute(bool* _aidl_return) override;
     Status setMasterBalance(float balance) override;
     Status getMasterBalance(float* _aidl_return) override;
-    Status setStreamVolume(media::AudioStreamType stream, float value, int32_t output) override;
-    Status setStreamMute(media::AudioStreamType stream, bool muted) override;
-    Status
-    streamVolume(media::AudioStreamType stream, int32_t output, float* _aidl_return) override;
-    Status streamMute(media::AudioStreamType stream, bool* _aidl_return) override;
-    Status setMode(media::AudioMode mode) override;
+    Status setStreamVolume(media::audio::common::AudioStreamType stream,
+                           float value, int32_t output) override;
+    Status setStreamMute(media::audio::common::AudioStreamType stream, bool muted) override;
+    Status streamVolume(media::audio::common::AudioStreamType stream,
+                        int32_t output, float* _aidl_return) override;
+    Status streamMute(media::audio::common::AudioStreamType stream, bool* _aidl_return) override;
+    Status setMode(media::audio::common::AudioMode mode) override;
     Status setMicMute(bool state) override;
     Status getMicMute(bool* _aidl_return) override;
     Status setRecordSilenced(int32_t portId, bool silenced) override;
@@ -592,8 +615,9 @@
     Status
     getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
     Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
-    Status getInputBufferSize(int32_t sampleRate, const media::AudioFormatDescription& format,
-                              const media::AudioChannelLayout& channelMask,
+    Status getInputBufferSize(int32_t sampleRate,
+                              const media::audio::common::AudioFormatDescription& format,
+                              const media::audio::common::AudioChannelLayout& channelMask,
                               int64_t* _aidl_return) override;
     Status openOutput(const media::OpenOutputRequest& request,
                       media::OpenOutputResponse* _aidl_return) override;
@@ -604,7 +628,7 @@
     Status openInput(const media::OpenInputRequest& request,
                      media::OpenInputResponse* _aidl_return) override;
     Status closeInput(int32_t input) override;
-    Status invalidateStream(media::AudioStreamType stream) override;
+    Status invalidateStream(media::audio::common::AudioStreamType stream) override;
     Status setVoiceVolume(float volume) override;
     Status getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) override;
     Status getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) override;
@@ -613,7 +637,8 @@
     Status releaseAudioSessionId(int32_t audioSession, int32_t pid) override;
     Status queryNumberEffects(int32_t* _aidl_return) override;
     Status queryEffect(int32_t index, media::EffectDescriptor* _aidl_return) override;
-    Status getEffectDescriptor(const media::AudioUuid& effectUUID, const media::AudioUuid& typeUUID,
+    Status getEffectDescriptor(const media::audio::common::AudioUuid& effectUUID,
+                               const media::audio::common::AudioUuid& typeUUID,
                                int32_t preferredTypeFlag,
                                media::EffectDescriptor* _aidl_return) override;
     Status createEffect(const media::CreateEffectRequest& request,
@@ -639,6 +664,11 @@
     Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
     Status updateSecondaryOutputs(
             const std::vector<media::TrackSecondaryOutputInfo>& trackSecondaryOutputInfos) override;
+    Status getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *_aidl_return) override;
+    Status getAAudioMixerBurstCount(int32_t* _aidl_return) override;
+    Status getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) override;
 
 private:
     const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
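For reference only, one plausible client-side forwarding of the new AAudio query to the AIDL delegate; the queryAAudioMixerBurstCount helper is hypothetical, and the proxy signature is inferred from the server adapter declarations above rather than taken from the patch's IAudioFlinger.cpp.

#include <media/IAudioFlinger.h>

// Forwards to the AIDL service and falls back to 0 on a binder failure.
int32_t queryAAudioMixerBurstCount(
        const android::sp<android::media::IAudioFlingerService>& delegate) {
    int32_t burstCount = 0;
    if (!delegate->getAAudioMixerBurstCount(&burstCount).isOk()) {
        return 0;
    }
    return burstCount;
}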
diff --git a/media/libaudioclient/include/media/PolicyAidlConversion.h b/media/libaudioclient/include/media/PolicyAidlConversion.h
index 873f27a..2296fdb 100644
--- a/media/libaudioclient/include/media/PolicyAidlConversion.h
+++ b/media/libaudioclient/include/media/PolicyAidlConversion.h
@@ -23,10 +23,8 @@
 
 #include <android/media/AudioMix.h>
 #include <android/media/AudioMixCallbackFlag.h>
-#include <android/media/AudioMixLatencyClass.h>
 #include <android/media/AudioMixRouteFlag.h>
 #include <android/media/AudioMixType.h>
-#include <android/media/AudioMode.h>
 #include <android/media/AudioOffloadMode.h>
 #include <android/media/AudioPolicyForceUse.h>
 #include <android/media/AudioPolicyForcedConfig.h>
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 2279244..891293e 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -33,6 +33,7 @@
         "libutils",
     ],
     static_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "libstagefright_foundation",
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
index 424b387..f963e68 100644
--- a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -22,116 +22,123 @@
 using namespace android;
 using namespace android::aidl_utils;
 
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::PcmType;
+
 namespace {
 
 template<typename T> size_t hash(const T& t) {
     return std::hash<T>{}(t);
 }
 
-media::AudioChannelLayout make_ACL_None() {
-    return media::AudioChannelLayout{};
+AudioChannelLayout make_ACL_None() {
+    return AudioChannelLayout{};
 }
 
-media::AudioChannelLayout make_ACL_Invalid() {
-    return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::invalid>(0);
+AudioChannelLayout make_ACL_Invalid() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::invalid>(0);
 }
 
-media::AudioChannelLayout make_ACL_Stereo() {
-    return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>(
-            media::AudioChannelLayout::LAYOUT_STEREO);
+AudioChannelLayout make_ACL_Stereo() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+            AudioChannelLayout::LAYOUT_STEREO);
 }
 
-media::AudioChannelLayout make_ACL_ChannelIndex2() {
-    return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::indexMask>(
-            media::AudioChannelLayout::INDEX_MASK_2);
+AudioChannelLayout make_ACL_ChannelIndex2() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(
+            AudioChannelLayout::INDEX_MASK_2);
 }
 
-media::AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
+AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
     // Use channels 1 and 3.
-    return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::indexMask>(5);
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(5);
 }
 
-media::AudioChannelLayout make_ACL_VoiceCall() {
-    return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::voiceMask>(
-            media::AudioChannelLayout::VOICE_CALL_MONO);
+AudioChannelLayout make_ACL_VoiceCall() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(
+            AudioChannelLayout::VOICE_CALL_MONO);
 }
 
-media::AudioDeviceDescription make_AudioDeviceDescription(media::AudioDeviceType type,
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
         const std::string& connection = "") {
-    media::AudioDeviceDescription result;
+    AudioDeviceDescription result;
     result.type = type;
     result.connection = connection;
     return result;
 }
 
-media::AudioDeviceDescription make_ADD_None() {
-    return media::AudioDeviceDescription{};
+AudioDeviceDescription make_ADD_None() {
+    return AudioDeviceDescription{};
 }
 
-media::AudioDeviceDescription make_ADD_DefaultIn() {
-    return make_AudioDeviceDescription(media::AudioDeviceType::IN_DEFAULT);
+AudioDeviceDescription make_ADD_DefaultIn() {
+    return make_AudioDeviceDescription(AudioDeviceType::IN_DEFAULT);
 }
 
-media::AudioDeviceDescription make_ADD_DefaultOut() {
-    return make_AudioDeviceDescription(media::AudioDeviceType::OUT_DEFAULT);
+AudioDeviceDescription make_ADD_DefaultOut() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_DEFAULT);
 }
 
-media::AudioDeviceDescription make_ADD_WiredHeadset() {
-    return make_AudioDeviceDescription(media::AudioDeviceType::OUT_HEADSET,
-            media::AudioDeviceDescription::CONNECTION_ANALOG());
+AudioDeviceDescription make_ADD_WiredHeadset() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
+            AudioDeviceDescription::CONNECTION_ANALOG());
 }
 
-media::AudioDeviceDescription make_ADD_BtScoHeadset() {
-    return make_AudioDeviceDescription(media::AudioDeviceType::OUT_HEADSET,
-            media::AudioDeviceDescription::CONNECTION_BT_SCO());
+AudioDeviceDescription make_ADD_BtScoHeadset() {
+    return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
+            AudioDeviceDescription::CONNECTION_BT_SCO());
 }
 
-media::AudioFormatDescription make_AudioFormatDescription(media::AudioFormatType type) {
-    media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+    AudioFormatDescription result;
     result.type = type;
     return result;
 }
 
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType pcm) {
-    auto result = make_AudioFormatDescription(media::AudioFormatType::PCM);
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+    auto result = make_AudioFormatDescription(AudioFormatType::PCM);
     result.pcm = pcm;
     return result;
 }
 
-media::AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
-    media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+    AudioFormatDescription result;
     result.encoding = encoding;
     return result;
 }
 
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType transport,
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
         const std::string& encoding) {
     auto result = make_AudioFormatDescription(encoding);
     result.pcm = transport;
     return result;
 }
 
-media::AudioFormatDescription make_AFD_Default() {
-    return media::AudioFormatDescription{};
+AudioFormatDescription make_AFD_Default() {
+    return AudioFormatDescription{};
 }
 
-media::AudioFormatDescription make_AFD_Invalid() {
-    return make_AudioFormatDescription(media::AudioFormatType::SYS_RESERVED_INVALID);
+AudioFormatDescription make_AFD_Invalid() {
+    return make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID);
 }
 
-media::AudioFormatDescription make_AFD_Pcm16Bit() {
-    return make_AudioFormatDescription(media::PcmType::INT_16_BIT);
+AudioFormatDescription make_AFD_Pcm16Bit() {
+    return make_AudioFormatDescription(PcmType::INT_16_BIT);
 }
 
-media::AudioFormatDescription make_AFD_Bitstream() {
+AudioFormatDescription make_AFD_Bitstream() {
     return make_AudioFormatDescription("example");
 }
 
-media::AudioFormatDescription make_AFD_Encap() {
-    return make_AudioFormatDescription(media::PcmType::INT_16_BIT, "example.encap");
+AudioFormatDescription make_AFD_Encap() {
+    return make_AudioFormatDescription(PcmType::INT_16_BIT, "example.encap");
 }
 
-media::AudioFormatDescription make_AFD_Encap_with_Enc() {
+AudioFormatDescription make_AFD_Encap_with_Enc() {
     auto afd = make_AFD_Encap();
     afd.encoding += "+example";
     return afd;
@@ -160,24 +167,24 @@
 };
 
 TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
-    verifyHashIdentity<media::AudioChannelLayout>({
+    verifyHashIdentity<AudioChannelLayout>({
             make_ACL_None, make_ACL_Invalid, make_ACL_Stereo, make_ACL_ChannelIndex2,
             make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
 }
 
 TEST_F(HashIdentityTest, AudioDeviceDescriptionHashIdentity) {
-    verifyHashIdentity<media::AudioDeviceDescription>({
+    verifyHashIdentity<AudioDeviceDescription>({
             make_ADD_None, make_ADD_DefaultIn, make_ADD_DefaultOut, make_ADD_WiredHeadset,
             make_ADD_BtScoHeadset});
 }
 
 TEST_F(HashIdentityTest, AudioFormatDescriptionHashIdentity) {
-    verifyHashIdentity<media::AudioFormatDescription>({
+    verifyHashIdentity<AudioFormatDescription>({
             make_AFD_Default, make_AFD_Invalid, make_AFD_Pcm16Bit, make_AFD_Bitstream,
             make_AFD_Encap, make_AFD_Encap_with_Enc});
 }
 
-using ChannelLayoutParam = std::tuple<media::AudioChannelLayout, bool /*isInput*/>;
+using ChannelLayoutParam = std::tuple<AudioChannelLayout, bool /*isInput*/>;
 class AudioChannelLayoutRoundTripTest :
         public testing::TestWithParam<ChannelLayoutParam> {};
 TEST_P(AudioChannelLayoutRoundTripTest, Aidl2Legacy2Aidl) {
@@ -192,7 +199,7 @@
 INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip,
         AudioChannelLayoutRoundTripTest,
         testing::Combine(
-                testing::Values(media::AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
+                testing::Values(AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
                         make_ACL_ChannelIndex2(), make_ACL_ChannelIndexArbitrary()),
                 testing::Values(false, true)));
 INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
@@ -201,7 +208,7 @@
         testing::Combine(testing::Values(make_ACL_VoiceCall()), testing::Values(true)));
 
 class AudioDeviceDescriptionRoundTripTest :
-        public testing::TestWithParam<media::AudioDeviceDescription> {};
+        public testing::TestWithParam<AudioDeviceDescription> {};
 TEST_P(AudioDeviceDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
     const auto initial = GetParam();
     auto conv = aidl2legacy_AudioDeviceDescription_audio_devices_t(initial);
@@ -212,11 +219,11 @@
 }
 INSTANTIATE_TEST_SUITE_P(AudioDeviceDescriptionRoundTrip,
         AudioDeviceDescriptionRoundTripTest,
-        testing::Values(media::AudioDeviceDescription{}, make_ADD_DefaultIn(),
+        testing::Values(AudioDeviceDescription{}, make_ADD_DefaultIn(),
                 make_ADD_DefaultOut(), make_ADD_WiredHeadset(), make_ADD_BtScoHeadset()));
 
 class AudioFormatDescriptionRoundTripTest :
-        public testing::TestWithParam<media::AudioFormatDescription> {};
+        public testing::TestWithParam<AudioFormatDescription> {};
 TEST_P(AudioFormatDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
     const auto initial = GetParam();
     auto conv = aidl2legacy_AudioFormatDescription_audio_format_t(initial);
@@ -227,4 +234,4 @@
 }
 INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip,
         AudioFormatDescriptionRoundTripTest,
-        testing::Values(make_AFD_Invalid(), media::AudioFormatDescription{}, make_AFD_Pcm16Bit()));
+        testing::Values(make_AFD_Invalid(), AudioFormatDescription{}, make_AFD_Pcm16Bit()));
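An illustrative extension (not part of the patch) showing the same Aidl2Legacy2Aidl round-trip pattern applied to the AudioUsage enum, whose conversion functions are declared in the AidlConversion.h changes above; the test name is hypothetical and AudioUsage::MEDIA is assumed to exist in the AIDL definition.

#include <gtest/gtest.h>

#include <media/AidlConversion.h>

using android::media::audio::common::AudioUsage;

TEST(AudioUsageRoundTripSketch, Aidl2Legacy2Aidl) {
    const auto initial = AudioUsage::MEDIA;  // assumed enumerator, for illustration
    auto conv = android::aidl2legacy_AudioUsage_audio_usage_t(initial);
    ASSERT_TRUE(conv.ok());
    auto convBack = android::legacy2aidl_audio_usage_t_AudioUsage(conv.value());
    ASSERT_TRUE(convBack.ok());
    EXPECT_EQ(initial, convBack.value());
}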
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index 3bef55b..727b86f 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -24,9 +24,11 @@
         "libmedia_helper_headers",
     ],
     static_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
     ],
     export_static_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
     ],
     host_supported: true,
@@ -52,6 +54,7 @@
     ],
 
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "libaudioutils",
@@ -63,6 +66,7 @@
     ],
 
     export_shared_lib_headers: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
     ],
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index 7f54474..26eea87 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -25,6 +25,9 @@
 
 namespace android {
 
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+
 namespace {
 
 static const std::string SUPPRESSED = "SUPPRESSED";
@@ -157,18 +160,16 @@
 }
 
 ConversionResult<AudioDeviceTypeAddr>
-aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl) {
-    audio_devices_t type = VALUE_OR_RETURN(
-            aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
-    return AudioDeviceTypeAddr(type, aidl.address);
+aidl2legacy_AudioDeviceTypeAddress(const AudioDevice& aidl) {
+    audio_devices_t type;
+    std::string address;
+    RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(aidl, &type, &address));
+    return AudioDeviceTypeAddr(type, address);
 }
 
-ConversionResult<media::AudioDevice>
+ConversionResult<AudioDevice>
 legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy) {
-    media::AudioDevice aidl;
-    aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_AudioDeviceDescription(legacy.mType));
-    aidl.address = legacy.getAddress();
-    return aidl;
+    return legacy2aidl_audio_device_AudioDevice(legacy.mType, legacy.getAddress());
 }
 
 } // namespace android
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index ea0258a..1a8fbf0 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -24,22 +24,18 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
+#include <math.h>
+
 #include <algorithm>
 
 #include <android-base/stringprintf.h>
 #include <media/AudioGain.h>
 #include <utils/Log.h>
 
-#include <math.h>
-
 namespace android {
 
 AudioGain::AudioGain(int index, bool isInput)
-{
-    mIndex = index;
-    mIsInput = isInput;
-    memset(&mGain, 0, sizeof(struct audio_gain));
-}
+        : mIndex(index), mIsInput(isInput) {}
 
 void AudioGain::getDefaultConfig(struct audio_gain_config *config)
 {
@@ -123,53 +119,24 @@
            mGain.max_ramp_ms == other->mGain.max_ramp_ms;
 }
 
-status_t AudioGain::writeToParcel(android::Parcel *parcel) const {
-    media::AudioGain parcelable;
-    return writeToParcelable(&parcelable)
-        ?: parcelable.writeToParcel(parcel);
+ConversionResult<AudioGain::Aidl> AudioGain::toParcelable() const {
+    media::audio::common::AudioGain aidl = VALUE_OR_RETURN(
+            legacy2aidl_audio_gain_AudioGain(mGain, mIsInput));
+    media::AudioGainSys aidlSys;
+    aidlSys.index = VALUE_OR_RETURN(convertIntegral<int32_t>(mIndex));
+    aidlSys.isInput = mIsInput;
+    aidlSys.useForVolume = mUseForVolume;
+    return std::make_pair(aidl, aidlSys);
 }
 
-status_t AudioGain::writeToParcelable(media::AudioGain* parcelable) const {
-    parcelable->index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mIndex));
-    parcelable->isInput = mIsInput;
-    parcelable->useForVolume = mUseForVolume;
-    parcelable->mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
-    parcelable->channelMask = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
-                    mGain.channel_mask, mIsInput));
-    parcelable->minValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_value));
-    parcelable->maxValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_value));
-    parcelable->defaultValue = VALUE_OR_RETURN_STATUS(
-            convertIntegral<int32_t>(mGain.default_value));
-    parcelable->stepValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.step_value));
-    parcelable->minRampMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_ramp_ms));
-    parcelable->maxRampMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_ramp_ms));
-    return OK;
-}
-
-status_t AudioGain::readFromParcel(const android::Parcel *parcel) {
-    media::AudioGain parcelable;
-    return parcelable.readFromParcel(parcel)
-        ?: readFromParcelable(parcelable);
-}
-
-status_t AudioGain::readFromParcelable(const media::AudioGain& parcelable) {
-    mIndex = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.index));
-    mIsInput = parcelable.isInput;
-    mUseForVolume = parcelable.useForVolume;
-    mGain.mode = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.mode));
-    mGain.channel_mask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
-                    parcelable.channelMask, parcelable.isInput));
-    mGain.min_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.minValue));
-    mGain.max_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.maxValue));
-    mGain.default_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.defaultValue));
-    mGain.step_value = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.stepValue));
-    mGain.min_ramp_ms = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.minRampMs));
-    mGain.max_ramp_ms = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.maxRampMs));
-    return OK;
+ConversionResult<sp<AudioGain>> AudioGain::fromParcelable(const AudioGain::Aidl& aidl) {
+    const media::AudioGainSys& sys = aidl.second;
+    auto index = VALUE_OR_RETURN(convertIntegral<int>(sys.index));
+    sp<AudioGain> legacy = sp<AudioGain>::make(index, sys.isInput);
+    legacy->mGain = VALUE_OR_RETURN(
+            aidl2legacy_AudioGain_audio_gain(aidl.first, sys.isInput));
+    legacy->mUseForVolume = sys.useForVolume;
+    return legacy;
 }
 
 bool AudioGains::equals(const AudioGains &other) const
@@ -180,59 +147,30 @@
                       });
 }
 
-status_t AudioGains::writeToParcel(android::Parcel *parcel) const {
-    status_t status = NO_ERROR;
-    if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
-    for (const auto &audioGain : *this) {
-        if ((status = parcel->writeParcelable(*audioGain)) != NO_ERROR) {
-            break;
-        }
-    }
-    return status;
-}
-
-status_t AudioGains::readFromParcel(const android::Parcel *parcel) {
-    status_t status = NO_ERROR;
-    this->clear();
-    if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
-    for (size_t i = 0; i < this->size(); i++) {
-        this->at(i) = new AudioGain(0, false);
-        if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
-            this->clear();
-            break;
-        }
-    }
-    return status;
-}
-
 ConversionResult<sp<AudioGain>>
-aidl2legacy_AudioGain(const media::AudioGain& aidl) {
-    sp<AudioGain> legacy = new AudioGain(0, false);
-    status_t status = legacy->readFromParcelable(aidl);
-    if (status != OK) {
-        return base::unexpected(status);
-    }
-    return legacy;
+aidl2legacy_AudioGain(const AudioGain::Aidl& aidl) {
+    return AudioGain::fromParcelable(aidl);
 }
 
-ConversionResult<media::AudioGain>
+ConversionResult<AudioGain::Aidl>
 legacy2aidl_AudioGain(const sp<AudioGain>& legacy) {
-    media::AudioGain aidl;
-    status_t status = legacy->writeToParcelable(&aidl);
-    if (status != OK) {
-        return base::unexpected(status);
-    }
-    return aidl;
+    return legacy->toParcelable();
 }
 
 ConversionResult<AudioGains>
-aidl2legacy_AudioGains(const std::vector<media::AudioGain>& aidl) {
-    return convertContainer<AudioGains>(aidl, aidl2legacy_AudioGain);
+aidl2legacy_AudioGains(const AudioGains::Aidl& aidl) {
+    return convertContainers<AudioGains>(aidl.first, aidl.second,
+            [](const media::audio::common::AudioGain& g,
+               const media::AudioGainSys& gs) {
+                return aidl2legacy_AudioGain(std::make_pair(g, gs));
+            });
 }
 
-ConversionResult<std::vector<media::AudioGain>>
+ConversionResult<AudioGains::Aidl>
 legacy2aidl_AudioGains(const AudioGains& legacy) {
-    return convertContainer<std::vector<media::AudioGain>>(legacy, legacy2aidl_AudioGain);
+    return convertContainerSplit<
+            std::vector<media::audio::common::AudioGain>,
+            std::vector<media::AudioGainSys>>(legacy, legacy2aidl_AudioGain);
 }
 
 } // namespace android
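A small sketch (not part of the patch) of how the new pair-based AudioGains conversions compose for a round trip; the roundTripGains helper is hypothetical.

#include <media/AudioGain.h>

// Converts a legacy AudioGains list to the (hal, sys) vector pair and back.
android::status_t roundTripGains(const android::AudioGains& gains, android::AudioGains* out) {
    auto aidl = android::legacy2aidl_AudioGains(gains);   // AudioGains::Aidl: a pair of vectors
    if (!aidl.ok()) return aidl.error();
    auto back = android::aidl2legacy_AudioGains(aidl.value());
    if (!back.ok()) return back.error();
    *out = back.value();
    return android::OK;
}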
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index c70a6c2..099aff4 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -18,7 +18,6 @@
 #include <algorithm>
 #include <utility>
 
-#include <android/media/ExtraAudioDescriptor.h>
 #include <android-base/stringprintf.h>
 #include <media/AudioPort.h>
 #include <utils/Log.h>
@@ -207,13 +206,19 @@
 }
 
 status_t AudioPort::writeToParcelable(media::AudioPort* parcelable) const {
-    parcelable->name = mName;
-    parcelable->type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_type_t_AudioPortType(mType));
-    parcelable->role = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
-    parcelable->profiles = VALUE_OR_RETURN_STATUS(
+    parcelable->hal.name = mName;
+    parcelable->sys.type = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_port_type_t_AudioPortType(mType));
+    parcelable->sys.role = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
+    auto aidlProfiles = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioProfileVector(mProfiles, useInputChannelMask()));
-    parcelable->extraAudioDescriptors = mExtraAudioDescriptors;
-    parcelable->gains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
+    parcelable->hal.profiles = aidlProfiles.first;
+    parcelable->sys.profiles = aidlProfiles.second;
+    parcelable->hal.extraAudioDescriptors = mExtraAudioDescriptors;
+    auto aidlGains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
+    parcelable->hal.gains = aidlGains.first;
+    parcelable->sys.gains = aidlGains.second;
     return OK;
 }
 
@@ -224,13 +229,18 @@
 }
 
 status_t AudioPort::readFromParcelable(const media::AudioPort& parcelable) {
-    mName = parcelable.name;
-    mType = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortType_audio_port_type_t(parcelable.type));
-    mRole = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.role));
+    mName = parcelable.hal.name;
+    mType = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioPortType_audio_port_type_t(parcelable.sys.type));
+    mRole = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.sys.role));
     mProfiles = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioProfileVector(parcelable.profiles, useInputChannelMask()));
-    mExtraAudioDescriptors = parcelable.extraAudioDescriptors;
-    mGains = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioGains(parcelable.gains));
+            aidl2legacy_AudioProfileVector(
+                    std::make_pair(parcelable.hal.profiles, parcelable.sys.profiles),
+                    useInputChannelMask()));
+    mExtraAudioDescriptors = parcelable.hal.extraAudioDescriptors;
+    mGains = VALUE_OR_RETURN_STATUS(
+            aidl2legacy_AudioGains(std::make_pair(parcelable.hal.gains, parcelable.sys.gains)));
     return OK;
 }
 
@@ -333,46 +343,40 @@
 }
 
 status_t AudioPortConfig::writeToParcelable(
-        media::AudioPortConfig* parcelable, bool isInput) const {
-    parcelable->sampleRate = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
+        media::audio::common::AudioPortConfig* parcelable, bool isInput) const {
+    media::audio::common::Int aidl_sampleRate;
+    aidl_sampleRate.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
+    parcelable->sampleRate = aidl_sampleRate;
     parcelable->format = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
     parcelable->channelMask = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_channel_mask_t_AudioChannelLayout(mChannelMask, isInput));
     parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
-    parcelable->gain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.index));
-    parcelable->gain.mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
-    parcelable->gain.channelMask = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_channel_mask_t_AudioChannelLayout(mGain.channel_mask, isInput));
-    parcelable->gain.rampDurationMs = VALUE_OR_RETURN_STATUS(
-            convertIntegral<int32_t>(mGain.ramp_duration_ms));
-    parcelable->gain.values = VALUE_OR_RETURN_STATUS(convertContainer<std::vector<int32_t>>(
-            mGain.values, convertIntegral<int32_t, int>));
+    media::audio::common::AudioGainConfig aidl_gain = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_gain_config_AudioGainConfig(mGain, isInput));
+    parcelable->gain = aidl_gain;
     return OK;
 }
 
 status_t AudioPortConfig::readFromParcelable(
-        const media::AudioPortConfig& parcelable, bool isInput) {
-    mSamplingRate = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.sampleRate));
-    mFormat = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format));
-    mChannelMask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(parcelable.channelMask, isInput));
-    mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
-    mGain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.index));
-    mGain.mode = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.gain.mode));
-    mGain.channel_mask = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
-                    parcelable.gain.channelMask, isInput));
-    mGain.ramp_duration_ms = VALUE_OR_RETURN_STATUS(
-            convertIntegral<unsigned int>(parcelable.gain.rampDurationMs));
-    if (parcelable.gain.values.size() > std::size(mGain.values)) {
-        return BAD_VALUE;
+        const media::audio::common::AudioPortConfig& parcelable, bool isInput) {
+    if (parcelable.sampleRate.has_value()) {
+        mSamplingRate = VALUE_OR_RETURN_STATUS(
+                convertIntegral<unsigned int>(parcelable.sampleRate.value().value));
     }
-    for (size_t i = 0; i < parcelable.gain.values.size(); ++i) {
-        mGain.values[i] = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.values[i]));
+    if (parcelable.format.has_value()) {
+        mFormat = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format.value()));
+    }
+    if (parcelable.channelMask.has_value()) {
+        mChannelMask = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+                        parcelable.channelMask.value(), isInput));
+    }
+    mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
+    if (parcelable.gain.has_value()) {
+        mGain = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_AudioGainConfig_audio_gain_config(parcelable.gain.value(), isInput));
     }
     return OK;
 }
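An illustrative sketch (not part of the patch) of the optional-field semantics used above: a partially populated common AudioPortConfig leaves the unset legacy fields untouched on read. The makeSampleRateOnlyConfig helper is hypothetical, and the Int wrapper type and generated header paths are assumed from the AIDL definitions.

#include <android/media/audio/common/AudioPortConfig.h>
#include <android/media/audio/common/Int.h>

using android::media::audio::common::AudioPortConfig;
using android::media::audio::common::Int;

// Builds a config that only carries a sample rate; format, channelMask and gain
// remain unset, so readFromParcelable() above leaves those legacy fields as-is.
AudioPortConfig makeSampleRateOnlyConfig(int32_t rateHz) {
    AudioPortConfig config;
    Int rate;
    rate.value = rateHz;
    config.sampleRate = rate;
    return config;
}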
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 47b2d54..15f2d68 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -27,6 +27,8 @@
 
 namespace android {
 
+using media::audio::common::AudioChannelLayout;
+
 bool operator == (const AudioProfile &left, const AudioProfile &right)
 {
     return (left.getFormat() == right.getFormat()) &&
@@ -154,57 +156,65 @@
     return *this;
 }
 
-ConversionResult<media::AudioProfile>
+ConversionResult<AudioProfile::Aidl>
 AudioProfile::toParcelable(bool isInput) const {
-    media::AudioProfile parcelable;
+    media::audio::common::AudioProfile parcelable;
     parcelable.name = mName;
-    parcelable.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
+    parcelable.format = VALUE_OR_RETURN(
+            legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
+    // Note: legacy 'audio_profile' imposes a limit on the number of
+    // channel masks and sampling rates. That's why it's not used here
+    // and conversions are performed directly on the fields instead
+    // of using 'legacy2aidl_audio_profile_AudioProfile' from AidlConversion.
     parcelable.channelMasks = VALUE_OR_RETURN(
-            convertContainer<std::vector<media::AudioChannelLayout>>(
+            convertContainer<std::vector<AudioChannelLayout>>(
                     mChannelMasks,
                     [isInput](audio_channel_mask_t m) {
                         return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
                     }));
-    parcelable.samplingRates = VALUE_OR_RETURN(
+    parcelable.sampleRates = VALUE_OR_RETURN(
             convertContainer<std::vector<int32_t>>(mSamplingRates,
                                                    convertIntegral<int32_t, uint32_t>));
-    parcelable.isDynamicFormat = mIsDynamicFormat;
-    parcelable.isDynamicChannels = mIsDynamicChannels;
-    parcelable.isDynamicRate = mIsDynamicRate;
     parcelable.encapsulationType = VALUE_OR_RETURN(
             legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(mEncapsulationType));
-    return parcelable;
+    media::AudioProfileSys parcelableSys;
+    parcelableSys.isDynamicFormat = mIsDynamicFormat;
+    parcelableSys.isDynamicChannels = mIsDynamicChannels;
+    parcelableSys.isDynamicRate = mIsDynamicRate;
+    return std::make_pair(parcelable, parcelableSys);
 }
 
-ConversionResult<sp<AudioProfile>>
-AudioProfile::fromParcelable(const media::AudioProfile& parcelable, bool isInput) {
+ConversionResult<sp<AudioProfile>> AudioProfile::fromParcelable(
+        const AudioProfile::Aidl& aidl, bool isInput) {
     sp<AudioProfile> legacy = new AudioProfile();
+    const auto& parcelable = aidl.first;
     legacy->mName = parcelable.name;
     legacy->mFormat = VALUE_OR_RETURN(
             aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format));
     legacy->mChannelMasks = VALUE_OR_RETURN(
             convertContainer<ChannelMaskSet>(parcelable.channelMasks,
-                    [isInput](const media::AudioChannelLayout& l) {
+                    [isInput](const AudioChannelLayout& l) {
                         return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
                     }));
     legacy->mSamplingRates = VALUE_OR_RETURN(
-            convertContainer<SampleRateSet>(parcelable.samplingRates,
+            convertContainer<SampleRateSet>(parcelable.sampleRates,
                                             convertIntegral<uint32_t, int32_t>));
-    legacy->mIsDynamicFormat = parcelable.isDynamicFormat;
-    legacy->mIsDynamicChannels = parcelable.isDynamicChannels;
-    legacy->mIsDynamicRate = parcelable.isDynamicRate;
     legacy->mEncapsulationType = VALUE_OR_RETURN(
             aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
                     parcelable.encapsulationType));
+    const auto& parcelableSys = aidl.second;
+    legacy->mIsDynamicFormat = parcelableSys.isDynamicFormat;
+    legacy->mIsDynamicChannels = parcelableSys.isDynamicChannels;
+    legacy->mIsDynamicRate = parcelableSys.isDynamicRate;
     return legacy;
 }
 
 ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl, bool isInput) {
+aidl2legacy_AudioProfile(const AudioProfile::Aidl& aidl, bool isInput) {
     return AudioProfile::fromParcelable(aidl, isInput);
 }
 
-ConversionResult<media::AudioProfile>
+ConversionResult<AudioProfile::Aidl>
 legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput) {
     return legacy->toParcelable(isInput);
 }
@@ -329,16 +339,19 @@
 }
 
 ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl, bool isInput) {
-    return convertContainer<AudioProfileVector>(aidl,
-            [isInput](const media::AudioProfile& p) {
-                return aidl2legacy_AudioProfile(p, isInput);
+aidl2legacy_AudioProfileVector(const AudioProfileVector::Aidl& aidl, bool isInput) {
+    return convertContainers<AudioProfileVector>(aidl.first, aidl.second,
+            [isInput](const media::audio::common::AudioProfile& p,
+                      const media::AudioProfileSys& ps) {
+                return aidl2legacy_AudioProfile(std::make_pair(p, ps), isInput);
             });
 }
 
-ConversionResult<std::vector<media::AudioProfile>>
+ConversionResult<AudioProfileVector::Aidl>
 legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput) {
-    return convertContainer<std::vector<media::AudioProfile>>(legacy,
+    return convertContainerSplit<
+            std::vector<media::audio::common::AudioProfile>,
+            std::vector<media::AudioProfileSys>>(legacy,
             [isInput](const sp<AudioProfile>& p) {
                 return legacy2aidl_AudioProfile(p, isInput);
             });
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 3cce722..cf829b5 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -166,16 +166,18 @@
 
 status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
     AudioPort::writeToParcelable(parcelable);
-    AudioPortConfig::writeToParcelable(&parcelable->activeConfig, useInputChannelMask());
-    parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+    AudioPortConfig::writeToParcelable(&parcelable->hal.activeConfig, useInputChannelMask());
+    parcelable->hal.id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
 
-    media::AudioPortDeviceExt ext;
-    ext.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
-    ext.encapsulationModes = VALUE_OR_RETURN_STATUS(
+    media::audio::common::AudioDevice device = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
+    UNION_SET(parcelable->hal.ext, device, device);
+    media::AudioPortDeviceExtSys deviceSys;
+    deviceSys.encapsulationModes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMode_mask(mEncapsulationModes));
-    ext.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
+    deviceSys.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
             legacy2aidl_AudioEncapsulationMetadataType_mask(mEncapsulationMetadataTypes));
-    UNION_SET(parcelable->ext, device, std::move(ext));
+    UNION_SET(parcelable->sys.ext, device, deviceSys);
     return OK;
 }
 
@@ -186,22 +188,26 @@
 }
 
 status_t DeviceDescriptorBase::readFromParcelable(const media::AudioPort& parcelable) {
-    if (parcelable.type != media::AudioPortType::DEVICE) {
+    if (parcelable.sys.type != media::AudioPortType::DEVICE) {
         return BAD_VALUE;
     }
     status_t status = AudioPort::readFromParcelable(parcelable)
-            ?: AudioPortConfig::readFromParcelable(parcelable.activeConfig, useInputChannelMask());
+            ?: AudioPortConfig::readFromParcelable(
+                    parcelable.hal.activeConfig, useInputChannelMask());
     if (status != OK) {
         return status;
     }
 
-    media::AudioPortDeviceExt ext = VALUE_OR_RETURN_STATUS(UNION_GET(parcelable.ext, device));
+    media::audio::common::AudioDevice device = VALUE_OR_RETURN_STATUS(
+            UNION_GET(parcelable.hal.ext, device));
     mDeviceTypeAddr = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioDeviceTypeAddress(ext.device));
+            aidl2legacy_AudioDeviceTypeAddress(device));
+    media::AudioPortDeviceExtSys deviceSys = VALUE_OR_RETURN_STATUS(
+            UNION_GET(parcelable.sys.ext, device));
     mEncapsulationModes = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioEncapsulationMode_mask(ext.encapsulationModes));
+            aidl2legacy_AudioEncapsulationMode_mask(deviceSys.encapsulationModes));
     mEncapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_AudioEncapsulationMetadataType_mask(ext.encapsulationMetadataTypes));
+            aidl2legacy_AudioEncapsulationMetadataType_mask(deviceSys.encapsulationMetadataTypes));
     return OK;
 }
 
diff --git a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
index 8edcc58..11aa222 100644
--- a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
+++ b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
@@ -19,7 +19,7 @@
 #include <string>
 #include <vector>
 
-#include <android/media/AudioDevice.h>
+#include <android/media/audio/common/AudioDevice.h>
 #include <binder/Parcelable.h>
 #include <binder/Parcel.h>
 #include <media/AudioContainers.h>
@@ -32,6 +32,7 @@
 class AudioDeviceTypeAddr : public Parcelable {
 public:
     AudioDeviceTypeAddr() = default;
+    AudioDeviceTypeAddr(const AudioDeviceTypeAddr&) = default;
 
     AudioDeviceTypeAddr(audio_devices_t type, const std::string& address);
 
@@ -88,8 +89,8 @@
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioDeviceTypeAddr>
-aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl);
-ConversionResult<media::AudioDevice>
+aidl2legacy_AudioDeviceTypeAddress(const media::audio::common::AudioDevice& aidl);
+ConversionResult<media::audio::common::AudioDevice>
 legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy);
 
 } // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
index 28769d2..10088f2 100644
--- a/media/libaudiofoundation/include/media/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -16,23 +16,23 @@
 
 #pragma once
 
-#include <android/media/AudioGain.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android/media/AudioGainSys.h>
 #include <media/AidlConversion.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <system/audio.h>
-#include <string>
-#include <vector>
 
 namespace android {
 
-class AudioGain: public RefBase, public Parcelable
+class AudioGain: public RefBase
 {
 public:
     AudioGain(int index, bool isInput);
-    virtual ~AudioGain() {}
+    virtual ~AudioGain() = default;
 
     void setMode(audio_gain_mode_t mode) { mGain.mode = mode; }
     const audio_gain_mode_t &getMode() const { return mGain.mode; }
@@ -71,26 +71,24 @@
 
     bool equals(const sp<AudioGain>& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
-
-    status_t writeToParcelable(media::AudioGain* parcelable) const;
-    status_t readFromParcelable(const media::AudioGain& parcelable);
+    using Aidl = std::pair<media::audio::common::AudioGain, media::AudioGainSys>;
+    ConversionResult<Aidl> toParcelable() const;
+    static ConversionResult<sp<AudioGain>> fromParcelable(const Aidl& aidl);
 
 private:
     int               mIndex;
-    struct audio_gain mGain;
     bool              mIsInput;
+    struct audio_gain mGain = {};
     bool              mUseForVolume = false;
 };
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<sp<AudioGain>>
-aidl2legacy_AudioGain(const media::AudioGain& aidl);
-ConversionResult<media::AudioGain>
+aidl2legacy_AudioGain(const AudioGain::Aidl& aidl);
+ConversionResult<AudioGain::Aidl>
 legacy2aidl_AudioGain(const sp<AudioGain>& legacy);
 
-class AudioGains : public std::vector<sp<AudioGain> >, public Parcelable
+class AudioGains : public std::vector<sp<AudioGain>>
 {
 public:
     bool canUseForVolume() const
@@ -103,7 +101,7 @@
         return false;
     }
 
-    int32_t add(const sp<AudioGain> gain)
+    int32_t add(const sp<AudioGain>& gain)
     {
         push_back(gain);
         return 0;
@@ -111,14 +109,15 @@
 
     bool equals(const AudioGains& other) const;
 
-    status_t writeToParcel(Parcel* parcel) const override;
-    status_t readFromParcel(const Parcel* parcel) override;
+    using Aidl = std::pair<
+            std::vector<media::audio::common::AudioGain>,
+            std::vector<media::AudioGainSys>>;
 };
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioGains>
-aidl2legacy_AudioGains(const std::vector<media::AudioGain>& aidl);
-ConversionResult<std::vector<media::AudioGain>>
+aidl2legacy_AudioGains(const AudioGains::Aidl& aidl);
+ConversionResult<AudioGains::Aidl>
 legacy2aidl_AudioGains(const AudioGains& legacy);
 
 } // namespace android
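Note: AudioGain no longer implements Parcelable directly; it now round-trips through the
AudioGain::Aidl pair declared above. A small usage sketch of the conversion routines (assumes an
Android build; the helper function itself is hypothetical and mirrors the unit test added later in
this patch):

    #include <media/AudioGain.h>

    // Hypothetical helper: true if the gain survives a legacy -> AIDL -> legacy round trip.
    static bool gainRoundTripsCleanly(const android::sp<android::AudioGain>& gain) {
        auto aidl = android::legacy2aidl_AudioGain(gain);          // ConversionResult<AudioGain::Aidl>
        if (!aidl.ok()) return false;
        auto back = android::aidl2legacy_AudioGain(aidl.value());  // ConversionResult<sp<AudioGain>>
        return back.ok() && back.value()->equals(gain);
    }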
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
index 6e1d032..aa9b5f6 100644
--- a/media/libaudiofoundation/include/media/AudioPort.h
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -21,7 +21,7 @@
 
 #include <android/media/AudioPort.h>
 #include <android/media/AudioPortConfig.h>
-#include <android/media/ExtraAudioDescriptor.h>
+#include <android/media/audio/common/ExtraAudioDescriptor.h>
 #include <binder/Parcel.h>
 #include <binder/Parcelable.h>
 #include <media/AudioGain.h>
@@ -69,10 +69,10 @@
     AudioProfileVector &getAudioProfiles() { return mProfiles; }
 
     void setExtraAudioDescriptors(
-            const std::vector<media::ExtraAudioDescriptor> extraAudioDescriptors) {
+            const std::vector<media::audio::common::ExtraAudioDescriptor> extraAudioDescriptors) {
         mExtraAudioDescriptors = extraAudioDescriptors;
     }
-    std::vector<media::ExtraAudioDescriptor> &getExtraAudioDescriptors() {
+    std::vector<media::audio::common::ExtraAudioDescriptor> &getExtraAudioDescriptors() {
         return mExtraAudioDescriptors;
     }
 
@@ -114,7 +114,7 @@
 
     // Audio capabilities that are defined by hardware descriptors when the format is unrecognized
     // by the platform, e.g. short audio descriptor in EDID for HDMI.
-    std::vector<media::ExtraAudioDescriptor> mExtraAudioDescriptors;
+    std::vector<media::audio::common::ExtraAudioDescriptor> mExtraAudioDescriptors;
 private:
     template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
                                         || std::is_same<T, struct audio_port_v7>::value, int> = 0>
@@ -152,8 +152,10 @@
 
     bool equals(const sp<AudioPortConfig>& other) const;
 
-    status_t writeToParcelable(media::AudioPortConfig* parcelable, bool isInput) const;
-    status_t readFromParcelable(const media::AudioPortConfig& parcelable, bool isInput);
+    status_t writeToParcelable(
+            media::audio::common::AudioPortConfig* parcelable, bool isInput) const;
+    status_t readFromParcelable(
+            const media::audio::common::AudioPortConfig& parcelable, bool isInput);
 
 protected:
     unsigned int mSamplingRate = 0u;
diff --git a/media/libaudiofoundation/include/media/AudioProfile.h b/media/libaudiofoundation/include/media/AudioProfile.h
index e34a49f..62670e4 100644
--- a/media/libaudiofoundation/include/media/AudioProfile.h
+++ b/media/libaudiofoundation/include/media/AudioProfile.h
@@ -17,11 +17,10 @@
 #pragma once
 
 #include <string>
+#include <utility>
 #include <vector>
 
-#include <android/media/AudioProfile.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <android/media/AudioProfileSys.h>
 #include <media/AidlConversion.h>
 #include <media/AudioContainers.h>
 #include <system/audio.h>
@@ -81,9 +80,10 @@
 
     bool equals(const sp<AudioProfile>& other) const;
 
-    ConversionResult<media::AudioProfile> toParcelable(bool isInput) const;
+    using Aidl = std::pair<media::audio::common::AudioProfile, media::AudioProfileSys>;
+    ConversionResult<Aidl> toParcelable(bool isInput) const;
     static ConversionResult<sp<AudioProfile>> fromParcelable(
-            const media::AudioProfile& parcelable, bool isInput);
+            const Aidl& aidl, bool isInput);
 
 private:
 
@@ -104,8 +104,8 @@
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl, bool isInput);
-ConversionResult<media::AudioProfile>
+aidl2legacy_AudioProfile(const AudioProfile::Aidl& aidl, bool isInput);
+ConversionResult<AudioProfile::Aidl>
 legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput);
 
 class AudioProfileVector : public std::vector<sp<AudioProfile>>
@@ -134,14 +134,18 @@
     virtual void dump(std::string *dst, int spaces) const;
 
     bool equals(const AudioProfileVector& other) const;
+
+    using Aidl = std::pair<
+            std::vector<media::audio::common::AudioProfile>,
+            std::vector<media::AudioProfileSys>>;
 };
 
 bool operator == (const AudioProfile &left, const AudioProfile &right);
 
 // Conversion routines, according to AidlConversion.h conventions.
 ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl, bool isInput);
-ConversionResult<std::vector<media::AudioProfile>>
+aidl2legacy_AudioProfileVector(const AudioProfileVector::Aidl& aidl, bool isInput);
+ConversionResult<AudioProfileVector::Aidl>
 legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput);
 
 AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
index f3cd446..3f1fbea 100644
--- a/media/libaudiofoundation/tests/Android.bp
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -18,6 +18,7 @@
     ],
 
     static_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "libaudiofoundation",
diff --git a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
index 1696980..0315a59 100644
--- a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
+++ b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
@@ -75,15 +75,40 @@
     return audioProfiles;
 }
 
-TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
-    Parcel data;
-    AudioGains audioGains = getAudioGainsForTest();
+TEST(AudioFoundationParcelableTest, ParcelingAudioProfile) {
+    sp<AudioProfile> profile = getAudioProfileVectorForTest()[0];
+    auto conv = legacy2aidl_AudioProfile(profile, false /*isInput*/);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioProfile(conv.value(), false /*isInput*/);
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(profile->equals(convBack.value()));
+}
 
-    ASSERT_EQ(data.writeParcelable(audioGains), NO_ERROR);
-    data.setDataPosition(0);
-    AudioGains audioGainsFromParcel;
-    ASSERT_EQ(data.readParcelable(&audioGainsFromParcel), NO_ERROR);
-    ASSERT_TRUE(audioGainsFromParcel.equals(audioGains));
+TEST(AudioFoundationParcelableTest, ParcelingAudioProfileVector) {
+    AudioProfileVector profiles = getAudioProfileVectorForTest();
+    auto conv = legacy2aidl_AudioProfileVector(profiles, false /*isInput*/);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioProfileVector(conv.value(), false /*isInput*/);
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(profiles.equals(convBack.value()));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
+    sp<AudioGain> audioGain = getAudioGainsForTest()[0];
+    auto conv = legacy2aidl_AudioGain(audioGain);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioGain(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(audioGain->equals(convBack.value()));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioGains) {
+    AudioGains audioGains = getAudioGainsForTest();
+    auto conv = legacy2aidl_AudioGains(audioGains);
+    ASSERT_TRUE(conv.ok());
+    auto convBack = aidl2legacy_AudioGains(conv.value());
+    ASSERT_TRUE(convBack.ok());
+    ASSERT_TRUE(audioGains.equals(convBack.value()));
 }
 
 TEST(AudioFoundationParcelableTest, ParcelingAudioPort) {
@@ -105,11 +130,11 @@
     Parcel data;
     sp<AudioPortConfig> audioPortConfig = new AudioPortConfigTestStub();
     audioPortConfig->applyAudioPortConfig(&TEST_AUDIO_PORT_CONFIG);
-    media::AudioPortConfig parcelable{};
+    media::audio::common::AudioPortConfig parcelable{};
     ASSERT_EQ(NO_ERROR, audioPortConfig->writeToParcelable(&parcelable, false /*isInput*/));
     ASSERT_EQ(NO_ERROR, data.writeParcelable(parcelable));
     data.setDataPosition(0);
-    media::AudioPortConfig parcelableFromParcel{};
+    media::audio::common::AudioPortConfig parcelableFromParcel{};
     ASSERT_EQ(NO_ERROR, data.readParcelable(&parcelableFromParcel));
     sp<AudioPortConfig> audioPortConfigFromParcel = new AudioPortConfigTestStub();
     ASSERT_EQ(NO_ERROR, audioPortConfigFromParcel->readFromParcelable(
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index bd24c84..5fe74f9 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -30,6 +30,7 @@
     ],
 
     shared_libs: [
+        "audioclient-types-aidl-cpp",
         "libdl",
         "libhidlbase",
         "liblog",
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index a2c6e8a..d6576f5 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -50,6 +50,7 @@
         "libmedia_helper",
         "libmediautils",
         "libutils",
+        "audioclient-types-aidl-cpp",
     ],
     header_libs: [
         "android.hardware.audio.common.util@all-versions",
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index aa94eea..47acb19 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -20,6 +20,7 @@
 //#define LOG_NDEBUG 0
 
 #include <cutils/native_handle.h>
+#include <cutils/properties.h>
 #include <hwbinder/IPCThreadState.h>
 #include <media/AudioContainers.h>
 #include <utils/Log.h>
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index 2694ab3..9fd0ac0 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -119,6 +119,23 @@
     status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
     status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType __unused,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos __unused) override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioMixerBurstCount() override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioHardwareBurstMinUsec() override {
+        // TODO: Implement the HAL query when moving to AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
     status_t dump(int fd, const Vector<String16>& args) override;
 
   private:
diff --git a/media/libaudiohal/impl/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
index 2fde936..ee1d2c5 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -112,6 +112,23 @@
     status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
     status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType __unused,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos __unused) override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioMixerBurstCount() override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
+    int32_t getAAudioHardwareBurstMinUsec() override {
+        // This function will only be available on AIDL HAL.
+        return INVALID_OPERATION;
+    }
+
     status_t dump(int fd, const Vector<String16>& args) override;
 
     void closeOutputStream(struct audio_stream_out *stream_out);
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 6f84efe..b46259b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -46,6 +46,8 @@
 
     status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
 
+    float getHalVersion() const override { return MAJOR_VERSION + (float)MINOR_VERSION / 10; }
+
   private:
     friend class ServiceNotificationListener;
     void addDeviceFactory(sp<IDevicesFactory> factory, bool needToNotify);
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index 568a1fb..5baefa4b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -40,6 +40,10 @@
 
             status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
 
+            float getHalVersion() const override {
+                return MAJOR_VERSION + (float)MINOR_VERSION / 10;
+            }
+
   private:
     sp<DevicesFactoryHalInterface> mLocalFactory;
     sp<DevicesFactoryHalInterface> mHidlFactory;
diff --git a/media/libaudiohal/impl/DevicesFactoryHalLocal.h b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
index 32bf362..d2b9104 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalLocal.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
@@ -41,6 +41,10 @@
                 return INVALID_OPERATION;
             }
 
+            float getHalVersion() const override {
+                return MAJOR_VERSION + (float)MINOR_VERSION / 10;
+            }
+
   private:
     friend class DevicesFactoryHalHybrid;
 
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index 69cbcec..70c3199 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
 #define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
 
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/audiohal/EffectHalInterface.h>
 #include <media/MicrophoneInfo.h>
 #include <system/audio.h>
@@ -120,6 +122,12 @@
     virtual status_t removeDeviceEffect(
             audio_port_handle_t device, sp<EffectHalInterface> effect) = 0;
 
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
+    virtual int32_t getAAudioMixerBurstCount() = 0;
+    virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
+
     virtual status_t dump(int fd, const Vector<String16>& args) = 0;
 
   protected:
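Note: a caller-side sketch of the three interface methods added above (illustrative: deviceHal is
an assumed sp<DeviceHalInterface>, and the AudioMMapPolicyType value is only an example). As the
HIDL and local implementations earlier in this patch show, non-AIDL HALs report INVALID_OPERATION,
so callers need a fallback path:

    std::vector<media::audio::common::AudioMMapPolicyInfo> policyInfos;
    status_t status = deviceHal->getMmapPolicyInfos(
            media::audio::common::AudioMMapPolicyType::DEFAULT, &policyInfos);
    if (status == INVALID_OPERATION) {
        // Not backed by an AIDL HAL yet: fall back to the legacy MMAP policy source.
    }
    const int32_t mixerBursts = deviceHal->getAAudioMixerBurstCount();       // may be INVALID_OPERATION
    const int32_t minBurstUsec = deviceHal->getAAudioHardwareBurstMinUsec(); // may be INVALID_OPERATION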
diff --git a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
index 5091558..17010e6 100644
--- a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
@@ -43,6 +43,8 @@
     // The callback can be only set once.
     virtual status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) = 0;
 
+    virtual float getHalVersion() const = 0;
+
     static sp<DevicesFactoryHalInterface> create();
 
   protected:
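Note: every getHalVersion() implementation added in this patch computes
MAJOR_VERSION + (float)MINOR_VERSION / 10, so a 7.1 HAL reports approximately 7.1f. A sketch of a
version gate on the new accessor (illustrative; compare with a margin since the value is a float):

    android::sp<android::DevicesFactoryHalInterface> factory =
            android::DevicesFactoryHalInterface::create();
    if (factory != nullptr && factory->getHalVersion() > 7.05f) {
        // Treat as HAL 7.1 or later.
    }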
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index 2988c67..ab6a8b6 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -328,7 +328,9 @@
     DO_CHANNEL_POSITION(21);
     DO_CHANNEL_POSITION(22);
     DO_CHANNEL_POSITION(23);
-    static_assert(FCC_LIMIT <= FCC_24); // Note: this may need to change.
+    DO_CHANNEL_POSITION(24);
+    DO_CHANNEL_POSITION(25);
+    static_assert(FCC_LIMIT <= FCC_26); // Note: this may need to change.
 #pragma pop_macro("DO_CHANNEL_POSITION")
 }
 
@@ -346,6 +348,8 @@
         [7] = AUDIO_CHANNEL_OUT_6POINT1,
         [8] = AUDIO_CHANNEL_OUT_7POINT1,
         [12] = AUDIO_CHANNEL_OUT_7POINT1POINT4,
+        [14] = AUDIO_CHANNEL_OUT_9POINT1POINT4,
+        [16] = AUDIO_CHANNEL_OUT_9POINT1POINT6,
         [24] = AUDIO_CHANNEL_OUT_22POINT2,
     };
     return channelCount < std::size(canonical) ? canonical[channelCount] : AUDIO_CHANNEL_NONE;
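Note: the sparse designated-initializer table above maps a channel count to its canonical output
mask; counts without an initializer zero-fill, and AUDIO_CHANNEL_NONE is 0, so unlisted counts fall
through to "no canonical mask". Illustrative lookups (the enclosing function's name is not visible
in this hunk):

    // canonical[12] -> AUDIO_CHANNEL_OUT_7POINT1POINT4
    // canonical[14] -> AUDIO_CHANNEL_OUT_9POINT1POINT4   (added by this change)
    // canonical[16] -> AUDIO_CHANNEL_OUT_9POINT1POINT6   (added by this change)
    // canonical[13] -> AUDIO_CHANNEL_NONE                (no initializer, zero-filled)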
diff --git a/media/libeffects/downmix/EffectDownmix.cpp b/media/libeffects/downmix/EffectDownmix.cpp
index f500bc3..d8f5787 100644
--- a/media/libeffects/downmix/EffectDownmix.cpp
+++ b/media/libeffects/downmix/EffectDownmix.cpp
@@ -19,7 +19,7 @@
 #include <log/log.h>
 
 #include "EffectDownmix.h"
-#include <math.h>
+#include <audio_utils/ChannelMix.h>
 
 // Do not submit with DOWNMIX_TEST_CHANNEL_INDEX defined, strictly for testing
 //#define DOWNMIX_TEST_CHANNEL_INDEX 0
@@ -35,12 +35,13 @@
 } downmix_state_t;
 
 /* parameters for each downmixer */
-typedef struct {
+struct downmix_object_t {
     downmix_state_t state;
     downmix_type_t type;
     bool apply_volume_correction;
     uint8_t input_channel_count;
-} downmix_object_t;
+    android::audio_utils::channels::ChannelMix channelMix;
+};
 
 typedef struct downmix_module_s {
     const struct effect_interface_s *itfe;
@@ -77,11 +78,6 @@
         downmix_object_t *pDownmixer, int32_t param, uint32_t size, void *pValue);
 static int Downmix_getParameter(
         downmix_object_t *pDownmixer, int32_t param, uint32_t *pSize, void *pValue);
-static void Downmix_foldFromQuad(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static void Downmix_foldFrom5Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static void Downmix_foldFrom7Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static bool Downmix_foldGeneric(
-        uint32_t mask, float *pSrc, float *pDst, size_t numFrames, bool accumulate);
 
 // effect_handle_t interface implementation for downmix effect
 const struct effect_interface_s gDownmixInterface = {
@@ -192,9 +188,11 @@
     if (!mask) {
         return false;
     }
-    // check against unsupported channels
-    if (mask & ~AUDIO_CHANNEL_OUT_22POINT2) {
-        ALOGE("Unsupported channels in %u", mask & ~AUDIO_CHANNEL_OUT_22POINT2);
+    // check against unsupported channels (up to FCC_26)
+    constexpr uint32_t MAXIMUM_CHANNEL_MASK = AUDIO_CHANNEL_OUT_22POINT2
+            | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT;
+    if (mask & ~MAXIMUM_CHANNEL_MASK) {
+        ALOGE("Unsupported channels in %#x", mask & ~MAXIMUM_CHANNEL_MASK);
         return false;
     }
     return true;
@@ -315,7 +313,8 @@
         audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
 
     downmix_object_t *pDownmixer;
-    float *pSrc, *pDst;
+    const float *pSrc;
+    float *pDst;
     downmix_module_t *pDwmModule = (downmix_module_t *)self;
 
     if (pDwmModule == NULL) {
@@ -344,7 +343,8 @@
 
     const bool accumulate =
             (pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
-    const uint32_t downmixInputChannelMask = pDwmModule->config.inputCfg.channels;
+    const audio_channel_mask_t downmixInputChannelMask =
+            (audio_channel_mask_t)pDwmModule->config.inputCfg.channels;
 
     switch(pDownmixer->type) {
 
@@ -368,38 +368,13 @@
           }
           break;
 
-      case DOWNMIX_TYPE_FOLD:
-#ifdef DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER
-          // bypass the optimized downmix routines for the common formats
-          if (!Downmix_foldGeneric(
-                  downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
-              ALOGE("Multichannel configuration %#x is not supported",
-                    downmixInputChannelMask);
-              return -EINVAL;
-          }
-          break;
-#endif
-        // optimize for the common formats
-        switch (downmixInputChannelMask) {
-        case AUDIO_CHANNEL_OUT_QUAD_BACK:
-        case AUDIO_CHANNEL_OUT_QUAD_SIDE:
-            Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
-            break;
-        case AUDIO_CHANNEL_OUT_5POINT1_BACK:
-        case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
-            Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
-            break;
-        case AUDIO_CHANNEL_OUT_7POINT1:
-            Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
-            break;
-        default:
-            if (!Downmix_foldGeneric(
-                    downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
+      case DOWNMIX_TYPE_FOLD: {
+            if (!pDownmixer->channelMix.process(
+                    pSrc, pDst, numFrames, accumulate, downmixInputChannelMask)) {
                 ALOGE("Multichannel configuration %#x is not supported",
                       downmixInputChannelMask);
                 return -EINVAL;
             }
-            break;
         }
         break;
 
@@ -674,6 +649,12 @@
         ALOGE("Downmix_Configure error: invalid config");
         return -EINVAL;
     }
+    // when configuring the effect, do not allow a blank or unsupported channel mask
+    if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
+        ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
+                                                    pConfig->inputCfg.channels);
+        return -EINVAL;
+    }
 
     if (&pDwmModule->config != pConfig) {
         memcpy(&pDwmModule->config, pConfig, sizeof(effect_config_t));
@@ -684,12 +665,6 @@
         pDownmixer->apply_volume_correction = false;
         pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
     } else {
-        // when configuring the effect, do not allow a blank or unsupported channel mask
-        if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
-            ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
-                                                        pConfig->inputCfg.channels);
-            return -EINVAL;
-        }
         pDownmixer->input_channel_count =
                 audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
     }
@@ -780,7 +755,6 @@
     return 0;
 } /* end Downmix_setParameter */
 
-
 /*----------------------------------------------------------------------------
  * Downmix_getParameter()
  *----------------------------------------------------------------------------
@@ -829,299 +803,3 @@
     return 0;
 } /* end Downmix_getParameter */
 
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFromQuad()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a quad signal to stereo
- *
- * Inputs:
- *  pSrc       quad audio samples to downmix
- *  numFrames  the number of quad frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFromQuad(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is RL
-    // sample at index 3 is RR
-    if (accumulate) {
-        while (numFrames) {
-            // FL + RL
-            pDst[0] = clamp_float(pDst[0] + ((pSrc[0] + pSrc[2]) / 2.0f));
-            // FR + RR
-            pDst[1] = clamp_float(pDst[1] + ((pSrc[1] + pSrc[3]) / 2.0f));
-            pSrc += 4;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // FL + RL
-            pDst[0] = clamp_float((pSrc[0] + pSrc[2]) / 2.0f);
-            // FR + RR
-            pDst[1] = clamp_float((pSrc[1] + pSrc[3]) / 2.0f);
-            pSrc += 4;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFrom5Point1()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a 5.1 signal to stereo
- *
- * Inputs:
- *  pSrc       5.1 audio samples to downmix
- *  numFrames  the number of 5.1 frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFrom5Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-    float lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is FC
-    // sample at index 3 is LFE
-    // sample at index 4 is RL
-    // sample at index 5 is RR
-    // code is mostly duplicated between the two values of accumulate to avoid repeating the test
-    // for every sample
-    if (accumulate) {
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
-                    + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
-            // FL + centerPlusLfeContrib + RL
-            lt = pSrc[0] + centerPlusLfeContrib + pSrc[4];
-            // FR + centerPlusLfeContrib + RR
-            rt = pSrc[1] + centerPlusLfeContrib + pSrc[5];
-            // accumulate in destination
-            pDst[0] = clamp_float(pDst[0] + (lt / 2.0f));
-            pDst[1] = clamp_float(pDst[1] + (rt / 2.0f));
-            pSrc += 6;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
-                    + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
-            // FL + centerPlusLfeContrib + RL
-            lt = pSrc[0] + centerPlusLfeContrib + pSrc[4];
-            // FR + centerPlusLfeContrib + RR
-            rt = pSrc[1] + centerPlusLfeContrib + pSrc[5];
-            // store in destination
-            pDst[0] = clamp_float(lt / 2.0f); // differs from when accumulate is true above
-            pDst[1] = clamp_float(rt / 2.0f); // differs from when accumulate is true above
-            pSrc += 6;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFrom7Point1()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a 7.1 signal to stereo
- *
- * Inputs:
- *  pSrc       7.1 audio samples to downmix
- *  numFrames  the number of 7.1 frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFrom7Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-    float lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is FC
-    // sample at index 3 is LFE
-    // sample at index 4 is RL
-    // sample at index 5 is RR
-    // sample at index 6 is SL
-    // sample at index 7 is SR
-    // code is mostly duplicated between the two values of accumulate to avoid repeating the test
-    // for every sample
-    if (accumulate) {
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
-                    + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
-            // FL + centerPlusLfeContrib + SL + RL
-            lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
-            // FR + centerPlusLfeContrib + SR + RR
-            rt = pSrc[1] + centerPlusLfeContrib + pSrc[7] + pSrc[5];
-            //accumulate in destination
-            pDst[0] = clamp_float(pDst[0] + (lt / 2.0f));
-            pDst[1] = clamp_float(pDst[1] + (rt / 2.0f));
-            pSrc += 8;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
-                    + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
-            // FL + centerPlusLfeContrib + SL + RL
-            lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
-            // FR + centerPlusLfeContrib + SR + RR
-            rt = pSrc[1] + centerPlusLfeContrib + pSrc[7] + pSrc[5];
-            // store in destination
-            pDst[0] = clamp_float(lt / 2.0f); // differs from when accumulate is true above
-            pDst[1] = clamp_float(rt / 2.0f); // differs from when accumulate is true above
-            pSrc += 8;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldGeneric()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix to stereo a multichannel signal of arbitrary channel position mask.
- *
- * Inputs:
- *  mask       the channel mask of pSrc
- *  pSrc       multichannel audio buffer to downmix
- *  numFrames  the number of multichannel frames to downmix
- *  accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- *               or overwrite pDst (when false)
- *
- * Outputs:
- *  pDst       downmixed stereo audio samples
- *
- * Returns: false if multichannel format is not supported
- *
- *----------------------------------------------------------------------------
- */
-bool Downmix_foldGeneric(
-        uint32_t mask, float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-
-    if (!Downmix_validChannelMask(mask)) {
-        return false;
-    }
-    const int numChan = audio_channel_count_from_out_mask(mask);
-
-    // compute at what index each channel is: samples will be in the following order:
-    //   FL  FR  FC    LFE   BL  BR  BC    SL  SR
-    //
-    //  (transfer matrix)
-    //   FL  FR  FC    LFE   BL  BR  BC    SL  SR
-    //   0.5     0.353 0.353 0.5     0.353 0.5
-    //       0.5 0.353 0.353     0.5 0.353     0.5
-
-    // derive the indices for the transfer matrix columns that have non-zero values.
-    int indexFL = -1;
-    int indexFR = -1;
-    int indexFC = -1;
-    int indexLFE = -1;
-    int indexBL = -1;
-    int indexBR = -1;
-    int indexBC = -1;
-    int indexSL = -1;
-    int indexSR = -1;
-    int index = 0;
-    for (unsigned tmp = mask;
-         (tmp & (AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER)) != 0;
-         ++index) {
-        const unsigned lowestBit = tmp & -(signed)tmp;
-        switch (lowestBit) {
-        case AUDIO_CHANNEL_OUT_FRONT_LEFT:
-            indexFL = index;
-            break;
-        case AUDIO_CHANNEL_OUT_FRONT_RIGHT:
-            indexFR = index;
-            break;
-        case AUDIO_CHANNEL_OUT_FRONT_CENTER:
-            indexFC = index;
-            break;
-        case AUDIO_CHANNEL_OUT_LOW_FREQUENCY:
-            indexLFE = index;
-            break;
-        case AUDIO_CHANNEL_OUT_BACK_LEFT:
-            indexBL = index;
-            break;
-        case AUDIO_CHANNEL_OUT_BACK_RIGHT:
-            indexBR = index;
-            break;
-        case AUDIO_CHANNEL_OUT_BACK_CENTER:
-            indexBC = index;
-            break;
-        case AUDIO_CHANNEL_OUT_SIDE_LEFT:
-            indexSL = index;
-            break;
-        case AUDIO_CHANNEL_OUT_SIDE_RIGHT:
-            indexSR = index;
-            break;
-        }
-        tmp ^= lowestBit;
-    }
-
-    // With good branch prediction, this should run reasonably fast.
-    // Also consider using a transfer matrix form.
-    while (numFrames) {
-        // compute contribution of FC, BC and LFE
-        float centersLfeContrib = 0;
-        if (indexFC >= 0) centersLfeContrib = pSrc[indexFC];
-        if (indexLFE >= 0) centersLfeContrib += pSrc[indexLFE];
-        if (indexBC >= 0) centersLfeContrib += pSrc[indexBC];
-        centersLfeContrib *= MINUS_3_DB_IN_FLOAT;
-
-        float ch[2];
-        ch[0] = centersLfeContrib;
-        ch[1] = centersLfeContrib;
-
-        // mix in left / right channels
-        if (indexFL >= 0) ch[0] += pSrc[indexFL];
-        if (indexFR >= 0) ch[1] += pSrc[indexFR];
-
-        if (indexSL >= 0) ch[0] += pSrc[indexSL];
-        if (indexSR >= 0) ch[1] += pSrc[indexSR]; // note pair checks enforce this if indexSL != 0
-
-        if (indexBL >= 0) ch[0] += pSrc[indexBL];
-        if (indexBR >= 0) ch[1] += pSrc[indexBR]; // note pair checks enforce this if indexBL != 0
-
-        // scale to prevent overflow.
-        ch[0] *= 0.5f;
-        ch[1] *= 0.5f;
-
-        if (accumulate) {
-            ch[0] += pDst[0];
-            ch[1] += pDst[1];
-        }
-
-        pDst[0] = clamp_float(ch[0]);
-        pDst[1] = clamp_float(ch[1]);
-        pSrc += numChan;
-        pDst += 2;
-        numFrames--;
-    }
-    return true;
-}
diff --git a/media/libeffects/downmix/benchmark/downmix_benchmark.cpp b/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
index ee169c2..d9d40ed 100644
--- a/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
+++ b/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
@@ -35,16 +35,14 @@
     AUDIO_CHANNEL_OUT_STEREO,
     AUDIO_CHANNEL_OUT_2POINT1,
     AUDIO_CHANNEL_OUT_2POINT0POINT2,
-    AUDIO_CHANNEL_OUT_QUAD,
-    AUDIO_CHANNEL_OUT_QUAD_BACK,
+    AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
     AUDIO_CHANNEL_OUT_QUAD_SIDE,
     AUDIO_CHANNEL_OUT_SURROUND,
     AUDIO_CHANNEL_OUT_2POINT1POINT2,
     AUDIO_CHANNEL_OUT_3POINT0POINT2,
     AUDIO_CHANNEL_OUT_PENTA,
     AUDIO_CHANNEL_OUT_3POINT1POINT2,
-    AUDIO_CHANNEL_OUT_5POINT1,
-    AUDIO_CHANNEL_OUT_5POINT1_BACK,
+    AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
     AUDIO_CHANNEL_OUT_5POINT1_SIDE,
     AUDIO_CHANNEL_OUT_6POINT1,
     AUDIO_CHANNEL_OUT_5POINT1POINT2,
@@ -62,58 +60,34 @@
 static constexpr size_t kFrameCount = 1000;
 
 /*
-Pixel 3XL
-downmix_benchmark:
-  #BM_Downmix/0     4723 ns    4708 ns       148694
-  #BM_Downmix/1     4717 ns    4702 ns       148873
-  #BM_Downmix/2     4803 ns    4788 ns       145893
-  #BM_Downmix/3     5056 ns    5041 ns       139110
-  #BM_Downmix/4     4710 ns    4696 ns       149625
-  #BM_Downmix/5     1514 ns    1509 ns       463694
-  #BM_Downmix/6     1513 ns    1509 ns       463451
-  #BM_Downmix/7     1516 ns    1511 ns       463899
-  #BM_Downmix/8     4445 ns    4431 ns       157831
-  #BM_Downmix/9     5081 ns    5065 ns       138412
-  #BM_Downmix/10    4354 ns    4341 ns       161247
-  #BM_Downmix/11    4411 ns    4397 ns       158893
-  #BM_Downmix/12    4434 ns    4420 ns       157992
-  #BM_Downmix/13    4845 ns    4830 ns       144873
-  #BM_Downmix/14    4851 ns    4835 ns       144954
-  #BM_Downmix/15    4884 ns    4870 ns       144233
-  #BM_Downmix/16    5832 ns    5813 ns       120565
-  #BM_Downmix/17    5241 ns    5224 ns       133927
-  #BM_Downmix/18    5044 ns    5028 ns       139131
-  #BM_Downmix/19    5244 ns    5227 ns       132315
-  #BM_Downmix/20    5943 ns    5923 ns       117759
-  #BM_Downmix/21    5990 ns    5971 ns       117263
-  #BM_Downmix/22    4468 ns    4454 ns       156689
-  #BM_Downmix/23    7306 ns    7286 ns        95911
---
-downmix_benchmark: (generic fold)
-  #BM_Downmix/0     4722 ns    4707 ns       149847
-  #BM_Downmix/1     4714 ns    4698 ns       148748
-  #BM_Downmix/2     4794 ns    4779 ns       145661
-  #BM_Downmix/3     5053 ns    5035 ns       139172
-  #BM_Downmix/4     4695 ns    4678 ns       149762
-  #BM_Downmix/5     4381 ns    4368 ns       159675
-  #BM_Downmix/6     4387 ns    4373 ns       160267
-  #BM_Downmix/7     4732 ns    4717 ns       148514
-  #BM_Downmix/8     4430 ns    4415 ns       158133
-  #BM_Downmix/9     5101 ns    5084 ns       138353
-  #BM_Downmix/10    4356 ns    4343 ns       160821
-  #BM_Downmix/11    4397 ns    4383 ns       159995
-  #BM_Downmix/12    4438 ns    4424 ns       158117
-  #BM_Downmix/13    5243 ns    5226 ns       133863
-  #BM_Downmix/14    5259 ns    5242 ns       131855
-  #BM_Downmix/15    5245 ns    5228 ns       133686
-  #BM_Downmix/16    5829 ns    5809 ns       120543
-  #BM_Downmix/17    5245 ns    5228 ns       133533
-  #BM_Downmix/18    5935 ns    5916 ns       118282
-  #BM_Downmix/19    5263 ns    5245 ns       133657
-  #BM_Downmix/20    5998 ns    5978 ns       114693
-  #BM_Downmix/21    5989 ns    5969 ns       117450
-  #BM_Downmix/22    4442 ns    4431 ns       157913
-  #BM_Downmix/23    7309 ns    7290 ns        95797
+Pixel 4XL
+$ adb shell /data/benchmarktest/downmix_benchmark/vendor/downmix_benchmark
+
+--------------------------------------------------------
+Benchmark              Time             CPU   Iterations
+--------------------------------------------------------
+BM_Downmix/0        3638 ns         3624 ns       197517 AUDIO_CHANNEL_OUT_MONO
+BM_Downmix/1        4040 ns         4024 ns       178766
+BM_Downmix/2        4759 ns         4740 ns       134741 AUDIO_CHANNEL_OUT_STEREO
+BM_Downmix/3        6042 ns         6017 ns       129546 AUDIO_CHANNEL_OUT_2POINT1
+BM_Downmix/4        6897 ns         6868 ns        96316 AUDIO_CHANNEL_OUT_2POINT0POINT2
+BM_Downmix/5        2117 ns         2109 ns       331705 AUDIO_CHANNEL_OUT_QUAD
+BM_Downmix/6        2097 ns         2088 ns       335421 AUDIO_CHANNEL_OUT_QUAD_SIDE
+BM_Downmix/7        7291 ns         7263 ns        96256 AUDIO_CHANNEL_OUT_SURROUND
+BM_Downmix/8        8246 ns         8206 ns        84318 AUDIO_CHANNEL_OUT_2POINT1POINT2
+BM_Downmix/9        8341 ns         8303 ns        84298 AUDIO_CHANNEL_OUT_3POINT0POINT2
+BM_Downmix/10       7549 ns         7517 ns        84293 AUDIO_CHANNEL_OUT_PENTA
+BM_Downmix/11       9395 ns         9354 ns        75209 AUDIO_CHANNEL_OUT_3POINT1POINT2
+BM_Downmix/12       3267 ns         3253 ns       215596 AUDIO_CHANNEL_OUT_5POINT1
+BM_Downmix/13       3178 ns         3163 ns       220132 AUDIO_CHANNEL_OUT_5POINT1_SIDE
+BM_Downmix/14      10245 ns        10199 ns        67486 AUDIO_CHANNEL_OUT_6POINT1
+BM_Downmix/15      10975 ns        10929 ns        61359 AUDIO_CHANNEL_OUT_5POINT1POINT2
+BM_Downmix/16       3796 ns         3780 ns       184728 AUDIO_CHANNEL_OUT_7POINT1
+BM_Downmix/17      13562 ns        13503 ns        51823 AUDIO_CHANNEL_OUT_5POINT1POINT4
+BM_Downmix/18      13573 ns        13516 ns        51800 AUDIO_CHANNEL_OUT_7POINT1POINT2
+BM_Downmix/19      15502 ns        15435 ns        47147 AUDIO_CHANNEL_OUT_7POINT1POINT4
+BM_Downmix/20      16693 ns        16624 ns        42109 AUDIO_CHANNEL_OUT_13POINT_360RA
+BM_Downmix/21      28267 ns        28116 ns        24982 AUDIO_CHANNEL_OUT_22POINT2
 */
 
 static void BM_Downmix(benchmark::State& state) {
@@ -125,7 +99,7 @@
     std::minstd_rand gen(channelMask);
     std::uniform_real_distribution<> dis(-1.0f, 1.0f);
     std::vector<float> input(kFrameCount * channelCount);
-    std::vector<float> output(kFrameCount * 2);
+    std::vector<float> output(kFrameCount * FCC_2);
     for (auto& in : input) {
         in = dis(gen);
     }
@@ -187,7 +161,8 @@
         benchmark::ClobberMemory();
     }
 
-    state.SetComplexityN(state.range(0));
+    state.SetComplexityN(channelCount);
+    state.SetLabel(audio_channel_out_mask_to_string(channelMask));
 
     if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle); status != 0) {
         ALOGE("release_effect returned an error = %d\n", status);
diff --git a/media/libeffects/downmix/tests/downmix_tests.cpp b/media/libeffects/downmix/tests/downmix_tests.cpp
index d4b7a3a..20e19a3 100644
--- a/media/libeffects/downmix/tests/downmix_tests.cpp
+++ b/media/libeffects/downmix/tests/downmix_tests.cpp
@@ -33,16 +33,14 @@
     AUDIO_CHANNEL_OUT_STEREO,
     AUDIO_CHANNEL_OUT_2POINT1,
     AUDIO_CHANNEL_OUT_2POINT0POINT2,
-    AUDIO_CHANNEL_OUT_QUAD,
-    AUDIO_CHANNEL_OUT_QUAD_BACK,
+    AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
     AUDIO_CHANNEL_OUT_QUAD_SIDE,
     AUDIO_CHANNEL_OUT_SURROUND,
     AUDIO_CHANNEL_OUT_2POINT1POINT2,
     AUDIO_CHANNEL_OUT_3POINT0POINT2,
     AUDIO_CHANNEL_OUT_PENTA,
     AUDIO_CHANNEL_OUT_3POINT1POINT2,
-    AUDIO_CHANNEL_OUT_5POINT1,
-    AUDIO_CHANNEL_OUT_5POINT1_BACK,
+    AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
     AUDIO_CHANNEL_OUT_5POINT1_SIDE,
     AUDIO_CHANNEL_OUT_6POINT1,
     AUDIO_CHANNEL_OUT_5POINT1POINT2,
@@ -52,10 +50,72 @@
     AUDIO_CHANNEL_OUT_7POINT1POINT4,
     AUDIO_CHANNEL_OUT_13POINT_360RA,
     AUDIO_CHANNEL_OUT_22POINT2,
+    audio_channel_mask_t(AUDIO_CHANNEL_OUT_22POINT2
+            | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT),
 };
 
-static constexpr audio_channel_mask_t kConsideredChannels =
-    (audio_channel_mask_t)(AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER);
+constexpr float COEF_25 = 0.2508909536f;
+constexpr float COEF_35 = 0.3543928915f;
+constexpr float COEF_36 = 0.3552343859f;
+constexpr float COEF_61 = 0.6057043428f;
+
+constexpr inline float kScaleFromChannelIdxLeft[] = {
+    1.f,       // AUDIO_CHANNEL_OUT_FRONT_LEFT            = 0x1u,
+    0.f,       // AUDIO_CHANNEL_OUT_FRONT_RIGHT           = 0x2u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER          = 0x4u,
+    0.5f,      // AUDIO_CHANNEL_OUT_LOW_FREQUENCY         = 0x8u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_LEFT             = 0x10u,
+    0.f,       // AUDIO_CHANNEL_OUT_BACK_RIGHT            = 0x20u,
+    COEF_61,   // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER  = 0x40u,
+    COEF_25,   // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+    0.5f,      // AUDIO_CHANNEL_OUT_BACK_CENTER           = 0x100u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_LEFT             = 0x200u,
+    0.f,       // AUDIO_CHANNEL_OUT_SIDE_RIGHT            = 0x400u,
+    COEF_36,   // AUDIO_CHANNEL_OUT_TOP_CENTER            = 0x800u,
+    1.f,       // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT        = 0x1000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER      = 0x2000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT       = 0x4000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT         = 0x8000u,
+    COEF_35,   // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER       = 0x10000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT        = 0x20000u,
+    COEF_61,   // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT         = 0x40000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT        = 0x80000u,
+    1.f,       // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT     = 0x100000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER   = 0x200000u,
+    0.f,       // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT    = 0x400000u,
+    0.f,       // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2       = 0x800000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT       = 0x1000000u,
+    0.f,       // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT      = 0x2000000u,
+};
+
+constexpr inline float kScaleFromChannelIdxRight[] = {
+    0.f,       // AUDIO_CHANNEL_OUT_FRONT_LEFT            = 0x1u,
+    1.f,       // AUDIO_CHANNEL_OUT_FRONT_RIGHT           = 0x2u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER          = 0x4u,
+    0.5f,      // AUDIO_CHANNEL_OUT_LOW_FREQUENCY         = 0x8u,
+    0.f,       // AUDIO_CHANNEL_OUT_BACK_LEFT             = 0x10u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_RIGHT            = 0x20u,
+    COEF_25,   // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER  = 0x40u,
+    COEF_61,   // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+    0.5f,      // AUDIO_CHANNEL_OUT_BACK_CENTER           = 0x100u,
+    0.f,       // AUDIO_CHANNEL_OUT_SIDE_LEFT             = 0x200u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_RIGHT            = 0x400u,
+    COEF_36,   // AUDIO_CHANNEL_OUT_TOP_CENTER            = 0x800u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT        = 0x1000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER      = 0x2000u,
+    1.f,       // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT       = 0x4000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT         = 0x8000u,
+    COEF_35,   // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER       = 0x10000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT        = 0x20000u,
+    0.f,       // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT         = 0x40000u,
+    COEF_61,   // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT        = 0x80000u,
+    0.f,       // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT     = 0x100000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER   = 0x200000u,
+    1.f,       // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT    = 0x400000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2       = 0x800000u,
+    0.f,       // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT       = 0x1000000u,
+    M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT      = 0x2000000u,
+};
 
 // Downmix doesn't change with sample rate
 static constexpr size_t kSampleRates[] = {
@@ -93,8 +153,8 @@
     void testBalance(int sampleRate, audio_channel_mask_t channelMask) {
         using namespace ::android::audio_utils::channels;
 
-        size_t frames = 100;
-        unsigned outChannels = 2;
+        size_t frames = 100; // use an even number (2, 4, 6, ...); the stream alternates +1, -1.
+        constexpr unsigned outChannels = 2;
         unsigned inChannels = audio_channel_count_from_out_mask(channelMask);
         std::vector<float> input(frames * inChannels);
         std::vector<float> output(frames * outChannels);
@@ -102,7 +162,7 @@
         double savedPower[32][2]{};
         for (unsigned i = 0, channel = channelMask; channel != 0; ++i) {
             const int index = __builtin_ctz(channel);
-            ASSERT_LT(index, FCC_24);
+            ASSERT_LT(index, FCC_26);
             const int pairIndex = pairIdxFromChannelIdx(index);
             const AUDIO_GEOMETRY_SIDE side = sideFromChannelIdx(index);
             const int channelBit = 1 << index;
@@ -119,7 +179,7 @@
 
             auto stats = channelStatistics(output, 2 /* channels */);
             // printf("power: %s %s\n", stats[0].toString().c_str(), stats[1].toString().c_str());
-            double power[2] = { stats[0].getVariance(), stats[1].getVariance() };
+            double power[2] = { stats[0].getPopVariance(), stats[1].getPopVariance() };
 
             // Check symmetric power for pair channels on exchange of left/right position.
             // to do this, we save previous power measurements.
@@ -130,28 +190,39 @@
             savedPower[index][0] = power[0];
             savedPower[index][1] = power[1];
 
-            // Confirm exactly the mix amount prescribed by the existing downmix effect.
-            // For future changes to the downmix effect, the nearness needs to be relaxed
-            // to compare behavior S or earlier.
-            if ((channelBit & kConsideredChannels) == 0) {
-                // for channels not considered, expect 0 power for legacy downmix
-                EXPECT_EQ(0.f, power[0]);
-                EXPECT_EQ(0.f, power[1]);
-                continue;
-            }
-            constexpr float POWER_TOLERANCE = 0.01;  // for variance sum error.
+            constexpr float POWER_TOLERANCE = 0.001;
+            const float expectedPower =
+                    kScaleFromChannelIdxLeft[index] * kScaleFromChannelIdxLeft[index]
+                    + kScaleFromChannelIdxRight[index] * kScaleFromChannelIdxRight[index];
+            EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
             switch (side) {
             case AUDIO_GEOMETRY_SIDE_LEFT:
-                EXPECT_NEAR(0.25f, power[0], POWER_TOLERANCE);
+                if (channelBit == AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) {
+                    break;
+                }
                 EXPECT_EQ(0.f, power[1]);
                 break;
             case AUDIO_GEOMETRY_SIDE_RIGHT:
+                if (channelBit == AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) {
+                    break;
+                }
                 EXPECT_EQ(0.f, power[0]);
-                EXPECT_NEAR(0.25f, power[1], POWER_TOLERANCE);
                 break;
             case AUDIO_GEOMETRY_SIDE_CENTER:
-                EXPECT_NEAR(0.125f, power[0], POWER_TOLERANCE);
-                EXPECT_NEAR(0.125f, power[1], POWER_TOLERANCE);
+                if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+                    if (channelMask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+                        EXPECT_EQ(0.f, power[1]);
+                        break;
+                    } else {
+                        EXPECT_NEAR_EPSILON(power[0], power[1]); // always true
+                        EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
+                        break;
+                    }
+                } else if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+                    EXPECT_EQ(0.f, power[0]);
+                    EXPECT_NEAR(expectedPower, power[1], POWER_TOLERANCE);
+                    break;
+                }
                 EXPECT_NEAR_EPSILON(power[0], power[1]);
                 break;
             }
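Note: a worked instance of the expected-power assertion in the hunk above, derived from the scale
tables earlier in this file:

    // AUDIO_CHANNEL_OUT_FRONT_CENTER (index 2): left and right scales are both M_SQRT1_2,
    //   so expectedPower = 0.5 + 0.5 = 1.0, split evenly between power[0] and power[1].
    // AUDIO_CHANNEL_OUT_FRONT_LEFT (index 0): scales are 1 and 0,
    //   so expectedPower = 1.0, carried entirely by power[0], and power[1] must be exactly 0.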
@@ -178,6 +249,7 @@
                 handle_, EFFECT_CMD_SET_CONFIG,
                 sizeof(effect_config_t), &config_, &replySize, &reply);
         ASSERT_EQ(0, err);
+        ASSERT_EQ(0, reply);
         err = (downmixApi->command)(
                 handle_, EFFECT_CMD_ENABLE,
                 0, nullptr, &replySize, &reply);
@@ -188,6 +260,27 @@
         ASSERT_EQ(0, err);
     }
 
+    // This test assumes the channel mask is invalid.
+    void testInvalidChannelMask(audio_channel_mask_t invalidChannelMask) {
+        reconfig(48000 /* sampleRate */, invalidChannelMask);
+        const int32_t sessionId = 0;
+        const int32_t ioId = 0;
+        int32_t err = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(
+                &downmix_uuid_, sessionId, ioId,  &handle_);
+        ASSERT_EQ(0, err);
+
+        const struct effect_interface_s * const downmixApi = *handle_;
+        int32_t reply = 0;
+        uint32_t replySize = (uint32_t)sizeof(reply);
+        err = (downmixApi->command)(
+                handle_, EFFECT_CMD_SET_CONFIG,
+                sizeof(effect_config_t), &config_, &replySize, &reply);
+        ASSERT_EQ(0, err);
+        ASSERT_NE(0, reply);  // error has occurred.
+        err = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(handle_);
+        ASSERT_EQ(0, err);
+    }
+
 private:
     void reconfig(int sampleRate, audio_channel_mask_t channelMask) {
         config_.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
@@ -234,6 +327,16 @@
     int inputChannelCount_{};
 };
 
+TEST(DownmixTestSimple, invalidChannelMask) {
+    // Fill in a dummy test method to use DownmixTest outside of a parameterized test.
+    class DownmixTestComplete : public DownmixTest {
+        void TestBody() override {}
+    } downmixtest;
+
+    constexpr auto INVALID_CHANNEL_MASK = audio_channel_mask_t(1 << 31);
+    downmixtest.testInvalidChannelMask(INVALID_CHANNEL_MASK);
+}
+
 TEST_P(DownmixTest, basic) {
     testBalance(kSampleRates[std::get<0>(GetParam())],
             kChannelPositionMasks[std::get<1>(GetParam())]);
@@ -244,10 +347,11 @@
         ::testing::Combine(
                 ::testing::Range(0, (int)std::size(kSampleRates)),
                 ::testing::Range(0, (int)std::size(kChannelPositionMasks))
-                ));
-
-int main(int argc, /* const */ char** argv) {
-    ::testing::InitGoogleTest(&argc, argv);
-    int status = RUN_ALL_TESTS();
-    return status;
-}
+                ),
+        [](const testing::TestParamInfo<DownmixTest::ParamType>& info) {
+            const int index = std::get<1>(info.param);
+            const audio_channel_mask_t channelMask = kChannelPositionMasks[index];
+            const std::string name = std::string(audio_channel_out_mask_to_string(channelMask))
+                + "_" + std::to_string(std::get<0>(info.param)) + "_" + std::to_string(index);
+            return name;
+        });
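The name-generator lambda added to INSTANTIATE_TEST_SUITE_P above turns each (sampleRate index, channelMask index) pair into a readable test name instead of the default numeric suffix. A minimal, self-contained sketch of the same pattern, using hypothetical rate and mask-name tables rather than the ones in this test:

#include <iterator>
#include <string>
#include <tuple>
#include <gtest/gtest.h>

static constexpr int kRates[] = {44100, 48000};
static const char* kMaskNames[] = {"MONO", "STEREO", "QUAD"};

class NamedParamTest : public ::testing::TestWithParam<std::tuple<int, int>> {};

TEST_P(NamedParamTest, runs) {
    EXPECT_GT(kRates[std::get<0>(GetParam())], 0);
}

INSTANTIATE_TEST_SUITE_P(
        AllCombos, NamedParamTest,
        ::testing::Combine(::testing::Range(0, (int)std::size(kRates)),
                           ::testing::Range(0, (int)std::size(kMaskNames))),
        [](const testing::TestParamInfo<NamedParamTest::ParamType>& info) {
            // gtest param names may only contain [A-Za-z0-9_], so build them from safe tokens.
            return std::string(kMaskNames[std::get<1>(info.param)]) + "_" +
                   std::to_string(kRates[std::get<0>(info.param)]);
        });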
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
index 7e5caed..ccef5ab 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
@@ -135,7 +135,6 @@
     LVM_UINT32 fs =
             (LVM_UINT32)LVEQNB_SampleRateTab[(LVM_UINT16)pParams->SampleRate]; /* Sample rate */
     LVM_UINT32 fc;     /* Filter centre frequency */
-    LVM_INT16 QFactor; /* Filter Q factor */
 
     pInstance->NBands = pParams->NBands;
 
@@ -144,7 +143,6 @@
          * Get the filter settings
          */
         fc = (LVM_UINT32)pParams->pBandDefinition[i].Frequency; /* Get the band centre frequency */
-        QFactor = (LVM_INT16)pParams->pBandDefinition[i].QFactor; /* Get the band Q factor */
 
         pInstance->pBiquadType[i] = LVEQNB_SinglePrecision_Float; /* Default to single precision */
 
@@ -313,9 +311,9 @@
      */
     pInstance->eqBiquad.resize(pParams->NBands,
                                android::audio_utils::BiquadFilter<LVM_FLOAT>(pParams->NrChannels));
-    LVEQNB_ClearFilterHistory(pInstance);
 
     if (bChange || modeChange) {
+        LVEQNB_ClearFilterHistory(pInstance);
         /*
          * If the sample rate has changed clear the history
          */
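The hunk above defers LVEQNB_ClearFilterHistory() so the biquad delay lines are only cleared when the band settings or mode actually change. A hedged, generic sketch of that guard (illustrative names, not the LVM API):

#include <array>

struct Biquad {
    std::array<float, 2> history{};  // z^-1 and z^-2 delay elements
    void clearHistory() { history.fill(0.f); }
};

void onControlChange(Biquad& bq, bool bandsChanged, bool modeChanged) {
    if (bandsChanged || modeChanged) {
        // A discontinuity is expected here anyway, so zeroing the state is safe;
        // doing it unconditionally on every control call would glitch steady-state audio.
        bq.clearHistory();
    }
    // ...recompute coefficients as needed...
}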
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
index 8e63502..ffed6d4 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
@@ -421,7 +421,6 @@
      * Intermediate variables and temporary values
      */
     LVM_FLOAT T0;
-    LVM_FLOAT D;
     LVM_FLOAT A0;
     LVM_FLOAT B1;
     LVM_FLOAT B2;
@@ -444,9 +443,6 @@
      * Calculating the intermediate values
      */
     T0 = Frequency * LVPSA_Float_TwoPiOnFsTable[Fs]; /* T0 = 2 * Pi * Fc / Fs */
-    D = 3200;                                        /* Floating point value 1.000000 (1*100*2^5) */
-    /* Force D = 1 : the function was originally used for a peaking filter.
-       The D parameter do not exist for a BandPass filter coefficients */
 
     /*
      * Calculate the B2 coefficient
@@ -535,7 +531,6 @@
      * Intermediate variables and temporary values
      */
     LVM_FLOAT T0;
-    LVM_FLOAT D;
     LVM_FLOAT A0;
     LVM_FLOAT B1;
     LVM_FLOAT B2;
@@ -558,9 +553,6 @@
      * Calculating the intermediate values
      */
     T0 = Frequency * LVPSA_Float_TwoPiOnFsTable[Fs]; /* T0 = 2 * Pi * Fc / Fs */
-    D = 3200;                                        /* Floating point value 1.000000 (1*100*2^5) */
-    /* Force D = 1 : the function was originally used for a peaking filter.
-       The D parameter do not exist for a BandPass filter coefficients */
 
     /*
      * Calculate the B2 coefficient
diff --git a/media/libeffects/lvm/tests/EffectBundleTest.cpp b/media/libeffects/lvm/tests/EffectBundleTest.cpp
index 881ffb1..018cb7c 100644
--- a/media/libeffects/lvm/tests/EffectBundleTest.cpp
+++ b/media/libeffects/lvm/tests/EffectBundleTest.cpp
@@ -14,29 +14,39 @@
  * limitations under the License.
  */
 
+#include <system/audio_effects/effect_bassboost.h>
+#include <system/audio_effects/effect_equalizer.h>
+#include <system/audio_effects/effect_virtualizer.h>
 #include "EffectTestHelper.h"
-using namespace android;
 
-// Update isBassBoost, if the order of effects is updated
-constexpr effect_uuid_t kEffectUuids[] = {
+using namespace android;
+typedef enum {
+    EFFECT_BASS_BOOST,
+    EFFECT_EQUALIZER,
+    EFFECT_VIRTUALIZER,
+    EFFECT_VOLUME
+} effect_type_t;
+
+const std::map<effect_type_t, effect_uuid_t> kEffectUuids = {
         // NXP SW BassBoost
-        {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
-        // NXP SW Virtualizer
-        {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        {EFFECT_BASS_BOOST,
+         {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
         // NXP SW Equalizer
-        {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        {EFFECT_EQUALIZER,
+         {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
+        // NXP SW Virtualizer
+        {EFFECT_VIRTUALIZER,
+         {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
         // NXP SW Volume
-        {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        {EFFECT_VOLUME, {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
 };
 
-static bool isBassBoost(const effect_uuid_t* uuid) {
-    // Update this, if the order of effects in kEffectUuids is updated
-    return uuid == &kEffectUuids[0];
-}
+const size_t kNumEffectUuids = std::size(kEffectUuids);
 
-constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
+constexpr float kMinAmplitude = -1.0f;
+constexpr float kMaxAmplitude = 1.0f;
 
-typedef std::tuple<int, int, int, int, int> SingleEffectTestParam;
+using SingleEffectTestParam = std::tuple<int, int, int, int, int>;
 class SingleEffectTest : public ::testing::TestWithParam<SingleEffectTestParam> {
   public:
     SingleEffectTest()
@@ -46,7 +56,8 @@
           mFrameCount(EffectTestHelper::kFrameCounts[std::get<2>(GetParam())]),
           mLoopCount(EffectTestHelper::kLoopCounts[std::get<3>(GetParam())]),
           mTotalFrameCount(mFrameCount * mLoopCount),
-          mUuid(&kEffectUuids[std::get<4>(GetParam())]) {}
+          mEffectType((effect_type_t)std::get<4>(GetParam())),
+          mUuid(kEffectUuids.at(mEffectType)) {}
 
     const size_t mChMask;
     const size_t mChannelCount;
@@ -54,7 +65,8 @@
     const size_t mFrameCount;
     const size_t mLoopCount;
     const size_t mTotalFrameCount;
-    const effect_uuid_t* mUuid;
+    const effect_type_t mEffectType;
+    const effect_uuid_t mUuid;
 };
 
 // Tests applying a single effect
@@ -63,7 +75,7 @@
                  << "chMask: " << mChMask << " sampleRate: " << mSampleRate
                  << " frameCount: " << mFrameCount << " loopCount: " << mLoopCount);
 
-    EffectTestHelper effect(mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+    EffectTestHelper effect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
 
     ASSERT_NO_FATAL_FAILURE(effect.createEffect());
     ASSERT_NO_FATAL_FAILURE(effect.setConfig());
@@ -72,7 +84,7 @@
     std::vector<float> input(mTotalFrameCount * mChannelCount);
     std::vector<float> output(mTotalFrameCount * mChannelCount);
     std::minstd_rand gen(mChMask);
-    std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+    std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
     for (auto& in : input) {
         in = dis(gen);
     }
@@ -88,7 +100,7 @@
                            ::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
                            ::testing::Range(0, (int)kNumEffectUuids)));
 
-typedef std::tuple<int, int, int, int> SingleEffectComparisonTestParam;
+using SingleEffectComparisonTestParam = std::tuple<int, int, int, int>;
 class SingleEffectComparisonTest
     : public ::testing::TestWithParam<SingleEffectComparisonTestParam> {
   public:
@@ -97,13 +109,15 @@
           mFrameCount(EffectTestHelper::kFrameCounts[std::get<1>(GetParam())]),
           mLoopCount(EffectTestHelper::kLoopCounts[std::get<2>(GetParam())]),
           mTotalFrameCount(mFrameCount * mLoopCount),
-          mUuid(&kEffectUuids[std::get<3>(GetParam())]) {}
+          mEffectType((effect_type_t)std::get<3>(GetParam())),
+          mUuid(kEffectUuids.at(mEffectType)) {}
 
     const size_t mSampleRate;
     const size_t mFrameCount;
     const size_t mLoopCount;
     const size_t mTotalFrameCount;
-    const effect_uuid_t* mUuid;
+    const effect_type_t mEffectType;
+    const effect_uuid_t mUuid;
 };
 
 // Compares first two channels in multi-channel output to stereo output when same effect is applied
@@ -115,7 +129,7 @@
     std::vector<float> monoInput(mTotalFrameCount);
 
     std::minstd_rand gen(mSampleRate);
-    std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+    std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
     for (auto& in : monoInput) {
         in = dis(gen);
     }
@@ -126,7 +140,7 @@
                     mTotalFrameCount * sizeof(float) * FCC_1);
 
     // Apply effect on stereo channels
-    EffectTestHelper stereoEffect(mUuid, AUDIO_CHANNEL_OUT_STEREO, AUDIO_CHANNEL_OUT_STEREO,
+    EffectTestHelper stereoEffect(&mUuid, AUDIO_CHANNEL_OUT_STEREO, AUDIO_CHANNEL_OUT_STEREO,
                                   mSampleRate, mFrameCount, mLoopCount);
 
     ASSERT_NO_FATAL_FAILURE(stereoEffect.createEffect());
@@ -142,7 +156,7 @@
 
     for (size_t chMask : EffectTestHelper::kChMasks) {
         size_t channelCount = audio_channel_count_from_out_mask(chMask);
-        EffectTestHelper testEffect(mUuid, chMask, chMask, mSampleRate, mFrameCount, mLoopCount);
+        EffectTestHelper testEffect(&mUuid, chMask, chMask, mSampleRate, mFrameCount, mLoopCount);
 
         ASSERT_NO_FATAL_FAILURE(testEffect.createEffect());
         ASSERT_NO_FATAL_FAILURE(testEffect.setConfig());
@@ -170,7 +184,7 @@
         memcpy_to_i16_from_float(stereoTestI16.data(), stereoTestOutput.data(),
                                  mTotalFrameCount * FCC_2);
 
-        if (isBassBoost(mUuid)) {
+        if (EFFECT_BASS_BOOST == mEffectType) {
             // SNR must be above the threshold
             float snr = computeSnr<int16_t>(stereoRefI16.data(), stereoTestI16.data(),
                                             mTotalFrameCount * FCC_2);
@@ -191,6 +205,135 @@
                            ::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
                            ::testing::Range(0, (int)kNumEffectUuids)));
 
+using SingleEffectDefaultSetParamTestParam = std::tuple<int, int, int>;
+class SingleEffectDefaultSetParamTest
+    : public ::testing::TestWithParam<SingleEffectDefaultSetParamTestParam> {
+  public:
+    SingleEffectDefaultSetParamTest()
+        : mChMask(EffectTestHelper::kChMasks[std::get<0>(GetParam())]),
+          mChannelCount(audio_channel_count_from_out_mask(mChMask)),
+          mSampleRate(16000),
+          mFrameCount(EffectTestHelper::kFrameCounts[std::get<1>(GetParam())]),
+          mLoopCount(1),
+          mTotalFrameCount(mFrameCount * mLoopCount),
+          mEffectType((effect_type_t)std::get<2>(GetParam())),
+          mUuid(kEffectUuids.at(mEffectType)) {}
+
+    const size_t mChMask;
+    const size_t mChannelCount;
+    const size_t mSampleRate;
+    const size_t mFrameCount;
+    const size_t mLoopCount;
+    const size_t mTotalFrameCount;
+    const effect_type_t mEffectType;
+    const effect_uuid_t mUuid;
+};
+
+// Tests verifying that redundant setParam calls do not alter output
+TEST_P(SingleEffectDefaultSetParamTest, SimpleProcess) {
+    SCOPED_TRACE(testing::Message()
+                 << "chMask: " << mChMask << " sampleRate: " << mSampleRate
+                 << " frameCount: " << mFrameCount << " loopCount: " << mLoopCount);
+    // effect.process() handles mTotalFrameCount * mChannelCount samples in each call.
+    // This test calls process() twice per effect, so the input and output vectors are
+    // allocated with twice the number of samples processed in a single call.
+    size_t totalNumSamples = 2 * mTotalFrameCount * mChannelCount;
+    // Initialize input buffer with deterministic pseudo-random values
+    std::vector<float> input(totalNumSamples);
+    std::minstd_rand gen(mChMask);
+    std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
+    for (auto& in : input) {
+        in = dis(gen);
+    }
+
+    uint32_t key;
+    int32_t value1, value2;
+    switch (mEffectType) {
+        case EFFECT_BASS_BOOST:
+            key = BASSBOOST_PARAM_STRENGTH;
+            value1 = 1;
+            value2 = 14;
+            break;
+        case EFFECT_VIRTUALIZER:
+            key = VIRTUALIZER_PARAM_STRENGTH;
+            value1 = 0;
+            value2 = 100;
+            break;
+        case EFFECT_EQUALIZER:
+            key = EQ_PARAM_CUR_PRESET;
+            value1 = 0;
+            value2 = 1;
+            break;
+        case EFFECT_VOLUME:
+            key = 0 /* VOLUME_PARAM_LEVEL */;
+            value1 = 0;
+            value2 = -100;
+            break;
+        default:
+            FAIL() << "Unsupported effect type : " << mEffectType;
+    }
+
+    EffectTestHelper refEffect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+
+    ASSERT_NO_FATAL_FAILURE(refEffect.createEffect());
+    ASSERT_NO_FATAL_FAILURE(refEffect.setConfig());
+
+    if (EFFECT_BASS_BOOST == mEffectType) {
+        ASSERT_NO_FATAL_FAILURE(refEffect.setParam<int16_t>(key, value1));
+    } else {
+        ASSERT_NO_FATAL_FAILURE(refEffect.setParam<int32_t>(key, value1));
+    }
+    std::vector<float> refOutput(totalNumSamples);
+    float* pInput = input.data();
+    float* pOutput = refOutput.data();
+    ASSERT_NO_FATAL_FAILURE(refEffect.process(pInput, pOutput));
+
+    pInput += totalNumSamples / 2;
+    pOutput += totalNumSamples / 2;
+    ASSERT_NO_FATAL_FAILURE(refEffect.process(pInput, pOutput));
+    ASSERT_NO_FATAL_FAILURE(refEffect.releaseEffect());
+
+    EffectTestHelper testEffect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+
+    ASSERT_NO_FATAL_FAILURE(testEffect.createEffect());
+    ASSERT_NO_FATAL_FAILURE(testEffect.setConfig());
+
+    if (EFFECT_BASS_BOOST == mEffectType) {
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value1));
+    } else {
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value1));
+    }
+
+    std::vector<float> testOutput(totalNumSamples);
+    pInput = input.data();
+    pOutput = testOutput.data();
+    ASSERT_NO_FATAL_FAILURE(testEffect.process(pInput, pOutput));
+
+    // Call setParam once to change the parameters, and then call setParam again
+    // to restore the parameters to the initial state, making the first setParam
+    // call redundant
+    if (EFFECT_BASS_BOOST == mEffectType) {
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value2));
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value1));
+    } else {
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value2));
+        ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value1));
+    }
+
+    pInput += totalNumSamples / 2;
+    pOutput += totalNumSamples / 2;
+    ASSERT_NO_FATAL_FAILURE(testEffect.process(pInput, pOutput));
+    ASSERT_NO_FATAL_FAILURE(testEffect.releaseEffect());
+    ASSERT_TRUE(areNearlySame(refOutput.data(), testOutput.data(), totalNumSamples))
+            << "Outputs do not match with default setParam calls";
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        EffectBundleTestAll, SingleEffectDefaultSetParamTest,
+        ::testing::Combine(::testing::Range(0, (int)EffectTestHelper::kNumChMasks),
+                           ::testing::Range(0, (int)EffectTestHelper::kNumFrameCounts),
+                           ::testing::Range(0, (int)kNumEffectUuids)));
+
 int main(int argc, char** argv) {
     ::testing::InitGoogleTest(&argc, argv);
     int status = RUN_ALL_TESTS();
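SingleEffectDefaultSetParamTest above checks that setting a parameter away from and back to its original value leaves the processed output unchanged. The same idempotence check, reduced to a self-contained sketch with a hypothetical effect in place of EffectTestHelper:

#include <cassert>
#include <cstddef>
#include <vector>

struct FakeEffect {
    int strength = 1;
    void setStrength(int s) { strength = s; }
    void process(const std::vector<float>& in, std::vector<float>& out) const {
        out.resize(in.size());
        for (size_t i = 0; i < in.size(); ++i) out[i] = in[i] * (1.f + 0.01f * strength);
    }
};

int main() {
    const std::vector<float> input = {0.1f, -0.2f, 0.3f};
    std::vector<float> ref, test;

    FakeEffect reference;
    reference.setStrength(1);
    reference.process(input, ref);

    FakeEffect candidate;
    candidate.setStrength(1);
    candidate.setStrength(14);  // change the parameter...
    candidate.setStrength(1);   // ...and restore it, making the previous call redundant
    candidate.process(input, test);

    assert(ref == test);  // identical arithmetic, so the outputs must match exactly
    return 0;
}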
diff --git a/media/libeffects/lvm/tests/EffectTestHelper.cpp b/media/libeffects/lvm/tests/EffectTestHelper.cpp
index 625c15a..ec727c7 100644
--- a/media/libeffects/lvm/tests/EffectTestHelper.cpp
+++ b/media/libeffects/lvm/tests/EffectTestHelper.cpp
@@ -50,23 +50,6 @@
     ASSERT_EQ(reply, 0) << "cmd_enable reply non zero " << reply;
 }
 
-void EffectTestHelper::setParam(uint32_t type, uint32_t value) {
-    int reply = 0;
-    uint32_t replySize = sizeof(reply);
-    uint32_t paramData[2] = {type, value};
-    auto effectParam = new effect_param_t[sizeof(effect_param_t) + sizeof(paramData)];
-    memcpy(&effectParam->data[0], &paramData[0], sizeof(paramData));
-    effectParam->psize = sizeof(paramData[0]);
-    effectParam->vsize = sizeof(paramData[1]);
-    int status = (*mEffectHandle)
-                         ->command(mEffectHandle, EFFECT_CMD_SET_PARAM,
-                                   sizeof(effect_param_t) + sizeof(paramData), effectParam,
-                                   &replySize, &reply);
-    delete[] effectParam;
-    ASSERT_EQ(status, 0) << "set_param returned an error " << status;
-    ASSERT_EQ(reply, 0) << "set_param reply non zero " << reply;
-}
-
 void EffectTestHelper::process(float* input, float* output) {
     audio_buffer_t inBuffer = {.frameCount = mFrameCount, .f32 = input};
     audio_buffer_t outBuffer = {.frameCount = mFrameCount, .f32 = output};
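The setParam() helper removed here is replaced by the templated version added to EffectTestHelper.h below; both build an EFFECT_CMD_SET_PARAM blob in which the key bytes precede the value bytes and psize/vsize record their sizes. A simplified stand-in for that packing (the real effect_param_t also carries a status field and is passed to the effect's command() entry point):

#include <cstdint>
#include <cstring>
#include <vector>

template <typename V>
std::vector<uint8_t> packParam(uint32_t key, V value) {
    // Layout sketch: [psize][vsize][key bytes][value bytes]
    std::vector<uint8_t> blob(2 * sizeof(uint32_t) + sizeof(key) + sizeof(value));
    const uint32_t psize = sizeof(key);
    const uint32_t vsize = sizeof(value);
    uint8_t* p = blob.data();
    std::memcpy(p, &psize, sizeof(psize)); p += sizeof(psize);
    std::memcpy(p, &vsize, sizeof(vsize)); p += sizeof(vsize);
    std::memcpy(p, &key, sizeof(key));     p += sizeof(key);
    std::memcpy(p, &value, sizeof(value));
    return blob;
}

In this simplified layout, packParam<int16_t>(BASSBOOST_PARAM_STRENGTH, 14) would produce a 14-byte blob: two 4-byte size fields, a 4-byte key, and a 2-byte value.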
diff --git a/media/libeffects/lvm/tests/EffectTestHelper.h b/media/libeffects/lvm/tests/EffectTestHelper.h
index 3854d46..bcee84e 100644
--- a/media/libeffects/lvm/tests/EffectTestHelper.h
+++ b/media/libeffects/lvm/tests/EffectTestHelper.h
@@ -50,6 +50,23 @@
     return snr;
 }
 
+template <typename T>
+static bool areNearlySame(const T* ref, const T* tst, size_t count) {
+    T delta;
+    if constexpr (std::is_floating_point_v<T>) {
+        delta = std::numeric_limits<T>::epsilon();
+    } else {
+        delta = 1;
+    }
+    for (size_t i = 0; i < count; ++i) {
+        const double diff(tst[i] - ref[i]);
+        if (std::abs(diff) > delta) {
+            return false;
+        }
+    }
+    return true;
+}
+
 class EffectTestHelper {
   public:
     EffectTestHelper(const effect_uuid_t* uuid, size_t inChMask, size_t outChMask,
@@ -65,7 +82,25 @@
     void createEffect();
     void releaseEffect();
     void setConfig();
-    void setParam(uint32_t type, uint32_t val);
+    template <typename VALUE_DTYPE>
+    void setParam(uint32_t type, VALUE_DTYPE const value) {
+        int reply = 0;
+        uint32_t replySize = sizeof(reply);
+
+        uint8_t paramData[sizeof(effect_param_t) + sizeof(type) + sizeof(value)];
+        auto effectParam = (effect_param_t*)paramData;
+
+        memcpy(&effectParam->data[0], &type, sizeof(type));
+        memcpy(&effectParam->data[sizeof(type)], &value, sizeof(value));
+        effectParam->psize = sizeof(type);
+        effectParam->vsize = sizeof(value);
+        int status = (*mEffectHandle)
+                             ->command(mEffectHandle, EFFECT_CMD_SET_PARAM,
+                                       sizeof(effect_param_t) + sizeof(type) + sizeof(value),
+                                       effectParam, &replySize, &reply);
+        ASSERT_EQ(status, 0) << "set_param returned an error " << status;
+        ASSERT_EQ(reply, 0) << "set_param reply non zero " << reply;
+    };
     void process(float* input, float* output);
 
     // Corresponds to SNR for 1 bit difference between two int16_t signals
diff --git a/media/libeffects/preprocessing/.clang-format b/media/libeffects/preprocessing/.clang-format
deleted file mode 120000
index f1b4f69..0000000
--- a/media/libeffects/preprocessing/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/libeffects/preprocessing/tests/correlation.cpp b/media/libeffects/preprocessing/tests/correlation.cpp
index eb56fc3..0853673 100644
--- a/media/libeffects/preprocessing/tests/correlation.cpp
+++ b/media/libeffects/preprocessing/tests/correlation.cpp
@@ -36,7 +36,7 @@
                                                                    const int16_t* sigY, int len,
                                                                    int16_t enableCrossCorr) {
     float maxCorrVal = 0.f, prevCorrVal = 0.f;
-    int delay = 0, peakIndex = 0, flag = 0;
+    int peakIndex = 0, flag = 0;
     int loopLim = (1 == enableCrossCorr) ? len : kMinLoopLimitValue;
     std::vector<int> peakIndexVect(kNumPeaks, 0);
     std::vector<float> peakValueVect(kNumPeaks, 0.f);
@@ -47,7 +47,6 @@
         }
         corrVal /= len - i;
         if (corrVal > maxCorrVal) {
-            delay = i;
             maxCorrVal = corrVal;
         }
         // Correlation peaks are expected to be observed at equal intervals. The interval length is
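The `delay` variable removed above tracked the argmax lag of the normalized cross-correlation but was never read by the caller. A hedged sketch (not the AOSP helper) of the computation it belonged to, normalizing each lag by its overlap length and keeping the lag with the largest value:

#include <algorithm>
#include <cstdint>
#include <vector>

int bestLag(const std::vector<int16_t>& x, const std::vector<int16_t>& y) {
    const int len = static_cast<int>(std::min(x.size(), y.size()));
    float maxCorr = 0.f;
    int lag = 0;
    for (int i = 0; i < len; ++i) {
        float corr = 0.f;
        for (int j = i; j < len; ++j) {
            corr += static_cast<float>(x[j]) * static_cast<float>(y[j - i]);
        }
        corr /= static_cast<float>(len - i);  // normalize by the overlap length
        if (corr > maxCorr) {
            maxCorr = corr;
            lag = i;
        }
    }
    return lag;
}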
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
index 6141824..63b769e 100644
--- a/media/libheadtracking/Android.bp
+++ b/media/libheadtracking/Android.bp
@@ -1,3 +1,12 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_license"],
+}
+
 cc_library {
     name: "libheadtracking",
     host_supported: true,
@@ -22,6 +31,35 @@
     ],
 }
 
+cc_library {
+    name: "libheadtracking-binding",
+    srcs: [
+      "SensorPoseProvider.cpp",
+    ],
+    shared_libs: [
+        "libheadtracking",
+        "liblog",
+        "libsensor",
+        "libutils",
+    ],
+    export_shared_lib_headers: [
+        "libheadtracking",
+    ],
+}
+
+cc_binary {
+    name: "SensorPoseProvider-example",
+    srcs: [
+        "SensorPoseProvider-example.cpp",
+    ],
+    shared_libs: [
+        "libheadtracking",
+        "libheadtracking-binding",
+        "libsensor",
+        "libutils",
+    ],
+}
+
 cc_test_host {
     name: "libheadtracking-test",
     srcs: [
diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp b/media/libheadtracking/HeadTrackingProcessor-test.cpp
index ae5567d..299192f 100644
--- a/media/libheadtracking/HeadTrackingProcessor-test.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp
@@ -48,10 +48,15 @@
 
     std::unique_ptr<HeadTrackingProcessor> processor =
             createHeadTrackingProcessor(Options{}, HeadTrackingMode::SCREEN_RELATIVE);
+
+    // Establish a baseline for the drift compensators.
+    processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+    processor->setWorldToScreenPose(0, Pose3f());
+
+    processor->setDisplayOrientation(physicalToLogical);
     processor->setWorldToHeadPose(0, worldToHead, Twist3f());
     processor->setWorldToScreenPose(0, worldToScreen);
     processor->setScreenToStagePose(screenToStage);
-    processor->setDisplayOrientation(physicalToLogical);
     processor->calculate(0);
     ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
     EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * worldToScreen *
@@ -76,6 +81,11 @@
 
     std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
             Options{.predictionDuration = 2.f}, HeadTrackingMode::WORLD_RELATIVE);
+
+    // Establish a baseline for the drift compensators.
+    processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+    processor->setWorldToScreenPose(0, Pose3f());
+
     processor->setWorldToHeadPose(0, worldToHead, headTwist);
     processor->setWorldToScreenPose(0, worldToScreen);
     processor->calculate(0);
@@ -100,6 +110,10 @@
     std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
             Options{.maxTranslationalVelocity = 1}, HeadTrackingMode::STATIC);
 
+    // Establish a baseline for the drift compensators.
+    processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+    processor->setWorldToScreenPose(0, Pose3f());
+
     processor->calculate(0);
 
     processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
index ee60fa5..47f7cf0 100644
--- a/media/libheadtracking/HeadTrackingProcessor.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -57,7 +57,14 @@
     }
 
     void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) override {
-        mScreenPoseDriftCompensator.setInput(timestamp, worldToScreen);
+        if (mPhysicalToLogicalAngle != mPendingPhysicalToLogicalAngle) {
+            // We're introducing an artificial discontinuity. Enable the rate limiter.
+            mRateLimiter.enable();
+            mPhysicalToLogicalAngle = mPendingPhysicalToLogicalAngle;
+        }
+
+        mScreenPoseDriftCompensator.setInput(
+                timestamp, worldToScreen * Pose3f(rotateY(-mPhysicalToLogicalAngle)));
         mWorldToScreenTimestamp = timestamp;
     }
 
@@ -66,10 +73,7 @@
     }
 
     void setDisplayOrientation(float physicalToLogicalAngle) override {
-        if (mPhysicalToLogicalAngle != physicalToLogicalAngle) {
-            mRateLimiter.enable();
-        }
-        mPhysicalToLogicalAngle = physicalToLogicalAngle;
+        mPendingPhysicalToLogicalAngle = physicalToLogicalAngle;
     }
 
     void calculate(int64_t timestamp) override {
@@ -80,8 +84,7 @@
         }
 
         if (mWorldToScreenTimestamp.has_value()) {
-            const Pose3f worldToLogicalScreen = mScreenPoseDriftCompensator.getOutput() *
-                                                Pose3f(rotateY(-mPhysicalToLogicalAngle));
+            const Pose3f worldToLogicalScreen = mScreenPoseDriftCompensator.getOutput();
             mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
                                                    worldToLogicalScreen);
         }
@@ -108,15 +111,30 @@
 
     HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); }
 
-    void recenter() override {
-        mHeadPoseDriftCompensator.recenter();
-        mScreenPoseDriftCompensator.recenter();
-        mRateLimiter.enable();
+    void recenter(bool recenterHead, bool recenterScreen) override {
+        if (recenterHead) {
+            mHeadPoseDriftCompensator.recenter();
+        }
+        if (recenterScreen) {
+            mScreenPoseDriftCompensator.recenter();
+        }
+
+        // If a sensor being recentered is included in the current mode, apply rate limiting to
+        // avoid discontinuities.
+        HeadTrackingMode mode = mModeSelector.getActualMode();
+        if ((recenterHead && (mode == HeadTrackingMode::WORLD_RELATIVE ||
+                              mode == HeadTrackingMode::SCREEN_RELATIVE)) ||
+            (recenterScreen && mode == HeadTrackingMode::SCREEN_RELATIVE)) {
+            mRateLimiter.enable();
+        }
     }
 
   private:
     const Options mOptions;
     float mPhysicalToLogicalAngle = 0;
+    // We store the physical-to-logical angle as "pending" until the next world-to-screen sample
+    // that it applies to arrives.
+    float mPendingPhysicalToLogicalAngle = 0;
     std::optional<int64_t> mWorldToHeadTimestamp;
     std::optional<int64_t> mWorldToScreenTimestamp;
     Pose3f mHeadToStagePose;
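The change above latches a newly reported display orientation as `mPendingPhysicalToLogicalAngle` and only commits it when the next world-to-screen sample arrives, arming the rate limiter at that point to smooth the artificial jump. The pattern, reduced to a generic sketch:

struct OrientationLatch {
    float applied = 0.f;   // angle currently baked into the screen pose
    float pending = 0.f;   // most recently reported angle, not yet applied
    bool rateLimitArmed = false;

    void setDisplayOrientation(float angle) { pending = angle; }

    void onWorldToScreenSample() {
        if (applied != pending) {
            rateLimitArmed = true;  // the pose stream is about to jump discontinuously
            applied = pending;
        }
        // ...feed the sample, rotated by `applied`, into the drift compensator...
    }
};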
diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp
index 3ff6a9b..a9e18ce 100644
--- a/media/libheadtracking/Pose-test.cpp
+++ b/media/libheadtracking/Pose-test.cpp
@@ -110,6 +110,29 @@
     EXPECT_TRUE(std::get<1>(result));
 }
 
+TEST(Pose, FloatVectorRoundTrip1) {
+    // Rotation vector magnitude must be less than Pi.
+    std::vector<float> vec = { 1, 2, 3, 0.4, 0.5, 0.6};
+    std::optional<Pose3f> pose = Pose3f::fromVector(vec);
+    ASSERT_TRUE(pose.has_value());
+    std::vector<float> reconstructed = pose->toVector();
+    EXPECT_EQ(vec, reconstructed);
+}
+
+TEST(Pose, FloatVectorRoundTrip2) {
+    Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+    std::vector<float> vec = pose.toVector();
+    std::optional<Pose3f> reconstructed = Pose3f::fromVector(vec);
+    ASSERT_TRUE(reconstructed.has_value());
+    EXPECT_EQ(pose, reconstructed.value());
+}
+
+TEST(Pose, FloatVectorInvalid) {
+    EXPECT_FALSE(Pose3f::fromVector({}).has_value());
+    EXPECT_FALSE(Pose3f::fromVector({1, 2, 3, 4, 5}).has_value());
+    EXPECT_FALSE(Pose3f::fromVector({1, 2, 3, 4, 5, 6, 7}).has_value());
+}
+
 }  // namespace
 }  // namespace media
 }  // namespace android
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
index 9eeb2b1..47241ce 100644
--- a/media/libheadtracking/Pose.cpp
+++ b/media/libheadtracking/Pose.cpp
@@ -16,10 +16,25 @@
 
 #include "media/Pose.h"
 #include "media/Twist.h"
+#include "QuaternionUtil.h"
 
 namespace android {
 namespace media {
 
+using Eigen::Vector3f;
+
+std::optional<Pose3f> Pose3f::fromVector(const std::vector<float>& vec) {
+    if (vec.size() != 6) {
+        return std::nullopt;
+    }
+    return Pose3f({vec[0], vec[1], vec[2]}, rotationVectorToQuaternion({vec[3], vec[4], vec[5]}));
+}
+
+std::vector<float> Pose3f::toVector() const {
+    Eigen::Vector3f rot = quaternionToRotationVector(mRotation);
+    return {mTranslation[0], mTranslation[1], mTranslation[2], rot[0], rot[1], rot[2]};
+}
+
 std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
                                            float maxTranslationalVelocity,
                                            float maxRotationalVelocity) {
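A usage sketch for the new conversion helpers, assuming the Pose3f API declared in media/Pose.h: a pose flattens to {tx, ty, tz, rx, ry, rz} and reconstructs via fromVector(), which returns nullopt for any size other than six. The comparison mirrors the new unit tests, which use EXPECT_EQ on the round-tripped pose.

#include <optional>
#include <vector>

#include <media/Pose.h>

using android::media::Pose3f;

bool roundTrips(const Pose3f& pose) {
    const std::vector<float> vec = pose.toVector();              // 6 floats
    const std::optional<Pose3f> back = Pose3f::fromVector(vec);  // nullopt if vec.size() != 6
    return back.has_value() && back.value() == pose;
}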
diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp
index 74f4bee..df0a05f 100644
--- a/media/libheadtracking/PoseDriftCompensator-test.cpp
+++ b/media/libheadtracking/PoseDriftCompensator-test.cpp
@@ -39,17 +39,22 @@
     Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
     PoseDriftCompensator comp(Options{});
 
+    // First pose sets the baseline.
     comp.setInput(1000, pose1);
-    EXPECT_EQ(comp.getOutput(), pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
 
     comp.setInput(2000, pose2);
-    EXPECT_EQ(comp.getOutput(), pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
 
+    // Recentering resets the baseline.
     comp.recenter();
     EXPECT_EQ(comp.getOutput(), Pose3f());
 
     comp.setInput(3000, pose1);
-    EXPECT_EQ(comp.getOutput(), pose2.inverse() * pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(4000, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
 }
 
 TEST(PoseDriftCompensator, NoDriftZeroTime) {
@@ -58,16 +63,19 @@
     PoseDriftCompensator comp(Options{});
 
     comp.setInput(1000, pose1);
-    EXPECT_EQ(comp.getOutput(), pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
 
     comp.setInput(1000, pose2);
-    EXPECT_EQ(comp.getOutput(), pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
 
     comp.recenter();
     EXPECT_EQ(comp.getOutput(), Pose3f());
 
     comp.setInput(1000, pose1);
-    EXPECT_EQ(comp.getOutput(), pose2.inverse() * pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(1000, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
 }
 
 TEST(PoseDriftCompensator, Asymptotic) {
@@ -92,16 +100,19 @@
             Options{.translationalDriftTimeConstant = 1e7, .rotationalDriftTimeConstant = 1e7});
 
     comp.setInput(0, pose1);
-    EXPECT_EQ(comp.getOutput(), pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
 
     comp.setInput(1, pose2);
-    EXPECT_EQ(comp.getOutput(), pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
 
     comp.recenter();
     EXPECT_EQ(comp.getOutput(), Pose3f());
 
     comp.setInput(2, pose1);
-    EXPECT_EQ(comp.getOutput(), pose2.inverse() * pose1);
+    EXPECT_EQ(comp.getOutput(), Pose3f());
+
+    comp.setInput(3, pose2);
+    EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
 }
 
 TEST(PoseDriftCompensator, Drift) {
@@ -109,6 +120,9 @@
     PoseDriftCompensator comp(
             Options{.translationalDriftTimeConstant = 500, .rotationalDriftTimeConstant = 1000});
 
+    // Establish a baseline.
+    comp.setInput(1000, Pose3f());
+
     // Initial pose is used as is.
     comp.setInput(1000, pose1);
     EXPECT_EQ(comp.getOutput(), pose1);
diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp
index 9dfe172..0e90cad 100644
--- a/media/libheadtracking/PoseDriftCompensator.cpp
+++ b/media/libheadtracking/PoseDriftCompensator.cpp
@@ -29,10 +29,8 @@
 PoseDriftCompensator::PoseDriftCompensator(const Options& options) : mOptions(options) {}
 
 void PoseDriftCompensator::setInput(int64_t timestamp, const Pose3f& input) {
-    if (!mTimestamp.has_value()) {
-        // First input sample sets the output directly.
-        mOutput = input;
-    } else {
+    if (mTimestamp.has_value()) {
+        // Avoid computation upon first input (only sets the initial state).
         Pose3f prevInputToInput = mPrevInput.inverse() * input;
         mOutput = scale(mOutput, timestamp - mTimestamp.value()) * prevInputToInput;
     }
@@ -41,6 +39,7 @@
 }
 
 void PoseDriftCompensator::recenter() {
+    mTimestamp.reset();
     mOutput = Pose3f();
 }
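The reworked compensator makes the first setInput() after construction or recenter() establish a baseline only, with later outputs expressed relative to it, as the updated tests encode. A self-contained sketch of that contract (generic pose type, drift decay omitted):

#include <optional>

template <typename Pose>  // Pose needs a default (identity) ctor, inverse(), and operator*
struct BaselineCompensator {
    std::optional<Pose> baseline;
    Pose output{};  // identity until a second sample arrives

    void setInput(const Pose& input) {
        if (!baseline.has_value()) {
            baseline = input;  // the first sample only establishes the reference
            return;
        }
        output = baseline->inverse() * input;
    }

    void recenter() {
        baseline.reset();  // forget the reference; the next sample re-establishes it
        output = Pose();
    }
};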
 
diff --git a/media/libheadtracking/PoseProcessingGraph.png b/media/libheadtracking/PoseProcessingGraph.png
index 8e6dfd2..0363068 100644
--- a/media/libheadtracking/PoseProcessingGraph.png
+++ b/media/libheadtracking/PoseProcessingGraph.png
Binary files differ
diff --git a/media/libheadtracking/SensorPoseProvider-example.cpp b/media/libheadtracking/SensorPoseProvider-example.cpp
new file mode 100644
index 0000000..a246e8b
--- /dev/null
+++ b/media/libheadtracking/SensorPoseProvider-example.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <iostream>
+
+#include <android/sensor.h>
+#include <hardware/sensors.h>
+#include <utils/SystemClock.h>
+
+#include <media/SensorPoseProvider.h>
+#include <sensor/Sensor.h>
+#include <sensor/SensorManager.h>
+
+using android::elapsedRealtimeNano;
+using android::Sensor;
+using android::SensorManager;
+using android::String16;
+using android::media::Pose3f;
+using android::media::SensorPoseProvider;
+using android::media::Twist3f;
+
+using namespace std::chrono_literals;
+
+const char kPackageName[] = "SensorPoseProvider-example";
+
+class Listener : public SensorPoseProvider::Listener {
+  public:
+    void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+                const std::optional<Twist3f>& twist) override {
+        int64_t now = elapsedRealtimeNano();
+
+        std::cout << "onPose t=" << timestamp
+                  << " lag=" << ((now - timestamp) / 1e6) << "[ms]"
+                  << " sensor=" << handle
+                  << " pose=" << pose
+                  << " twist=";
+        if (twist.has_value()) {
+            std::cout << twist.value();
+        } else {
+            std::cout << "<none>";
+        }
+        std::cout << std::endl;
+    }
+};
+
+int main() {
+    SensorManager& sensorManager = SensorManager::getInstanceForPackage(String16(kPackageName));
+
+    const Sensor* headSensor = sensorManager.getDefaultSensor(SENSOR_TYPE_GAME_ROTATION_VECTOR);
+    const Sensor* screenSensor = sensorManager.getDefaultSensor(SENSOR_TYPE_ROTATION_VECTOR);
+
+    Listener listener;
+
+    std::unique_ptr<SensorPoseProvider> provider =
+            SensorPoseProvider::create(kPackageName, &listener);
+    int32_t headHandle = provider->startSensor(headSensor->getHandle(), 500ms);
+    sleep(2);
+    provider->startSensor(screenSensor->getHandle(), 500ms);
+    sleep(2);
+    provider->stopSensor(headHandle);
+    sleep(2);
+    return 0;
+}
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
new file mode 100644
index 0000000..c4c031d
--- /dev/null
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/SensorPoseProvider.h>
+
+#define LOG_TAG "SensorPoseProvider"
+
+#include <inttypes.h>
+
+#include <future>
+#include <map>
+#include <thread>
+
+#include <log/log_main.h>
+#include <sensor/Sensor.h>
+#include <sensor/SensorEventQueue.h>
+#include <sensor/SensorManager.h>
+#include <utils/Looper.h>
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+// Identifier to use for our event queue on the loop.
+// The number 19 is arbitrary, only useful if using multiple objects on the same looper.
+constexpr int kIdent = 19;
+
+static inline Looper* ALooper_to_Looper(ALooper* alooper) {
+    return reinterpret_cast<Looper*>(alooper);
+}
+
+static inline ALooper* Looper_to_ALooper(Looper* looper) {
+    return reinterpret_cast<ALooper*>(looper);
+}
+
+/**
+ * RAII-wrapper around SensorEventQueue, which unregisters it on destruction.
+ */
+class EventQueueGuard {
+  public:
+    EventQueueGuard(const sp<SensorEventQueue>& queue, Looper* looper) : mQueue(queue) {
+        mQueue->looper = Looper_to_ALooper(looper);
+        mQueue->requestAdditionalInfo = false;
+        looper->addFd(mQueue->getFd(), kIdent, ALOOPER_EVENT_INPUT, nullptr, nullptr);
+    }
+
+    ~EventQueueGuard() {
+        if (mQueue) {
+            ALooper_to_Looper(mQueue->looper)->removeFd(mQueue->getFd());
+        }
+    }
+
+    EventQueueGuard(const EventQueueGuard&) = delete;
+    EventQueueGuard& operator=(const EventQueueGuard&) = delete;
+
+    [[nodiscard]] SensorEventQueue* get() const { return mQueue.get(); }
+
+  private:
+    sp<SensorEventQueue> mQueue;
+};
+
+/**
+ * RAII-wrapper around an enabled sensor, which disables it upon destruction.
+ */
+class SensorEnableGuard {
+  public:
+    SensorEnableGuard(const sp<SensorEventQueue>& queue, int32_t sensor)
+        : mQueue(queue), mSensor(sensor) {}
+
+    ~SensorEnableGuard() {
+        if (mSensor != SensorPoseProvider::INVALID_HANDLE) {
+            int ret = mQueue->disableSensor(mSensor);
+            if (ret) {
+                ALOGE("Failed to disable sensor: %s\n", strerror(ret));
+            }
+        }
+    }
+
+    SensorEnableGuard(const SensorEnableGuard&) = delete;
+    SensorEnableGuard& operator=(const SensorEnableGuard&) = delete;
+
+    // Enable moving.
+    SensorEnableGuard(SensorEnableGuard&& other) : mQueue(other.mQueue), mSensor(other.mSensor) {
+        other.mSensor = SensorPoseProvider::INVALID_HANDLE;
+    }
+
+  private:
+    sp<SensorEventQueue> const mQueue;
+    int32_t mSensor;
+};
+
+/**
+ * Streams the required events to a PoseListener, based on events originating from the Sensor stack.
+ */
+class SensorPoseProviderImpl : public SensorPoseProvider {
+  public:
+    static std::unique_ptr<SensorPoseProvider> create(const char* packageName, Listener* listener) {
+        std::unique_ptr<SensorPoseProviderImpl> result(
+                new SensorPoseProviderImpl(packageName, listener));
+        return result->waitInitFinished() ? std::move(result) : nullptr;
+    }
+
+    ~SensorPoseProviderImpl() override {
+        // Disable all active sensors.
+        mEnabledSensors.clear();
+        mLooper->wake();
+        mThread.join();
+    }
+
+    bool startSensor(int32_t sensor, std::chrono::microseconds samplingPeriod) override {
+        // Enable the sensor.
+        if (mQueue->enableSensor(sensor, samplingPeriod.count(), 0, 0)) {
+            ALOGE("Failed to enable sensor");
+            return false;
+        }
+
+        mEnabledSensors.emplace(sensor, SensorEnableGuard(mQueue.get(), sensor));
+        return true;
+    }
+
+    void stopSensor(int handle) override { mEnabledSensors.erase(handle); }
+
+  private:
+    sp<Looper> mLooper;
+    Listener* const mListener;
+
+    std::thread mThread;
+    std::map<int32_t, SensorEnableGuard> mEnabledSensors;
+    sp<SensorEventQueue> mQueue;
+
+    // We must perform some of the initialization on the worker thread, because the API relies
+    // on the thread-local looper. In addition, as a matter of convenience, we keep some of the
+    // state on that thread's stack.
+    // For that reason we use a two-step initialization approach: the ctor mostly just starts
+    // the worker thread, and that thread notifies us, via the promise below, when initialization
+    // is finished and whether it was successful.
+    std::promise<bool> mInitPromise;
+
+    SensorPoseProviderImpl(const char* packageName, Listener* listener)
+        : mListener(listener),
+          mThread([this, p = std::string(packageName)] { threadFunc(p.c_str()); }) {}
+
+    void initFinished(bool success) { mInitPromise.set_value(success); }
+
+    bool waitInitFinished() { return mInitPromise.get_future().get(); }
+
+    void threadFunc(const char* packageName) {
+        // Obtain looper.
+        mLooper = Looper::prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+
+        // Obtain sensor manager.
+        SensorManager& sensorManager = SensorManager::getInstanceForPackage(String16(packageName));
+
+        // Create event queue.
+        mQueue = sensorManager.createEventQueue();
+
+        if (mQueue == nullptr) {
+            ALOGE("Failed to create a sensor event queue");
+            initFinished(false);
+            return;
+        }
+
+        EventQueueGuard eventQueueGuard(mQueue, mLooper.get());
+
+        initFinished(true);
+
+        while (true) {
+            int ret = mLooper->pollOnce(-1 /* no timeout */, nullptr, nullptr, nullptr);
+
+            switch (ret) {
+                case ALOOPER_POLL_WAKE:
+                    // Normal way to exit.
+                    return;
+
+                case kIdent:
+                    // Possible events on our queue.
+                    break;
+
+                default:
+                    ALOGE("Unexpected status out of Looper::pollOnce: %d", ret);
+            }
+
+            // Process an event.
+            ASensorEvent event;
+            ssize_t actual = mQueue->read(&event, 1);
+            if (actual > 0) {
+                mQueue->sendAck(&event, actual);
+            }
+            ssize_t size = mQueue->filterEvents(&event, actual);
+
+            if (size < 0 || size > 1) {
+                ALOGE("Unexpected return value from SensorEventQueue::filterEvents: %zd", size);
+                break;
+            }
+            if (size == 0) {
+                // No events.
+                continue;
+            }
+
+            handleEvent(event);
+        }
+    }
+
+    void handleEvent(const ASensorEvent& event) {
+        auto value = parseEvent(event);
+        mListener->onPose(event.timestamp, event.sensor, std::get<0>(value), std::get<1>(value));
+    }
+
+    static std::tuple<Pose3f, std::optional<Twist3f>> parseEvent(const ASensorEvent& event) {
+        // TODO(ytai): Add more types.
+        switch (event.type) {
+            case ASENSOR_TYPE_ROTATION_VECTOR:
+            case ASENSOR_TYPE_GAME_ROTATION_VECTOR: {
+                Eigen::Quaternionf quat(event.data[3], event.data[0], event.data[1], event.data[2]);
+                // Adapt to different frame convention.
+                quat *= rotateX(-M_PI_2);
+                return std::make_tuple(Pose3f(quat), std::optional<Twist3f>());
+            }
+
+            default:
+                ALOGE("Unsupported sensor type: %" PRId32, event.type);
+                return std::make_tuple(Pose3f(), std::optional<Twist3f>());
+        }
+    }
+};
+
+}  // namespace
+
+std::unique_ptr<SensorPoseProvider> SensorPoseProvider::create(const char* packageName,
+                                                               Listener* listener) {
+    return SensorPoseProviderImpl::create(packageName, listener);
+}
+
+}  // namespace media
+}  // namespace android
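The two-step initialization described in the comments above, with worker-thread setup reported back through a std::promise, can be reduced to the following self-contained sketch (generic names; the real code performs looper and event-queue setup where setUpThreadLocalState() is called, and wakes its looper instead of polling a flag):

#include <atomic>
#include <chrono>
#include <future>
#include <memory>
#include <thread>

class Worker {
  public:
    static std::unique_ptr<Worker> create() {
        std::unique_ptr<Worker> w(new Worker());
        // Block until the worker thread reports whether its thread-local setup succeeded.
        return w->mInitDone.get_future().get() ? std::move(w) : nullptr;
    }

    ~Worker() {
        mStop = true;
        mThread.join();
    }

  private:
    std::promise<bool> mInitDone;
    std::atomic<bool> mStop{false};
    std::thread mThread;  // declared last so the members it uses are constructed first

    Worker() : mThread([this] { run(); }) {}

    static bool setUpThreadLocalState() { return true; }  // stands in for Looper/queue setup

    void run() {
        const bool ok = setUpThreadLocalState();
        mInitDone.set_value(ok);
        if (!ok) return;  // create() will see `false` and destroy the half-built object
        while (!mStop) {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));  // poll events here
        }
    }
};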
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
index 23de540..9fea273 100644
--- a/media/libheadtracking/include/media/HeadTrackingProcessor.h
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -89,9 +89,9 @@
     virtual HeadTrackingMode getActualMode() const = 0;
 
     /**
-     * This causes the current poses for both the head and screen to be considered "center".
+     * This causes the current pose for the head and/or the screen to be considered "center".
      */
-    virtual void recenter() = 0;
+    virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0;
 };
 
 /**
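A usage sketch for the widened recenter() signature (hypothetical caller): the defaulted arguments preserve the old behavior, while explicit flags recenter only one of the two inputs.

#include <media/HeadTrackingProcessor.h>

void recenterHeadOnly(android::media::HeadTrackingProcessor& processor) {
    // processor.recenter() with no arguments still recenters both head and screen.
    processor.recenter(/*recenterHead=*/true, /*recenterScreen=*/false);
}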
diff --git a/media/libheadtracking/include/media/Pose.h b/media/libheadtracking/include/media/Pose.h
index 06b33f3..e660bb9 100644
--- a/media/libheadtracking/include/media/Pose.h
+++ b/media/libheadtracking/include/media/Pose.h
@@ -15,6 +15,8 @@
  */
 #pragma once
 
+#include <optional>
+#include <vector>
 #include <Eigen/Geometry>
 
 namespace android {
@@ -45,6 +47,22 @@
 
     Pose3f(const Pose3f& other) { *this = other; }
 
+    /**
+     * Create instance from a vector-of-floats representation.
+     * The vector is expected to have exactly 6 elements, where the first three are a translation
+     * vector and the last three are a rotation vector.
+     *
+     * Returns nullopt if the input vector is illegal.
+     */
+    static std::optional<Pose3f> fromVector(const std::vector<float>& vec);
+
+    /**
+     * Convert instance to a vector-of-floats representation.
+     * The vector will have exactly 6 elements, where the first three are a translation vector and
+     * the last three are a rotation vector.
+     */
+    std::vector<float> toVector() const;
+
     Pose3f& operator=(const Pose3f& other) {
         mTranslation = other.mTranslation;
         mRotation = other.mRotation;
diff --git a/media/libheadtracking/include/media/SensorPoseProvider.h b/media/libheadtracking/include/media/SensorPoseProvider.h
new file mode 100644
index 0000000..1a5deb0
--- /dev/null
+++ b/media/libheadtracking/include/media/SensorPoseProvider.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <memory>
+#include <optional>
+
+#include <android/sensor.h>
+
+#include "Pose.h"
+#include "Twist.h"
+
+namespace android {
+namespace media {
+
+/**
+ * A utility providing streaming of pose data from motion sensors provided by the Sensor Framework.
+ *
+ * A live instance of this interface keeps around some resources required for accessing sensor
+ * readings (e.g. a thread and a queue). Those would be released when the instance is deleted.
+ *
+ * Once alive, individual sensors can be subscribed to using startSensor() and updates can be
+ * stopped via stopSensor(). Those two methods should not be called concurrently and correct usage
+ * is assumed.
+ */
+class SensorPoseProvider {
+  public:
+    static constexpr int32_t INVALID_HANDLE = ASENSOR_INVALID;
+
+    /**
+     * Interface for consuming pose-related sensor events.
+     *
+     * The listener will be provided with a stream of events, each including:
+     * - A handle of the sensor responsible for the event.
+     * - Timestamp.
+     * - Pose.
+     * - Optional twist (time-derivative of pose).
+     *
+     * Sensors having only orientation data will have the translation part of the pose set to
+     * identity.
+     *
+     * Events are delivered in a serialized manner (i.e. callbacks do not need to be reentrant).
+     * Callbacks should not block.
+     */
+    class Listener {
+      public:
+        virtual ~Listener() = default;
+
+        virtual void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+                            const std::optional<Twist3f>& twist) = 0;
+    };
+
+    /**
+     * Creates a new SensorPoseProvider instance.
+     * Events will be delivered to the listener as long as the returned instance is kept alive.
+     * @param packageName Client's package name.
+     * @param listener The listener that will get the events.
+     * @return The new instance, or nullptr in case of failure.
+     */
+    static std::unique_ptr<SensorPoseProvider> create(const char* packageName, Listener* listener);
+
+    virtual ~SensorPoseProvider() = default;
+
+    /**
+     * Start receiving pose updates from a given sensor.
+     * Attempting to start a sensor that has already been started results in undefined behavior.
+     * @param sensor The sensor to subscribe to.
+     * @param samplingPeriod Sampling interval, in microseconds. Actual rate might be slightly
+     * different.
+     * @return true iff succeeded.
+     */
+    virtual bool startSensor(int32_t sensor, std::chrono::microseconds samplingPeriod) = 0;
+
+    /**
+     * Stop a sensor, previously started with startSensor(). It is not required to stop all sensors
+     * before deleting the SensorPoseProvider instance.
+     * @param handle The sensor handle, as provided to startSensor().
+     */
+    virtual void stopSensor(int32_t handle) = 0;
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 9c1b563..4a2523f 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -347,6 +347,7 @@
 
     shared_libs: [
         "android.hidl.token@1.0-utils",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "av-types-aidl-cpp",
         "liblog",
diff --git a/media/libmedia/tests/codeclist/Android.bp b/media/libmedia/tests/codeclist/Android.bp
index 57af9a9..2ed3126 100644
--- a/media/libmedia/tests/codeclist/Android.bp
+++ b/media/libmedia/tests/codeclist/Android.bp
@@ -28,6 +28,22 @@
     test_suites: ["device-tests", "mts"],
     gtest: true,
 
+    // Support multilib variants (using different suffix per sub-architecture), which is needed on
+    // build targets with secondary architectures, as the MTS test suite packaging logic flattens
+    // all test artifacts into a single `testcases` directory.
+    compile_multilib: "both",
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+
+    // Used within mainline MTS, but only back to R, not to Q.
+    min_sdk_version: "30",
+
     srcs: [
         "CodecListTest.cpp",
     ],
@@ -35,13 +51,13 @@
     shared_libs: [
         "libbinder",
         "liblog",
-        "libmedia_codeclist",
+        "libmedia_codeclist", // available >= R
         "libstagefright",
         "libstagefright_foundation",
         "libstagefright_xmlparser",
         "libutils",
     ],
-    compile_multilib: "first",
+
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libmedia/tests/codeclist/AndroidTest.xml b/media/libmedia/tests/codeclist/AndroidTest.xml
new file mode 100644
index 0000000..eeaab8e
--- /dev/null
+++ b/media/libmedia/tests/codeclist/AndroidTest.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Test module config for CodecList unit tests">
+    <option name="test-suite-tag" value="CodecListTest" />
+    <object type="module_controller" class="com.android.tradefed.testtype.suite.module.Sdk30ModuleController" />
+
+    <target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
+        <option name="cleanup" value="true" />
+        <option name="append-bitness" value="true" />
+        <option name="push" value="CodecListTest->/data/local/tmp/CodecListTest" />
+    </target_preparer>
+
+    <test class="com.android.tradefed.testtype.GTest" >
+        <option name="native-test-device-path" value="/data/local/tmp" />
+        <option name="module-name" value="CodecListTest" />
+    </test>
+
+
+</configuration>
diff --git a/media/libmedia/xsd/vts/OWNERS b/media/libmedia/xsd/vts/OWNERS
new file mode 100644
index 0000000..9af2eba
--- /dev/null
+++ b/media/libmedia/xsd/vts/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 151862
+sundongahn@google.com
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index d3a517f..97b5b95 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -50,6 +50,8 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_MUTE_HAPTIC),
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_SYSTEM_CAPTURE),
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CAPTURE_PRIVATE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CONTENT_SPATIALIZED),
+    MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NEVER_SPATIALIZE),
     TERMINATOR
 };
 
diff --git a/media/libmediametrics/IMediaMetricsService.cpp b/media/libmediametrics/IMediaMetricsService.cpp
deleted file mode 100644
index b5675e6..0000000
--- a/media/libmediametrics/IMediaMetricsService.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaMetrics"
-
-#include <stdint.h>
-#include <inttypes.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <binder/IPCThreadState.h>
-
-#include <utils/Errors.h>  // for status_t
-#include <utils/List.h>
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <media/MediaMetricsItem.h>
-#include <media/IMediaMetricsService.h>
-
-namespace android {
-
-// TODO: Currently ONE_WAY transactions, make both ONE_WAY and synchronous options.
-
-enum {
-    SUBMIT_ITEM = IBinder::FIRST_CALL_TRANSACTION,
-    SUBMIT_BUFFER,
-};
-
-class BpMediaMetricsService: public BpInterface<IMediaMetricsService>
-{
-public:
-    explicit BpMediaMetricsService(const sp<IBinder>& impl)
-        : BpInterface<IMediaMetricsService>(impl)
-    {
-    }
-
-    status_t submit(mediametrics::Item *item) override
-    {
-        if (item == nullptr) {
-            return BAD_VALUE;
-        }
-        ALOGV("%s: (ONEWAY) item=%s", __func__, item->toString().c_str());
-
-        Parcel data;
-        data.writeInterfaceToken(IMediaMetricsService::getInterfaceDescriptor());
-
-        status_t status = item->writeToParcel(&data);
-        if (status != NO_ERROR) { // assume failure logged in item
-            return status;
-        }
-
-        status = remote()->transact(
-                SUBMIT_ITEM, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
-        ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
-                __func__, status);
-        return status;
-    }
-
-    status_t submitBuffer(const char *buffer, size_t length) override
-    {
-        if (buffer == nullptr || length > INT32_MAX) {
-            return BAD_VALUE;
-        }
-        ALOGV("%s: (ONEWAY) length:%zu", __func__, length);
-
-        Parcel data;
-        data.writeInterfaceToken(IMediaMetricsService::getInterfaceDescriptor());
-
-        status_t status = data.writeInt32(length)
-                ?: data.write((uint8_t*)buffer, length);
-        if (status != NO_ERROR) {
-            return status;
-        }
-
-        status = remote()->transact(
-                SUBMIT_BUFFER, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
-        ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
-                __func__, status);
-        return status;
-    }
-};
-
-IMPLEMENT_META_INTERFACE(MediaMetricsService, "android.media.IMediaMetricsService");
-
-// ----------------------------------------------------------------------
-
-status_t BnMediaMetricsService::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    switch (code) {
-    case SUBMIT_ITEM: {
-        CHECK_INTERFACE(IMediaMetricsService, data, reply);
-
-        mediametrics::Item * const item = mediametrics::Item::create();
-        status_t status = item->readFromParcel(data);
-        if (status != NO_ERROR) { // assume failure logged in item
-            return status;
-        }
-        status = submitInternal(item, true /* release */);
-        // assume failure logged by submitInternal
-        return NO_ERROR;
-    }
-    case SUBMIT_BUFFER: {
-        CHECK_INTERFACE(IMediaMetricsService, data, reply);
-        int32_t length;
-        status_t status = data.readInt32(&length);
-        if (status != NO_ERROR || length <= 0) {
-            return BAD_VALUE;
-        }
-        const void *ptr = data.readInplace(length);
-        if (ptr == nullptr) {
-            return BAD_VALUE;
-        }
-        status = submitBuffer(static_cast<const char *>(ptr), length);
-        // assume failure logged by submitBuffer
-        return NO_ERROR;
-    }
-
-    default:
-        return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-} // namespace android
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index f55678d..37f8103 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -65,6 +65,7 @@
         "libstagefright_foundation",
         "libstagefright_httplive",
         "libutils",
+        "packagemanager_aidl-cpp",
     ],
 
     header_libs: [
@@ -82,6 +83,7 @@
     export_shared_lib_headers: [
         "libmedia",
         "framework-permission-aidl-cpp",
+        "packagemanager_aidl-cpp",
     ],
 
     include_dirs: [
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index d278a01..f85887e 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -21,6 +21,7 @@
 #define LOG_TAG "MediaPlayerService"
 #include <utils/Log.h>
 
+#include <chrono>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/time.h>
@@ -2467,8 +2468,13 @@
 void MediaPlayerService::AudioOutput::pause()
 {
     ALOGV("pause");
+    // We use pauseAndWait() instead of pause() to ensure tracks ramp to silence before
+    // any flush. We choose a 40 ms timeout to allow one deep-buffer mixer period
+    // to elapse; in practice the wait is typically 0 - 20 ms.
+    using namespace std::chrono_literals;
+    constexpr auto TIMEOUT_MS = 40ms;
     Mutex::Autolock lock(mLock);
-    if (mTrack != 0) mTrack->pause();
+    if (mTrack != 0) mTrack->pauseAndWait(TIMEOUT_MS);
 }
 
 void MediaPlayerService::AudioOutput::close()
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index a914006..4aa80be 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -126,8 +126,13 @@
     }
 
     if ((as == AUDIO_SOURCE_FM_TUNER
-            && !(captureAudioOutputAllowed(mAttributionSource)
+                && !(captureAudioOutputAllowed(mAttributionSource)
                     || captureTunerAudioInputAllowed(mAttributionSource)))
+            || (as == AUDIO_SOURCE_REMOTE_SUBMIX
+                && !(captureAudioOutputAllowed(mAttributionSource)
+                    || modifyAudioRoutingAllowed(mAttributionSource)))
+            || (as == AUDIO_SOURCE_ECHO_REFERENCE
+                && !captureAudioOutputAllowed(mAttributionSource))
             || !recordingAllowed(mAttributionSource, (audio_source_t)as)) {
         return PERMISSION_DENIED;
     }
diff --git a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
index 2aabd53..a372b7f 100644
--- a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
@@ -225,10 +225,17 @@
             "media.stagefright.thumbnail.prefer_hw_codecs", false);
     uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
     Vector<AString> matchingCodecs;
+    sp<AMessage> format = new AMessage;
+    status_t err = convertMetaDataToMessage(trackMeta, &format);
+    if (err != OK) {
+        format = NULL;
+    }
+
     MediaCodecList::findMatchingCodecs(
             mime,
             false, /* encoder */
             flags,
+            format,
             &matchingCodecs);
 
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
@@ -348,11 +355,18 @@
     bool preferhw = property_get_bool(
             "media.stagefright.thumbnail.prefer_hw_codecs", false);
     uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
+    sp<AMessage> format = new AMessage;
+    status_t err = convertMetaDataToMessage(trackMeta, &format);
+    if (err != OK) {
+        format = NULL;
+    }
+
     Vector<AString> matchingCodecs;
     MediaCodecList::findMatchingCodecs(
             mime,
             false, /* encoder */
             flags,
+            format,
             &matchingCodecs);
 
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 4a65f71..2828d44 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1673,24 +1673,18 @@
 
         mDrainAudioQueuePending = false;
 
-        if (offloadingAudio()) {
-            mAudioSink->pause();
-            mAudioSink->flush();
-            if (!mPaused) {
-                mAudioSink->start();
-            }
-        } else {
-            mAudioSink->pause();
-            mAudioSink->flush();
+        mAudioSink->pause();
+        mAudioSink->flush();
+        if (!offloadingAudio()) {
             // Call stop() to signal to the AudioSink to completely fill the
             // internal buffer before resuming playback.
             // FIXME: this is ignored after flush().
             mAudioSink->stop();
-            if (!mPaused) {
-                mAudioSink->start();
-            }
             mNumFramesWritten = 0;
         }
+        if (!mPaused) {
+            mAudioSink->start();
+        }
         mNextAudioClockUpdateTimeUs = -1;
     } else {
         flushQueue(&mVideoQueue);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3ef89bb..a4fbbbc 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -5433,6 +5433,7 @@
                     notify->setInt32("channel-count", params.nChannels);
                     notify->setInt32("sample-rate", params.nSampleRate);
                     notify->setInt32("bitrate", params.nBitRate);
+                    notify->setInt32("aac-profile", params.eAACProfile);
                     break;
                 }
 
@@ -9207,4 +9208,19 @@
     return OK;
 }
 
+status_t ACodec::querySupportedParameters(std::vector<std::string> *names) {
+    if (!names) {
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+status_t ACodec::subscribeToParameters([[maybe_unused]] const std::vector<std::string> &names) {
+    return OK;
+}
+
+status_t ACodec::unsubscribeFromParameters([[maybe_unused]] const std::vector<std::string> &names) {
+    return OK;
+}
+
 }  // namespace android
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index a052a70..1e9d4fe 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -109,6 +109,7 @@
 
     srcs: [
         "CodecBase.cpp",
+        "DataConverter.cpp",
         "FrameRenderTracker.cpp",
         "MediaCodecListWriter.cpp",
         "SkipCutBuffer.cpp",
@@ -125,6 +126,7 @@
     ],
 
     shared_libs: [
+        "libaudioutils",
         "libgui",
         "libhidlallocatorutils",
         "liblog",
@@ -266,7 +268,6 @@
         "CallbackMediaSource.cpp",
         "CameraSource.cpp",
         "CameraSourceTimeLapse.cpp",
-        "DataConverter.cpp",
         "FrameDecoder.cpp",
         "HevcUtils.cpp",
         "InterfaceUtils.cpp",
@@ -340,6 +341,7 @@
         "android.hardware.media.omx@1.0",
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
+        "packagemanager_aidl-cpp",
     ],
 
     static_libs: [
@@ -371,6 +373,7 @@
         "libmedia",
         "android.hidl.allocator@1.0",
         "framework-permission-aidl-cpp",
+        "packagemanager_aidl-cpp",
     ],
 
     export_include_dirs: [
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 94a0424..01cb9b3 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -793,8 +793,16 @@
     if (overrideMeta == NULL) {
         // check if we're dealing with a tiled heif
         int32_t tileWidth, tileHeight, gridRows, gridCols;
+        int32_t widthColsProduct = 0;
+        int32_t heightRowsProduct = 0;
         if (findGridInfo(trackMeta(), &tileWidth, &tileHeight, &gridRows, &gridCols)) {
-            if (mWidth <= tileWidth * gridCols && mHeight <= tileHeight * gridRows) {
+            if (__builtin_mul_overflow(tileWidth, gridCols, &widthColsProduct) ||
+                    __builtin_mul_overflow(tileHeight, gridRows, &heightRowsProduct)) {
+                ALOGE("Multiplication overflowed Grid size: %dx%d, Picture size: %dx%d",
+                        gridCols, gridRows, tileWidth, tileHeight);
+                return nullptr;
+            }
+            if (mWidth <= widthColsProduct && mHeight <= heightRowsProduct) {
                 ALOGV("grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
                         gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
 
@@ -880,9 +888,18 @@
     }
 
     int32_t width, height, stride;
-    CHECK(outputFormat->findInt32("width", &width));
-    CHECK(outputFormat->findInt32("height", &height));
-    CHECK(outputFormat->findInt32("stride", &stride));
+    if (outputFormat->findInt32("width", &width) == false) {
+        ALOGE("MediaImageDecoder::onOutputReceived:width is missing in outputFormat");
+        return ERROR_MALFORMED;
+    }
+    if (outputFormat->findInt32("height", &height) == false) {
+        ALOGE("MediaImageDecoder::onOutputReceived:height is missing in outputFormat");
+        return ERROR_MALFORMED;
+    }
+    if (outputFormat->findInt32("stride", &stride) == false) {
+        ALOGE("MediaImageDecoder::onOutputReceived:stride is missing in outputFormat");
+        return ERROR_MALFORMED;
+    }
 
     if (mFrame == NULL) {
         sp<IMemory> frameMem = allocVideoFrame(
diff --git a/media/libstagefright/MediaAppender.cpp b/media/libstagefright/MediaAppender.cpp
index 5d80b30..21dcfa1 100644
--- a/media/libstagefright/MediaAppender.cpp
+++ b/media/libstagefright/MediaAppender.cpp
@@ -75,10 +75,21 @@
         return status;
     }
 
-    if (strcmp("MPEG4Extractor", mExtractor->getName()) == 0) {
+    sp<AMessage> fileFormat;
+    status = mExtractor->getFileFormat(&fileFormat);
+    if (status != OK) {
+        ALOGE("extractor_getFileFormat failed, status :%d", status);
+        return status;
+    }
+
+    AString fileMime;
+    fileFormat->findString("mime", &fileMime);
+    // Only compare the end of the file MIME type to allow for vendor-customized MIME types.
+    if (fileMime.endsWith("mp4")) {
         mFormat = MediaMuxer::OUTPUT_FORMAT_MPEG_4;
     } else {
-        ALOGE("Unsupported format, extractor name:%s", mExtractor->getName());
+        ALOGE("Unsupported file format, extractor name:%s, fileformat %s",
+              mExtractor->getName(), fileMime.c_str());
         return ERROR_UNSUPPORTED;
     }
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index a366506..00c38d5 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -3185,8 +3185,11 @@
                         mediametrics_setInt32(mMetricsHandle, kCodecSecure, 0);
                     }
 
-                    if (mIsVideo) {
-                        // audio codec is currently ignored.
+                    MediaCodecInfo::Attributes attr = mCodecInfo
+                            ? mCodecInfo->getAttributes()
+                            : MediaCodecInfo::Attributes(0);
+                    if (!(attr & MediaCodecInfo::kFlagIsSoftwareOnly)) {
+                        // Software-only codecs are currently ignored.
                         mResourceManagerProxy->addResource(
                                 MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
                     }
@@ -3502,6 +3505,20 @@
 
                 case kWhatDrainThisBuffer:
                 {
+                    if ((mFlags & kFlagUseBlockModel) == 0 && mTunneled) {
+                        sp<RefBase> obj;
+                        CHECK(msg->findObject("buffer", &obj));
+                        sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
+                        if (mFlags & kFlagIsAsync) {
+                            // In asynchronous mode, output format change is processed immediately.
+                            handleOutputFormatChangeIfNeeded(buffer);
+                        } else {
+                            postActivityNotificationIfPossible();
+                        }
+                        mBufferChannel->discardBuffer(buffer);
+                        break;
+                    }
+
                     /* size_t index = */updateBuffers(kPortIndexOutput, msg);
 
                     if (mState == FLUSHING
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 6243828..2ffe728 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -509,6 +509,29 @@
                 }
             }
         }
+
+        int32_t profile = -1;
+        if (format->findInt32("profile", &profile)) {
+            int32_t level = -1;
+            format->findInt32("level", &level);
+            Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+            capabilities->getSupportedProfileLevels(&profileLevels);
+            auto it = profileLevels.begin();
+            for (; it != profileLevels.end(); ++it) {
+                if (profile != it->mProfile) {
+                    continue;
+                }
+                if (level > -1 && level > it->mLevel) {
+                    continue;
+                }
+                break;
+            }
+
+            if (it == profileLevels.end()) {
+                ALOGV("Codec does not support profile %d with level %d", profile, level);
+                return false;
+            }
+        }
     }
 
     // haven't found a reason to discard this one
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 0107c32..b07f8f7 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -943,10 +943,17 @@
 
             sp<MediaCodecBuffer> outbuf;
             status_t err = mEncoder->getOutputBuffer(index, &outbuf);
-            if (err != OK || outbuf == NULL || outbuf->data() == NULL
-                || outbuf->size() == 0) {
+            if (err != OK || outbuf == NULL || outbuf->data() == NULL) {
                 signalEOS();
                 break;
+            } else if (outbuf->size() == 0) {
+                // Zero length CSD buffers are not treated as an error
+                if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
+                    mEncoder->releaseOutputBuffer(index);
+                } else {
+                    signalEOS();
+                }
+                break;
             }
 
             MediaBufferBase *mbuf = new MediaBuffer(outbuf->size());
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index ee9016d..de91533 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -145,7 +145,19 @@
     if (available < num) {
         int32_t newcapacity = mCapacity + (num - available);
         char * newbuffer = new char[newcapacity];
-        memcpy(newbuffer, mCutBuffer, mCapacity);
+        if (mWriteHead < mReadHead) {
+            // Data isn't contiguous; copy in two chunks to move the
+            // existing data to the new buffer.
+            size_t copyLeft = mCapacity - mReadHead;
+            memcpy(newbuffer, mCutBuffer + mReadHead, copyLeft);
+            memcpy(newbuffer + copyLeft, mCutBuffer, mWriteHead);
+            mReadHead = 0;
+            mWriteHead += copyLeft;
+        } else {
+            memcpy(newbuffer, mCutBuffer + mReadHead, mWriteHead - mReadHead);
+            mWriteHead -= mReadHead;
+            mReadHead = 0;
+        }
         delete [] mCutBuffer;
         mCapacity = newcapacity;
         mCutBuffer = newbuffer;
diff --git a/media/libstagefright/colorconversion/Android.bp b/media/libstagefright/colorconversion/Android.bp
index 06cebd3..7ff9b10 100644
--- a/media/libstagefright/colorconversion/Android.bp
+++ b/media/libstagefright/colorconversion/Android.bp
@@ -25,10 +25,6 @@
         "SoftwareRenderer.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/native/include/media/openmax",
-    ],
-
     shared_libs: [
         "libui",
         "libnativewindow",
@@ -37,6 +33,7 @@
     header_libs: [
         "libstagefright_headers",
         "libstagefright_foundation_headers",
+        "media_plugin_headers",
     ],
 
     static_libs: ["libyuv_static"],
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index acc9e87..b46a271 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -22,10 +22,6 @@
         "ZeroFilter.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/native/include/media/openmax",
-    ],
-
     cflags: [
         "-Wno-multichar",
         "-Werror",
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index dd2c66f..5f86c22 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -33,18 +33,13 @@
     },
     host_supported: true,
     double_loadable: true,
-    include_dirs: [
-        "frameworks/av/include",
-        "frameworks/native/include",
-        "frameworks/native/libs/arect/include",
-        "frameworks/native/libs/nativebase/include",
-    ],
 
     local_include_dirs: [
         "include/media/stagefright/foundation",
     ],
 
     header_libs: [
+        "av-headers",
         // this is only needed for the vendor variant that removes libbinder, but vendor
         // target below does not allow adding header_libs.
         "libbinder_headers",
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 0ec5ad5..5c4ec17 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -60,6 +60,10 @@
 const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
 const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1 = "audio/mha1";
 const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1 = "audio/mhm1";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3 = "audio/mhm1.03";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4 = "audio/mhm1.04";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3 = "audio/mhm1.0d";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4 = "audio/mhm1.0e";
 const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
 const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
 const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
@@ -75,7 +79,47 @@
 const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb+";
 const char *MEDIA_MIMETYPE_AUDIO_APTX = "audio/aptx";
 const char *MEDIA_MIMETYPE_AUDIO_DRA = "audio/vnd.dra";
-const char *MEDIA_MIMETYPE_AUDIO_AAC_FORMAT = "audio/aac";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT = "audio/vnd.dolby.mat";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0 = "audio/vnd.dolby.mat.1.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0 = "audio/vnd.dolby.mat.2.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1 = "audio/vnd.dolby.mat.2.1";
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD = "audio/vnd.dolby.mlp";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4 = "audio/mp4a.40";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN = "audio/mp4a.40.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LC = "audio/mp4a.40.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR = "audio/mp4a.40.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP = "audio/mp4a.40.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1 = "audio/mp4a.40.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE = "audio/mp4a.40.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC = "audio/mp4a.40.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LD = "audio/mp4a.40.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2 = "audio/mp4a.40.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD = "audio/mp4a.40.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE = "audio/mp4a.40.42";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF = "audio/aac-adif";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN = "audio/aac-adts.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC = "audio/aac-adts.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR = "audio/aac-adts.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP = "audio/aac-adts.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1 = "audio/aac-adts.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE = "audio/aac-adts.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC = "audio/aac-adts.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD = "audio/aac-adts.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2 = "audio/aac-adts.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD = "audio/aac-adts.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE = "audio/aac-adts.42";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC = "audio/mp4a-latm.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1 = "audio/mp4a-latm.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2 = "audio/mp4a-latm.29";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC61937 = "audio/x-iec61937";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC60958 = "audio/x-iec60958";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
 const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index afa0c6d..fb8c299 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -62,6 +62,10 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
 extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
 extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4;
 extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
 extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
 extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
@@ -77,7 +81,40 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
 extern const char *MEDIA_MIMETYPE_AUDIO_APTX;
 extern const char *MEDIA_MIMETYPE_AUDIO_DRA;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC_FORMAT;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC61937;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC60958;
 
 extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
 extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/media/libstagefright/foundation/tests/AVCUtils/Android.bp b/media/libstagefright/foundation/tests/AVCUtils/Android.bp
index 594da56..ee7db21 100644
--- a/media/libstagefright/foundation/tests/AVCUtils/Android.bp
+++ b/media/libstagefright/foundation/tests/AVCUtils/Android.bp
@@ -43,10 +43,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright/foundation",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/foundation/tests/Android.bp b/media/libstagefright/foundation/tests/Android.bp
index e50742e..e72ce43 100644
--- a/media/libstagefright/foundation/tests/Android.bp
+++ b/media/libstagefright/foundation/tests/Android.bp
@@ -18,10 +18,6 @@
         "-Wall",
     ],
 
-    include_dirs: [
-        "frameworks/av/include",
-    ],
-
     shared_libs: [
         "liblog",
         "libstagefright_foundation",
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 0b0acbf..7acf735 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -29,7 +29,6 @@
     ],
 
     include_dirs: [
-        "frameworks/av/media/libstagefright",
         "frameworks/native/include/media/openmax",
     ],
 
@@ -65,6 +64,8 @@
 
     header_libs: [
         "libbase_headers",
+        "libstagefright_headers",
+        "libstagefright_httplive_headers",
     ],
 
     static_libs: [
@@ -74,3 +75,8 @@
     ],
 
 }
+
+cc_library_headers {
+    name: "libstagefright_httplive_headers",
+    export_include_dirs: ["."],
+}
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 3bad015..0d7cadd 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -23,7 +23,7 @@
 #include "M3UParser.h"
 #include "PlaylistFetcher.h"
 
-#include "mpeg2ts/AnotherPacketSource.h"
+#include <AnotherPacketSource.h>
 
 #include <cutils/properties.h>
 #include <media/MediaHTTPService.h>
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 7a6d487..ceea41d 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -24,7 +24,7 @@
 
 #include <utils/String8.h>
 
-#include "mpeg2ts/ATSParser.h"
+#include <ATSParser.h>
 
 namespace android {
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index b23aa8a..907b326 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -24,9 +24,9 @@
 #include "HTTPDownloader.h"
 #include "LiveSession.h"
 #include "M3UParser.h"
-#include "include/ID3.h"
-#include "mpeg2ts/AnotherPacketSource.h"
-#include "mpeg2ts/HlsSampleDecryptor.h"
+#include <ID3.h>
+#include <AnotherPacketSource.h>
+#include <HlsSampleDecryptor.h>
 
 #include <datasource/DataURISource.h>
 #include <media/stagefright/foundation/ABitReader.h>
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 5d3f9c1..2e28164 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -21,7 +21,7 @@
 #include <media/stagefright/foundation/AHandler.h>
 #include <openssl/aes.h>
 
-#include "mpeg2ts/ATSParser.h"
+#include <ATSParser.h>
 #include "LiveSession.h"
 
 namespace android {
diff --git a/media/libstagefright/httplive/fuzzer/Android.bp b/media/libstagefright/httplive/fuzzer/Android.bp
new file mode 100644
index 0000000..14097b0
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/Android.bp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_media_libstagefright_httplive_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: [
+        "frameworks_av_media_libstagefright_httplive_license",
+    ],
+}
+
+cc_fuzz {
+    name: "httplive_fuzzer",
+    srcs: [
+        "httplive_fuzzer.cpp",
+    ],
+    static_libs: [
+        "libstagefright_httplive",
+        "libstagefright_id3",
+        "libstagefright_metadatautils",
+        "libstagefright_mpeg2support",
+        "liblog",
+        "libcutils",
+        "libdatasource",
+        "libmedia",
+        "libstagefright",
+        "libutils",
+    ],
+    header_libs: [
+        "libbase_headers",
+        "libstagefright_foundation_headers",
+        "libstagefright_headers",
+        "libstagefright_httplive_headers",
+    ],
+    shared_libs: [
+        "libcrypto",
+        "libstagefright_foundation",
+        "libhidlbase",
+        "libhidlmemory",
+        "android.hidl.allocator@1.0",
+    ],
+    corpus: ["corpus/*"],
+    dictionary: "httplive_fuzzer.dict",
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/media/libstagefright/httplive/fuzzer/README.md b/media/libstagefright/httplive/fuzzer/README.md
new file mode 100644
index 0000000..3a64ea4
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/README.md
@@ -0,0 +1,56 @@
+# Fuzzer for libstagefright_httplive
+
+## Plugin Design Considerations
+The fuzzer plugin for libstagefright_httplive is designed based on an understanding of the library and aims to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. Also, several .m3u8 files are hand-crafted and added to the corpus directory to increase code coverage. This ensures more code paths are reached by the fuzzer.
+
+libstagefright_httplive supports the following parameters:
+1. Final Result (parameter name: `finalResult`)
+2. Flags (parameter name: `flags`)
+3. Time Us (parameter name: `timeUs`)
+4. Track Index (parameter name: `trackIndex`)
+5. Index (parameter name: `index`)
+6. Select (parameter name: `select`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `finalResult` | `-34` to `-1` | Value obtained from FuzzedDataProvider|
+| `flags` | `0` to `1` | Value obtained from FuzzedDataProvider|
+| `timeUs` | `0` to `10000000` | Value obtained from FuzzedDataProvider|
+| `trackIndex` | `0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `index` | `0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `select` | `True` or `False` | Value obtained from FuzzedDataProvider|
+
+This also ensures that the plugin is always deterministic for any given input.
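+
+For illustration, a minimal sketch (assuming only the standard `FuzzedDataProvider`
+API) of how such values can be drawn deterministically from the fuzzer input:
+```
+// Illustrative only: deriving the parameters listed above from the input bytes.
+// The same input always yields the same configuration.
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include <cstddef>
+#include <cstdint>
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp(data, size);
+  const int32_t finalResult = fdp.ConsumeIntegralInRange<int32_t>(-34, -1);
+  const uint32_t flags = fdp.ConsumeIntegralInRange<uint32_t>(0, 1);
+  const int64_t timeUs = fdp.ConsumeIntegralInRange<int64_t>(0, 10000000);
+  const uint32_t trackIndex = fdp.ConsumeIntegral<uint32_t>();
+  const uint32_t index = fdp.ConsumeIntegral<uint32_t>();
+  const bool select = fdp.ConsumeBool();
+  (void)finalResult; (void)flags; (void)timeUs;
+  (void)trackIndex; (void)index; (void)select;
+  return 0;
+}
+```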
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the httplive module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the httplive_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) httplive_fuzzer
+```
+#### Steps to run
+To run on device
+```
+  $ adb push $ANDROID_PRODUCT_OUT/data/fuzz/${TARGET_ARCH}/lib /data/fuzz/${TARGET_ARCH}/lib
+  $ adb push $ANDROID_PRODUCT_OUT/data/fuzz/${TARGET_ARCH}/httplive_fuzzer /data/fuzz/${TARGET_ARCH}/httplive_fuzzer
+  $ adb shell /data/fuzz/${TARGET_ARCH}/httplive_fuzzer/httplive_fuzzer /data/fuzz/${TARGET_ARCH}/httplive_fuzzer/corpus
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/media/libstagefright/httplive/fuzzer/corpus/crypt.key b/media/libstagefright/httplive/fuzzer/corpus/crypt.key
new file mode 100644
index 0000000..f9d5d7f
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/crypt.key
@@ -0,0 +1,2 @@

+ÏŒüÐ5Љ_xïHÎ3
diff --git a/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8
new file mode 100644
index 0000000..32b0eac
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8
@@ -0,0 +1,12 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-ALLOW-CACHE:YES
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXT-X-VERSION:3
+#EXT-X-MEDIA-SEQUENCE:1
+#EXT-X-KEY:METHOD=AES-128,URI="../../fuzz/arm64/httplive_fuzzer/corpus/crypt.key"
+#EXTINF:10.000,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5.092,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8
new file mode 100644
index 0000000..9338e04
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8
@@ -0,0 +1,8 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-MEDIA-SEQUENCE:0
+#EXTINF:10, no desc
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10, no desc
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8
new file mode 100644
index 0000000..e1eff58
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8
@@ -0,0 +1,14 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence0.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8
new file mode 100644
index 0000000..37a0189
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8
@@ -0,0 +1,6 @@
+#EXTM3U
+#EXT-X-INDEPENDENT-SEGMENTS
+#EXT-X-STREAM-INF:CLOSED-CAPTIONS=NONE,BANDWIDTH=165340,RESOLUTION=256x144,CODECS="mp4a.40.5,avc1.42c00b"
+https://non.existentsite.com/test-doesnt-dereference-these-paths/prog_index.m3u8
+#EXT-X-STREAM-INF:CLOSED-CAPTIONS=NONE,BANDWIDTH=344388,RESOLUTION=426x240,CODECS="mp4a.40.5,avc1.4d4015"
+https://non.existentsite.com/test-doesnt-dereference-these-paths/prog_index1.m3u8
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8
new file mode 100644
index 0000000..1b7f489
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8
@@ -0,0 +1,13 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=AES-128,URI="https://demo.unified-streaming.com/video/tears-of-steel/aes.key",IV=0X99b74007b6254e4bd1c6e03631cad15b
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence3.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8
new file mode 100644
index 0000000..89ba37c
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8
@@ -0,0 +1,15 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;charset=utf-8,a4cd9995a1aa91e1",IV=0X99b74007b6254e4bd1c6e03631cad15b
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence0.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8
new file mode 100644
index 0000000..2120de4
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8
@@ -0,0 +1,14 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:11
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-MEDIA-SEQUENCE:0
+#EXT-X-VERSION:4
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:10@0
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:20@10
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:80
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8
new file mode 100644
index 0000000..588368a
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8
@@ -0,0 +1,12 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=AES-128,URI="data:text/plain;charset=utf-8,a4cd9995a1aa91e1",IV=0x30303030303030303030303030303030
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence3.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8
new file mode 100644
index 0000000..b09948e
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8
@@ -0,0 +1,46 @@
+#EXTM3U
+#EXT-X-VERSION:4
+## Created with Unified Streaming Platform  (version=1.11.3-24438)
+#EXT-X-SESSION-KEY:METHOD=AES-128,URI="https://demo.unified-streaming.com/video/tears-of-steel/aes.key"
+
+# AUDIO groups
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-aacl-64",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,CHANNELS="2"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-aacl-128",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,CHANNELS="2"
+
+# SUBTITLES groups
+#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="textstream",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,URI="tears-of-steel-aes-textstream_eng=1000.m3u8"
+#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="textstream",LANGUAGE="ru",NAME="Russian",AUTOSELECT=YES,URI="tears-of-steel-aes-textstream_rus=1000.m3u8"
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=494000,CODECS="mp4a.40.2,avc1.42C00D",RESOLUTION=224x100,FRAME-RATE=24,AUDIO="audio-aacl-64",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=64008-video_eng=401000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=933000,CODECS="mp4a.40.2,avc1.42C016",RESOLUTION=448x200,FRAME-RATE=24,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=751000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1198000,CODECS="mp4a.40.2,avc1.4D401F",RESOLUTION=784x350,FRAME-RATE=24,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=1001000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1728000,CODECS="mp4a.40.2,avc1.640028",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=1501000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=2469000,CODECS="mp4a.40.2,avc1.640028",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=2200000.m3u8
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=1025000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-64",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=64008-video_eng_1=902000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1368000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=2576x1150,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng_1=1161000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1815000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=3360x1500,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng_1=1583000.m3u8
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=69000,CODECS="mp4a.40.2",AUDIO="audio-aacl-64",SUBTITLES="textstream"
+tears-of-steel-aes-audio_eng=64008.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=137000,CODECS="mp4a.40.2",AUDIO="audio-aacl-128",SUBTITLES="textstream"
+tears-of-steel-aes-audio_eng=128002.m3u8
+
+# keyframes
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=54000,CODECS="avc1.42C00D",RESOLUTION=224x100,URI="keyframes/tears-of-steel-aes-video_eng=401000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=100000,CODECS="avc1.42C016",RESOLUTION=448x200,URI="keyframes/tears-of-steel-aes-video_eng=751000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=133000,CODECS="avc1.4D401F",RESOLUTION=784x350,URI="keyframes/tears-of-steel-aes-video_eng=1001000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=120000,CODECS="hvc1.1.6.L150.90",RESOLUTION=1680x750,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=902000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=154000,CODECS="hvc1.1.6.L150.90",RESOLUTION=2576x1150,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=1161000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=210000,CODECS="hvc1.1.6.L150.90",RESOLUTION=3360x1500,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=1583000.m3u8"
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8
new file mode 100644
index 0000000..353d589
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8
@@ -0,0 +1,13 @@
+#EXTM3U
+#EXT-X-VERSION:5
+
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="English stereo",LANGUAGE="en",AUTOSELECT=YES,URI="../../fuzz/arm64/httplive_fuzzer/index1.m3u8"
+
+#EXT-X-STREAM-INF:BANDWIDTH=628000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=320x180,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index1.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=928000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=480x270,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index2.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1728000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=640x360,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index3.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=2528000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=960x540,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index1.m3u8
diff --git a/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8
new file mode 100644
index 0000000..eb88422
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8
@@ -0,0 +1,17 @@
+#EXTM3U
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="eng",NAME="English",AUTOSELECT=YES,DEFAULT=YES,URI="corpus/index1.m3u8"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="fre",NAME="Français",AUTOSELECT=YES,DEFAULT=NO,URI="corpus/index1.m3u8"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="sp",NAME="Espanol",AUTOSELECT=YES,DEFAULT=NO,URI="corpus/index1.m3u8"
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp
new file mode 100644
index 0000000..aa777b3
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <LiveDataSource.h>
+#include <LiveSession.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
+#include <media/mediaplayer_common.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/ALooperRoster.h>
+#include <string>
+#include <utils/Log.h>
+
+using namespace std;
+using namespace android;
+
+constexpr char kFileNamePrefix[] = "/data/local/tmp/httplive-";
+constexpr char kFileNameSuffix[] = ".m3u8";
+constexpr char kFileUrlPrefix[] = "file://";
+constexpr int64_t kOffSet = 0;
+constexpr int32_t kReadyMarkMs = 5000;
+constexpr int32_t kPrepareMarkMs = 1500;
+constexpr int32_t kErrorNoMax = -1;
+constexpr int32_t kErrorNoMin = -34;
+constexpr int32_t kMaxTimeUs = 1000;
+constexpr int32_t kRandomStringLength = 64;
+constexpr int32_t kRangeMin = 0;
+constexpr int32_t kRangeMax = 1000;
+
+constexpr LiveSession::StreamType kValidStreamType[] = {
+    LiveSession::STREAMTYPE_AUDIO, LiveSession::STREAMTYPE_VIDEO,
+    LiveSession::STREAMTYPE_SUBTITLES, LiveSession::STREAMTYPE_METADATA};
+
+constexpr MediaSource::ReadOptions::SeekMode kValidSeekMode[] = {
+    MediaSource::ReadOptions::SeekMode::SEEK_PREVIOUS_SYNC,
+    MediaSource::ReadOptions::SeekMode::SEEK_NEXT_SYNC,
+    MediaSource::ReadOptions::SeekMode::SEEK_CLOSEST_SYNC,
+    MediaSource::ReadOptions::SeekMode::SEEK_CLOSEST,
+    MediaSource::ReadOptions::SeekMode::SEEK_FRAME_INDEX};
+
+constexpr media_track_type kValidMediaTrackType[] = {
+    MEDIA_TRACK_TYPE_UNKNOWN,  MEDIA_TRACK_TYPE_VIDEO,
+    MEDIA_TRACK_TYPE_AUDIO,    MEDIA_TRACK_TYPE_TIMEDTEXT,
+    MEDIA_TRACK_TYPE_SUBTITLE, MEDIA_TRACK_TYPE_METADATA};
+
+struct TestAHandler : public AHandler {
+public:
+  TestAHandler(std::function<void()> signalEosFunction)
+      : mSignalEosFunction(signalEosFunction) {}
+  virtual ~TestAHandler() {}
+
+protected:
+  void onMessageReceived(const sp<AMessage> &msg) override {
+    int32_t what = -1;
+    msg->findInt32("what", &what);
+    switch (what) {
+    case LiveSession::kWhatError:
+    case LiveSession::kWhatPrepared:
+    case LiveSession::kWhatPreparationFailed: {
+      mSignalEosFunction();
+      break;
+    }
+    }
+    return;
+  }
+
+private:
+  std::function<void()> mSignalEosFunction;
+};
+
+struct TestMediaHTTPConnection : public MediaHTTPConnection {
+public:
+  TestMediaHTTPConnection() {}
+  virtual ~TestMediaHTTPConnection() {}
+
+  virtual bool connect(const char * /*uri*/,
+                       const KeyedVector<String8, String8> * /*headers*/) {
+    return true;
+  }
+
+  virtual void disconnect() { return; }
+
+  virtual ssize_t readAt(off64_t /*offset*/, void * /*data*/, size_t size) {
+    return size;
+  }
+
+  virtual off64_t getSize() { return 0; }
+  virtual status_t getMIMEType(String8 * /*mimeType*/) { return NO_ERROR; }
+  virtual status_t getUri(String8 * /*uri*/) { return NO_ERROR; }
+
+private:
+  DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPConnection);
+};
+
+struct TestMediaHTTPService : public MediaHTTPService {
+public:
+  TestMediaHTTPService() {}
+  ~TestMediaHTTPService() {}
+
+  virtual sp<MediaHTTPConnection> makeHTTPConnection() {
+    mediaHTTPConnection = sp<TestMediaHTTPConnection>::make();
+    return mediaHTTPConnection;
+  }
+
+private:
+  sp<TestMediaHTTPConnection> mediaHTTPConnection = nullptr;
+  DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPService);
+};
+
+class HttpLiveFuzzer {
+public:
+  void process(const uint8_t *data, size_t size);
+  void deInitLiveSession();
+  ~HttpLiveFuzzer() { deInitLiveSession(); }
+
+private:
+  void invokeLiveDataSource();
+  void createM3U8File(const uint8_t *data, size_t size);
+  void initLiveDataSource();
+  void invokeLiveSession();
+  void initLiveSession();
+  void invokeDequeueAccessUnit();
+  void invokeConnectAsync();
+  void invokeSeekTo();
+  void invokeGetConfig();
+  void signalEos();
+  string generateFileName();
+  sp<LiveDataSource> mLiveDataSource = nullptr;
+  sp<LiveSession> mLiveSession = nullptr;
+  sp<ALooper> mLiveLooper = nullptr;
+  sp<TestMediaHTTPService> httpService = nullptr;
+  sp<TestAHandler> mHandler = nullptr;
+  FuzzedDataProvider *mFDP = nullptr;
+  bool mEosReached = false;
+  std::mutex mDownloadCompleteMutex;
+  std::condition_variable mConditionalVariable;
+};
+
+string HttpLiveFuzzer::generateFileName() {
+  return kFileNamePrefix + to_string(getpid()) + kFileNameSuffix;
+}
+
+void HttpLiveFuzzer::createM3U8File(const uint8_t *data, size_t size) {
+  ofstream m3u8File;
+  string currentFileName = generateFileName();
+  m3u8File.open(currentFileName, ios::out | ios::binary);
+  m3u8File.write((char *)data, size);
+  m3u8File.close();
+}
+
+void HttpLiveFuzzer::initLiveDataSource() {
+  mLiveDataSource = sp<LiveDataSource>::make();
+}
+
+void HttpLiveFuzzer::invokeLiveDataSource() {
+  initLiveDataSource();
+  size_t size = mFDP->ConsumeIntegralInRange<size_t>(kRangeMin, kRangeMax);
+  sp<ABuffer> buffer = new ABuffer(size);
+  mLiveDataSource->queueBuffer(buffer);
+  uint8_t *data = new uint8_t[size];
+  mLiveDataSource->readAtNonBlocking(kOffSet, data, size);
+  int32_t finalResult = mFDP->ConsumeIntegralInRange(kErrorNoMin, kErrorNoMax);
+  mLiveDataSource->queueEOS(finalResult);
+  mLiveDataSource->reset();
+  mLiveDataSource->countQueuedBuffers();
+  mLiveDataSource->initCheck();
+  delete[] data;
+}
+
+void HttpLiveFuzzer::initLiveSession() {
+  ALooperRoster looperRoster;
+  mHandler =
+      sp<TestAHandler>::make(std::bind(&HttpLiveFuzzer::signalEos, this));
+  mLiveLooper = sp<ALooper>::make();
+  mLiveLooper->setName("http live");
+  mLiveLooper->start();
+  sp<AMessage> notify = sp<AMessage>::make(0, mHandler);
+  httpService = new TestMediaHTTPService();
+  uint32_t flags = mFDP->ConsumeIntegral<uint32_t>();
+  mLiveSession = sp<LiveSession>::make(notify, flags, httpService);
+  mLiveLooper->registerHandler(mLiveSession);
+  looperRoster.registerHandler(mLiveLooper, mHandler);
+}
+
+void HttpLiveFuzzer::invokeDequeueAccessUnit() {
+  LiveSession::StreamType stream = mFDP->PickValueInArray(kValidStreamType);
+  sp<ABuffer> buffer;
+  mLiveSession->dequeueAccessUnit(stream, &buffer);
+}
+
+void HttpLiveFuzzer::invokeSeekTo() {
+  int64_t timeUs = mFDP->ConsumeIntegralInRange<int64_t>(0, kMaxTimeUs);
+  MediaSource::ReadOptions::SeekMode mode =
+      mFDP->PickValueInArray(kValidSeekMode);
+  mLiveSession->seekTo(timeUs, mode);
+}
+
+void HttpLiveFuzzer::invokeGetConfig() {
+  mLiveSession->getTrackCount();
+  size_t trackIndex = mFDP->ConsumeIntegral<size_t>();
+  mLiveSession->getTrackInfo(trackIndex);
+  media_track_type type = mFDP->PickValueInArray(kValidMediaTrackType);
+  mLiveSession->getSelectedTrack(type);
+  sp<MetaData> meta;
+  LiveSession::StreamType stream = mFDP->PickValueInArray(kValidStreamType);
+  mLiveSession->getStreamFormatMeta(stream, &meta);
+  mLiveSession->getKeyForStream(stream);
+  if (stream != LiveSession::STREAMTYPE_SUBTITLES) {
+    mLiveSession->getSourceTypeForStream(stream);
+  }
+}
+
+void HttpLiveFuzzer::invokeConnectAsync() {
+  string currentFileName = generateFileName();
+  string url = kFileUrlPrefix + currentFileName;
+  string str_1 = mFDP->ConsumeRandomLengthString(kRandomStringLength);
+  string str_2 = mFDP->ConsumeRandomLengthString(kRandomStringLength);
+
+  KeyedVector<String8, String8> headers;
+  headers.add(String8(str_1.c_str()), String8(str_2.c_str()));
+  mLiveSession->connectAsync(url.c_str(), &headers);
+}
+
+void HttpLiveFuzzer::invokeLiveSession() {
+  initLiveSession();
+  BufferingSettings bufferingSettings;
+  bufferingSettings.mInitialMarkMs = kPrepareMarkMs;
+  bufferingSettings.mResumePlaybackMarkMs = kReadyMarkMs;
+  mLiveSession->setBufferingSettings(bufferingSettings);
+  invokeConnectAsync();
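+  // Block until TestAHandler invokes signalEos(); judging by the names, this marks the
+  // download as complete before the session APIs below are exercised.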
+  std::unique_lock waitForDownloadComplete(mDownloadCompleteMutex);
+  mConditionalVariable.wait(waitForDownloadComplete,
+                            [this] { return mEosReached; });
+  if (mLiveSession->isSeekable()) {
+    invokeSeekTo();
+  }
+  invokeDequeueAccessUnit();
+  size_t index = mFDP->ConsumeIntegral<size_t>();
+  bool select = mFDP->ConsumeBool();
+  mLiveSession->selectTrack(index, select);
+  mLiveSession->hasDynamicDuration();
+  int64_t firstTimeUs =
+      mFDP->ConsumeIntegralInRange<int64_t>(kRangeMin, kRangeMax);
+  int64_t timeUs = mFDP->ConsumeIntegralInRange<int64_t>(kRangeMin, kRangeMax);
+  int32_t discontinuitySeq = mFDP->ConsumeIntegral<int32_t>();
+  mLiveSession->calculateMediaTimeUs(firstTimeUs, timeUs, discontinuitySeq);
+  invokeGetConfig();
+}
+
+void HttpLiveFuzzer::process(const uint8_t *data, size_t size) {
+  mFDP = new FuzzedDataProvider(data, size);
+  createM3U8File(data, size);
+  invokeLiveDataSource();
+  invokeLiveSession();
+  delete mFDP;
+}
+
+void HttpLiveFuzzer::deInitLiveSession() {
+  if (mLiveSession != nullptr) {
+    mLiveSession->disconnect();
+    mLiveLooper->unregisterHandler(mLiveSession->id());
+    mLiveLooper->stop();
+  }
+  mLiveSession.clear();
+  mLiveLooper.clear();
+}
+
+void HttpLiveFuzzer::signalEos() {
+  {
+    // Set the flag under the same mutex the waiter uses, so the update is not racy.
+    std::lock_guard<std::mutex> lock(mDownloadCompleteMutex);
+    mEosReached = true;
+  }
+  mConditionalVariable.notify_one();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  HttpLiveFuzzer httpliveFuzzer;
+  httpliveFuzzer.process(data, size);
+  return 0;
+}
diff --git a/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict
new file mode 100644
index 0000000..703cc7e
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict
@@ -0,0 +1,15 @@
+#m3u8-Tags
+kw1="#EXTM3U"
+kw2="#EXT-X-VERSION:"
+kw3="#EXT-X-TARGETDURATION:"
+kw4="#EXT-X-PLAYLIST-TYPE:"
+kw5="#EXTINF:"
+kw6="#EXT-X-ENDLIST"
+kw7="#EXT-X-MEDIA-SEQUENCE:"
+kw8="#EXT-X-KEY:METHOD=NONE"
+kw9="#EXT-X-DISCONTINUITY:"
+kw10="#EXT-X-DISCONTINUITY-SEQUENCE:0"
+kw11="#EXT-X-STREAM-INF:BANDWIDTH="
+kw12="#EXT-X-STREAM-INF:CODECS="
+kw13="#EXT-X-BYTERANGE:"
+kw14="#EXT-X-MEDIA"
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index c84cc10..632b32c 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -71,6 +71,9 @@
     virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface);
     virtual void initiateStart();
     virtual void initiateShutdown(bool keepComponentAllocated = false);
+    virtual status_t querySupportedParameters(std::vector<std::string> *names) override;
+    virtual status_t subscribeToParameters(const std::vector<std::string> &names) override;
+    virtual status_t unsubscribeFromParameters(const std::vector<std::string> &names) override;
 
     status_t queryCapabilities(
             const char* owner, const char* name,
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index a970224..fbfa8cc 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -27,11 +27,6 @@
         "ESQueue.cpp",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-        "frameworks/native/include/media/openmax",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
@@ -76,7 +71,6 @@
     },
 }
 
-
 cc_library_static {
     name: "libstagefright_mpeg2support",
     defaults: [
diff --git a/media/libstagefright/mpeg2ts/test/Android.bp b/media/libstagefright/mpeg2ts/test/Android.bp
index 464b039..34a8d3e 100644
--- a/media/libstagefright/mpeg2ts/test/Android.bp
+++ b/media/libstagefright/mpeg2ts/test/Android.bp
@@ -57,11 +57,6 @@
         "libstagefright_mpeg2support",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/extractors/",
-        "frameworks/av/media/libstagefright/",
-    ],
-
     header_libs: [
         "libmedia_headers",
         "libaudioclient_headers",
diff --git a/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp b/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
index 79c233b..7f25d78 100644
--- a/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
+++ b/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
@@ -27,8 +27,8 @@
 #include <media/stagefright/MetaDataBase.h>
 #include <media/stagefright/foundation/AUtils.h>
 
-#include "mpeg2ts/ATSParser.h"
-#include "mpeg2ts/AnotherPacketSource.h"
+#include <ATSParser.h>
+#include <AnotherPacketSource.h>
 
 #include "Mpeg2tsUnitTestEnvironment.h"
 
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index ffccbb1..0bd342a 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -508,8 +508,6 @@
                     if (n != (ssize_t)buffer->size()) {
                         ALOGW("failed to send RTCP TMMBR (%s).",
                                 n >= 0 ? "connection gone" : strerror(errno));
-
-                        it = mStreams.erase(it);
                         continue;
                     }
                 }
@@ -560,8 +558,6 @@
                 if (n != (ssize_t)buffer->size()) {
                     ALOGW("failed to send RTCP receiver report (%s).",
                             n >= 0 ? "connection gone" : strerror(errno));
-
-                    it = mStreams.erase(it);
                     continue;
                 }
 
@@ -621,7 +617,14 @@
     } while (nbytes < 0 && errno == EINTR);
 
     if (nbytes <= 0) {
-        return -ECONNRESET;
+        ALOGW("failed to recv rtp packet. cause=%s", strerror(errno));
+        // ECONNREFUSED may be reported by a subsequent recvfrom() call if one of the
+        // outgoing packets could not be delivered to the remote end by sendto().
+        if (errno == ECONNREFUSED) {
+            return -ECONNREFUSED;
+        } else {
+            return -ECONNRESET;
+        }
     }
 
     buffer->setRange(0, nbytes);
@@ -665,6 +668,10 @@
                     pRemoteRTCPAddr, sizeSockSt);
         } while (n < 0 && errno == EINTR);
 
+        if (n < 0) {
+            ALOGW("failed to send rtcp packet. cause=%s", strerror(errno));
+        }
+
         return n;
 }
 
diff --git a/media/libstagefright/rtsp/JitterCalculator.cpp b/media/libstagefright/rtsp/JitterCalculator.cpp
index 93b5a83..7e60be2 100644
--- a/media/libstagefright/rtsp/JitterCalculator.cpp
+++ b/media/libstagefright/rtsp/JitterCalculator.cpp
@@ -38,14 +38,13 @@
     mInterArrivalJitterUs = inter;
 }
 
-void JitterCalc::putBaseData(int64_t rtpTime, int64_t arrivalTimeUs) {
-    // A RTP time wraps around after UINT32_MAX. We must consider this case.
-    const int64_t UINT32_MSB = 0x80000000;
-    int64_t overflowMask = (mFirstTimeStamp & UINT32_MSB & ~rtpTime) << 1;
-    int64_t tempRtpTime = overflowMask | rtpTime;
+void JitterCalc::putBaseData(uint32_t rtpTime, int64_t arrivalTimeUs) {
+    // An RTP timestamp wraps around after UINT32_MAX, so the subtraction below may overflow.
+    uint32_t diff = 0;
+    __builtin_usub_overflow(rtpTime, mFirstTimeStamp, &diff);
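+    // Unsigned subtraction wraps modulo 2^32, so the int32_t cast below yields the correct
+    // signed delta even when the timestamp has wrapped past mFirstTimeStamp.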
 
     // Base jitter implementation can be various
-    int64_t scheduledTimeUs = (tempRtpTime - (int64_t)mFirstTimeStamp) * 1000000ll / mClockRate;
+    int64_t scheduledTimeUs = ((int32_t)diff) * 1000000ll / mClockRate;
     int64_t elapsedTimeUs = arrivalTimeUs - mFirstArrivalTimeUs;
     int64_t correctionTimeUs = elapsedTimeUs - scheduledTimeUs; // additional propagation delay;
     mBaseJitterUs = (mBaseJitterUs * 15 + correctionTimeUs) / 16;
@@ -53,18 +52,13 @@
             (long long)mBaseJitterUs, (long long)correctionTimeUs);
 }
 
-void JitterCalc::putInterArrivalData(int64_t rtpTime, int64_t arrivalTimeUs) {
-    const int64_t UINT32_MSB = 0x80000000;
-    int64_t tempRtpTime = rtpTime;
-    int64_t tempLastTimeStamp = mLastTimeStamp;
-
-    // A RTP time wraps around after UINT32_MAX. We must consider this case.
-    int64_t overflowMask = (mLastTimeStamp ^ rtpTime) & UINT32_MSB;
-    tempRtpTime |= ((overflowMask & ~rtpTime) << 1);
-    tempLastTimeStamp |= ((overflowMask & ~mLastTimeStamp) << 1);
+void JitterCalc::putInterArrivalData(uint32_t rtpTime, int64_t arrivalTimeUs) {
+    // An RTP timestamp wraps around after UINT32_MAX, so the subtraction below may overflow.
+    uint32_t diff = 0;
+    __builtin_usub_overflow(rtpTime, mLastTimeStamp, &diff);
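+    // As in putBaseData(), the wrapping subtraction plus signed cast handles RTP timestamp
+    // rollover without explicit MSB bookkeeping.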
 
     // 6.4.1 of RFC3550 defines this interarrival jitter value.
-    int64_t diffTimeStampUs = abs(tempRtpTime - tempLastTimeStamp) * 1000000ll / mClockRate;
+    int64_t diffTimeStampUs = abs((int32_t)diff) * 1000000ll / mClockRate;
     int64_t diffArrivalUs = arrivalTimeUs - mLastArrivalTimeUs; // Can't be minus
     ALOGV("diffTimeStampUs %lld \t\t diffArrivalUs %lld",
             (long long)diffTimeStampUs, (long long)diffArrivalUs);
@@ -72,7 +66,7 @@
     int64_t varianceUs = diffArrivalUs - diffTimeStampUs;
     mInterArrivalJitterUs = (mInterArrivalJitterUs * 15 + abs(varianceUs)) / 16;
 
-    mLastTimeStamp = (uint32_t)rtpTime;
+    mLastTimeStamp = rtpTime;
     mLastArrivalTimeUs = arrivalTimeUs;
 }
 
diff --git a/media/libstagefright/rtsp/JitterCalculator.h b/media/libstagefright/rtsp/JitterCalculator.h
index ff36f1f..4f3b761 100644
--- a/media/libstagefright/rtsp/JitterCalculator.h
+++ b/media/libstagefright/rtsp/JitterCalculator.h
@@ -40,8 +40,8 @@
     JitterCalc(int32_t clockRate);
 
     void init(uint32_t rtpTime, int64_t arrivalTimeUs, int32_t base, int32_t inter);
-    void putInterArrivalData(int64_t rtpTime, int64_t arrivalTime);
-    void putBaseData(int64_t rtpTime, int64_t arrivalTimeUs);
+    void putInterArrivalData(uint32_t rtpTime, int64_t arrivalTime);
+    void putBaseData(uint32_t rtpTime, int64_t arrivalTimeUs);
     int32_t getBaseJitterMs();
     int32_t getInterArrivalJitterMs();
 };
diff --git a/media/libstagefright/tests/HEVC/Android.bp b/media/libstagefright/tests/HEVC/Android.bp
index 91bf385..7a0ba52 100644
--- a/media/libstagefright/tests/HEVC/Android.bp
+++ b/media/libstagefright/tests/HEVC/Android.bp
@@ -44,10 +44,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
index 324a042..c43e1f8 100644
--- a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
+++ b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
@@ -21,7 +21,7 @@
 #include <fstream>
 
 #include <media/stagefright/foundation/ABitReader.h>
-#include "include/HevcUtils.h"
+#include <HevcUtils.h>
 
 #include "HEVCUtilsTestEnvironment.h"
 
diff --git a/media/libstagefright/tests/extractorFactory/Android.bp b/media/libstagefright/tests/extractorFactory/Android.bp
index 13d5b89..a067284 100644
--- a/media/libstagefright/tests/extractorFactory/Android.bp
+++ b/media/libstagefright/tests/extractorFactory/Android.bp
@@ -51,10 +51,6 @@
         "libstagefright_foundation",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     // TODO: (b/150181583)
     compile_multilib: "first",
 
diff --git a/media/libstagefright/tests/writer/Android.bp b/media/libstagefright/tests/writer/Android.bp
index 38d5ecc..49fb569 100644
--- a/media/libstagefright/tests/writer/Android.bp
+++ b/media/libstagefright/tests/writer/Android.bp
@@ -52,10 +52,6 @@
         "libogg",
     ],
 
-    include_dirs: [
-        "frameworks/av/media/libstagefright",
-    ],
-
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/libstagefright/tests/writer/WriterTest.cpp b/media/libstagefright/tests/writer/WriterTest.cpp
index d170e7c..398c592 100644
--- a/media/libstagefright/tests/writer/WriterTest.cpp
+++ b/media/libstagefright/tests/writer/WriterTest.cpp
@@ -36,7 +36,7 @@
 #include <media/stagefright/MPEG2TSWriter.h>
 #include <media/stagefright/MPEG4Writer.h>
 #include <media/stagefright/OggWriter.h>
-#include <webm/WebmWriter.h>
+#include <WebmWriter.h>
 
 #include "WriterTestEnvironment.h"
 #include "WriterUtility.h"
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 32a22ba..4209aea 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -33,7 +33,7 @@
         "WebmWriter.cpp",
     ],
 
-    include_dirs: ["frameworks/av/include"],
+    export_include_dirs: ["."],
 
     shared_libs: [
         "libdatasource",
@@ -44,6 +44,7 @@
     ],
 
     header_libs: [
+        "av-headers",
         "libmedia_headers",
         "media_ndk_headers",
     ],
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 1ae2b44..0e2de4e 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -452,17 +452,19 @@
         uint32_t flags) {
     sp<AMessage> nativeFormat;
     AMediaFormat_getFormat(format, &nativeFormat);
-    ALOGV("configure with format: %s", nativeFormat->debugString(0).c_str());
+    // Create our shallow copy so we are not affected by any later changes to the caller's format.
+    sp<AMessage> dupNativeFormat = nativeFormat->dup();
+    ALOGV("configure with format: %s", dupNativeFormat->debugString(0).c_str());
     sp<Surface> surface = NULL;
     if (window != NULL) {
         surface = (Surface*) window;
     }
 
-    status_t err = mData->mCodec->configure(nativeFormat, surface,
+    status_t err = mData->mCodec->configure(dupNativeFormat, surface,
             crypto ? crypto->mCrypto : NULL, flags);
     if (err != OK) {
         ALOGE("configure: err(%d), failed with format: %s",
-              err, nativeFormat->debugString(0).c_str());
+              err, dupNativeFormat->debugString(0).c_str());
     }
     return translate_error(err);
 }
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index b035e5a..69ab242 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -200,8 +200,11 @@
     AString tmp;
     if (mData->mFormat->findString(name, &tmp)) {
         String8 ret(tmp.c_str());
-        mData->mStringCache.add(String8(name), ret);
-        *out = ret.string();
+        ssize_t i = mData->mStringCache.add(String8(name), ret);
+        if (i < 0) {
+            return false;
+        }
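+        // Point the caller at the cached copy, which outlives this call; the local String8
+        // would not.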
+        *out = mData->mStringCache.valueAt(i).string();
         return true;
     }
     return false;
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 73c4e3b..6e29ac8 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -51,6 +51,7 @@
         "libpermission",
         "android.hardware.graphics.bufferqueue@1.0",
         "android.hidl.token@1.0-utils",
+        "packagemanager_aidl-cpp",
     ],
     export_static_lib_headers: [
         "libbatterystats_aidl",
@@ -71,6 +72,7 @@
 
     export_shared_lib_headers: [
         "libpermission",
+        "packagemanager_aidl-cpp",
     ],
 
     include_dirs: [
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index b91f302..f401fff 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -41,6 +41,7 @@
         "FastThreadState.cpp",
         "NBAIO_Tee.cpp",
         "PatchPanel.cpp",
+        "PropertyUtils.cpp",
         "SpdifStreamOut.cpp",
         "StateQueue.cpp",
         "Threads.cpp",
@@ -54,6 +55,7 @@
     ],
 
     shared_libs: [
+        "android.media.audio.common.types-V1-cpp",
         "audioflinger-aidl-cpp",
         "audioclient-types-aidl-cpp",
         "av-types-aidl-cpp",
@@ -81,6 +83,7 @@
         "libmedia_helper",
         "libshmemcompat",
         "libvibrator",
+        "packagemanager_aidl-cpp",
     ],
 
     static_libs: [
@@ -90,6 +93,7 @@
     ],
 
     header_libs: [
+        "libaaudio_headers",
         "libaudioclient_headers",
         "libaudiohal_headers",
         "libmedia_headers",
@@ -97,6 +101,7 @@
 
     export_shared_lib_headers: [
         "libpermission",
+        "packagemanager_aidl-cpp",
     ],
 
     cflags: [
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 8a32b72..b359331 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -57,6 +57,7 @@
 
 #include "AudioFlinger.h"
 #include "NBAIO_Tee.h"
+#include "PropertyUtils.h"
 
 #include <media/AudioResamplerPublic.h>
 
@@ -64,6 +65,7 @@
 #include <system/audio_effects/effect_ns.h>
 #include <system/audio_effects/effect_aec.h>
 #include <system/audio_effects/effect_hapticgenerator.h>
+#include <system/audio_effects/effect_spatializer.h>
 
 #include <audio_utils/primitives.h>
 
@@ -102,7 +104,11 @@
 
 namespace android {
 
+#define MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION 7.0
+
 using media::IEffectClient;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
 using android::content::AttributionSourceState;
 
 static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
@@ -300,6 +306,11 @@
 
     mDevicesFactoryHalCallback = new DevicesFactoryHalCallbackImpl;
     mDevicesFactoryHal->setCallbackOnce(mDevicesFactoryHalCallback);
+
+    if (mDevicesFactoryHal->getHalVersion() <= MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+        mAAudioBurstsPerBuffer = getAAudioMixerBurstCountFromSystemProperty();
+        mAAudioHwBurstMinMicros = getAAudioHardwareBurstMinUsecFromSystemProperty();
+    }
 }
 
 status_t AudioFlinger::setAudioHalPids(const std::vector<pid_t>& pids) {
@@ -335,6 +346,44 @@
     return NO_ERROR;
 }
 
+status_t AudioFlinger::getMmapPolicyInfos(
+            AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    Mutex::Autolock _l(mLock);
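+    // Results are cached per policy type: only the first query for a given type reaches the
+    // HAL (or the system properties on older HALs).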
+    if (const auto it = mPolicyInfos.find(policyType); it != mPolicyInfos.end()) {
+        *policyInfos = it->second;
+        return NO_ERROR;
+    }
+    if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+        AutoMutex lock(mHardwareLock);
+        for (size_t i = 0; i < mAudioHwDevs.size(); ++i) {
+            AudioHwDevice *dev = mAudioHwDevs.valueAt(i);
+            std::vector<AudioMMapPolicyInfo> infos;
+            status_t status = dev->getMmapPolicyInfos(policyType, &infos);
+            if (status != NO_ERROR) {
+                ALOGE("Failed to query mmap policy info of %d, error %d",
+                      mAudioHwDevs.keyAt(i), status);
+                continue;
+            }
+            policyInfos->insert(policyInfos->end(), infos.begin(), infos.end());
+        }
+        mPolicyInfos[policyType] = *policyInfos;
+    } else {
+        getMmapPolicyInfosFromSystemProperty(policyType, policyInfos);
+        mPolicyInfos[policyType] = *policyInfos;
+    }
+    return NO_ERROR;
+}
+
+int32_t AudioFlinger::getAAudioMixerBurstCount() {
+    Mutex::Autolock _l(mLock);
+    return mAAudioBurstsPerBuffer;
+}
+
+int32_t AudioFlinger::getAAudioHardwareBurstMinUsec() {
+    Mutex::Autolock _l(mLock);
+    return mAAudioHwBurstMinMicros;
+}
+
 // getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
 std::optional<media::AudioVibratorInfo> AudioFlinger::getDefaultVibratorInfo_l() {
     if (mAudioVibratorInfos.empty()) {
@@ -566,10 +615,12 @@
     String8 result;
 
     result.append("Clients:\n");
+    result.append("   pid    heap_size\n");
     for (size_t i = 0; i < mClients.size(); ++i) {
         sp<Client> client = mClients.valueAt(i).promote();
         if (client != 0) {
-            result.appendFormat("  pid: %d\n", client->pid());
+            result.appendFormat("%6d %12zu\n", client->pid(),
+                    client->heap()->getMemoryHeap()->getSize());
         }
     }
 
@@ -2287,6 +2338,17 @@
         mHardwareStatus = AUDIO_HW_IDLE;
     }
 
+    if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
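+        // Aggregate across HAL modules: keep the largest mixer burst count and the smallest
+        // non-zero hardware burst duration reported by any device.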
+        if (int32_t mixerBursts = dev->getAAudioMixerBurstCount();
+            mixerBursts > mAAudioBurstsPerBuffer) {
+            mAAudioBurstsPerBuffer = mixerBursts;
+        }
+        if (int32_t hwBurstMinMicros = dev->getAAudioHardwareBurstMinUsec();
+            hwBurstMinMicros < mAAudioHwBurstMinMicros || mAAudioHwBurstMinMicros == 0) {
+            mAAudioHwBurstMinMicros = hwBurstMinMicros;
+        }
+    }
+
     mAudioHwDevs.add(handle, audioDevice);
 
     ALOGI("loadHwModule() Loaded %s audio interface, handle %d", name, handle);
@@ -2857,7 +2919,7 @@
             &config,
             device.mType,
             device.address().c_str(),
-            VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSourceType_audio_source_t(request.source)),
+            VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSource_audio_source_t(request.source)),
             VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_input_flags_t_mask(request.flags)),
             AUDIO_DEVICE_NONE,
             String8{});
@@ -3757,6 +3819,15 @@
             goto Exit;
         }
 
+        // Only audio policy service can create a spatializer effect
+        if ((memcmp(&descOut.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0) &&
+            (callingUid != AID_AUDIOSERVER || currentPid != getpid())) {
+            ALOGW("%s: attempt to create a spatializer effect from uid/pid %d/%d",
+                    __func__, callingUid, currentPid);
+            lStatus = PERMISSION_DENIED;
+            goto Exit;
+        }
+
         if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
             // if the output returned by getOutputForEffect() is removed before we lock the
             // mutex below, the call to checkPlaybackThread_l(io) below will detect it
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index d6bf0ae..8c546cc 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -282,6 +282,14 @@
     virtual status_t updateSecondaryOutputs(
             const TrackSecondaryOutputsMap& trackSecondaryOutputs);
 
+    virtual status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+    virtual int32_t getAAudioMixerBurstCount();
+
+    virtual int32_t getAAudioHardwareBurstMinUsec();
+
     status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
         const std::function<status_t()>& delegate) override;
 
@@ -1004,6 +1012,11 @@
 
     // Keep in sync with java definition in media/java/android/media/AudioRecord.java
     static constexpr int32_t kMaxSharedAudioHistoryMs = 5000;
+
+    std::map<media::audio::common::AudioMMapPolicyType,
+             std::vector<media::audio::common::AudioMMapPolicyInfo>> mPolicyInfos;
+    int32_t mAAudioBurstsPerBuffer = 0;
+    int32_t mAAudioHwBurstMinMicros = 0;
 };
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 16b25f6..dee6161 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -29,6 +29,9 @@
 
 namespace android {
 
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+
 // ----------------------------------------------------------------------------
 
 status_t AudioHwDevice::openOutputStream(
@@ -102,5 +105,18 @@
     return mHwDevice->getAudioPort(port);
 }
 
+status_t AudioHwDevice::getMmapPolicyInfos(
+            AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) const {
+    return mHwDevice->getMmapPolicyInfos(policyType, policyInfos);
+}
+
+int32_t AudioHwDevice::getAAudioMixerBurstCount() const {
+    return mHwDevice->getAAudioMixerBurstCount();
+}
+
+int32_t AudioHwDevice::getAAudioHardwareBurstMinUsec() const {
+    return mHwDevice->getAAudioHardwareBurstMinUsec();
+}
+
 
 }; // namespace android
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index fc2c693..8c5d239 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -22,6 +22,8 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <media/audiohal/DeviceHalInterface.h>
 #include <utils/Errors.h>
 #include <system/audio.h>
@@ -85,6 +87,14 @@
 
     status_t getAudioPort(struct audio_port_v7 *port) const;
 
+    status_t getMmapPolicyInfos(
+            media::audio::common::AudioMMapPolicyType policyType,
+            std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) const;
+
+    int32_t getAAudioMixerBurstCount() const;
+
+    int32_t getAAudioHardwareBurstMinUsec() const;
+
 private:
     const audio_module_handle_t mHandle;
     const char * const          mModuleName;
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index e9e98ca..b069462 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -195,6 +195,10 @@
 
     audio_output_flags_t getOutputFlags() const { return mFlags; }
     float getSpeed() const { return mSpeed; }
+
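+    // A track is spatializable unless its attributes mark the content as already spatialized
+    // or as never to be spatialized.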
+    bool canBeSpatialized() const { return (mAttr.flags
+            & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) == 0; }
+
 protected:
     // for numerous
     friend class PlaybackThread;
diff --git a/services/audioflinger/PropertyUtils.cpp b/services/audioflinger/PropertyUtils.cpp
new file mode 100644
index 0000000..65e2533
--- /dev/null
+++ b/services/audioflinger/PropertyUtils.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <cutils/properties.h>
+
+#include "PropertyUtils.h"
+
+namespace android {
+
+using media::audio::common::AudioMMapPolicy;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioMMapPolicyInfo;
+
+std::string getMmapPolicyProperty(AudioMMapPolicyType policyType) {
+    switch (policyType) {
+        case AudioMMapPolicyType::DEFAULT:
+            return "aaudio.mmap_policy";
+        case AudioMMapPolicyType::EXCLUSIVE:
+            return "aaudio.mmap_exclusive_policy";
+        default:
+            return "";
+    }
+}
+
+int getDefaultPolicyFromType(AudioMMapPolicyType policyType) {
+    switch (policyType) {
+        case AudioMMapPolicyType::EXCLUSIVE:
+            return AAUDIO_UNSPECIFIED;
+        case AudioMMapPolicyType::DEFAULT:
+        default:
+            return AAUDIO_POLICY_NEVER;
+    }
+}
+
+AudioMMapPolicy legacy2aidl_aaudio_policy_t_AudioMMapPolicy(aaudio_policy_t legacy) {
+    switch (legacy) {
+        case AAUDIO_POLICY_NEVER:
+            return AudioMMapPolicy::NEVER;
+        case AAUDIO_POLICY_AUTO:
+            return AudioMMapPolicy::AUTO;
+        case AAUDIO_POLICY_ALWAYS:
+            return AudioMMapPolicy::ALWAYS;
+        case AAUDIO_UNSPECIFIED:
+            return AudioMMapPolicy::UNSPECIFIED;
+        default:
+            ALOGE("%s unknown aaudio policy: %d", __func__, legacy);
+            return AudioMMapPolicy::UNSPECIFIED;
+    }
+}
+
+status_t getMmapPolicyInfosFromSystemProperty(
+        AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+    AudioMMapPolicyInfo policyInfo;
+    const std::string propertyStr = getMmapPolicyProperty(policyType);
+    if (propertyStr.empty()) {
+        return BAD_VALUE;
+    }
+    policyInfo.mmapPolicy = legacy2aidl_aaudio_policy_t_AudioMMapPolicy(
+            property_get_int32(propertyStr.c_str(), getDefaultPolicyFromType(policyType)));
+    policyInfos->push_back(policyInfo);
+    return NO_ERROR;
+}
+
+int32_t getAAudioMixerBurstCountFromSystemProperty() {
+    static const int32_t sDefaultBursts = 2; // arbitrary, use 2 for double buffered
+    static const int32_t sMaxBursts = 1024; // arbitrary
+    static const char* sPropMixerBursts = "aaudio.mixer_bursts";
+    int32_t prop = property_get_int32(sPropMixerBursts, sDefaultBursts);
+    if (prop <= 0 || prop > sMaxBursts) {
+        ALOGE("%s: invalid value %d, use default %d", __func__, prop, sDefaultBursts);
+        prop = sDefaultBursts;
+    }
+    return prop;
+}
+
+int32_t getAAudioHardwareBurstMinUsecFromSystemProperty() {
+    static const int32_t sDefaultMicros = 1000; // arbitrary
+    static const int32_t sMaxMicros = 1000 * 1000; // arbitrary
+    static const char* sPropHwBurstMinUsec = "aaudio.hw_burst_min_usec";
+    int32_t prop = property_get_int32(sPropHwBurstMinUsec, sDefaultMicros);
+    if (prop <= 0 || prop > sMaxMicros) {
+        ALOGE("%s invalid value %d, use default %d", __func__, prop, sDefaultMicros);
+        prop = sDefaultMicros;
+    }
+    return prop;
+}
+
+} // namespace android
diff --git a/services/audioflinger/PropertyUtils.h b/services/audioflinger/PropertyUtils.h
new file mode 100644
index 0000000..fbf651a
--- /dev/null
+++ b/services/audioflinger/PropertyUtils.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+
+namespace android {
+
+status_t getMmapPolicyInfosFromSystemProperty(
+        media::audio::common::AudioMMapPolicyType policyType,
+        std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+int32_t getAAudioMixerBurstCountFromSystemProperty();
+
+int32_t getAAudioHardwareBurstMinUsecFromSystemProperty();
+
+} // namespace android
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 9242f89..a14c1dd 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1343,6 +1343,13 @@
         return BAD_VALUE;
     }
 
+    if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+            && mType != SPATIALIZER) {
+        ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
+                __func__, mType);
+        return BAD_VALUE;
+    }
+
     switch (mType) {
     case MIXER: {
 #ifndef MULTICHANNEL_EFFECT_CHAIN
@@ -2052,6 +2059,7 @@
     free(mSinkBuffer);
     free(mMixerBuffer);
     free(mEffectBuffer);
+    free(mEffectToSinkBuffer);
 }
 
 // Thread virtuals
@@ -3004,11 +3012,18 @@
     // Originally this was int16_t[] array, need to remove legacy implications.
     free(mSinkBuffer);
     mSinkBuffer = NULL;
+    free(mEffectToSinkBuffer);
+    mEffectToSinkBuffer = nullptr;
+
     // For sink buffer size, we use the frame size from the downstream sink to avoid problems
     // with non PCM formats for compressed music, e.g. AAC, and Offload threads.
     const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
     (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
 
+    if (mType == SPATIALIZER) {
+        (void)posix_memalign(&mEffectToSinkBuffer, 32, sinkBufferSize);
+    }
+
     // We resize the mMixerBuffer according to the requirements of the sink buffer which
     // drives the output.
     free(mMixerBuffer);
@@ -3839,11 +3854,11 @@
             //
             // mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
             // TODO use mSleepTimeUs == 0 as an additional condition.
+            uint32_t mixerChannelCount = mEffectBufferValid ?
+                        audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
             if (mMixerBufferValid) {
                 void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
                 audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
-                uint32_t channelCount = mEffectBufferValid ?
-                            audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
 
                 // mono blend occurs for mixer threads only (not direct or offloaded)
                 // and is handled here if we're going directly to the sink.
@@ -3861,7 +3876,7 @@
                 }
 
                 memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
-                        mNormalFrameCount * (channelCount + mHapticChannelCount));
+                        mNormalFrameCount * (mixerChannelCount + mHapticChannelCount));
 
                 // If we're going directly to the sink and there are haptic channels,
                 // we should adjust channels as the sample data is partially interleaved
@@ -3894,8 +3909,11 @@
                             && activeHapticSessionId == effectChains[i]->sessionId()) {
                         // Haptic data is active in this case, copy it directly from
                         // in buffer to out buffer.
+                        uint32_t channelCount =
+                                effectChains[i]->sessionId() == AUDIO_SESSION_OUTPUT_STAGE ?
+                                        mixerChannelCount : mChannelCount;
                         const size_t audioBufferSize = mNormalFrameCount
-                                * audio_bytes_per_frame(mChannelCount, EFFECT_BUFFER_FORMAT);
+                                * audio_bytes_per_frame(channelCount, EFFECT_BUFFER_FORMAT);
                         memcpy_by_audio_format(
                                 (uint8_t*)effectChains[i]->outBuffer() + audioBufferSize,
                                 EFFECT_BUFFER_FORMAT,
@@ -3935,8 +3953,23 @@
                 mBalance.process((float *)mEffectBuffer, mNormalFrameCount);
             }
 
-            memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
-                    mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+            if (mType == SPATIALIZER) {
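+                // On SPATIALIZER threads, convert the effect output to the sink format in
+                // mEffectToSinkBuffer and accumulate it into mSinkBuffer, which already holds
+                // the tracks that were mixed directly to the sink because they cannot be
+                // spatialized.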
+                memcpy_by_audio_format(mEffectToSinkBuffer, mFormat, mEffectBuffer,
+                        mEffectBufferFormat,
+                        mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+                accumulate_by_audio_format(mSinkBuffer, mEffectToSinkBuffer, mFormat,
+                                           mNormalFrameCount * mChannelCount);
+                const size_t audioBufferSize = mNormalFrameCount
+                        * audio_bytes_per_frame(mChannelCount, mFormat);
+                memcpy_by_audio_format(
+                        (uint8_t*)mSinkBuffer + audioBufferSize,
+                        mFormat,
+                        (uint8_t*)mEffectToSinkBuffer + audioBufferSize,
+                        mFormat, mNormalFrameCount * mHapticChannelCount);
+            } else {
+                memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
+                        mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+            }
             // The sample data is partially interleaved when haptic channels exist,
             // we need to adjust channels here.
             if (mHapticChannelCount > 0) {
@@ -4598,6 +4631,7 @@
         initFastMixer = mFrameCount < mNormalFrameCount;
         break;
     }
+    ALOG_ASSERT(!(initFastMixer && mType == SPATIALIZER));
     ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
             "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
             mFrameCount, mNormalFrameCount);
@@ -4956,6 +4990,9 @@
         // before effects processing or output.
         if (mMixerBufferValid) {
             memset(mMixerBuffer, 0, mMixerBufferSize);
+            if (mType == SPATIALIZER) {
+                memset(mSinkBuffer, 0, mSinkBufferSize);
+            }
         } else {
             memset(mSinkBuffer, 0, mSinkBufferSize);
         }
@@ -5184,7 +5221,7 @@
                 break;
             case TrackBase::IDLE:
             default:
-                LOG_ALWAYS_FATAL("unexpected track state %d", track->mState);
+                LOG_ALWAYS_FATAL("unexpected track state %d", (int)track->mState);
             }
 
             if (isActive) {
@@ -5244,7 +5281,7 @@
                     // TODO Remove the ALOGW when this theory is confirmed.
                     ALOGW("fast track %d should have been active; "
                             "mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
-                            j, track->mState, state->mTrackMask, recentUnderruns,
+                            j, (int)track->mState, state->mTrackMask, recentUnderruns,
                             track->sharedBuffer() != 0);
                     // Since the FastMixer state already has the track inactive, do nothing here.
                 }
@@ -5448,11 +5485,21 @@
                 trackId,
                 AudioMixer::TRACK,
                 AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
-            mAudioMixer->setParameter(
-                trackId,
-                AudioMixer::TRACK,
-                AudioMixer::MIXER_CHANNEL_MASK,
-                (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
+
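+            // Tracks that cannot be spatialized on a SPATIALIZER thread are mixed at the sink
+            // channel mask; all other tracks use the mixer channel mask.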
+            if (mType == SPATIALIZER && !track->canBeSpatialized()) {
+                mAudioMixer->setParameter(
+                    trackId,
+                    AudioMixer::TRACK,
+                    AudioMixer::MIXER_CHANNEL_MASK,
+                    (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
+            } else {
+                mAudioMixer->setParameter(
+                    trackId,
+                    AudioMixer::TRACK,
+                    AudioMixer::MIXER_CHANNEL_MASK,
+                    (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
+            }
+
             // limit track sample rate to 2 x output sample rate, which changes at re-configuration
             uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
             uint32_t reqSampleRate = proxy->getSampleRate();
@@ -5489,16 +5536,27 @@
             if (mMixerBufferEnabled
                     && (track->mainBuffer() == mSinkBuffer
                             || track->mainBuffer() == mMixerBuffer)) {
-                mAudioMixer->setParameter(
-                        trackId,
-                        AudioMixer::TRACK,
-                        AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
-                mAudioMixer->setParameter(
-                        trackId,
-                        AudioMixer::TRACK,
-                        AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
-                // TODO: override track->mainBuffer()?
-                mMixerBufferValid = true;
+                if (mType == SPATIALIZER && !track->canBeSpatialized()) {
+                    mAudioMixer->setParameter(
+                            trackId,
+                            AudioMixer::TRACK,
+                            AudioMixer::MIXER_FORMAT, (void *)mFormat);
+                    mAudioMixer->setParameter(
+                            trackId,
+                            AudioMixer::TRACK,
+                            AudioMixer::MAIN_BUFFER, (void *)mSinkBuffer);
+                } else {
+                    mAudioMixer->setParameter(
+                            trackId,
+                            AudioMixer::TRACK,
+                            AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
+                    mAudioMixer->setParameter(
+                            trackId,
+                            AudioMixer::TRACK,
+                            AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
+                    // TODO: override track->mainBuffer()?
+                    mMixerBufferValid = true;
+                }
             } else {
                 mAudioMixer->setParameter(
                         trackId,
@@ -5688,8 +5746,10 @@
     // sink or mix buffer must be cleared if all tracks are connected to an
     // effect chain as in this case the mixer will not write to the sink or mix buffer
     // and track effects will accumulate into it
-    if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
-            (mixedTracks == 0 && fastTracks > 0))) {
+    // always clear sink buffer for spatializer output as the output of the spatializer
+    // effect will be accumulated into it
+    if ((mBytesRemaining == 0) && (((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+            (mixedTracks == 0 && fastTracks > 0)) || (mType == SPATIALIZER))) {
         // FIXME as a performance optimization, should remember previous zero status
         if (mMixerBufferValid) {
             memset(mMixerBuffer, 0, mMixerBufferSize);
@@ -8205,7 +8265,7 @@
                 ALOGV("active record track PAUSING -> ACTIVE");
                 recordTrack->mState = TrackBase::ACTIVE;
             } else {
-                ALOGV("active record track state %d", recordTrack->mState);
+                ALOGV("active record track state %d", (int)recordTrack->mState);
             }
             return status;
         }
@@ -8231,7 +8291,7 @@
             }
             if (recordTrack->mState != TrackBase::STARTING_1) {
                 ALOGW("%s(%d): unsynchronized mState:%d change",
-                    __func__, recordTrack->id(), recordTrack->mState);
+                    __func__, recordTrack->id(), (int)recordTrack->mState);
                 // Someone else has changed state, let them take over,
                 // leave mState in the new state.
                 recordTrack->clearSyncStartEvent();
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 300171e..5755c42 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1123,6 +1123,11 @@
     // for any processing (including output processing).
     bool                            mEffectBufferValid;
 
+    // Frame-size-aligned buffer used to convert mEffectBuffer samples to the mSinkBuffer format
+    // before they are accumulated into mSinkBuffer on SPATIALIZER threads.
+    void*                           mEffectToSinkBuffer = nullptr;
+
     // suspend count, > 0 means suspended.  While suspended, the thread continues to pull from
     // tracks and mix, but doesn't write to HAL.  A2DP and SCO HAL implementations can't handle
     // concurrent use of both of them, so Audio Policy Service suspends one of the threads to
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 92f129c..5311fe2 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -23,7 +23,7 @@
 class TrackBase : public ExtendedAudioBufferProvider, public RefBase {
 
 public:
-    enum track_state {
+    enum track_state : int32_t {
         IDLE,
         FLUSHED,        // for PlaybackTracks only
         STOPPED,
@@ -271,6 +271,7 @@
 
     void releaseCblk() {
         if (mCblk != nullptr) {
+            mState.clear();
             mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
             if (mClient == 0) {
                 free(mCblk);
@@ -355,7 +356,7 @@
                                     // except for OutputTrack when it is in local memory
     size_t              mBufferSize; // size of mBuffer in bytes
     // we don't really need a lock for these
-    track_state         mState;
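+    // Mirrored into the shared control block (see releaseCblk() and Tracks.cpp) so that the
+    // client side can observe the server-side state.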
+    MirroredVariable<track_state>  mState;
     const audio_attributes_t mAttr;
     const uint32_t      mSampleRate;    // initial sample rate only; for tracks which
                         // support dynamic rates, the current value is in control block
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index d2a30b1..e0c5fa5 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -234,7 +234,11 @@
 #ifdef TEE_SINK
         mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
 #endif
-
+        // mState is mirrored for the client to read.
+        mState.setMirror(&mCblk->mState);
+        // ensure our state matches up until we consolidate the enumeration.
+        static_assert(CBLK_STATE_IDLE == IDLE);
+        static_assert(CBLK_STATE_PAUSING == PAUSING);
     }
 }
 
@@ -933,7 +937,7 @@
     buffer->raw = buf.mRaw;
     if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
         ALOGV("%s(%d): underrun,  framesReady(%zu) < framesDesired(%zd), state: %d",
-                __func__, mId, buf.mFrameCount, desiredFrames, mState);
+                __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
         mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
     } else {
         mAudioTrackServerProxy->tallyUnderrunFrames(0);
@@ -1590,7 +1594,7 @@
                                       (mState == STOPPED)))) {
         ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
               __func__, mId,
-              mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
+              (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
         event->cancel();
         return INVALID_OPERATION;
     }
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 8613b2e..add0684 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -32,30 +32,42 @@
 
 // ----------------------------------------------------------------------------
 
-// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces
-// between the platform specific audio policy manager and Android generic audio policy manager.
-// The platform specific audio policy manager must implement methods of the AudioPolicyInterface class.
+// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication
+// interfaces between the platform specific audio policy manager and Android generic audio policy
+// manager.
+// The platform specific audio policy manager must implement methods of the AudioPolicyInterface
+// class.
 // This implementation makes use of the AudioPolicyClientInterface to control the activity and
 // configuration of audio input and output streams.
 //
 // The platform specific audio policy manager is in charge of the audio routing and volume control
 // policies for a given platform.
 // The main roles of this module are:
-//   - keep track of current system state (removable device connections, phone state, user requests...).
-//   System state changes and user actions are notified to audio policy manager with methods of the AudioPolicyInterface.
+//   - keep track of current system state (removable device connections, phone state,
+//   user requests...).
+//   System state changes and user actions are reported to the audio policy manager through the
+//   methods of the AudioPolicyInterface.
 //   - process getOutput() queries received when AudioTrack objects are created: Those queries
-//   return a handler on an output that has been selected, configured and opened by the audio policy manager and that
-//   must be used by the AudioTrack when registering to the AudioFlinger with the createTrack() method.
-//   When the AudioTrack object is released, a putOutput() query is received and the audio policy manager can decide
-//   to close or reconfigure the output depending on other streams using this output and current system state.
-//   - similarly process getInput() and putInput() queries received from AudioRecord objects and configure audio inputs.
-//   - process volume control requests: the stream volume is converted from an index value (received from UI) to a float value
-//   applicable to each output as a function of platform specific settings and current output route (destination device). It
-//   also make sure that streams are not muted if not allowed (e.g. camera shutter sound in some countries).
+//   return a handler on an output that has been selected, configured and opened by the audio
+//   policy manager and that must be used by the AudioTrack when registering to the AudioFlinger
+//   with the createTrack() method.
+//   When the AudioTrack object is released, a putOutput() query is received and the audio policy
+//   manager can decide to close or reconfigure the output depending on other streams using this
+//   output and current system state.
+//   - similarly process getInput() and putInput() queries received from AudioRecord objects and
+//   configure audio inputs.
+//   - process volume control requests: the stream volume is converted from an index value
+//   (received from UI) to a float value applicable to each output as a function of platform
+//   specific settings and current output route (destination device). It also makes sure that
+//   streams are not muted if not allowed (e.g. camera shutter sound in some countries).
 //
-// The platform specific audio policy manager is provided as a shared library by platform vendors (as for libaudio.so)
-// and is linked with libaudioflinger.so
-
+// The platform specific audio policy manager is provided as a shared library by platform vendors
+// (as for libaudio.so) and is linked with libaudioflinger.so
+//
+// NOTE: by convention, the implementation of the AudioPolicyInterface in AudioPolicyManager does
+// not have to perform any nullptr check on input arguments: The caller of this API is
+// AudioPolicyService running in the same process and in charge of validating arguments received
+// from incoming binder calls before calling AudioPolicyManager.
 
 //    Audio Policy Manager Interface
 class AudioPolicyInterface
@@ -100,7 +112,7 @@
                                               audio_format_t encodedFormat) = 0;
     // retrieve a device connection status
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
-                                                                          const char *device_address) = 0;
+                                                              const char *device_address) = 0;
     // indicate a change in device configuration
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
@@ -134,9 +146,11 @@
                                         audio_port_handle_t *portId,
                                         std::vector<audio_io_handle_t> *secondaryOutputs,
                                         output_type_t *outputType) = 0;
-    // indicates to the audio policy manager that the output starts being used by corresponding stream.
+    // indicates to the audio policy manager that the output starts being used by corresponding
+    // stream.
     virtual status_t startOutput(audio_port_handle_t portId) = 0;
-    // indicates to the audio policy manager that the output stops being used by corresponding stream.
+    // indicates to the audio policy manager that the output stops being used by corresponding
+    // stream.
     virtual status_t stopOutput(audio_port_handle_t portId) = 0;
     // releases the output, return true if the output descriptor is reopened.
     virtual bool releaseOutput(audio_port_handle_t portId) = 0;
@@ -398,10 +412,13 @@
     // Audio output Control functions
     //
 
-    // opens an audio output with the requested parameters. The parameter values can indicate to use the default values
-    // in case the audio policy manager has no specific requirements for the output being opened.
-    // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
-    // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
+    // opens an audio output with the requested parameters. The parameter values can indicate to
+    // use the default values in case the audio policy manager has no specific requirements for the
+    // output being opened.
+    // When the function returns, the parameter values reflect the actual values used by the audio
+    // hardware output stream.
+    // The audio policy manager can check if the proposed parameters are suitable or not and act
+    // accordingly.
     virtual status_t openOutput(audio_module_handle_t module,
                                 audio_io_handle_t *output,
                                 audio_config_t *halConfig,
@@ -409,13 +426,15 @@
                                 const sp<DeviceDescriptorBase>& device,
                                 uint32_t *latencyMs,
                                 audio_output_flags_t flags) = 0;
-    // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
-    // a special mixer thread in the AudioFlinger.
-    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0;
+    // creates a special output that is duplicated to the two outputs passed as arguments.
+    // The duplication is performed by a special mixer thread in the AudioFlinger.
+    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+                                                  audio_io_handle_t output2) = 0;
     // closes the output stream
     virtual status_t closeOutput(audio_io_handle_t output) = 0;
-    // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
-    // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
+    // suspends the output. When an output is suspended, the corresponding audio hardware output
+    // stream is placed in standby and the AudioTracks attached to the mixer thread are still
+    // processed but the output mix is discarded.
     virtual status_t suspendOutput(audio_io_handle_t output) = 0;
     // restores a suspended output.
     virtual status_t restoreOutput(audio_io_handle_t output) = 0;
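The openOutput comment above describes an in/out parameter contract: default (zero) values in the config ask the hardware to choose, and on return the struct holds the values actually used, which the caller may then accept or reject. A minimal sketch of that pattern with hypothetical types (not the real audio_config_t or HAL call):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the hardware configuration struct.
    struct FakeConfig {
        uint32_t sampleRate = 0;     // 0 means "let the hardware pick"
        uint32_t channelCount = 0;
    };
    using status_t = int32_t;
    constexpr status_t NO_ERROR = 0;

    // Pretend HAL: fills in defaults and reports the values it really used.
    status_t openFakeOutput(FakeConfig* config, uint32_t* latencyMs) {
        if (config->sampleRate == 0) config->sampleRate = 48000;
        if (config->channelCount == 0) config->channelCount = 2;
        *latencyMs = 20;
        return NO_ERROR;
    }

    int main() {
        FakeConfig config;          // all defaults: no specific requirement
        uint32_t latencyMs = 0;
        if (openFakeOutput(&config, &latencyMs) == NO_ERROR) {
            // The caller can now check whether the proposed values suit it.
            printf("opened at %u Hz, %u channels, %u ms latency\n",
                   config.sampleRate, config.channelCount, latencyMs);
        }
    }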
@@ -438,16 +457,21 @@
     // misc control functions
     //
 
-    // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes
+    // set a stream volume for a particular output. For the same user setting, a given stream type
+    // can have different volumes
     // for each output (destination device) it is attached to.
-    virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0) = 0;
+    virtual status_t setStreamVolume(audio_stream_type_t stream, float volume,
+                                     audio_io_handle_t output, int delayMs = 0) = 0;
 
     // invalidate a stream type, causing a reroute to an unspecified new output
     virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
 
-    // function enabling to send proprietary informations directly from audio policy manager to audio hardware interface.
-    virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0) = 0;
-    // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
+    // allows the audio policy manager to send proprietary information directly to the audio
+    // hardware interface.
+    virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs,
+                               int delayMs = 0) = 0;
+    // allows the audio policy manager to receive proprietary information directly from the
+    // audio hardware interface.
     virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
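setParameters/getParameters above shuttle opaque key/value strings between the policy manager and the audio HAL; in AOSP these strings conventionally take the "key1=value1;key2=value2" form used by AudioParameter. The sketch below builds and parses such a string with plain std::string purely to illustrate the convention, not the real AudioParameter class:

    #include <cstdio>
    #include <map>
    #include <sstream>
    #include <string>

    // Parse "key1=value1;key2=value2" into a map (illustrative only).
    std::map<std::string, std::string> parseKeyValuePairs(const std::string& kvPairs) {
        std::map<std::string, std::string> result;
        std::istringstream stream(kvPairs);
        std::string pair;
        while (std::getline(stream, pair, ';')) {
            const auto eq = pair.find('=');
            if (eq != std::string::npos) {
                result[pair.substr(0, eq)] = pair.substr(eq + 1);
            }
        }
        return result;
    }

    int main() {
        const std::string params = "routing=2;screen_state=on";
        for (const auto& [key, value] : parseKeyValuePairs(params)) {
            printf("%s -> %s\n", key.c_str(), value.c_str());
        }
    }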
 
     // set down link audio volume.
@@ -510,7 +534,8 @@
     // These are the signatures of createAudioPolicyManager/destroyAudioPolicyManager
     // methods respectively, expected by AudioPolicyService, needs to be exposed by
     // libaudiopolicymanagercustom.
-    using CreateAudioPolicyManagerInstance = AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
+    using CreateAudioPolicyManagerInstance =
+            AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
     using DestroyAudioPolicyManagerInstance = void (*)(AudioPolicyInterface*);
 
 } // namespace android
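The two function-pointer aliases above describe the entry points AudioPolicyService expects libaudiopolicymanagercustom to expose. A hedged sketch of loading such a library at runtime with dlopen/dlsym follows; the exported symbol names are assumed to match the createAudioPolicyManager/destroyAudioPolicyManager names in the comment above, and the forward declarations stand in for the real interfaces:

    #include <dlfcn.h>
    #include <cstdio>

    // Stand-ins for the real interfaces declared in AudioPolicyInterface.h.
    class AudioPolicyClientInterface;
    class AudioPolicyInterface;

    using CreateAudioPolicyManagerInstance = AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
    using DestroyAudioPolicyManagerInstance = void (*)(AudioPolicyInterface*);

    int main() {
        // Assumed library file name; gracefully bail out if it is not present.
        void* handle = dlopen("libaudiopolicymanagercustom.so", RTLD_NOW);
        if (handle == nullptr) {
            fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }
        auto create = reinterpret_cast<CreateAudioPolicyManagerInstance>(
                dlsym(handle, "createAudioPolicyManager"));
        auto destroy = reinterpret_cast<DestroyAudioPolicyManagerInstance>(
                dlsym(handle, "destroyAudioPolicyManager"));
        if (create == nullptr || destroy == nullptr) {
            fprintf(stderr, "missing entry points\n");
            dlclose(handle);
            return 1;
        }
        // A real caller would pass its AudioPolicyClientInterface to create() and
        // later hand the returned manager back to destroy().
        printf("custom policy manager entry points resolved\n");
        dlclose(handle);
        return 0;
    }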
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 7c7f02d..123011a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -307,6 +307,8 @@
     DeviceVector mDevices; /**< current devices this output is routed to */
     wp<AudioPolicyMix> mPolicyMix;  // non NULL when used by a dynamic policy
 
+    virtual uint32_t getRecommendedMuteDurationMs() const { return 0; }
+
 protected:
     const sp<PolicyAudioPort> mPolicyAudioPort;
     AudioPolicyClientInterface * const mClientInterface;
@@ -415,6 +417,8 @@
      */
     DeviceVector filterSupportedDevices(const DeviceVector &devices) const;
 
+    uint32_t getRecommendedMuteDurationMs() const override;
+
     const sp<IOProfile> mProfile;          // I/O profile this output derives from
     audio_io_handle_t mIoHandle;           // output handle
     uint32_t mLatency;                  //
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index a74cefa..81828ed 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -207,6 +207,8 @@
     // Number of streams currently active for this profile. This is not the number of active clients
     // (AudioTrack or AudioRecord) but the number of active HAL streams.
     uint32_t     curActiveCount;
+    // Recommended mute duration (in ms) while switching device on this output profile.
+    uint32_t     recommendedMuteDurationMs = 0;
 
 private:
     DeviceVector mSupportedDevices; // supported devices: this input/output can be routed from/to
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index f3d2326..34b5c1a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -670,6 +670,15 @@
     return NO_ERROR;
 }
 
+uint32_t SwAudioOutputDescriptor::getRecommendedMuteDurationMs() const
+{
+    if (isDuplicated()) {
+        return std::max(mOutput1->getRecommendedMuteDurationMs(),
+                mOutput2->getRecommendedMuteDurationMs());
+    }
+    return mProfile->recommendedMuteDurationMs;
+}
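For a duplicated output the override above takes the larger of the two downstream recommendations; later in this change, when an active output switches devices, a non-zero recommendation replaces the conservative 4x-latency mute duration. A tiny sketch of that selection logic, with plain integers standing in for output descriptors:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Pick the temporary mute duration used while switching devices: prefer the
    // HAL-recommended value when one is configured, otherwise fall back to four
    // times the reported output latency.
    uint32_t chooseMuteDurationMs(uint32_t recommendedMs, uint32_t latencyMs) {
        return recommendedMs > 0 ? recommendedMs : latencyMs * 4;
    }

    int main() {
        // Duplicated output: take the larger of the two recommendations.
        const uint32_t duplicated = std::max<uint32_t>(50, 120);
        printf("%u ms\n", chooseMuteDurationMs(duplicated, 10));  // 120 ms
        printf("%u ms\n", chooseMuteDurationMs(0, 10));           // 40 ms fallback
    }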
+
 // HwAudioOutputDescriptor implementation
 HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 1722032..c9c8ede 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -58,15 +58,6 @@
         mDeclaredAddress(DeviceDescriptorBase::address())
 {
     mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
-    /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
-     * FIXME: APM should know the version of the HAL and don't add the formats for V5.0.
-     * For now, the workaround to remove AC3 and IEC61937 support on HDMI is to declare
-     * something like 'encodedFormats="AUDIO_FORMAT_PCM_16_BIT"' on the HDMI devicePort.
-     */
-    if (mDeviceTypeAddr.mType == AUDIO_DEVICE_OUT_HDMI && mEncodedFormats.empty()) {
-        mEncodedFormats.push_back(AUDIO_FORMAT_AC3);
-        mEncodedFormats.push_back(AUDIO_FORMAT_IEC61937);
-    }
 }
 
 void DeviceDescriptor::attach(const sp<HwModule>& module)
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 09b614d..624ad95 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -137,6 +137,7 @@
              maxOpenCount, curOpenCount);
     dst->appendFormat("    - maxActiveCount: %u - curActiveCount: %u\n",
              maxActiveCount, curActiveCount);
+    dst->appendFormat("    - recommendedMuteDurationMs: %u ms\n", recommendedMuteDurationMs);
 }
 
 void IOProfile::log()
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 84ed656..4dfef73 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -123,6 +123,7 @@
         static constexpr const char *flags = "flags";
         static constexpr const char *maxOpenCount = "maxOpenCount";
         static constexpr const char *maxActiveCount = "maxActiveCount";
+        static constexpr const char *recommendedMuteDurationMs = "recommendedMuteDurationMs";
     };
 
     // Children: GainTraits
@@ -496,6 +497,13 @@
     if (!maxActiveCount.empty()) {
         convertTo(maxActiveCount, mixPort->maxActiveCount);
     }
+
+    std::string recommendedMuteDurationMsLiteral =
+            getXmlAttribute(child, Attributes::recommendedMuteDurationMs);
+    if (!recommendedMuteDurationMsLiteral.empty()) {
+        convertTo(recommendedMuteDurationMsLiteral, mixPort->recommendedMuteDurationMs);
+    }
+
     // Deserialize children
     AudioGainTraits::Collection gains;
     status = deserializeCollection<AudioGainTraits>(child, &gains, NULL);
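The hunk above adds an optional recommendedMuteDurationMs attribute to mixPort elements: when the attribute is absent the field keeps its default of 0, so the 4x-latency fallback applies. A self-contained sketch of the same optional-attribute pattern, using std::stoul in place of the real getXmlAttribute/convertTo helpers:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    struct FakeMixPort {
        uint32_t recommendedMuteDurationMs = 0;  // default when the attribute is missing
    };

    // Stand-in for getXmlAttribute(): an empty string means "attribute not present".
    void applyRecommendedMuteDuration(FakeMixPort* port, const std::string& literal) {
        if (!literal.empty()) {
            port->recommendedMuteDurationMs = static_cast<uint32_t>(std::stoul(literal));
        }
    }

    int main() {
        FakeMixPort withAttr, withoutAttr;
        applyRecommendedMuteDuration(&withAttr, "128");
        applyRecommendedMuteDuration(&withoutAttr, "");
        printf("%u %u\n", withAttr.recommendedMuteDurationMs,
               withoutAttr.recommendedMuteDurationMs);  // 128 0
    }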
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index d39eff6..665c2dd 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -69,12 +69,6 @@
           {
               {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT,
                AUDIO_FLAG_NONE, ""},
-              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
-               AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
-               AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
-              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
-               AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
               {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_EVENT,
                AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
           }
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
index bc32416..0ddf66d 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -57,9 +57,6 @@
     <ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
         <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
             <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
             <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
         </AttributesGroup>
     </ProductStrategy>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
index bc32416..0ddf66d 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -57,9 +57,6 @@
     <ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
         <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
             <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
-            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
             <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
         </AttributesGroup>
     </ProductStrategy>
diff --git a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
index 7000cd9..8584702 100644
--- a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
+++ b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
@@ -163,7 +163,9 @@
     AUDIO_FLAG_BYPASS_MUTE,    AUDIO_FLAG_LOW_LATENCY,
     AUDIO_FLAG_DEEP_BUFFER,    AUDIO_FLAG_NO_MEDIA_PROJECTION,
     AUDIO_FLAG_MUTE_HAPTIC,    AUDIO_FLAG_NO_SYSTEM_CAPTURE,
-    AUDIO_FLAG_CAPTURE_PRIVATE};
+    AUDIO_FLAG_CAPTURE_PRIVATE, AUDIO_FLAG_CONTENT_SPATIALIZED,
+    AUDIO_FLAG_NEVER_SPATIALIZE};
 
 std::vector<audio_policy_dev_state_t> kAudioPolicyDeviceStates = {
     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 6f87bf0..a46da41 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -248,9 +248,7 @@
                     // been opened by checkOutputsForDevice() to query dynamic parameters
                     if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE)
                             || (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
-                                (desc->mDirectOpenCount == 0))
-                            || (((desc->mFlags & AUDIO_OUTPUT_FLAG_SPATIALIZER) != 0) &&
-                                (desc != mSpatializerOutput))) {
+                                (desc->mDirectOpenCount == 0))) {
                         clearAudioSourcesForOutput(output);
                         closeOutput(output);
                     }
@@ -928,8 +926,7 @@
 }
 
 sp<IOProfile> AudioPolicyManager::getSpatializerOutputProfile(
-        const audio_config_t *config __unused, const AudioDeviceTypeAddrVector &devices,
-        bool forOpening) const
+        const audio_config_t *config __unused, const AudioDeviceTypeAddrVector &devices) const
 {
     for (const auto& hwModule : mHwModules) {
         for (const auto& curProfile : hwModule->getOutputProfiles()) {
@@ -947,9 +944,6 @@
                     continue;
                 }
             }
-            if (forOpening && !curProfile->canOpenNewIo()) {
-                continue;
-            }
             ALOGV("%s found profile %s", __func__, curProfile->getName().c_str());
             return curProfile;
         }
@@ -4844,6 +4838,21 @@
     return source;
 }
 
+/* static */
+bool AudioPolicyManager::isChannelMaskSpatialized(audio_channel_mask_t channels) {
+    switch (channels) {
+        case AUDIO_CHANNEL_OUT_5POINT1:
+        case AUDIO_CHANNEL_OUT_5POINT1POINT2:
+        case AUDIO_CHANNEL_OUT_5POINT1POINT4:
+        case AUDIO_CHANNEL_OUT_7POINT1:
+        case AUDIO_CHANNEL_OUT_7POINT1POINT2:
+        case AUDIO_CHANNEL_OUT_7POINT1POINT4:
+            return true;
+        default:
+            return false;
+    }
+}
+
 bool AudioPolicyManager::canBeSpatialized(const audio_attributes_t *attr,
                                       const audio_config_t *config,
                                       const AudioDeviceTypeAddrVector &devices)  const
@@ -4852,9 +4861,13 @@
     // the AUDIO_ATTRIBUTES_INITIALIZER value.
     // If attributes are specified, current policy is to only allow spatialization for media
     // and game usages.
-    if (attr != nullptr && *attr != AUDIO_ATTRIBUTES_INITIALIZER &&
-            attr->usage != AUDIO_USAGE_MEDIA && attr->usage != AUDIO_USAGE_GAME) {
-        return false;
+    if (attr != nullptr && *attr != AUDIO_ATTRIBUTES_INITIALIZER) {
+        if (attr->usage != AUDIO_USAGE_MEDIA && attr->usage != AUDIO_USAGE_GAME) {
+            return false;
+        }
+        if ((attr->flags & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) != 0) {
+            return false;
+        }
     }
 
     // The caller can have the devices criteria ignored by passing and empty vector, and
@@ -4862,7 +4875,7 @@
     // Otherwise an output profile supporting a spatializer effect that can be routed
     // to the specified devices must exist.
     sp<IOProfile> profile =
-            getSpatializerOutputProfile(config, devices, false /*forOpening*/);
+            getSpatializerOutputProfile(config, devices);
     if (profile == nullptr) {
         return false;
     }
@@ -4870,37 +4883,36 @@
     // The caller can have the audio config criteria ignored by either passing a null ptr or
     // the AUDIO_CONFIG_INITIALIZER value.
     // If an audio config is specified, current policy is to only allow spatialization for
-    // 5.1, 7.1and 7.1.4 audio.
+    // some positional channel masks.
     // If the spatializer output is already opened, only channel masks included in the
     // spatializer output mixer channel mask are allowed.
+
     if (config != nullptr && *config != AUDIO_CONFIG_INITIALIZER) {
-        if (config->channel_mask != AUDIO_CHANNEL_OUT_5POINT1
-                && config->channel_mask != AUDIO_CHANNEL_OUT_7POINT1
-                && config->channel_mask != AUDIO_CHANNEL_OUT_7POINT1POINT4) {
+        if (!isChannelMaskSpatialized(config->channel_mask)) {
             return false;
         }
-        if (mSpatializerOutput != nullptr) {
+        if (mSpatializerOutput != nullptr && mSpatializerOutput->mProfile == profile) {
             if ((config->channel_mask & mSpatializerOutput->mMixerChannelMask)
                     != config->channel_mask) {
                 return false;
             }
         }
     }
-
     return true;
 }
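Taken together, the canBeSpatialized edits above gate spatialization on three criteria when attributes and a config are specified: the usage must be MEDIA or GAME, neither AUDIO_FLAG_CONTENT_SPATIALIZED nor AUDIO_FLAG_NEVER_SPATIALIZE may be set, and the channel mask must be one of the multichannel layouts accepted by isChannelMaskSpatialized(). A condensed stand-alone sketch of that decision, using simplified stand-in enums rather than the real audio_attributes_t/audio_config_t:

    #include <cstdio>

    // Simplified stand-ins for the real Android types and constants.
    enum class Usage { Media, Game, Notification };
    enum Flags : unsigned { ContentSpatialized = 1u << 0, NeverSpatialize = 1u << 1 };
    enum class Channels { Stereo, FivePointOne, SevenPointOneFour };

    bool channelMaskSpatialized(Channels c) {
        return c == Channels::FivePointOne || c == Channels::SevenPointOneFour;
    }

    bool canBeSpatializedSketch(Usage usage, unsigned flags, Channels channels) {
        if (usage != Usage::Media && usage != Usage::Game) return false;
        if (flags & (ContentSpatialized | NeverSpatialize)) return false;
        return channelMaskSpatialized(channels);
    }

    int main() {
        printf("%d\n", canBeSpatializedSketch(Usage::Media, 0, Channels::FivePointOne));  // 1
        printf("%d\n", canBeSpatializedSketch(Usage::Media, ContentSpatialized,
                                              Channels::FivePointOne));                   // 0
        printf("%d\n", canBeSpatializedSketch(Usage::Game, 0, Channels::Stereo));         // 0
    }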
 
 void AudioPolicyManager::checkVirtualizerClientRoutes() {
     std::set<audio_stream_type_t> streamsToInvalidate;
     for (size_t i = 0; i < mOutputs.size(); i++) {
-        const sp<SwAudioOutputDescriptor>& outputDescriptor = mOutputs[i];
-        for (const sp<TrackClientDescriptor>& client : outputDescriptor->getClientIterable()) {
+        const sp<SwAudioOutputDescriptor>& desc = mOutputs[i];
+        for (const sp<TrackClientDescriptor>& client : desc->getClientIterable()) {
             audio_attributes_t attr = client->attributes();
             DeviceVector devices = mEngine->getOutputDevicesForAttributes(attr, nullptr, false);
             AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
             audio_config_base_t clientConfig = client->config();
             audio_config_t config = audio_config_initializer(&clientConfig);
-            if (canBeSpatialized(&attr, &config, devicesTypeAddress)) {
+            if (desc != mSpatializerOutput
+                    && canBeSpatialized(&attr, &config, devicesTypeAddress)) {
                 streamsToInvalidate.insert(client->stream());
             }
         }
@@ -4916,10 +4928,6 @@
                                                         audio_io_handle_t *output) {
     *output = AUDIO_IO_HANDLE_NONE;
 
-    if (mSpatializerOutput != nullptr) {
-        return INVALID_OPERATION;
-    }
-
     DeviceVector devices = mEngine->getOutputDevicesForAttributes(*attr, nullptr, false);
     AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
     audio_config_t *configPtr = nullptr;
@@ -4929,35 +4937,87 @@
         configPtr = &config;
     }
     if (!canBeSpatialized(attr, configPtr, devicesTypeAddress)) {
+        ALOGW("%s provided attributes or mixer config cannot be spatialized", __func__);
         return BAD_VALUE;
     }
 
     sp<IOProfile> profile =
-            getSpatializerOutputProfile(configPtr, devicesTypeAddress, true /*forOpening*/);
+            getSpatializerOutputProfile(configPtr, devicesTypeAddress);
     if (profile == nullptr) {
+        ALOGW("%s no suitable output profile for provided attributes or mixer config", __func__);
         return BAD_VALUE;
     }
 
-    mSpatializerOutput = new SwAudioOutputDescriptor(profile, mpClientInterface);
-    status_t status = mSpatializerOutput->open(nullptr, mixerConfig, devices,
+    if (mSpatializerOutput != nullptr && mSpatializerOutput->mProfile == profile
+            && configPtr != nullptr
+            && configPtr->channel_mask == mSpatializerOutput->mMixerChannelMask) {
+        *output = mSpatializerOutput->mIoHandle;
+        ALOGV("%s returns current spatializer output %d", __func__, *output);
+        return NO_ERROR;
+    }
+    mSpatializerOutput.clear();
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+        if (!desc->isDuplicated() && desc->mProfile == profile) {
+            mSpatializerOutput = desc;
+            break;
+        }
+    }
+    if (mSpatializerOutput == nullptr) {
+        ALOGW("%s no opened spatializer output for profile %s",
+                __func__, profile->getName().c_str());
+        return BAD_VALUE;
+    }
+
+    if (configPtr != nullptr
+            && configPtr->channel_mask != mSpatializerOutput->mMixerChannelMask) {
+        audio_config_base_t savedMixerConfig = {
+            .sample_rate = mSpatializerOutput->getSamplingRate(),
+            .format = mSpatializerOutput->getFormat(),
+            .channel_mask = mSpatializerOutput->mMixerChannelMask,
+        };
+        DeviceVector savedDevices = mSpatializerOutput->devices();
+
+        closeOutput(mSpatializerOutput->mIoHandle);
+        mSpatializerOutput.clear();
+
+        const sp<SwAudioOutputDescriptor> desc =
+                new SwAudioOutputDescriptor(profile, mpClientInterface);
+        status_t status = desc->open(nullptr, mixerConfig, devices,
                                                     mEngine->getStreamTypeForAttributes(*attr),
                                                     AUDIO_OUTPUT_FLAG_SPATIALIZER, output);
-    if (status != NO_ERROR) {
-        ALOGV("%s failed opening output: status %d, output %d", __func__, status, *output);
-        if (*output != AUDIO_IO_HANDLE_NONE) {
-            mSpatializerOutput->close();
+        if (status != NO_ERROR) {
+            ALOGW("%s failed opening output: status %d, output %d", __func__, status, *output);
+            if (*output != AUDIO_IO_HANDLE_NONE) {
+                desc->close();
+            }
+            // reopen the spatializer output with the previous channel mask
+            status_t newStatus = desc->open(nullptr, &savedMixerConfig, savedDevices,
+                                mEngine->getStreamTypeForAttributes(*attr),
+                                AUDIO_OUTPUT_FLAG_SPATIALIZER, output);
+            if (newStatus != NO_ERROR) {
+                if (*output != AUDIO_IO_HANDLE_NONE) {
+                    desc->close();
+                }
+                ALOGE("%s failed to re-open mSpatializerOutput, status %d", __func__, newStatus);
+            } else {
+                mSpatializerOutput = desc;
+                addOutput(*output, desc);
+            }
+            mPreviousOutputs = mOutputs;
+            mpClientInterface->onAudioPortListUpdate();
+            *output = AUDIO_IO_HANDLE_NONE;
+            return status;
         }
-        mSpatializerOutput.clear();
-        *output = AUDIO_IO_HANDLE_NONE;
-        return status;
+        mSpatializerOutput = desc;
+        addOutput(*output, desc);
+        mPreviousOutputs = mOutputs;
+        mpClientInterface->onAudioPortListUpdate();
     }
 
     checkVirtualizerClientRoutes();
 
-    addOutput(*output, mSpatializerOutput);
-    mPreviousOutputs = mOutputs;
-    mpClientInterface->onAudioPortListUpdate();
-
+    *output = mSpatializerOutput->mIoHandle;
     ALOGV("%s returns new spatializer output %d", __func__, *output);
     return NO_ERROR;
 }
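The reworked getSpatializerOutput above follows a reconfigure-with-rollback pattern: remember the current mixer config and devices, close the output, try to reopen it with the requested channel mask, and if that fails reopen it with the saved config so the system is never left without a spatializer output. A generic sketch of that pattern (not the real SwAudioOutputDescriptor API):

    #include <cstdio>
    #include <string>

    // Hypothetical resource whose open() can fail for some configurations.
    struct FakeOutput {
        std::string config;
        bool open(const std::string& newConfig) {
            if (newConfig == "unsupported") return false;
            config = newConfig;
            return true;
        }
        void close() { config.clear(); }
    };

    // Try the new config; on failure restore the previous one.
    bool reconfigureWithRollback(FakeOutput& out, const std::string& requested) {
        const std::string saved = out.config;  // remember the current configuration
        out.close();
        if (out.open(requested)) return true;
        if (!out.open(saved)) {
            fprintf(stderr, "rollback failed, output left closed\n");
        }
        return false;
    }

    int main() {
        FakeOutput out;
        out.open("5.1");
        const bool ok1 = reconfigureWithRollback(out, "7.1.4");
        printf("ok=%d config=%s\n", ok1, out.config.c_str());
        const bool ok2 = reconfigureWithRollback(out, "unsupported");
        printf("ok=%d config=%s\n", ok2, out.config.c_str());  // rolled back to 7.1.4
    }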
@@ -4969,8 +5029,11 @@
     if (mSpatializerOutput->mIoHandle != output) {
         return BAD_VALUE;
     }
-    closeOutput(output);
+
     mSpatializerOutput.clear();
+
+    checkVirtualizerClientRoutes();
+
     return NO_ERROR;
 }
 
@@ -5187,8 +5250,7 @@
                     outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
                 mPrimaryOutput = outputDesc;
             }
-            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0
-                || (outProfile->getFlags() & AUDIO_OUTPUT_FLAG_SPATIALIZER) != 0 ) {
+            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
                 outputDesc->close();
             } else {
                 addOutput(output, outputDesc);
@@ -6294,11 +6356,18 @@
     // different per device volumes
     if (outputDesc->isActive() && (devices != prevDevices)) {
         uint32_t tempMuteWaitMs = outputDesc->latency() * 2;
-        // temporary mute duration is conservatively set to 4 times the reported latency
-        uint32_t tempMuteDurationMs = outputDesc->latency() * 4;
+
         if (muteWaitMs < tempMuteWaitMs) {
             muteWaitMs = tempMuteWaitMs;
         }
+
+        // If a recommended mute duration is defined, use it as the temporary mute duration to
+        // avoid truncating the beginning of notifications: the required duration depends on how
+        // long the HAL takes to change the audio path.
+        // Otherwise, the temporary mute duration is conservatively set to 4 times the reported
+        // latency.
+        uint32_t tempRecommendedMuteDuration = outputDesc->getRecommendedMuteDurationMs();
+        uint32_t tempMuteDurationMs = tempRecommendedMuteDuration > 0 ?
+                tempRecommendedMuteDuration : outputDesc->latency() * 4;
+
         for (const auto &activeVs : outputDesc->getActiveVolumeSources()) {
             // make sure that we do not start the temporary mute period too early in case of
             // delayed device change
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 2b3f3b4..0eda10e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -962,8 +962,9 @@
                 audio_io_handle_t *output);
 
         sp<IOProfile> getSpatializerOutputProfile(const audio_config_t *config,
-                                                       const AudioDeviceTypeAddrVector &devices,
-                                                       bool forOpening) const;
+                                                  const AudioDeviceTypeAddrVector &devices) const;
+
+        static bool isChannelMaskSpatialized(audio_channel_mask_t channels);
 
         void checkVirtualizerClientRoutes();
 
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index 5ffddc2..cdad9a6 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -17,6 +17,7 @@
         "AudioPolicyService.cpp",
         "CaptureStateNotifier.cpp",
         "Spatializer.cpp",
+        "SpatializerPoseController.cpp",
     ],
 
     include_dirs: [
@@ -36,20 +37,27 @@
         "libcutils",
         "libeffectsconfig",
         "libhardware_legacy",
+        "libheadtracking",
+        "libheadtracking-binding",
         "liblog",
         "libmedia_helper",
         "libmediametrics",
         "libmediautils",
         "libpermission",
+        "libsensor",
         "libsensorprivacy",
         "libshmemcompat",
         "libutils",
+        "libstagefright_foundation",
+        "android.media.audio.common.types-V1-cpp",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
         "audiopolicy-types-aidl-cpp",
         "capture_state_listener-aidl-cpp",
         "framework-permission-aidl-cpp",
+        "packagemanager_aidl-cpp",
+        "spatializer-aidl-cpp",
     ],
 
     static_libs: [
@@ -74,6 +82,8 @@
 
     export_shared_lib_headers: [
         "libactivitymanager_aidl",
+        "libheadtracking",
+        "libheadtracking-binding",
         "libsensorprivacy",
         "framework-permission-aidl-cpp",
     ],
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index c8db45b..aaf6fba 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -139,7 +139,7 @@
     request.config = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_config_t_AudioConfig(*config, true /*isInput*/));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
-    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+    request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSource(source));
     request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
 
     media::OpenInputResponse response;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 95c1bab..29baa22 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -32,6 +32,9 @@
        if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
+#define RETURN_BINDER_STATUS_IF_ERROR(x) \
+    if (status_t _tmp = (x); _tmp != OK) return aidl_utils::binderStatusFromStatusT(_tmp);
+
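The new RETURN_BINDER_STATUS_IF_ERROR macro above relies on a C++17 if-statement initializer, so the temporary _tmp stays scoped to the check and the macro can be expanded several times in one function without name clashes. A minimal stand-alone illustration of the same expansion, with a plain integer status in place of binderStatusFromStatusT:

    #include <cstdio>

    using status_t = int;
    constexpr status_t OK = 0;

    // Same shape as the macro above, but returning the raw status for simplicity.
    #define RETURN_IF_ERROR(x) \
        if (status_t _tmp = (x); _tmp != OK) return _tmp;

    status_t stepA() { return OK; }
    status_t stepB() { return -32; }  // pretend failure

    status_t doWork() {
        RETURN_IF_ERROR(stepA());  // _tmp from this expansion is already out of scope...
        RETURN_IF_ERROR(stepB());  // ...so this expansion can reuse the same name
        return OK;
    }

    int main() {
        printf("doWork() = %d\n", doWork());  // prints -32
    }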
 #define RETURN_IF_BINDER_ERROR(x)      \
     {                                  \
         binder::Status _tmp = (x);     \
@@ -44,6 +47,19 @@
 using binder::Status;
 using aidl_utils::binderStatusFromStatusT;
 using content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::Int;
 
 const std::vector<audio_usage_t>& SYSTEM_USAGES = {
     AUDIO_USAGE_CALL_ASSISTANT,
@@ -96,12 +112,14 @@
 }
 
 Status AudioPolicyService::setDeviceConnectionState(
-        const media::AudioDevice& deviceAidl,
+        const AudioDevice& deviceAidl,
         media::AudioPolicyDeviceState stateAidl,
         const std::string& deviceNameAidl,
-        const media::AudioFormatDescription& encodedFormatAidl) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
+        const AudioFormatDescription& encodedFormatAidl) {
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     audio_policy_dev_state_t state = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioPolicyDeviceState_audio_policy_dev_state_t(stateAidl));
     audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
@@ -121,17 +139,20 @@
     ALOGV("setDeviceConnectionState()");
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return binderStatusFromStatusT(
-            mAudioPolicyManager->setDeviceConnectionState(device, state,
-                                                          deviceAidl.address.c_str(),
-                                                          deviceNameAidl.c_str(),
-                                                          encodedFormat));
+    status_t status = mAudioPolicyManager->setDeviceConnectionState(
+            device, state, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
+    if (status == NO_ERROR) {
+        onCheckSpatializer_l();
+    }
+    return binderStatusFromStatusT(status);
 }
 
-Status AudioPolicyService::getDeviceConnectionState(const media::AudioDevice& deviceAidl,
+Status AudioPolicyService::getDeviceConnectionState(const AudioDevice& deviceAidl,
                                                     media::AudioPolicyDeviceState* _aidl_return) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     if (mAudioPolicyManager == NULL) {
         *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
                 legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
@@ -141,17 +162,19 @@
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
-                    mAudioPolicyManager->getDeviceConnectionState(device,
-                                                                  deviceAidl.address.c_str())));
+                    mAudioPolicyManager->getDeviceConnectionState(
+                            device, address.c_str())));
     return Status::ok();
 }
 
 Status AudioPolicyService::handleDeviceConfigChange(
-        const media::AudioDevice& deviceAidl,
+        const AudioDevice& deviceAidl,
         const std::string& deviceNameAidl,
-        const media::AudioFormatDescription& encodedFormatAidl) {
-    audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
+        const AudioFormatDescription& encodedFormatAidl) {
+    audio_devices_t device;
+    std::string address;
+    RETURN_BINDER_STATUS_IF_ERROR(
+            aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
     audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
 
@@ -165,12 +188,16 @@
     ALOGV("handleDeviceConfigChange()");
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return binderStatusFromStatusT(
-            mAudioPolicyManager->handleDeviceConfigChange(device, deviceAidl.address.c_str(),
-                                                          deviceNameAidl.c_str(), encodedFormat));
+    status_t status =  mAudioPolicyManager->handleDeviceConfigChange(
+            device, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
+
+    if (status == NO_ERROR) {
+        onCheckSpatializer_l();
+    }
+    return binderStatusFromStatusT(status);
 }
 
-Status AudioPolicyService::setPhoneState(media::AudioMode stateAidl, int32_t uidAidl)
+Status AudioPolicyService::setPhoneState(AudioMode stateAidl, int32_t uidAidl)
 {
     audio_mode_t state = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioMode_audio_mode_t(stateAidl));
@@ -202,7 +229,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getPhoneState(media::AudioMode* _aidl_return) {
+Status AudioPolicyService::getPhoneState(AudioMode* _aidl_return) {
     Mutex::Autolock _l(mLock);
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_mode_t_AudioMode(mPhoneState));
     return Status::ok();
@@ -234,6 +261,7 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     mAudioPolicyManager->setForceUse(usage, config);
+    onCheckSpatializer_l();
     return Status::ok();
 }
 
@@ -257,7 +285,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getOutput(media::AudioStreamType streamAidl, int32_t* _aidl_return)
+Status AudioPolicyService::getOutput(AudioStreamType streamAidl, int32_t* _aidl_return)
 {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -281,7 +309,7 @@
 Status AudioPolicyService::getOutputForAttr(const media::AudioAttributesInternal& attrAidl,
                                             int32_t sessionAidl,
                                             const AttributionSourceState& attributionSource,
-                                            const media::AudioConfig& configAidl,
+                                            const AudioConfig& configAidl,
                                             int32_t flagsAidl,
                                             int32_t selectedDeviceIdAidl,
                                             media::GetOutputForAttrResponse* _aidl_return)
@@ -510,7 +538,7 @@
                                            int32_t riidAidl,
                                            int32_t sessionAidl,
                                            const AttributionSourceState& attributionSource,
-                                           const media::AudioConfigBase& configAidl,
+                                           const AudioConfigBase& configAidl,
                                            int32_t flagsAidl,
                                            int32_t selectedDeviceIdAidl,
                                            media::GetInputForAttrResponse* _aidl_return) {
@@ -581,28 +609,35 @@
             adjAttributionSource)));
 
     // check calling permissions.
-    // Capturing from FM_TUNER source is controlled by captureTunerAudioInputAllowed() and
-    // captureAudioOutputAllowed() (deprecated) as this does not affect users privacy
-    // as does capturing from an actual microphone.
-    if (!(recordingAllowed(adjAttributionSource, attr.source)
-            || attr.source == AUDIO_SOURCE_FM_TUNER)) {
+    // Capturing from the following sources does not require permission RECORD_AUDIO
+    // as the captured audio does not come from a microphone:
+    // - FM_TUNER source is controlled by captureTunerAudioInputAllowed() or
+    // captureAudioOutputAllowed() (deprecated).
+    // - REMOTE_SUBMIX source is controlled by captureAudioOutputAllowed() if the input
+    // type is API_INPUT_MIX_EXT_POLICY_REROUTE and by AudioService if a media projection
+    // is used and input type is API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK
+    // - ECHO_REFERENCE source is controlled by captureAudioOutputAllowed()
+    if (!(recordingAllowed(adjAttributionSource, inputSource)
+            || inputSource == AUDIO_SOURCE_FM_TUNER
+            || inputSource == AUDIO_SOURCE_REMOTE_SUBMIX
+            || inputSource == AUDIO_SOURCE_ECHO_REFERENCE)) {
         ALOGE("%s permission denied: recording not allowed for %s",
                 __func__, adjAttributionSource.toString().c_str());
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
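The comment block above lists the capture sources that do not require RECORD_AUDIO because their audio does not originate from a microphone (FM_TUNER, REMOTE_SUBMIX, ECHO_REFERENCE); those sources are instead covered by the output-capture checks performed further down. A small sketch of that gating, with a stand-in enum and a hypothetical recordingAllowed() predicate:

    #include <cstdio>

    // Stand-in for the audio_source_t values referenced in the check above.
    enum class Source { Mic, FmTuner, RemoteSubmix, EchoReference };

    // Hypothetical permission check; the real one inspects the caller's identity.
    bool recordingAllowed(bool callerHasRecordAudio) { return callerHasRecordAudio; }

    bool captureAllowed(Source source, bool callerHasRecordAudio) {
        // Non-microphone sources bypass RECORD_AUDIO; they are covered by the
        // output-capture permissions checked elsewhere.
        return recordingAllowed(callerHasRecordAudio)
                || source == Source::FmTuner
                || source == Source::RemoteSubmix
                || source == Source::EchoReference;
    }

    int main() {
        printf("%d\n", captureAllowed(Source::Mic, /*callerHasRecordAudio=*/false));      // 0
        printf("%d\n", captureAllowed(Source::FmTuner, /*callerHasRecordAudio=*/false));  // 1
    }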
 
     bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
-    if ((inputSource == AUDIO_SOURCE_VOICE_UPLINK ||
-        inputSource == AUDIO_SOURCE_VOICE_DOWNLINK ||
-        inputSource == AUDIO_SOURCE_VOICE_CALL ||
-        inputSource == AUDIO_SOURCE_ECHO_REFERENCE)
-        && !canCaptureOutput) {
+    if ((inputSource == AUDIO_SOURCE_VOICE_UPLINK
+            || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
+            || inputSource == AUDIO_SOURCE_VOICE_CALL
+            || inputSource == AUDIO_SOURCE_ECHO_REFERENCE)
+            && !canCaptureOutput) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
     if (inputSource == AUDIO_SOURCE_FM_TUNER
-        && !captureTunerAudioInputAllowed(adjAttributionSource)
-        && !canCaptureOutput) {
+        && !canCaptureOutput
+        && !captureTunerAudioInputAllowed(adjAttributionSource)) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
@@ -730,8 +765,10 @@
 
     // check calling permissions
     if (!(startRecording(client->attributionSource, String16(msg.str().c_str()),
-        client->attributes.source)
-            || client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
+                         client->attributes.source)
+            || client->attributes.source == AUDIO_SOURCE_FM_TUNER
+            || client->attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX
+            || client->attributes.source == AUDIO_SOURCE_ECHO_REFERENCE)) {
         ALOGE("%s permission denied: recording not allowed for attribution source %s",
                 __func__, client->attributionSource.toString().c_str());
         return binderStatusFromStatusT(PERMISSION_DENIED);
@@ -901,7 +938,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::initStreamVolume(media::AudioStreamType streamAidl,
+Status AudioPolicyService::initStreamVolume(AudioStreamType streamAidl,
                                             int32_t indexMinAidl,
                                             int32_t indexMaxAidl) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
@@ -924,8 +961,8 @@
     return binderStatusFromStatusT(NO_ERROR);
 }
 
-Status AudioPolicyService::setStreamVolumeIndex(media::AudioStreamType streamAidl,
-                                                const media::AudioDeviceDescription& deviceAidl,
+Status AudioPolicyService::setStreamVolumeIndex(AudioStreamType streamAidl,
+                                                const AudioDeviceDescription& deviceAidl,
                                                 int32_t indexAidl) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -949,8 +986,8 @@
                                                                              device));
 }
 
-Status AudioPolicyService::getStreamVolumeIndex(media::AudioStreamType streamAidl,
-                                                const media::AudioDeviceDescription& deviceAidl,
+Status AudioPolicyService::getStreamVolumeIndex(AudioStreamType streamAidl,
+                                                const AudioDeviceDescription& deviceAidl,
                                                 int32_t* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -974,7 +1011,7 @@
 
 Status AudioPolicyService::setVolumeIndexForAttributes(
         const media::AudioAttributesInternal& attrAidl,
-        const media::AudioDeviceDescription& deviceAidl, int32_t indexAidl) {
+        const AudioDeviceDescription& deviceAidl, int32_t indexAidl) {
     audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
     int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
@@ -997,7 +1034,7 @@
 
 Status AudioPolicyService::getVolumeIndexForAttributes(
         const media::AudioAttributesInternal& attrAidl,
-        const media::AudioDeviceDescription& deviceAidl, int32_t* _aidl_return) {
+        const AudioDeviceDescription& deviceAidl, int32_t* _aidl_return) {
     audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1055,7 +1092,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getStrategyForStream(media::AudioStreamType streamAidl,
+Status AudioPolicyService::getStrategyForStream(AudioStreamType streamAidl,
                                                 int32_t* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -1080,13 +1117,13 @@
 //audio policy: use audio_device_t appropriately
 
 Status AudioPolicyService::getDevicesForStream(
-        media::AudioStreamType streamAidl,
-        std::vector<media::AudioDeviceDescription>* _aidl_return) {
+        AudioStreamType streamAidl,
+        std::vector<AudioDeviceDescription>* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
 
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        *_aidl_return = std::vector<media::AudioDeviceDescription>{};
+        *_aidl_return = std::vector<AudioDeviceDescription>{};
         return Status::ok();
     }
     if (mAudioPolicyManager == NULL) {
@@ -1095,14 +1132,14 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDeviceDescription>>(
+            convertContainer<std::vector<AudioDeviceDescription>>(
                     mAudioPolicyManager->getDevicesForStream(stream),
                     legacy2aidl_audio_devices_t_AudioDeviceDescription));
     return Status::ok();
 }
 
 Status AudioPolicyService::getDevicesForAttributes(const media::AudioAttributesEx& attrAidl,
-                                                   std::vector<media::AudioDevice>* _aidl_return)
+                                                   std::vector<AudioDevice>* _aidl_return)
 {
     AudioAttributes aa = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioAttributesEx_AudioAttributes(attrAidl));
@@ -1116,8 +1153,8 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForAttributes(aa.getAttributes(), &devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return Status::ok();
 }
 
@@ -1203,7 +1240,7 @@
     return binderStatusFromStatusT(mAudioPolicyManager->moveEffectsToIo(ids, io));
 }
 
-Status AudioPolicyService::isStreamActive(media::AudioStreamType streamAidl, int32_t inPastMsAidl,
+Status AudioPolicyService::isStreamActive(AudioStreamType streamAidl, int32_t inPastMsAidl,
                                           bool* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -1222,7 +1259,7 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::isStreamActiveRemotely(media::AudioStreamType streamAidl,
+Status AudioPolicyService::isStreamActiveRemotely(AudioStreamType streamAidl,
                                                   int32_t inPastMsAidl,
                                                   bool* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1242,9 +1279,9 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::isSourceActive(media::AudioSourceType sourceAidl, bool* _aidl_return) {
+Status AudioPolicyService::isSourceActive(AudioSource sourceAidl, bool* _aidl_return) {
     audio_source_t source = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(sourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(sourceAidl));
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1272,7 +1309,7 @@
 
 Status AudioPolicyService::queryDefaultPreProcessing(
         int32_t audioSessionAidl,
-        media::Int* countAidl,
+        Int* countAidl,
         std::vector<media::EffectDescriptor>* _aidl_return) {
     audio_session_t audioSession = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_session_t(audioSessionAidl));
@@ -1296,11 +1333,11 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::addSourceDefaultEffect(const media::AudioUuid& typeAidl,
+Status AudioPolicyService::addSourceDefaultEffect(const AudioUuid& typeAidl,
                                                   const std::string& opPackageNameAidl,
-                                                  const media::AudioUuid& uuidAidl,
+                                                  const AudioUuid& uuidAidl,
                                                   int32_t priority,
-                                                  media::AudioSourceType sourceAidl,
+                                                  AudioSource sourceAidl,
                                                   int32_t* _aidl_return) {
     effect_uuid_t type = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(typeAidl));
@@ -1309,7 +1346,7 @@
     effect_uuid_t uuid = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(uuidAidl));
     audio_source_t source = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(sourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(sourceAidl));
     audio_unique_id_t id;
 
     sp<AudioPolicyEffects>audioPolicyEffects;
@@ -1323,10 +1360,10 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::addStreamDefaultEffect(const media::AudioUuid& typeAidl,
+Status AudioPolicyService::addStreamDefaultEffect(const AudioUuid& typeAidl,
                                                   const std::string& opPackageNameAidl,
-                                                  const media::AudioUuid& uuidAidl,
-                                                  int32_t priority, media::AudioUsage usageAidl,
+                                                  const AudioUuid& uuidAidl,
+                                                  int32_t priority, AudioUsage usageAidl,
                                                   int32_t* _aidl_return) {
     effect_uuid_t type = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioUuid_audio_uuid_t(typeAidl));
@@ -1374,7 +1411,7 @@
 }
 
 Status AudioPolicyService::setSupportedSystemUsages(
-        const std::vector<media::AudioUsage>& systemUsagesAidl) {
+        const std::vector<AudioUsage>& systemUsagesAidl) {
     size_t size = systemUsagesAidl.size();
     if (size > MAX_ITEMS_PER_LIST) {
         size = MAX_ITEMS_PER_LIST;
@@ -1413,7 +1450,7 @@
             mAudioPolicyManager->setAllowedCapturePolicy(uid, capturePolicy));
 }
 
-Status AudioPolicyService::getOffloadSupport(const media::AudioOffloadInfo& infoAidl,
+Status AudioPolicyService::getOffloadSupport(const AudioOffloadInfo& infoAidl,
                                              media::AudioOffloadMode* _aidl_return) {
     audio_offload_info_t info = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioOffloadInfo_audio_offload_info_t(infoAidl));
@@ -1429,7 +1466,7 @@
 }
 
 Status AudioPolicyService::isDirectOutputSupported(
-        const media::AudioConfigBase& configAidl,
+        const AudioConfigBase& configAidl,
         const media::AudioAttributesInternal& attributesAidl,
         bool* _aidl_return) {
     audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1453,7 +1490,7 @@
 
 
 Status AudioPolicyService::listAudioPorts(media::AudioPortRole roleAidl,
-                                          media::AudioPortType typeAidl, media::Int* count,
+                                          media::AudioPortType typeAidl, Int* count,
                                           std::vector<media::AudioPort>* portsAidl,
                                           int32_t* _aidl_return) {
     audio_port_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1541,7 +1578,7 @@
                                                    IPCThreadState::self()->getCallingUid()));
 }
 
-Status AudioPolicyService::listAudioPatches(media::Int* count,
+Status AudioPolicyService::listAudioPatches(Int* count,
                                             std::vector<media::AudioPatch>* patchesAidl,
                                             int32_t* _aidl_return) {
     unsigned int num_patches = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1678,7 +1715,7 @@
 
 Status AudioPolicyService::setUidDeviceAffinities(
         int32_t uidAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -1711,7 +1748,7 @@
 
 Status AudioPolicyService::setUserIdDeviceAffinities(
         int32_t userIdAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     int userId = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(userIdAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -1809,8 +1846,8 @@
 
 
 Status AudioPolicyService::getStreamVolumeDB(
-        media::AudioStreamType streamAidl, int32_t indexAidl,
-        const media::AudioDeviceDescription& deviceAidl, float* _aidl_return) {
+        AudioStreamType streamAidl, int32_t indexAidl,
+        const AudioDeviceDescription& deviceAidl, float* _aidl_return) {
     audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
     int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
@@ -1826,8 +1863,8 @@
     return Status::ok();
 }
 
-Status AudioPolicyService::getSurroundFormats(media::Int* count,
-        std::vector<media::AudioFormatDescription>* formats,
+Status AudioPolicyService::getSurroundFormats(Int* count,
+        std::vector<AudioFormatDescription>* formats,
         std::vector<bool>* formatsEnabled) {
     unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
             convertIntegral<unsigned int>(count->value));
@@ -1860,7 +1897,7 @@
 }
 
 Status AudioPolicyService::getReportedSurroundFormats(
-        media::Int* count, std::vector<media::AudioFormatDescription>* formats) {
+        Int* count, std::vector<AudioFormatDescription>* formats) {
     unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
             convertIntegral<unsigned int>(count->value));
     if (numSurroundFormats > MAX_ITEMS_PER_LIST) {
@@ -1887,7 +1924,7 @@
 }
 
 Status AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
-        std::vector<media::AudioFormatDescription>* _aidl_return) {
+        std::vector<AudioFormatDescription>* _aidl_return) {
     std::vector<audio_format_t> formats;
 
     if (mAudioPolicyManager == NULL) {
@@ -1898,14 +1935,14 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(&formats)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioFormatDescription>>(
+            convertContainer<std::vector<AudioFormatDescription>>(
                     formats,
                     legacy2aidl_audio_format_t_AudioFormatDescription));
     return Status::ok();
 }
 
 Status AudioPolicyService::setSurroundFormatEnabled(
-        const media::AudioFormatDescription& audioFormatAidl, bool enabled) {
+        const AudioFormatDescription& audioFormatAidl, bool enabled) {
     audio_format_t audioFormat = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioFormatDescription_audio_format_t(audioFormatAidl));
     if (mAudioPolicyManager == NULL) {
@@ -2058,7 +2095,7 @@
 Status AudioPolicyService::setDevicesRoleForStrategy(
         int32_t strategyAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     product_strategy_t strategy = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_product_strategy_t(strategyAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2071,8 +2108,11 @@
         return binderStatusFromStatusT(NO_INIT);
     }
     Mutex::Autolock _l(mLock);
-    return binderStatusFromStatusT(
-            mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices));
+    status_t status = mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices);
+    if (status == NO_ERROR) {
+        onCheckSpatializer_l();
+    }
+    return binderStatusFromStatusT(status);
 }
 
 Status AudioPolicyService::removeDevicesRoleForStrategy(int32_t strategyAidl,
@@ -2085,14 +2125,17 @@
         return binderStatusFromStatusT(NO_INIT);
     }
     Mutex::Autolock _l(mLock);
-    return binderStatusFromStatusT(
-            mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role));
+    status_t status = mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role);
+    if (status == NO_ERROR) {
+        onCheckSpatializer_l();
+    }
+    return binderStatusFromStatusT(status);
 }
 
 Status AudioPolicyService::getDevicesForRoleAndStrategy(
         int32_t strategyAidl,
         media::DeviceRole roleAidl,
-        std::vector<media::AudioDevice>* _aidl_return) {
+        std::vector<AudioDevice>* _aidl_return) {
     product_strategy_t strategy = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_product_strategy_t(strategyAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2106,8 +2149,8 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndStrategy(strategy, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return Status::ok();
 }
 
@@ -2118,11 +2161,11 @@
 }
 
 Status AudioPolicyService::setDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2138,11 +2181,11 @@
 }
 
 Status AudioPolicyService::addDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2158,11 +2201,11 @@
 }
 
 Status AudioPolicyService::removeDevicesRoleForCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        const std::vector<media::AudioDevice>& devicesAidl) {
+        const std::vector<AudioDevice>& devicesAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2177,10 +2220,10 @@
             mAudioPolicyManager->removeDevicesRoleForCapturePreset(audioSource, role, devices));
 }
 
-Status AudioPolicyService::clearDevicesRoleForCapturePreset(media::AudioSourceType audioSourceAidl,
+Status AudioPolicyService::clearDevicesRoleForCapturePreset(AudioSource audioSourceAidl,
                                                             media::DeviceRole roleAidl) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
 
@@ -2193,11 +2236,11 @@
 }
 
 Status AudioPolicyService::getDevicesForRoleAndCapturePreset(
-        media::AudioSourceType audioSourceAidl,
+        AudioSource audioSourceAidl,
         media::DeviceRole roleAidl,
-        std::vector<media::AudioDevice>* _aidl_return) {
+        std::vector<AudioDevice>* _aidl_return) {
     audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
-            aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+            aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
     device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_DeviceRole_device_role_t(roleAidl));
     AudioDeviceTypeAddrVector devices;
@@ -2209,8 +2252,8 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
-            convertContainer<std::vector<media::AudioDevice>>(devices,
-                                                              legacy2aidl_AudioDeviceTypeAddress));
+            convertContainer<std::vector<AudioDevice>>(devices,
+                                                       legacy2aidl_AudioDeviceTypeAddress));
     return Status::ok();
 }
 
@@ -2218,16 +2261,21 @@
         const sp<media::INativeSpatializerCallback>& callback,
         media::GetSpatializerResponse* _aidl_return) {
     _aidl_return->spatializer = nullptr;
-    LOG_ALWAYS_FATAL_IF(callback == nullptr);
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(mSpatializer->registerCallback(callback)));
-    _aidl_return->spatializer = mSpatializer;
+    if (callback == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
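+    // mSpatializer is created at service init only when the policy manager reports
+    // spatializer support; otherwise a null spatializer is returned to the client.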
+    if (mSpatializer != nullptr) {
+        RETURN_IF_BINDER_ERROR(
+                binderStatusFromStatusT(mSpatializer->registerCallback(callback)));
+        _aidl_return->spatializer = mSpatializer;
+    }
     return Status::ok();
 }
 
 Status AudioPolicyService::canBeSpatialized(
         const std::optional<media::AudioAttributesInternal>& attrAidl,
-        const std::optional<media::AudioConfig>& configAidl,
-        const std::vector<media::AudioDevice>& devicesAidl,
+        const std::optional<AudioConfig>& configAidl,
+        const std::vector<AudioDevice>& devicesAidl,
         bool* _aidl_return) {
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 86994ed..0b4a059 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -142,11 +142,14 @@
     sensorPrivacyPolicy->registerSelf();
 
     // Create spatializer if supported
-    const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
-    AudioDeviceTypeAddrVector devices;
-    bool hasSpatializer = mAudioPolicyManager->canBeSpatialized(&attr, nullptr, devices);
-    if (hasSpatializer) {
-        mSpatializer = Spatializer::create(this);
+    if (mAudioPolicyManager != nullptr) {
+        Mutex::Autolock _l(mLock);
+        const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
+        AudioDeviceTypeAddrVector devices;
+        bool hasSpatializer = mAudioPolicyManager->canBeSpatialized(&attr, nullptr, devices);
+        if (hasSpatializer) {
+            mSpatializer = Spatializer::create(this);
+        }
     }
     AudioSystem::audioPolicyReady();
 }
@@ -366,41 +369,52 @@
 void AudioPolicyService::onCheckSpatializer()
 {
     Mutex::Autolock _l(mLock);
-    mOutputCommandThread->checkSpatializerCommand();
+    onCheckSpatializer_l();
+}
+
+void AudioPolicyService::onCheckSpatializer_l()
+{
+    if (mSpatializer != nullptr) {
+        mOutputCommandThread->checkSpatializerCommand();
+    }
 }
 
 void AudioPolicyService::doOnCheckSpatializer()
 {
-    sp<Spatializer> spatializer;
-    {
-        Mutex::Autolock _l(mLock);
-        spatializer = mSpatializer;
+    Mutex::Autolock _l(mLock);
 
-        if (spatializer != nullptr) {
-            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            if (spatializer->getLevel() != media::SpatializationLevel::NONE
-                && spatializer->getOutput() == AUDIO_IO_HANDLE_NONE) {
-                const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
-                audio_config_base_t config = spatializer->getAudioInConfig();
-                status_t status =
-                        mAudioPolicyManager->getSpatializerOutput(&config, &attr, &output);
-                if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
-                    return;
-                }
-                mLock.unlock();
-                status = spatializer->attachOutput(output);
+    if (mSpatializer != nullptr) {
+        // Note: mSpatializer != nullptr => mAudioPolicyManager != nullptr
+        if (mSpatializer->getLevel() != media::SpatializationLevel::NONE) {
+            audio_io_handle_t currentOutput = mSpatializer->getOutput();
+            audio_io_handle_t newOutput;
+            const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
+            audio_config_base_t config = mSpatializer->getAudioInConfig();
+            status_t status =
+                    mAudioPolicyManager->getSpatializerOutput(&config, &attr, &newOutput);
+
+            if (status == NO_ERROR && currentOutput == newOutput) {
+                return;
+            }
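+            // Drop mLock before calling into the Spatializer, which takes its own lock.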
+            mLock.unlock();
+            // It is OK to call detachOutput() if none is already attached.
+            mSpatializer->detachOutput();
+            if (status != NO_ERROR || newOutput == AUDIO_IO_HANDLE_NONE) {
                 mLock.lock();
-                if (status != NO_ERROR) {
-                    mAudioPolicyManager->releaseSpatializerOutput(output);
-                }
-            } else if (spatializer->getLevel() == media::SpatializationLevel::NONE
-                                   && spatializer->getOutput() != AUDIO_IO_HANDLE_NONE) {
-                mLock.unlock();
-                output = spatializer->detachOutput();
-                mLock.lock();
-                if (output != AUDIO_IO_HANDLE_NONE) {
-                    mAudioPolicyManager->releaseSpatializerOutput(output);
-                }
+                return;
+            }
+            status = mSpatializer->attachOutput(newOutput);
+            mLock.lock();
+            if (status != NO_ERROR) {
+                mAudioPolicyManager->releaseSpatializerOutput(newOutput);
+            }
+        } else if (mSpatializer->getLevel() == media::SpatializationLevel::NONE
+                               && mSpatializer->getOutput() != AUDIO_IO_HANDLE_NONE) {
+            mLock.unlock();
+            audio_io_handle_t output = mSpatializer->detachOutput();
+            mLock.lock();
+            if (output != AUDIO_IO_HANDLE_NONE) {
+                mAudioPolicyManager->releaseSpatializerOutput(output);
             }
         }
     }
@@ -495,14 +509,14 @@
             int32_t eventAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(event));
             media::RecordClientInfo clientInfoAidl = VALUE_OR_RETURN_STATUS(
                     legacy2aidl_record_client_info_t_RecordClientInfo(*clientInfo));
-            media::AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
+            AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
                     legacy2aidl_audio_config_base_t_AudioConfigBase(
                             *clientConfig, true /*isInput*/));
             std::vector<media::EffectDescriptor> clientEffectsAidl = VALUE_OR_RETURN_STATUS(
                     convertContainer<std::vector<media::EffectDescriptor>>(
                             clientEffects,
                             legacy2aidl_effect_descriptor_t_EffectDescriptor));
-            media::AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
+            AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
                     legacy2aidl_audio_config_base_t_AudioConfigBase(
                             *deviceConfig, true /*isInput*/));
             std::vector<media::EffectDescriptor> effectsAidl = VALUE_OR_RETURN_STATUS(
@@ -511,8 +525,8 @@
                             legacy2aidl_effect_descriptor_t_EffectDescriptor));
             int32_t patchHandleAidl = VALUE_OR_RETURN_STATUS(
                     legacy2aidl_audio_patch_handle_t_int32_t(patchHandle));
-            media::AudioSourceType sourceAidl = VALUE_OR_RETURN_STATUS(
-                    legacy2aidl_audio_source_t_AudioSourceType(source));
+            media::audio::common::AudioSource sourceAidl = VALUE_OR_RETURN_STATUS(
+                    legacy2aidl_audio_source_t_AudioSource(source));
             return aidl_utils::statusTFromBinderStatus(
                     mAudioPolicyServiceClient->onRecordingConfigurationUpdate(eventAidl,
                                                                               clientInfoAidl,
@@ -894,6 +908,7 @@
     switch (source) {
         case AUDIO_SOURCE_FM_TUNER:
         case AUDIO_SOURCE_ECHO_REFERENCE:
+        case AUDIO_SOURCE_REMOTE_SUBMIX:
             return false;
         default:
             break;
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 97d4c00..03241a2 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -49,6 +49,17 @@
 namespace android {
 
 using content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::Int;
 
 // ----------------------------------------------------------------------------
 
@@ -71,25 +82,25 @@
     //
     binder::Status onNewAudioModulesAvailable() override;
     binder::Status setDeviceConnectionState(
-            const media::AudioDevice& device,
+            const AudioDevice& device,
             media::AudioPolicyDeviceState state,
             const std::string& deviceName,
-            const media::AudioFormatDescription& encodedFormat) override;
-    binder::Status getDeviceConnectionState(const media::AudioDevice& device,
+            const AudioFormatDescription& encodedFormat) override;
+    binder::Status getDeviceConnectionState(const AudioDevice& device,
                                             media::AudioPolicyDeviceState* _aidl_return) override;
     binder::Status handleDeviceConfigChange(
-            const media::AudioDevice& device,
+            const AudioDevice& device,
             const std::string& deviceName,
-            const media::AudioFormatDescription& encodedFormat) override;
-    binder::Status setPhoneState(media::AudioMode state, int32_t uid) override;
+            const AudioFormatDescription& encodedFormat) override;
+    binder::Status setPhoneState(AudioMode state, int32_t uid) override;
     binder::Status setForceUse(media::AudioPolicyForceUse usage,
                                media::AudioPolicyForcedConfig config) override;
     binder::Status getForceUse(media::AudioPolicyForceUse usage,
                                media::AudioPolicyForcedConfig* _aidl_return) override;
-    binder::Status getOutput(media::AudioStreamType stream, int32_t* _aidl_return) override;
+    binder::Status getOutput(AudioStreamType stream, int32_t* _aidl_return) override;
     binder::Status getOutputForAttr(const media::AudioAttributesInternal& attr, int32_t session,
                                     const AttributionSourceState &attributionSource,
-                                    const media::AudioConfig& config,
+                                    const AudioConfig& config,
                                     int32_t flags, int32_t selectedDeviceId,
                                     media::GetOutputForAttrResponse* _aidl_return) override;
     binder::Status startOutput(int32_t portId) override;
@@ -98,37 +109,37 @@
     binder::Status getInputForAttr(const media::AudioAttributesInternal& attr, int32_t input,
                                    int32_t riid, int32_t session,
                                    const AttributionSourceState &attributionSource,
-                                   const media::AudioConfigBase& config, int32_t flags,
+                                   const AudioConfigBase& config, int32_t flags,
                                    int32_t selectedDeviceId,
                                    media::GetInputForAttrResponse* _aidl_return) override;
     binder::Status startInput(int32_t portId) override;
     binder::Status stopInput(int32_t portId) override;
     binder::Status releaseInput(int32_t portId) override;
-    binder::Status initStreamVolume(media::AudioStreamType stream, int32_t indexMin,
+    binder::Status initStreamVolume(AudioStreamType stream, int32_t indexMin,
                                     int32_t indexMax) override;
-    binder::Status setStreamVolumeIndex(media::AudioStreamType stream,
-                                        const media::AudioDeviceDescription& device,
+    binder::Status setStreamVolumeIndex(AudioStreamType stream,
+                                        const AudioDeviceDescription& device,
                                         int32_t index) override;
-    binder::Status getStreamVolumeIndex(media::AudioStreamType stream,
-                                        const media::AudioDeviceDescription& device,
+    binder::Status getStreamVolumeIndex(AudioStreamType stream,
+                                        const AudioDeviceDescription& device,
                                         int32_t* _aidl_return) override;
     binder::Status setVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
-                                               const media::AudioDeviceDescription& device,
+                                               const AudioDeviceDescription& device,
                                                int32_t index) override;
     binder::Status getVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
-                                               const media::AudioDeviceDescription& device,
+                                               const AudioDeviceDescription& device,
                                                int32_t* _aidl_return) override;
     binder::Status getMaxVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
                                                   int32_t* _aidl_return) override;
     binder::Status getMinVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
                                                   int32_t* _aidl_return) override;
-    binder::Status getStrategyForStream(media::AudioStreamType stream,
+    binder::Status getStrategyForStream(AudioStreamType stream,
                                         int32_t* _aidl_return) override;
     binder::Status getDevicesForStream(
-            media::AudioStreamType stream,
-            std::vector<media::AudioDeviceDescription>* _aidl_return) override;
+            AudioStreamType stream,
+            std::vector<AudioDeviceDescription>* _aidl_return) override;
     binder::Status getDevicesForAttributes(const media::AudioAttributesEx& attr,
-                                           std::vector<media::AudioDevice>* _aidl_return) override;
+                                           std::vector<AudioDevice>* _aidl_return) override;
     binder::Status getOutputForEffect(const media::EffectDescriptor& desc,
                                       int32_t* _aidl_return) override;
     binder::Status registerEffect(const media::EffectDescriptor& desc, int32_t io, int32_t strategy,
@@ -136,42 +147,42 @@
     binder::Status unregisterEffect(int32_t id) override;
     binder::Status setEffectEnabled(int32_t id, bool enabled) override;
     binder::Status moveEffectsToIo(const std::vector<int32_t>& ids, int32_t io) override;
-    binder::Status isStreamActive(media::AudioStreamType stream, int32_t inPastMs,
+    binder::Status isStreamActive(AudioStreamType stream, int32_t inPastMs,
                                   bool* _aidl_return) override;
-    binder::Status isStreamActiveRemotely(media::AudioStreamType stream, int32_t inPastMs,
+    binder::Status isStreamActiveRemotely(AudioStreamType stream, int32_t inPastMs,
                                           bool* _aidl_return) override;
-    binder::Status isSourceActive(media::AudioSourceType source, bool* _aidl_return) override;
+    binder::Status isSourceActive(AudioSource source, bool* _aidl_return) override;
     binder::Status queryDefaultPreProcessing(
-            int32_t audioSession, media::Int* count,
+            int32_t audioSession, Int* count,
             std::vector<media::EffectDescriptor>* _aidl_return) override;
-    binder::Status addSourceDefaultEffect(const media::AudioUuid& type,
+    binder::Status addSourceDefaultEffect(const AudioUuid& type,
                                           const std::string& opPackageName,
-                                          const media::AudioUuid& uuid, int32_t priority,
-                                          media::AudioSourceType source,
+                                          const AudioUuid& uuid, int32_t priority,
+                                          AudioSource source,
                                           int32_t* _aidl_return) override;
-    binder::Status addStreamDefaultEffect(const media::AudioUuid& type,
+    binder::Status addStreamDefaultEffect(const AudioUuid& type,
                                           const std::string& opPackageName,
-                                          const media::AudioUuid& uuid, int32_t priority,
-                                          media::AudioUsage usage, int32_t* _aidl_return) override;
+                                          const AudioUuid& uuid, int32_t priority,
+                                          AudioUsage usage, int32_t* _aidl_return) override;
     binder::Status removeSourceDefaultEffect(int32_t id) override;
     binder::Status removeStreamDefaultEffect(int32_t id) override;
     binder::Status setSupportedSystemUsages(
-            const std::vector<media::AudioUsage>& systemUsages) override;
+            const std::vector<AudioUsage>& systemUsages) override;
     binder::Status setAllowedCapturePolicy(int32_t uid, int32_t capturePolicy) override;
-    binder::Status getOffloadSupport(const media::AudioOffloadInfo& info,
+    binder::Status getOffloadSupport(const media::audio::common::AudioOffloadInfo& info,
                                      media::AudioOffloadMode* _aidl_return) override;
-    binder::Status isDirectOutputSupported(const media::AudioConfigBase& config,
+    binder::Status isDirectOutputSupported(const AudioConfigBase& config,
                                            const media::AudioAttributesInternal& attributes,
                                            bool* _aidl_return) override;
     binder::Status listAudioPorts(media::AudioPortRole role, media::AudioPortType type,
-                                  media::Int* count, std::vector<media::AudioPort>* ports,
+                                  Int* count, std::vector<media::AudioPort>* ports,
                                   int32_t* _aidl_return) override;
     binder::Status getAudioPort(const media::AudioPort& port,
                                 media::AudioPort* _aidl_return) override;
     binder::Status createAudioPatch(const media::AudioPatch& patch, int32_t handle,
                                     int32_t* _aidl_return) override;
     binder::Status releaseAudioPatch(int32_t handle) override;
-    binder::Status listAudioPatches(media::Int* count, std::vector<media::AudioPatch>* patches,
+    binder::Status listAudioPatches(Int* count, std::vector<media::AudioPatch>* patches,
                                     int32_t* _aidl_return) override;
     binder::Status setAudioPortConfig(const media::AudioPortConfig& config) override;
     binder::Status registerClient(const sp<media::IAudioPolicyServiceClient>& client) override;
@@ -179,15 +190,15 @@
     binder::Status setAudioVolumeGroupCallbacksEnabled(bool enabled) override;
     binder::Status acquireSoundTriggerSession(media::SoundTriggerSession* _aidl_return) override;
     binder::Status releaseSoundTriggerSession(int32_t session) override;
-    binder::Status getPhoneState(media::AudioMode* _aidl_return) override;
+    binder::Status getPhoneState(AudioMode* _aidl_return) override;
     binder::Status registerPolicyMixes(const std::vector<media::AudioMix>& mixes,
                                        bool registration) override;
     binder::Status setUidDeviceAffinities(int32_t uid,
-                                          const std::vector<media::AudioDevice>& devices) override;
+                                          const std::vector<AudioDevice>& devices) override;
     binder::Status removeUidDeviceAffinities(int32_t uid) override;
     binder::Status setUserIdDeviceAffinities(
             int32_t userId,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeUserIdDeviceAffinities(int32_t userId) override;
     binder::Status startAudioSource(const media::AudioPortConfig& source,
                                     const media::AudioAttributesInternal& attributes,
@@ -195,17 +206,17 @@
     binder::Status stopAudioSource(int32_t portId) override;
     binder::Status setMasterMono(bool mono) override;
     binder::Status getMasterMono(bool* _aidl_return) override;
-    binder::Status getStreamVolumeDB(media::AudioStreamType stream, int32_t index,
-                                     const media::AudioDeviceDescription& device,
+    binder::Status getStreamVolumeDB(AudioStreamType stream, int32_t index,
+                                     const AudioDeviceDescription& device,
                                      float* _aidl_return) override;
-    binder::Status getSurroundFormats(media::Int* count,
-                                      std::vector<media::AudioFormatDescription>* formats,
+    binder::Status getSurroundFormats(Int* count,
+                                      std::vector<AudioFormatDescription>* formats,
                                       std::vector<bool>* formatsEnabled) override;
     binder::Status getReportedSurroundFormats(
-            media::Int* count, std::vector<media::AudioFormatDescription>* formats) override;
+            Int* count, std::vector<AudioFormatDescription>* formats) override;
     binder::Status getHwOffloadEncodingFormatsSupportedForA2DP(
-            std::vector<media::AudioFormatDescription>* _aidl_return) override;
-    binder::Status setSurroundFormatEnabled(const media::AudioFormatDescription& audioFormat,
+            std::vector<AudioFormatDescription>* _aidl_return) override;
+    binder::Status setSurroundFormatEnabled(const AudioFormatDescription& audioFormat,
                                             bool enabled) override;
     binder::Status setAssistantUid(int32_t uid) override;
     binder::Status setHotwordDetectionServiceUid(int32_t uid) override;
@@ -226,29 +237,29 @@
     binder::Status isCallScreenModeSupported(bool* _aidl_return) override;
     binder::Status setDevicesRoleForStrategy(
             int32_t strategy, media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeDevicesRoleForStrategy(int32_t strategy, media::DeviceRole role) override;
     binder::Status getDevicesForRoleAndStrategy(
             int32_t strategy, media::DeviceRole role,
-            std::vector<media::AudioDevice>* _aidl_return) override;
+            std::vector<AudioDevice>* _aidl_return) override;
     binder::Status setDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status addDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
+            const std::vector<AudioDevice>& devices) override;
     binder::Status removeDevicesRoleForCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            const std::vector<media::AudioDevice>& devices) override;
-    binder::Status clearDevicesRoleForCapturePreset(media::AudioSourceType audioSource,
+            const std::vector<AudioDevice>& devices) override;
+    binder::Status clearDevicesRoleForCapturePreset(AudioSource audioSource,
                                                     media::DeviceRole role) override;
     binder::Status getDevicesForRoleAndCapturePreset(
-            media::AudioSourceType audioSource,
+            AudioSource audioSource,
             media::DeviceRole role,
-            std::vector<media::AudioDevice>* _aidl_return) override;
+            std::vector<AudioDevice>* _aidl_return) override;
     binder::Status registerSoundTriggerCaptureStateListener(
             const sp<media::ICaptureStateListener>& listener, bool* _aidl_return) override;
 
@@ -256,8 +267,8 @@
             media::GetSpatializerResponse* _aidl_return) override;
     binder::Status canBeSpatialized(
             const std::optional<media::AudioAttributesInternal>& attr,
-            const std::optional<media::AudioConfig>& config,
-            const std::vector<media::AudioDevice>& devices,
+            const std::optional<AudioConfig>& config,
+            const std::vector<AudioDevice>& devices,
             bool* _aidl_return) override;
 
     status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
@@ -333,6 +344,7 @@
      * by audio policy manager and attach/detach the spatializer effect accordingly.
      */
     void onCheckSpatializer() override;
+    void onCheckSpatializer_l();
     void doOnCheckSpatializer();
 
     void setEffectSuspended(int effectId,
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 345665c..502a8d0 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -27,8 +27,11 @@
 #include <android/content/AttributionSourceState.h>
 #include <audio_utils/fixedfft.h>
 #include <cutils/bitops.h>
-#include <media/ShmemCompat.h>
+#include <hardware/sensors.h>
 #include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/ShmemCompat.h>
 #include <mediautils/ServiceUtilities.h>
 #include <utils/Thread.h>
 
@@ -41,21 +44,105 @@
 using android::content::AttributionSourceState;
 using binder::Status;
 using media::HeadTrackingMode;
+using media::Pose3f;
 using media::SpatializationLevel;
+using media::SpatializationMode;
+using media::SpatializerHeadTrackingMode;
+using media::SensorPoseProvider;
+
+using namespace std::chrono_literals;
 
 #define VALUE_OR_RETURN_BINDER_STATUS(x) \
     ({ auto _tmp = (x); \
        if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
-#define RETURN_IF_BINDER_ERROR(x)      \
-    {                                  \
-        binder::Status _tmp = (x);     \
-        if (!_tmp.isOk()) return _tmp; \
-    }
-
 // ---------------------------------------------------------------------------
 
+class Spatializer::EngineCallbackHandler : public AHandler {
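+    // Relays engine and pose-controller callbacks onto the Spatializer's ALooper thread
+    // so they are handled serially, off the caller's thread.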
+public:
+    EngineCallbackHandler(wp<Spatializer> spatializer)
+            : mSpatializer(spatializer) {
+    }
+
+    enum {
+        // Device state callbacks
+        kWhatOnFramesProcessed,    // AudioEffect::EVENT_FRAMES_PROCESSED
+        kWhatOnHeadToStagePose,    // SpatializerPoseController::Listener::onHeadToStagePose
+        kWhatOnActualModeChange,   // SpatializerPoseController::Listener::onActualModeChange
+    };
+    static constexpr const char *kNumFramesKey = "numFrames";
+    static constexpr const char *kModeKey = "mode";
+    static constexpr const char *kTranslation0Key = "translation0";
+    static constexpr const char *kTranslation1Key = "translation1";
+    static constexpr const char *kTranslation2Key = "translation2";
+    static constexpr const char *kRotation0Key = "rotation0";
+    static constexpr const char *kRotation1Key = "rotation1";
+    static constexpr const char *kRotation2Key = "rotation2";
+
+    void onMessageReceived(const sp<AMessage> &msg) override {
+        switch (msg->what()) {
+            case kWhatOnFramesProcessed: {
+                sp<Spatializer> spatializer = mSpatializer.promote();
+                if (spatializer == nullptr) {
+                    ALOGW("%s: Cannot promote spatializer", __func__);
+                    return;
+                }
+                int numFrames;
+                if (!msg->findInt32(kNumFramesKey, &numFrames)) {
+                    ALOGE("%s: Cannot find num frames!", __func__);
+                    return;
+                }
+                if (numFrames > 0) {
+                    spatializer->calculateHeadPose();
+                }
+                } break;
+            case kWhatOnHeadToStagePose: {
+                sp<Spatializer> spatializer = mSpatializer.promote();
+                if (spatializer == nullptr) {
+                    ALOGW("%s: Cannot promote spatializer", __func__);
+                    return;
+                }
+                std::vector<float> headToStage(sHeadPoseKeys.size());
+                for (size_t i = 0 ; i < sHeadPoseKeys.size(); i++) {
+                    if (!msg->findFloat(sHeadPoseKeys[i], &headToStage[i])) {
+                        ALOGE("%s: Cannot find pose key %s!", __func__, sHeadPoseKeys[i]);
+                        return;
+                    }
+                }
+                spatializer->onHeadToStagePoseMsg(headToStage);
+                } break;
+            case kWhatOnActualModeChange: {
+                sp<Spatializer> spatializer = mSpatializer.promote();
+                if (spatializer == nullptr) {
+                    ALOGW("%s: Cannot promote spatializer", __func__);
+                    return;
+                }
+                int mode;
+                if (!msg->findInt32(EngineCallbackHandler::kModeKey, &mode)) {
+                    ALOGE("%s: Cannot find actualMode!", __func__);
+                    return;
+                }
+                spatializer->onActualModeChangeMsg(static_cast<HeadTrackingMode>(mode));
+                } break;
+            default:
+                LOG_ALWAYS_FATAL("Invalid callback message %d", msg->what());
+        }
+    }
+private:
+    wp<Spatializer> mSpatializer;
+};
+
+const std::vector<const char *> Spatializer::sHeadPoseKeys = {
+    Spatializer::EngineCallbackHandler::kTranslation0Key,
+    Spatializer::EngineCallbackHandler::kTranslation1Key,
+    Spatializer::EngineCallbackHandler::kTranslation2Key,
+    Spatializer::EngineCallbackHandler::kRotation0Key,
+    Spatializer::EngineCallbackHandler::kRotation1Key,
+    Spatializer::EngineCallbackHandler::kRotation2Key,
+};
+
+// ---------------------------------------------------------------------------
 sp<Spatializer> Spatializer::create(SpatializerPolicyCallback *callback) {
     sp<Spatializer> spatializer;
 
@@ -84,28 +171,87 @@
 
     if (status == NO_ERROR && effect != nullptr) {
         spatializer = new Spatializer(descriptors[0], callback);
-        // TODO: Read supported config from engine
-        audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
-        config.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
-        spatializer->setAudioInConfig(config);
+        if (spatializer->loadEngineConfiguration(effect) != NO_ERROR) {
+            spatializer.clear();
+        }
     }
 
     return spatializer;
 }
 
-Spatializer::Spatializer(effect_descriptor_t engineDescriptor,
-                                   SpatializerPolicyCallback *callback)
-    : mEngineDescriptor(engineDescriptor), mPolicyCallback(callback) {
+Spatializer::Spatializer(effect_descriptor_t engineDescriptor, SpatializerPolicyCallback* callback)
+    : mEngineDescriptor(engineDescriptor),
+      mPolicyCallback(callback) {
     ALOGV("%s", __func__);
 }
 
+void Spatializer::onFirstRef() {
+    mLooper = new ALooper;
+    mLooper->setName("Spatializer-looper");
+    mLooper->start(
+            /*runOnCallingThread*/false,
+            /*canCallJava*/       false,
+            PRIORITY_AUDIO);
+
+    mHandler = new EngineCallbackHandler(this);
+    mLooper->registerHandler(mHandler);
+}
+
 Spatializer::~Spatializer() {
     ALOGV("%s", __func__);
+    if (mLooper != nullptr) {
+        mLooper->stop();
+        mLooper->unregisterHandler(mHandler->id());
+    }
+    mLooper.clear();
+    mHandler.clear();
+}
+
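+/** Queries the engine for head tracking support, levels, modes and channel masks. */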
+status_t Spatializer::loadEngineConfiguration(sp<EffectHalInterface> effect) {
+    ALOGV("%s", __func__);
+
+    std::vector<bool> supportsHeadTracking;
+    status_t status = getHalParameter<false>(effect, SPATIALIZER_PARAM_HEADTRACKING_SUPPORTED,
+                                         &supportsHeadTracking);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    mSupportsHeadTracking = supportsHeadTracking[0];
+
+    status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_LEVELS, &mLevels);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_SPATIALIZATION_MODES,
+                                &mSpatializationModes);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_CHANNEL_MASKS,
+                                 &mChannelMasks);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    return NO_ERROR;
+}
+
+/** Gets the channel mask, sampling rate and format set for the spatializer input. */
+audio_config_base_t Spatializer::getAudioInConfig() const {
+    std::lock_guard lock(mLock);
+    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+    // For now use highest supported channel count
+    uint32_t maxCount = 0;
+    for (auto mask : mChannelMasks) {
+        const uint32_t count = audio_channel_count_from_out_mask(mask);
+        if (count > maxCount) {
+            maxCount = count;
+            config.channel_mask = mask;
+        }
+    }
+    return config;
 }
 
 status_t Spatializer::registerCallback(
         const sp<media::INativeSpatializerCallback>& callback) {
-    Mutex::Autolock _l(mLock);
+    std::lock_guard lock(mLock);
     if (callback == nullptr) {
         return BAD_VALUE;
     }
@@ -122,7 +268,7 @@
 // IBinder::DeathRecipient
 void Spatializer::binderDied(__unused const wp<IBinder> &who) {
     {
-        Mutex::Autolock _l(mLock);
+        std::lock_guard lock(mLock);
         mLevel = SpatializationLevel::NONE;
         mSpatializerCallback.clear();
     }
@@ -136,25 +282,28 @@
     if (levels == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    //TODO: get this from engine
     levels->push_back(SpatializationLevel::NONE);
-    levels->push_back(SpatializationLevel::SPATIALIZER_MULTICHANNEL);
+    levels->insert(levels->end(), mLevels.begin(), mLevels.end());
     return Status::ok();
 }
 
-Status Spatializer::setLevel(media::SpatializationLevel level) {
+Status Spatializer::setLevel(SpatializationLevel level) {
     ALOGV("%s level %d", __func__, (int)level);
     if (level != SpatializationLevel::NONE
-            && level != SpatializationLevel::SPATIALIZER_MULTICHANNEL) {
+            && std::find(mLevels.begin(), mLevels.end(), level) == mLevels.end()) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
     sp<media::INativeSpatializerCallback> callback;
     bool levelChanged = false;
     {
-        Mutex::Autolock _l(mLock);
+        std::lock_guard lock(mLock);
         levelChanged = mLevel != level;
         mLevel = level;
         callback = mSpatializerCallback;
+
+        if (levelChanged && mEngine != nullptr) {
+            setEffectParameter_l(SPATIALIZER_PARAM_LEVEL, std::vector<SpatializationLevel>{level});
+        }
     }
 
     if (levelChanged) {
@@ -166,61 +315,109 @@
     return Status::ok();
 }
 
-Status Spatializer::getLevel(media::SpatializationLevel *level) {
+Status Spatializer::getLevel(SpatializationLevel *level) {
     if (level == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    Mutex::Autolock _l(mLock);
+    std::lock_guard lock(mLock);
     *level = mLevel;
     ALOGV("%s level %d", __func__, (int)*level);
     return Status::ok();
 }
 
+Status Spatializer::isHeadTrackingSupported(bool *supports) {
+    ALOGV("%s mSupportsHeadTracking %d", __func__, mSupportsHeadTracking);
+    if (supports == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    *supports = mSupportsHeadTracking;
+    return Status::ok();
+}
+
 Status Spatializer::getSupportedHeadTrackingModes(
-        std::vector<media::HeadTrackingMode>* modes) {
+        std::vector<SpatializerHeadTrackingMode>* modes) {
+    std::lock_guard lock(mLock);
     ALOGV("%s", __func__);
     if (modes == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    //TODO: get this from:
-    // - The engine capabilities
-    // - If a head tracking sensor is registered and linked to a connected audio device
-    // - if we have indications on the screen orientation
-    modes->push_back(HeadTrackingMode::RELATIVE_WORLD);
-    return Status::ok();
-}
 
-Status Spatializer::setDesiredHeadTrackingMode(media::HeadTrackingMode mode) {
-    ALOGV("%s level %d", __func__, (int)mode);
-    if (mode != HeadTrackingMode::DISABLED
-            && mode != HeadTrackingMode::RELATIVE_WORLD) {
-        return binderStatusFromStatusT(BAD_VALUE);
-    }
-    {
-        Mutex::Autolock _l(mLock);
-        mHeadTrackingMode = mode;
+    modes->push_back(SpatializerHeadTrackingMode::DISABLED);
+    if (mSupportsHeadTracking) {
+        if (mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
+            modes->push_back(SpatializerHeadTrackingMode::RELATIVE_WORLD);
+            if (mScreenSensor != SpatializerPoseController::INVALID_SENSOR) {
+                modes->push_back(SpatializerHeadTrackingMode::RELATIVE_SCREEN);
+            }
+        }
     }
     return Status::ok();
 }
 
-Status Spatializer::getActualHeadTrackingMode(media::HeadTrackingMode *mode) {
+Status Spatializer::setDesiredHeadTrackingMode(SpatializerHeadTrackingMode mode) {
+    ALOGV("%s mode %d", __func__, (int)mode);
+
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    switch (mode) {
+        case SpatializerHeadTrackingMode::OTHER:
+            return binderStatusFromStatusT(BAD_VALUE);
+        case SpatializerHeadTrackingMode::DISABLED:
+            mDesiredHeadTrackingMode = HeadTrackingMode::STATIC;
+            break;
+        case SpatializerHeadTrackingMode::RELATIVE_WORLD:
+            mDesiredHeadTrackingMode = HeadTrackingMode::WORLD_RELATIVE;
+            break;
+        case SpatializerHeadTrackingMode::RELATIVE_SCREEN:
+            mDesiredHeadTrackingMode = HeadTrackingMode::SCREEN_RELATIVE;
+            break;
+    }
+
+    if (mPoseController != nullptr) {
+        mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+    }
+
+    return Status::ok();
+}
+
+Status Spatializer::getActualHeadTrackingMode(SpatializerHeadTrackingMode *mode) {
     if (mode == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    Mutex::Autolock _l(mLock);
-    *mode = mHeadTrackingMode;
+    std::lock_guard lock(mLock);
+    *mode = mActualHeadTrackingMode;
     ALOGV("%s mode %d", __func__, (int)*mode);
     return Status::ok();
 }
 
-Status Spatializer::recenterHeadtracker() {
+Status Spatializer::recenterHeadTracker() {
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    if (mPoseController != nullptr) {
+        mPoseController->recenter();
+    }
     return Status::ok();
 }
 
 Status Spatializer::setGlobalTransform(const std::vector<float>& screenToStage) {
-    Mutex::Autolock _l(mLock);
-    mScreenToStageTransform = screenToStage;
     ALOGV("%s", __func__);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::optional<Pose3f> maybePose = Pose3f::fromVector(screenToStage);
+    if (!maybePose.has_value()) {
+        ALOGW("Invalid screenToStage vector.");
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    if (mPoseController != nullptr) {
+        mPoseController->setScreenToStagePose(maybePose.value());
+    }
     return Status::ok();
 }
 
@@ -228,7 +425,7 @@
     ALOGV("%s", __func__);
     bool levelChanged = false;
     {
-        Mutex::Autolock _l(mLock);
+        std::lock_guard lock(mLock);
         if (mSpatializerCallback == nullptr) {
             return binderStatusFromStatusT(INVALID_OPERATION);
         }
@@ -247,132 +444,283 @@
     return Status::ok();
 }
 
+Status Spatializer::setHeadSensor(int sensorHandle) {
+    ALOGV("%s sensorHandle %d", __func__, sensorHandle);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    mHeadSensor = sensorHandle;
+    if (mPoseController != nullptr) {
+        mPoseController->setHeadSensor(mHeadSensor);
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setScreenSensor(int sensorHandle) {
+    ALOGV("%s sensorHandle %d", __func__, sensorHandle);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    mScreenSensor = sensorHandle;
+    if (mPoseController != nullptr) {
+        mPoseController->setScreenSensor(mScreenSensor);
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setDisplayOrientation(float physicalToLogicalAngle) {
+    ALOGV("%s physicalToLogicalAngle %f", __func__, physicalToLogicalAngle);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    std::lock_guard lock(mLock);
+    mDisplayOrientation = physicalToLogicalAngle;
+    if (mPoseController != nullptr) {
+        mPoseController->setDisplayOrientation(mDisplayOrientation);
+    }
+    if (mEngine != nullptr) {
+        setEffectParameter_l(
+            SPATIALIZER_PARAM_DISPLAY_ORIENTATION, std::vector<float>{physicalToLogicalAngle});
+    }
+    return Status::ok();
+}
+
+Status Spatializer::setHingeAngle(float hingeAngle) {
+    std::lock_guard lock(mLock);
+    ALOGV("%s hingeAngle %f", __func__, hingeAngle);
+    if (mEngine != nullptr) {
+        setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, std::vector<float>{hingeAngle});
+    }
+    return Status::ok();
+}
+
+Status Spatializer::getSupportedModes(std::vector<SpatializationMode> *modes) {
+    ALOGV("%s", __func__);
+    if (modes == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    *modes = mSpatializationModes;
+    return Status::ok();
+}
+
+Status Spatializer::registerHeadTrackingCallback(
+        const sp<media::ISpatializerHeadTrackingCallback>& callback) {
+    ALOGV("%s callback %p", __func__, callback.get());
+    std::lock_guard lock(mLock);
+    if (!mSupportsHeadTracking) {
+        return binderStatusFromStatusT(INVALID_OPERATION);
+    }
+    mHeadTrackingCallback = callback;
+    return Status::ok();
+}
+
+Status Spatializer::setParameter(int key, const std::vector<unsigned char>& value) {
+    ALOGV("%s key %d", __func__, key);
+    std::lock_guard lock(mLock);
+    status_t status = INVALID_OPERATION;
+    if (mEngine != nullptr) {
+        status = setEffectParameter_l(key, value);
+    }
+    return binderStatusFromStatusT(status);
+}
+
+Status Spatializer::getParameter(int key, std::vector<unsigned char> *value) {
+    ALOGV("%s key %d value size %d", __func__, key,
+          (value != nullptr ? (int)value->size() : -1));
+    if (value == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    status_t status = INVALID_OPERATION;
+    if (mEngine != nullptr) {
+        ALOGV("%s key %d mEngine %p", __func__, key, mEngine.get());
+        status = getEffectParameter_l(key, value);
+    }
+    return binderStatusFromStatusT(status);
+}
+
+Status Spatializer::getOutput(int *output) {
+    ALOGV("%s", __func__);
+    if (output == nullptr) {
+        return binderStatusFromStatusT(BAD_VALUE);
+    }
+    std::lock_guard lock(mLock);
+    *output = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_io_handle_t_int32_t(mOutput));
+    ALOGV("%s got output %d", __func__, *output);
+    return Status::ok();
+}
+
+// SpatializerPoseController::Listener
+void Spatializer::onHeadToStagePose(const Pose3f& headToStage) {
+    ALOGV("%s", __func__);
+    LOG_ALWAYS_FATAL_IF(!mSupportsHeadTracking,
+            "onHeadToStagePose() called with no head tracking support!");
+
+    auto vec = headToStage.toVector();
+    LOG_ALWAYS_FATAL_IF(vec.size() != sHeadPoseKeys.size(),
+            "%s invalid head to stage vector size %zu", __func__, vec.size());
+
+    sp<AMessage> msg =
+            new AMessage(EngineCallbackHandler::kWhatOnHeadToStagePose, mHandler);
+    for (size_t i = 0 ; i < sHeadPoseKeys.size(); i++) {
+        msg->setFloat(sHeadPoseKeys[i], vec[i]);
+    }
+    msg->post();
+}
+
+void Spatializer::onHeadToStagePoseMsg(const std::vector<float>& headToStage) {
+    ALOGV("%s", __func__);
+    sp<media::ISpatializerHeadTrackingCallback> callback;
+    {
+        std::lock_guard lock(mLock);
+        callback = mHeadTrackingCallback;
+        if (mEngine != nullptr) {
+            setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
+        }
+    }
+
+    if (callback != nullptr) {
+        callback->onHeadToSoundStagePoseUpdated(headToStage);
+    }
+}
+
+void Spatializer::onActualModeChange(HeadTrackingMode mode) {
+    ALOGV("%s(%d)", __func__, (int)mode);
+    sp<AMessage> msg =
+            new AMessage(EngineCallbackHandler::kWhatOnActualModeChange, mHandler);
+    msg->setInt32(EngineCallbackHandler::kModeKey, static_cast<int>(mode));
+    msg->post();
+}
+
+void Spatializer::onActualModeChangeMsg(HeadTrackingMode mode) {
+    ALOGV("%s(%d)", __func__, (int) mode);
+    sp<media::ISpatializerHeadTrackingCallback> callback;
+    SpatializerHeadTrackingMode spatializerMode;
+    {
+        std::lock_guard lock(mLock);
+        if (!mSupportsHeadTracking) {
+            spatializerMode = SpatializerHeadTrackingMode::DISABLED;
+        } else {
+            switch (mode) {
+                case HeadTrackingMode::STATIC:
+                    spatializerMode = SpatializerHeadTrackingMode::DISABLED;
+                    break;
+                case HeadTrackingMode::WORLD_RELATIVE:
+                    spatializerMode = SpatializerHeadTrackingMode::RELATIVE_WORLD;
+                    break;
+                case HeadTrackingMode::SCREEN_RELATIVE:
+                    spatializerMode = SpatializerHeadTrackingMode::RELATIVE_SCREEN;
+                    break;
+                default:
+                    LOG_ALWAYS_FATAL("Unknown mode: %d", static_cast<int>(mode));
+            }
+        }
+        mActualHeadTrackingMode = spatializerMode;
+        callback = mHeadTrackingCallback;
+    }
+    if (callback != nullptr) {
+        callback->onHeadTrackingModeChanged(spatializerMode);
+    }
+}
+
 status_t Spatializer::attachOutput(audio_io_handle_t output) {
-    Mutex::Autolock _l(mLock);
-    ALOGV("%s output %d mOutput %d", __func__, (int)output, (int)mOutput);
-    if (mOutput != AUDIO_IO_HANDLE_NONE) {
-        LOG_ALWAYS_FATAL_IF(mEngine != nullptr, "%s output set without FX engine", __func__);
-        // remove FX instance
-        mEngine->setEnabled(false);
-        mEngine.clear();
+    std::shared_ptr<SpatializerPoseController> poseController;
+    {
+        std::lock_guard lock(mLock);
+        ALOGV("%s output %d mOutput %d", __func__, (int)output, (int)mOutput);
+        if (mOutput != AUDIO_IO_HANDLE_NONE) {
+            LOG_ALWAYS_FATAL_IF(mEngine == nullptr, "%s output set without FX engine", __func__);
+            // remove FX instance
+            mEngine->setEnabled(false);
+            mEngine.clear();
+        }
+        // create FX instance on output
+        AttributionSourceState attributionSource = AttributionSourceState();
+        mEngine = new AudioEffect(attributionSource);
+        mEngine->set(nullptr, &mEngineDescriptor.uuid, 0, Spatializer::engineCallback /* cbf */,
+                     this /* user */, AUDIO_SESSION_OUTPUT_STAGE, output, {} /* device */,
+                     false /* probe */, true /* notifyFramesProcessed */);
+        status_t status = mEngine->initCheck();
+        ALOGV("%s mEngine create status %d", __func__, (int)status);
+        if (status != NO_ERROR) {
+            return status;
+        }
+
+        setEffectParameter_l(SPATIALIZER_PARAM_LEVEL,
+                             std::vector<SpatializationLevel>{mLevel});
+        setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
+                             std::vector<SpatializerHeadTrackingMode>{mActualHeadTrackingMode});
+
+        mEngine->setEnabled(true);
+        mOutput = output;
+
+        if (mSupportsHeadTracking) {
+            mPoseController = std::make_shared<SpatializerPoseController>(
+                    static_cast<SpatializerPoseController::Listener*>(this), 10ms, 50ms);
+            LOG_ALWAYS_FATAL_IF(mPoseController == nullptr,
+                                "%s could not allocate pose controller", __func__);
+
+            mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+            mPoseController->setHeadSensor(mHeadSensor);
+            mPoseController->setScreenSensor(mScreenSensor);
+            mPoseController->setDisplayOrientation(mDisplayOrientation);
+            poseController = mPoseController;
+        }
     }
-    // create FX instance on output
-    AttributionSourceState attributionSource = AttributionSourceState();
-    mEngine = new AudioEffect(attributionSource);
-    mEngine->set(nullptr, &mEngineDescriptor.uuid, 0,
-                 Spatializer::engineCallback /* cbf */, this /* user */,
-                 AUDIO_SESSION_OUTPUT_STAGE, output,
-                 {} /* device */, false /* probe */, true /* notifyFramesProcessed */);
-    status_t status = mEngine->initCheck();
-    ALOGV("%s mEngine create status %d", __func__, (int)status);
-    if (status != NO_ERROR) {
-        return status;
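+    // waitUntilCalculated() can block, so it is called after releasing mLock.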
+    if (poseController != nullptr) {
+        poseController->waitUntilCalculated();
     }
-    mEngine->setEnabled(true);
-    mOutput = output;
     return NO_ERROR;
 }
 
 audio_io_handle_t Spatializer::detachOutput() {
-    Mutex::Autolock _l(mLock);
+    std::lock_guard lock(mLock);
     ALOGV("%s mOutput %d", __func__, (int)mOutput);
+    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
     if (mOutput == AUDIO_IO_HANDLE_NONE) {
-        return AUDIO_IO_HANDLE_NONE;
+        return output;
     }
     // remove FX instance
     mEngine->setEnabled(false);
     mEngine.clear();
-    audio_io_handle_t output = mOutput;
+    output = mOutput;
     mOutput = AUDIO_IO_HANDLE_NONE;
+    mPoseController.reset();
     return output;
 }
 
-void Spatializer::engineCallback(int32_t event, void *user, void *info) {
+void Spatializer::calculateHeadPose() {
+    ALOGV("%s", __func__);
+    std::lock_guard lock(mLock);
+    if (mPoseController != nullptr) {
+        mPoseController->calculateAsync();
+    }
+}
 
+void Spatializer::engineCallback(int32_t event, void *user, void *info) {
     if (user == nullptr) {
         return;
     }
-    const Spatializer * const me = reinterpret_cast<Spatializer *>(user);
+    Spatializer* const me = reinterpret_cast<Spatializer *>(user);
     switch (event) {
         case AudioEffect::EVENT_FRAMES_PROCESSED: {
-            int frames = info == nullptr ? 0 : *(int *)info;
+            int frames = info == nullptr ? 0 : *(int*)info;
             ALOGD("%s frames processed %d for me %p", __func__, frames, me);
-            } break;
+            me->postFramesProcessedMsg(frames);
+        } break;
         default:
             ALOGD("%s event %d", __func__, event);
             break;
     }
 }
 
-// ---------------------------------------------------------------------------
-
-Spatializer::EffectClient::EffectClient(const sp<media::IEffectClient>& effectClient,
-             Spatializer& parent)
-             : BnEffect(),
-             mEffectClient(effectClient), mParent(parent) {
-}
-
-Spatializer::EffectClient::~EffectClient() {
-}
-
-// IEffect
-
-#define RETURN(code) \
-  *_aidl_return = (code); \
-  return Status::ok();
-
-// Write a POD value into a vector of bytes (clears the previous buffer
-// content).
-template<typename T>
-void writeToBuffer(const T& value, std::vector<uint8_t>* buffer) {
-    buffer->clear();
-    appendToBuffer(value, buffer);
-}
-
-Status Spatializer::EffectClient::enable(int32_t* _aidl_return) {
-    RETURN(OK);
-}
-
-Status Spatializer::EffectClient::disable(int32_t* _aidl_return) {
-    RETURN(OK);
-}
-
-Status Spatializer::EffectClient::command(int32_t cmdCode,
-                                const std::vector<uint8_t>& cmdData __unused,
-                                int32_t maxResponseSize __unused,
-                                std::vector<uint8_t>* response __unused,
-                                int32_t* _aidl_return) {
-
-    // reject commands reserved for internal use by audio framework if coming from outside
-    // of audioserver
-    switch(cmdCode) {
-        case EFFECT_CMD_ENABLE:
-        case EFFECT_CMD_DISABLE:
-        case EFFECT_CMD_SET_PARAM_DEFERRED:
-        case EFFECT_CMD_SET_PARAM_COMMIT:
-            RETURN(BAD_VALUE);
-        case EFFECT_CMD_SET_PARAM:
-        case EFFECT_CMD_GET_PARAM:
-            break;
-        default:
-            if (cmdCode >= EFFECT_CMD_FIRST_PROPRIETARY) {
-                break;
-            }
-            android_errorWriteLog(0x534e4554, "62019992");
-            RETURN(BAD_VALUE);
-    }
-    (void)mParent;
-    RETURN(OK);
-}
-
-Status Spatializer::EffectClient::disconnect() {
-    mDisconnected = true;
-    return Status::ok();
-}
-
-Status Spatializer::EffectClient::getCblk(media::SharedFileRegion* _aidl_return) {
-    LOG_ALWAYS_FATAL_IF(!convertIMemoryToSharedFileRegion(mCblkMemory, _aidl_return));
-    return Status::ok();
+void Spatializer::postFramesProcessedMsg(int frames) {
+    sp<AMessage> msg =
+            new AMessage(EngineCallbackHandler::kWhatOnFramesProcessed, mHandler);
+    msg->setInt32(EngineCallbackHandler::kNumFramesKey, frames);
+    msg->post();
 }
 
 } // namespace android
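
Spatializer::postFramesProcessedMsg() above forwards the per-callback frame count to an ALooper-based handler. The actual EngineCallbackHandler implementation is not part of this hunk, so the following is only a rough sketch of what such a handler could look like; the message id and key name are assumptions. Posting through the looper keeps any follow-up work off the AudioEffect callback thread.

    #define LOG_TAG "SpatializerExample"
    #include <media/stagefright/foundation/AHandler.h>
    #include <media/stagefright/foundation/AMessage.h>
    #include <utils/Log.h>

    namespace android {

    // Hypothetical handler shape; not the real EngineCallbackHandler.
    struct ExampleFramesProcessedHandler : public AHandler {
        static constexpr uint32_t kWhatOnFramesProcessed = 'frpr';  // assumed message id
        static constexpr const char* kNumFramesKey = "numFrames";   // assumed key name

        void onMessageReceived(const sp<AMessage>& msg) override {
            if (msg->what() == kWhatOnFramesProcessed) {
                int32_t frames = 0;
                if (msg->findInt32(kNumFramesKey, &frames)) {
                    ALOGV("engine reported %d frames processed", frames);
                }
            }
        }
    };

    }  // namespace android
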
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index f0ab605..4d77b78 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -19,12 +19,15 @@
 
 #include <android/media/BnEffect.h>
 #include <android/media/BnSpatializer.h>
-#include <android/media/HeadTrackingMode.h>
 #include <android/media/SpatializationLevel.h>
-
+#include <android/media/SpatializationMode.h>
+#include <android/media/SpatializerHeadTrackingMode.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/stagefright/foundation/ALooper.h>
 #include <media/AudioEffect.h>
 #include <system/audio_effects/effect_spatializer.h>
 
+#include "SpatializerPoseController.h"
 
 namespace android {
 
@@ -80,24 +83,41 @@
  * made to audio policy manager to release and close the spatializer output stream and the
  * spatializer mixer thread is destroyed.
  */
-class Spatializer : public media::BnSpatializer, public IBinder::DeathRecipient {
-public:
-
+class Spatializer : public media::BnSpatializer,
+                    public IBinder::DeathRecipient,
+                    private SpatializerPoseController::Listener {
+  public:
     static sp<Spatializer> create(SpatializerPolicyCallback *callback);
 
            ~Spatializer() override;
 
+    /** RefBase */
+    void onFirstRef();
+
     /** ISpatializer, see ISpatializer.aidl */
     binder::Status release() override;
     binder::Status getSupportedLevels(std::vector<media::SpatializationLevel>* levels) override;
     binder::Status setLevel(media::SpatializationLevel level) override;
     binder::Status getLevel(media::SpatializationLevel *level) override;
+    binder::Status isHeadTrackingSupported(bool *supports);
     binder::Status getSupportedHeadTrackingModes(
-            std::vector<media::HeadTrackingMode>* modes) override;
-    binder::Status setDesiredHeadTrackingMode(media::HeadTrackingMode mode) override;
-    binder::Status getActualHeadTrackingMode(media::HeadTrackingMode *mode) override;
-    binder::Status recenterHeadtracker() override;
+            std::vector<media::SpatializerHeadTrackingMode>* modes) override;
+    binder::Status setDesiredHeadTrackingMode(
+            media::SpatializerHeadTrackingMode mode) override;
+    binder::Status getActualHeadTrackingMode(
+            media::SpatializerHeadTrackingMode* mode) override;
+    binder::Status recenterHeadTracker() override;
     binder::Status setGlobalTransform(const std::vector<float>& screenToStage) override;
+    binder::Status setHeadSensor(int sensorHandle) override;
+    binder::Status setScreenSensor(int sensorHandle) override;
+    binder::Status setDisplayOrientation(float physicalToLogicalAngle) override;
+    binder::Status setHingeAngle(float hingeAngle) override;
+    binder::Status getSupportedModes(std::vector<media::SpatializationMode>* modes) override;
+    binder::Status registerHeadTrackingCallback(
+        const sp<media::ISpatializerHeadTrackingCallback>& callback) override;
+    binder::Status setParameter(int key, const std::vector<unsigned char>& value) override;
+    binder::Status getParameter(int key, std::vector<unsigned char> *value) override;
+    binder::Status getOutput(int *output);
 
     /** IBinder::DeathRecipient. Listen to the death of the INativeSpatializerCallback. */
     virtual void binderDied(const wp<IBinder>& who);
@@ -107,8 +127,10 @@
      */
     status_t registerCallback(const sp<media::INativeSpatializerCallback>& callback);
 
+    status_t loadEngineConfiguration(sp<EffectHalInterface> effect);
+
     /** Level getter for use by local classes. */
-    media::SpatializationLevel getLevel() const { Mutex::Autolock _l(mLock); return mLevel; }
+    media::SpatializationLevel getLevel() const { std::lock_guard lock(mLock); return mLevel; }
 
     /** Called by audio policy service when the special output mixer dedicated to spatialization
      * is opened and the spatializer engine must be created.
@@ -119,84 +141,194 @@
      */
     audio_io_handle_t detachOutput();
     /** Returns the output stream the spatializer is attached to. */
-    audio_io_handle_t getOutput() const { Mutex::Autolock _l(mLock); return mOutput; }
-
-    /** Sets the channel mask, sampling rate and format for the spatializer input. */
-    void setAudioInConfig(const audio_config_base_t& config) {
-        Mutex::Autolock _l(mLock);
-        mAudioInConfig = config;
-    }
+    audio_io_handle_t getOutput() const { std::lock_guard lock(mLock); return mOutput; }
 
     /** Gets the channel mask, sampling rate and format set for the spatializer input. */
-    audio_config_base_t getAudioInConfig() const {
-        Mutex::Autolock _l(mLock);
-        return mAudioInConfig;
-    }
+    audio_config_base_t getAudioInConfig() const;
 
-    /** An implementation of an IEffect interface that can be used to pass advanced parameters to
-     * the spatializer engine. All APis are noop (i.e. the interface cannot be used to control
-     * the effect) except for passing parameters via the command() API. */
-    class EffectClient: public android::media::BnEffect {
-    public:
-
-        EffectClient(const sp<media::IEffectClient>& effectClient,
-                     Spatializer& parent);
-        virtual ~EffectClient();
-
-        // IEffect
-        android::binder::Status enable(int32_t* _aidl_return) override;
-        android::binder::Status disable(int32_t* _aidl_return) override;
-        android::binder::Status command(int32_t cmdCode,
-                                        const std::vector<uint8_t>& cmdData,
-                                        int32_t maxResponseSize,
-                                        std::vector<uint8_t>* response,
-                                        int32_t* _aidl_return) override;
-        android::binder::Status disconnect() override;
-        android::binder::Status getCblk(media::SharedFileRegion* _aidl_return) override;
-
-    private:
-        const sp<media::IEffectClient> mEffectClient;
-        sp<IMemory> mCblkMemory;
-        const Spatializer& mParent;
-        bool mDisconnected = false;
-    };
+    void calculateHeadPose();
 
 private:
-
     Spatializer(effect_descriptor_t engineDescriptor,
                      SpatializerPolicyCallback *callback);
 
-
     static void engineCallback(int32_t event, void* user, void *info);
 
+    // From SpatializerPoseController::Listener
+    void onHeadToStagePose(const media::Pose3f& headToStage) override;
+    void onActualModeChange(media::HeadTrackingMode mode) override;
+
+    void onHeadToStagePoseMsg(const std::vector<float>& headToStage);
+    void onActualModeChangeMsg(media::HeadTrackingMode mode);
+
+    static constexpr int kMaxEffectParamValues = 10;
+    /**
+     * Get a parameter from the spatializer engine by calling the effect HAL command method
+     * directly. To be used when the engine instance mEngine has not yet been created in the
+     * effect framework.
+     * When MULTI_VALUES is false, the expected reply is a single value of type T.
+     * When MULTI_VALUES is true, the expected reply starts with a count (of type T) indicating
+     * how many values are returned, followed by that many values of type T.
+     */
+    template<bool MULTI_VALUES, typename T>
+    status_t getHalParameter(sp<EffectHalInterface> effect, uint32_t type,
+                                          std::vector<T> *values) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+        uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1];
+        uint32_t reply[sizeof(effect_param_t) / sizeof(uint32_t) + 2 + kMaxEffectParamValues];
+
+        effect_param_t *p = (effect_param_t *)cmd;
+        p->psize = sizeof(uint32_t);
+        if (MULTI_VALUES) {
+            p->vsize = (kMaxEffectParamValues + 1) * sizeof(T);
+        } else {
+            p->vsize = sizeof(T);
+        }
+        *(uint32_t *)p->data = type;
+        uint32_t replySize = sizeof(effect_param_t) + p->psize + p->vsize;
+
+        status_t status = effect->command(EFFECT_CMD_GET_PARAM,
+                                          sizeof(effect_param_t) + sizeof(uint32_t), cmd,
+                                          &replySize, reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        if (p->status != NO_ERROR) {
+            return p->status;
+        }
+        if (replySize <
+                sizeof(effect_param_t) + sizeof(uint32_t) + (MULTI_VALUES ? 2 : 1) * sizeof(T)) {
+            return BAD_VALUE;
+        }
+
+        T *params = (T *)((uint8_t *)reply + sizeof(effect_param_t) + sizeof(uint32_t));
+        int numParams = 1;
+        if (MULTI_VALUES) {
+            numParams = (int)*params++;
+        }
+        if (numParams > kMaxEffectParamValues) {
+            return BAD_VALUE;
+        }
+        (*values).clear();
+        std::copy(&params[0], &params[numParams], back_inserter(*values));
+        return NO_ERROR;
+    }
+
+    /**
+     * Set a parameter on the spatializer engine by calling setParameter on the mEngine
+     * AudioEffect object.
+     * It is possible to pass more than one value of type T, depending on the parameter type,
+     * by sizing the values vector accordingly.
+     */
+    template<typename T>
+    status_t setEffectParameter_l(uint32_t type, const std::vector<T>& values) REQUIRES(mLock) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+        uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values.size()];
+        effect_param_t *p = (effect_param_t *)cmd;
+        p->psize = sizeof(uint32_t);
+        p->vsize = sizeof(T) * values.size();
+        *(uint32_t *)p->data = type;
+        memcpy((uint32_t *)p->data + 1, values.data(), sizeof(T) * values.size());
+
+        status_t status = mEngine->setParameter(p);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        if (p->status != NO_ERROR) {
+            return p->status;
+        }
+        return NO_ERROR;
+    }
+
+    /**
+     * Get a parameter from the spatializer engine by calling getParameter on the mEngine
+     * AudioEffect object.
+     * It is possible to read more than one value of type T, depending on the parameter type,
+     * by sizing the values vector before the call.
+     */
+    template<typename T>
+    status_t getEffectParameter_l(uint32_t type, std::vector<T> *values) REQUIRES(mLock) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+        uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values->size()];
+        effect_param_t *p = (effect_param_t *)cmd;
+        p->psize = sizeof(uint32_t);
+        p->vsize = sizeof(T) * values->size();
+        *(uint32_t *)p->data = type;
+
+        status_t status = mEngine->getParameter(p);
+
+        if (status != NO_ERROR) {
+            return status;
+        }
+        if (p->status != NO_ERROR) {
+            return p->status;
+        }
+
+        int numValues = std::min(p->vsize / sizeof(T), values->size());
+        (*values).clear();
+        T *retValues = (T *)((uint8_t *)p->data + sizeof(uint32_t));
+        std::copy(&retValues[0], &retValues[numValues], back_inserter(*values));
+
+        return NO_ERROR;
+    }
+
+    void postFramesProcessedMsg(int frames);
+
     /** Effect engine descriptor */
     const effect_descriptor_t mEngineDescriptor;
     /** Callback interface to parent audio policy service */
     SpatializerPolicyCallback* mPolicyCallback;
 
     /** Mutex protecting internal state */
-    mutable Mutex mLock;
+    mutable std::mutex mLock;
 
     /** Client AudioEffect for the engine */
     sp<AudioEffect> mEngine GUARDED_BY(mLock);
     /** Output stream the spatializer mixer thread is attached to */
     audio_io_handle_t mOutput GUARDED_BY(mLock) = AUDIO_IO_HANDLE_NONE;
-    /** Virtualizer engine input configuration */
-    audio_config_base_t mAudioInConfig GUARDED_BY(mLock) = AUDIO_CONFIG_BASE_INITIALIZER;
 
     /** Callback interface to the client (AudioService) controlling this`Spatializer */
     sp<media::INativeSpatializerCallback> mSpatializerCallback GUARDED_BY(mLock);
 
+    /** Callback interface for head tracking */
+    sp<media::ISpatializerHeadTrackingCallback> mHeadTrackingCallback GUARDED_BY(mLock);
+
     /** Requested spatialization level */
     media::SpatializationLevel mLevel GUARDED_BY(mLock) = media::SpatializationLevel::NONE;
-    /** Requested head tracking mode */
-    media::HeadTrackingMode mHeadTrackingMode GUARDED_BY(mLock)
-            = media::HeadTrackingMode::DISABLED;
-    /** Configured screen to stage transform */
-    std::vector<float> mScreenToStageTransform GUARDED_BY(mLock);
 
-    /** Extended IEffect interface is one has been created */
-    sp<EffectClient> mEffectClient GUARDED_BY(mLock);
+    /** Control logic for head-tracking, etc. */
+    std::shared_ptr<SpatializerPoseController> mPoseController GUARDED_BY(mLock);
+
+    /** Last requested head tracking mode */
+    media::HeadTrackingMode mDesiredHeadTrackingMode GUARDED_BY(mLock)
+            = media::HeadTrackingMode::STATIC;
+
+    /** Last-reported actual head-tracking mode. */
+    media::SpatializerHeadTrackingMode mActualHeadTrackingMode GUARDED_BY(mLock)
+            = media::SpatializerHeadTrackingMode::DISABLED;
+
+    /** Selected head pose sensor */
+    int32_t mHeadSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+
+    /** Selected screen pose sensor */
+    int32_t mScreenSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+
+    /** Last display orientation received */
+    static constexpr float kDisplayOrientationInvalid = 1000;
+    float mDisplayOrientation GUARDED_BY(mLock) = kDisplayOrientationInvalid;
+
+    std::vector<media::SpatializationLevel> mLevels;
+    std::vector<media::SpatializationMode> mSpatializationModes;
+    std::vector<audio_channel_mask_t> mChannelMasks;
+    bool mSupportsHeadTracking;
+
+    // Looper thread for mEngine callbacks
+    class EngineCallbackHandler;
+
+    sp<ALooper> mLooper;
+    sp<EngineCallbackHandler> mHandler;
+
+    static const std::vector<const char *> sHeadPoseKeys;
 };
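
The three templates above all build the same effect_param_t layout: the header, then the 32-bit parameter key, then the raw value payload. A minimal standalone sketch of that packing, assuming the usual system audio effect header for effect_param_t:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    #include <system/audio_effect.h>  // effect_param_t (assumed include path)

    // Packs [effect_param_t header][uint32_t key][raw values], mirroring what
    // setEffectParameter_l() / getEffectParameter_l() place in their stack buffers.
    std::vector<uint8_t> packEffectParam(uint32_t type, const std::vector<uint8_t>& values) {
        std::vector<uint8_t> blob(sizeof(effect_param_t) + sizeof(uint32_t) + values.size());
        effect_param_t* p = reinterpret_cast<effect_param_t*>(blob.data());
        p->psize = sizeof(uint32_t);                      // size of the parameter key
        p->vsize = static_cast<uint32_t>(values.size());  // size of the value payload
        std::memcpy(p->data, &type, sizeof(type));        // the key comes first in data[]
        std::memcpy(p->data + sizeof(uint32_t), values.data(), values.size());
        return blob;
    }

The real helpers additionally check both the transport status and p->status, because the engine writes its own result code back into the header.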
 
 
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
new file mode 100644
index 0000000..eb23298
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "SpatializerPoseController.h"
+
+#define LOG_TAG "SpatializerPoseController"
+//#define LOG_NDEBUG 0
+#include <sensor/Sensor.h>
+#include <utils/Log.h>
+#include <utils/SystemClock.h>
+
+namespace android {
+
+using media::createHeadTrackingProcessor;
+using media::HeadTrackingMode;
+using media::HeadTrackingProcessor;
+using media::Pose3f;
+using media::SensorPoseProvider;
+using media::Twist3f;
+
+using namespace std::chrono_literals;
+
+namespace {
+
+// This is how fast, in m/s, we allow position to shift during rate-limiting.
+constexpr auto kMaxTranslationalVelocity = 2;
+
+// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
+constexpr auto kMaxRotationalVelocity = 4 * M_PI;
+
+// This should be set to the typical time scale over which the translation sensors in use drift.
+// This means, loosely, for how long we can trust the reading to be "accurate enough". It
+// determines the time constants used for high-pass filtering those readings. If the value is set
+// too high, we may experience drift. If it is set too low, we may experience poses tending toward
+// identity too fast.
+constexpr auto kTranslationalDriftTimeConstant = 20s;
+
+// This should be set to the typical time scale over which the rotation sensors in use drift.
+// This means, loosely, for how long we can trust the reading to be "accurate enough". It
+// determines the time constants used for high-pass filtering those readings. If the value is set
+// too high, we may experience drift. If it is set too low, we may experience poses tending toward
+// identity too fast.
+constexpr auto kRotationalDriftTimeConstant = 20s;
+
+// This is how far into the future we predict the head pose, using linear extrapolation based on
+// twist (velocity). It should be set to a value that matches the characteristic durations of moving
+// one's head. The higher we set this, the more latency we are able to reduce, but setting this too
+// high will result in high prediction errors whenever the head accelerates (changes velocity).
+constexpr auto kPredictionDuration = 10ms;
+
+// After losing this many consecutive samples from either sensor, we treat the measurement as
+// stale.
+constexpr auto kMaxLostSamples = 4;
+
+// Time units for system clock ticks. This is what the Sensor Framework timestamps represent and
+// what we use for pose filtering.
+using Ticks = std::chrono::nanoseconds;
+
+// How many ticks in a second.
+constexpr auto kTicksPerSecond = Ticks::period::den;
+
+}  // namespace
+
+SpatializerPoseController::SpatializerPoseController(Listener* listener,
+                                                     std::chrono::microseconds sensorPeriod,
+                                                     std::chrono::microseconds maxUpdatePeriod)
+    : mListener(listener),
+      mSensorPeriod(sensorPeriod),
+      mProcessor(createHeadTrackingProcessor(HeadTrackingProcessor::Options{
+              .maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
+              .maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
+              .translationalDriftTimeConstant = Ticks(kTranslationalDriftTimeConstant).count(),
+              .rotationalDriftTimeConstant = Ticks(kRotationalDriftTimeConstant).count(),
+              .freshnessTimeout = Ticks(sensorPeriod * kMaxLostSamples).count(),
+              .predictionDuration = Ticks(kPredictionDuration).count(),
+      })),
+      mPoseProvider(SensorPoseProvider::create("headtracker", this)),
+      mThread([this, maxUpdatePeriod] {
+          while (true) {
+              Pose3f headToStage;
+              std::optional<HeadTrackingMode> modeIfChanged;
+              {
+                  std::unique_lock lock(mMutex);
+                  mCondVar.wait_for(lock, maxUpdatePeriod,
+                                    [this] { return mShouldExit || mShouldCalculate; });
+                  if (mShouldExit) {
+                      ALOGV("Exiting thread");
+                      return;
+                  }
+
+                  // Calculate.
+                  std::tie(headToStage, modeIfChanged) = calculate_l();
+              }
+
+              // Invoke the callbacks outside the lock.
+              mListener->onHeadToStagePose(headToStage);
+              if (modeIfChanged) {
+                  mListener->onActualModeChange(modeIfChanged.value());
+              }
+
+              {
+                  std::lock_guard lock(mMutex);
+                  if (!mCalculated) {
+                      mCalculated = true;
+                      mCondVar.notify_all();
+                  }
+                  mShouldCalculate = false;
+              }
+          }
+      }) {}
+
+SpatializerPoseController::~SpatializerPoseController() {
+    {
+        std::unique_lock lock(mMutex);
+        mShouldExit = true;
+        mCondVar.notify_all();
+    }
+    mThread.join();
+}
+
+void SpatializerPoseController::setHeadSensor(int32_t sensor) {
+    std::lock_guard lock(mMutex);
+    // Stop current sensor, if valid and different from the other sensor.
+    if (mHeadSensor != INVALID_SENSOR && mHeadSensor != mScreenSensor) {
+        mPoseProvider->stopSensor(mHeadSensor);
+    }
+
+    if (sensor != INVALID_SENSOR) {
+        if (sensor != mScreenSensor) {
+            // Start new sensor.
+            mHeadSensor =
+                    mPoseProvider->startSensor(sensor, mSensorPeriod) ? sensor : INVALID_SENSOR;
+        } else {
+            // Sensor is already enabled.
+            mHeadSensor = mScreenSensor;
+        }
+    } else {
+        mHeadSensor = INVALID_SENSOR;
+    }
+
+    mProcessor->recenter(true, false);
+}
+
+void SpatializerPoseController::setScreenSensor(int32_t sensor) {
+    std::lock_guard lock(mMutex);
+    // Stop current sensor, if valid and different from the other sensor.
+    if (mScreenSensor != INVALID_SENSOR && mScreenSensor != mHeadSensor) {
+        mPoseProvider->stopSensor(mScreenSensor);
+    }
+
+    if (sensor != INVALID_SENSOR) {
+        if (sensor != mHeadSensor) {
+            // Start new sensor.
+            mScreenSensor =
+                    mPoseProvider->startSensor(sensor, mSensorPeriod) ? sensor : INVALID_SENSOR;
+        } else {
+            // Sensor is already enabled.
+            mScreenSensor = mHeadSensor;
+        }
+    } else {
+        mScreenSensor = INVALID_SENSOR;
+    }
+
+    mProcessor->recenter(false, true);
+}
+
+void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) {
+    std::lock_guard lock(mMutex);
+    mProcessor->setDesiredMode(mode);
+}
+
+void SpatializerPoseController::setScreenToStagePose(const Pose3f& screenToStage) {
+    std::lock_guard lock(mMutex);
+    mProcessor->setScreenToStagePose(screenToStage);
+}
+
+void SpatializerPoseController::setDisplayOrientation(float physicalToLogicalAngle) {
+    std::lock_guard lock(mMutex);
+    mProcessor->setDisplayOrientation(physicalToLogicalAngle);
+}
+
+void SpatializerPoseController::calculateAsync() {
+    std::lock_guard lock(mMutex);
+    mShouldCalculate = true;
+    mCondVar.notify_all();
+}
+
+void SpatializerPoseController::waitUntilCalculated() {
+    std::unique_lock lock(mMutex);
+    mCondVar.wait(lock, [this] { return mCalculated; });
+}
+
+std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>>
+SpatializerPoseController::calculate_l() {
+    Pose3f headToStage;
+    HeadTrackingMode mode;
+    std::optional<media::HeadTrackingMode> modeIfChanged;
+
+    mProcessor->calculate(elapsedRealtimeNano());
+    headToStage = mProcessor->getHeadToStagePose();
+    mode = mProcessor->getActualMode();
+    if (!mActualMode.has_value() || mActualMode.value() != mode) {
+        mActualMode = mode;
+        modeIfChanged = mode;
+    }
+    return std::make_tuple(headToStage, modeIfChanged);
+}
+
+void SpatializerPoseController::recenter() {
+    std::lock_guard lock(mMutex);
+    mProcessor->recenter();
+}
+
+void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
+                                       const std::optional<Twist3f>& twist) {
+    std::lock_guard lock(mMutex);
+    if (sensor == mHeadSensor) {
+        mProcessor->setWorldToHeadPose(timestamp, pose, twist.value_or(Twist3f()));
+    }
+    if (sensor == mScreenSensor) {
+        mProcessor->setWorldToScreenPose(timestamp, pose);
+    }
+}
+
+}  // namespace android
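
The HeadTrackingProcessor::Options block above converts per-second limits into per-tick (nanosecond) units by dividing by kTicksPerSecond. A small standalone check of that arithmetic:

    #include <chrono>
    #include <cstdio>

    int main() {
        using Ticks = std::chrono::nanoseconds;
        constexpr double kMaxTranslationalVelocity = 2;       // m/s, as configured above
        constexpr auto kTicksPerSecond = Ticks::period::den;  // 1'000'000'000 ticks per second
        constexpr double perTick = kMaxTranslationalVelocity / kTicksPerSecond;
        std::printf("%.1e m per tick\n", perTick);            // prints 2.0e-09
        return 0;
    }
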
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
new file mode 100644
index 0000000..c579622
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <condition_variable>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include <media/HeadTrackingProcessor.h>
+#include <media/SensorPoseProvider.h>
+
+namespace android {
+
+/**
+ * This class encapsulates the logic for pose processing, intended for driving a spatializer effect.
+ * This includes integration with the Sensor sub-system for retrieving sensor data, doing all the
+ * necessary processing, etc.
+ *
+ * Calculations happen on a dedicated thread and published to the client via the Listener interface.
+ * A calculation may be triggered in one of two ways:
+ * - By calling calculateAsync() - calculation will be kicked off in the background.
+ * - By setting a timeout in the ctor, a calculation will be triggered after the timeout has
+ *   elapsed since the last calculateAsync() call.
+ *
+ * This class is thread-safe.
+ */
+class SpatializerPoseController : private media::SensorPoseProvider::Listener {
+  public:
+    static constexpr int32_t INVALID_SENSOR = media::SensorPoseProvider::INVALID_HANDLE;
+
+    /**
+     * Listener interface for getting pose and mode updates.
+     * Methods will always be invoked from a designated thread.
+     */
+    class Listener {
+      public:
+        virtual ~Listener() = default;
+
+        virtual void onHeadToStagePose(const media::Pose3f&) = 0;
+        virtual void onActualModeChange(media::HeadTrackingMode) = 0;
+    };
+
+    /**
+     * Ctor.
+     * sensorPeriod determines how often to receive updates from the sensors (input rate).
+     * maxUpdatePeriod determines how often to produce an output when calculateAsync() isn't
+     * invoked.
+     */
+    SpatializerPoseController(Listener* listener, std::chrono::microseconds sensorPeriod,
+                               std::chrono::microseconds maxUpdatePeriod);
+
+    /** Dtor. */
+    ~SpatializerPoseController();
+
+    /**
+     * Set the sensor that is to be used for head-tracking.
+     * INVALID_SENSOR can be used to disable head-tracking.
+     */
+    void setHeadSensor(int32_t sensor);
+
+    /**
+     * Set the sensor that is to be used for screen-tracking.
+     * INVALID_SENSOR can be used to disable screen-tracking.
+     */
+    void setScreenSensor(int32_t sensor);
+
+    /** Sets the desired head-tracking mode. */
+    void setDesiredMode(media::HeadTrackingMode mode);
+
+    /**
+     * Set the screen-to-stage pose, used in all modes.
+     */
+    void setScreenToStagePose(const media::Pose3f& screenToStage);
+
+    /**
+     * Sets the display orientation.
+     * Orientation is expressed as the angle of rotation from the physical "up" side of the screen
+     * to the logical "up" side of the content displayed on the screen. Counterclockwise angles,
+     * as viewed while facing the screen, are positive.
+     */
+    void setDisplayOrientation(float physicalToLogicalAngle);
+
+    /**
+     * This causes the current poses for both the head and screen to be considered "center".
+     */
+    void recenter();
+
+    /**
+     * This call triggers the recalculation of the output and the invocation of the relevant
+     * callbacks. This call is async and the callbacks will be triggered shortly after.
+     */
+    void calculateAsync();
+
+    /**
+     * Blocks until calculation and invocation of the respective callbacks has happened at least
+     * once. Do not call from within callbacks.
+     */
+    void waitUntilCalculated();
+
+  private:
+    mutable std::mutex mMutex;
+    Listener* const mListener;
+    const std::chrono::microseconds mSensorPeriod;
+    // Order matters for the following two members to ensure correct destruction.
+    std::unique_ptr<media::HeadTrackingProcessor> mProcessor;
+    std::unique_ptr<media::SensorPoseProvider> mPoseProvider;
+    int32_t mHeadSensor = media::SensorPoseProvider::INVALID_HANDLE;
+    int32_t mScreenSensor = media::SensorPoseProvider::INVALID_HANDLE;
+    std::optional<media::HeadTrackingMode> mActualMode;
+    std::thread mThread;
+    std::condition_variable mCondVar;
+    bool mShouldCalculate = true;
+    bool mShouldExit = false;
+    bool mCalculated = false;
+
+    void onPose(int64_t timestamp, int32_t sensor, const media::Pose3f& pose,
+                const std::optional<media::Twist3f>& twist) override;
+
+    /**
+     * Calculates the new outputs and updates internal state. Must be called with the lock held.
+     * Returns values that should be passed to the respective callbacks.
+     */
+    std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>> calculate_l();
+};
+
+}  // namespace android
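
A usage sketch for the interface above, mirroring how Spatializer::attachOutput() drives the controller; ExamplePoseListener and examplePoseControllerUsage are illustrative names only, and constructing the controller assumes a reachable sensor service:

    #include <chrono>

    #include "SpatializerPoseController.h"

    using namespace std::chrono_literals;

    namespace android {

    // Hypothetical listener, used only for illustration.
    struct ExamplePoseListener : public SpatializerPoseController::Listener {
        void onHeadToStagePose(const media::Pose3f& /*headToStage*/) override {
            // Consume the new head-to-stage pose, e.g. forward it to the effect engine.
        }
        void onActualModeChange(media::HeadTrackingMode /*mode*/) override {
            // React to the actual head-tracking mode changing.
        }
    };

    void examplePoseControllerUsage() {
        ExamplePoseListener listener;
        // Same periods as Spatializer::attachOutput(): 10ms sensor period, 50ms max update period.
        SpatializerPoseController controller(&listener, 10ms, 50ms);
        controller.setHeadSensor(SpatializerPoseController::INVALID_SENSOR);  // head tracking off
        controller.calculateAsync();       // kick an asynchronous calculation
        controller.waitUntilCalculated();  // block until the first callback has fired
    }

    }  // namespace android
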
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
index a5ad9b1..7343b9b 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
@@ -28,19 +28,26 @@
 class AudioPolicyManagerTestClientForHdmi : public AudioPolicyManagerTestClient {
 public:
     String8 getParameters(audio_io_handle_t /* ioHandle */, const String8&  /* keys*/ ) override {
+        AudioParameter mAudioParameters;
+        std::string formats;
+        for (const auto& f : mSupportedFormats) {
+            if (!formats.empty()) formats += AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
+            formats += audio_format_to_string(f);
+        }
+        mAudioParameters.add(
+                String8(AudioParameter::keyStreamSupportedFormats),
+                String8(formats.c_str()));
+        mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
+        mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
         return mAudioParameters.toString();
     }
 
     void addSupportedFormat(audio_format_t format) override {
-        mAudioParameters.add(
-                String8(AudioParameter::keyStreamSupportedFormats),
-                String8(audio_format_to_string(format)));
-        mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
-        mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
+        mSupportedFormats.insert(format);
     }
 
 private:
-    AudioParameter mAudioParameters;
+    std::set<audio_format_t> mSupportedFormats;
 };
 
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index a16ab7d..9c1adc6 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -151,7 +151,7 @@
 void AudioPolicyManagerTest::SetUp() {
     mClient.reset(getClient());
     mManager.reset(new AudioPolicyTestManager(mClient.get()));
-    SetUpManagerConfig();  // Subclasses may want to customize the config.
+    ASSERT_NO_FATAL_FAILURE(SetUpManagerConfig());  // Subclasses may want to customize the config.
     ASSERT_EQ(NO_ERROR, mManager->initialize());
     ASSERT_EQ(NO_ERROR, mManager->initCheck());
 }
@@ -401,7 +401,7 @@
 
 void AudioPolicyManagerTestMsd::SetUpManagerConfig() {
     // TODO: Consider using Serializer to load part of the config from a string.
-    AudioPolicyManagerTest::SetUpManagerConfig();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTest::SetUpManagerConfig());
     AudioPolicyConfig& config = mManager->getConfig();
     mMsdOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_BUS);
     sp<AudioProfile> pcmOutputProfile = new AudioProfile(
@@ -660,6 +660,7 @@
 void AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig() {
     status_t status = deserializeAudioPolicyFile(getConfigFile().c_str(), &mManager->getConfig());
     ASSERT_EQ(NO_ERROR, status);
+    mManager->getConfig().setSource(getConfigFile());
 }
 
 TEST_F(AudioPolicyManagerTestWithConfigurationFile, InitSuccess) {
@@ -803,7 +804,8 @@
 }
 
 class AudioPolicyManagerTestForHdmi
-        : public AudioPolicyManagerTestWithConfigurationFile {
+        : public AudioPolicyManagerTestWithConfigurationFile,
+          public testing::WithParamInterface<audio_format_t> {
 protected:
     void SetUp() override;
     std::string getConfigFile() override { return sTvConfig; }
@@ -824,7 +826,8 @@
         "test_settop_box_surround_configuration.xml";
 
 void AudioPolicyManagerTestForHdmi::SetUp() {
-    AudioPolicyManagerTest::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTest::SetUp());
+    mClient->addSupportedFormat(AUDIO_FORMAT_AC3);
     mClient->addSupportedFormat(AUDIO_FORMAT_E_AC3);
     mManager->setDeviceConnectionState(
             AUDIO_DEVICE_OUT_HDMI, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
@@ -914,76 +917,90 @@
     return formats;
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
+TEST_P(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
     auto surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetSurroundFormatsReturnsManipulatedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
     status_t ret =
-            mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+            mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     auto surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_TRUE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_TRUE(surroundFormats[GetParam()]);
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getSurroundFormatsHelper();
-    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
-    ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         ListAudioPortsReturnManipulatedHdmiFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
-    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/));
+    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/));
     auto formats = getFormatsFromPorts();
-    ASSERT_EQ(0, formats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(0, formats.count(GetParam()));
 
-    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/));
+    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/));
     formats = getFormatsFromPorts();
-    ASSERT_EQ(1, formats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, formats.count(GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetReportedSurroundFormatsReturnsHdmiReportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
     auto surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 }
 
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
         GetReportedSurroundFormatsReturnsNonManipulatedHdmiReportedFormats) {
     mManager->setForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
 
-    status_t ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    status_t ret = mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     auto surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 
-    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+    ret = mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/);
     ASSERT_EQ(NO_ERROR, ret);
     surroundFormats = getReportedSurroundFormatsHelper();
-    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+    ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
 }
 
+TEST_P(AudioPolicyManagerTestForHdmi, GetSurroundFormatsIgnoresSupportedFormats) {
+    mManager->setForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER);
+    auto surroundFormats = getSurroundFormatsHelper();
+    ASSERT_EQ(1, surroundFormats.count(GetParam()));
+    ASSERT_FALSE(surroundFormats[GetParam()]);
+}
+
+INSTANTIATE_TEST_SUITE_P(SurroundFormatSupport, AudioPolicyManagerTestForHdmi,
+        testing::Values(AUDIO_FORMAT_AC3, AUDIO_FORMAT_E_AC3),
+        [](const ::testing::TestParamInfo<AudioPolicyManagerTestForHdmi::ParamType>& info) {
+            return audio_format_to_string(info.param);
+        });
+
 class AudioPolicyManagerTestDPNoRemoteSubmixModule : public AudioPolicyManagerTestDynamicPolicy {
 protected:
     std::string getConfigFile() override { return sPrimaryOnlyConfig; }
@@ -1035,7 +1052,7 @@
 };
 
 void AudioPolicyManagerTestDPPlaybackReRouting::SetUp() {
-    AudioPolicyManagerTestDynamicPolicy::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestDynamicPolicy::SetUp());
 
     mTracker.reset(new RecordingActivityTracker());
 
@@ -1221,7 +1238,7 @@
 };
 
 void AudioPolicyManagerTestDPMixRecordInjection::SetUp() {
-    AudioPolicyManagerTestDynamicPolicy::SetUp();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestDynamicPolicy::SetUp());
 
     mTracker.reset(new RecordingActivityTracker());
 
@@ -1375,7 +1392,8 @@
     if (type == AUDIO_DEVICE_OUT_HDMI) {
         // Set device connection state failed due to no device descriptor found
         // For HDMI case, it is easier to simulate device descriptor not found error
-        // by using a undeclared encoded format.
+        // by using an encoded format which isn't listed in the 'encodedFormats'
+        // attribute for this devicePort.
         ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
                 type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                 address.c_str(), name.c_str(), AUDIO_FORMAT_MAT_2_1));
@@ -1519,7 +1537,7 @@
 };
 
 void AudioPolicyManagerDynamicHwModulesTest::SetUpManagerConfig() {
-    AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig();
+    ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig());
     // Only allow successful opening of "primary" hw module during APM initialization.
     mClient->swapAllowedModuleNames({"primary"});
 }
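
With the parameterization above, gtest expands each TEST_P into one test per format and names it through the generator lambda. Assuming audio_format_to_string() returns the enum name, the full test names look roughly like:

    SurroundFormatSupport/AudioPolicyManagerTestForHdmi.GetSurroundFormatsReturnsSupportedFormats/AUDIO_FORMAT_AC3
    SurroundFormatSupport/AudioPolicyManagerTestForHdmi.GetSurroundFormatsReturnsSupportedFormats/AUDIO_FORMAT_E_AC3
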
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
index 87f0ab9..41ed70c 100644
--- a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
+++ b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
@@ -50,7 +50,8 @@
                 </devicePort>
                 <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
                 </devicePort>
-                <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink">
+                <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink"
+                            encodedFormats="AUDIO_FORMAT_AC3">
                 </devicePort>
                 <devicePort tagName="Hdmi-In Mic" type="AUDIO_DEVICE_IN_HDMI" role="source">
                 </devicePort>
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 19a111d..54f6520 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1336,7 +1336,7 @@
                 auto clientSp = current->getValue();
                 if (clientSp.get() != nullptr) { // should never be needed
                     if (!clientSp->canCastToApiClient(effectiveApiLevel)) {
-                        ALOGW("CameraService connect called from same client, but with a different"
+                        ALOGW("CameraService connect called with a different"
                                 " API level, evicting prior client...");
                     } else if (clientSp->getRemote() == remoteCallback) {
                         ALOGI("CameraService::connect X (PID %d) (second call from same"
@@ -1836,8 +1836,9 @@
         // Set rotate-and-crop override behavior
         if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) {
             client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
-        } else if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(clientPackageName,
-                    orientation, facing)) {
+        } else if ((effectiveApiLevel == API_2) &&
+                CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(clientPackageName,
+                        orientation, facing) ) {
             client->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
         }
 
@@ -2077,6 +2078,11 @@
                     id.string());
                 errorCode = ERROR_ILLEGAL_ARGUMENT;
                 break;
+            case -EBUSY:
+                msg = String8::format("Camera \"%s\" is in use",
+                    id.string());
+                errorCode = ERROR_CAMERA_IN_USE;
+                break;
             default:
                 msg = String8::format(
                     "Setting torch mode of camera \"%s\" to %d failed: %s (%d)",
@@ -2201,7 +2207,6 @@
     newDeviceState |= vendorBits;
 
     ALOGV("%s: New device state 0x%" PRIx64, __FUNCTION__, newDeviceState);
-    Mutex::Autolock l(mServiceLock);
     mCameraProviderManager->notifyDeviceStateChange(newDeviceState);
 
     return Status::ok();
@@ -2235,7 +2240,7 @@
     for (auto& current : clients) {
         if (current != nullptr) {
             const auto basicClient = current->getValue();
-            if (basicClient.get() != nullptr) {
+            if (basicClient.get() != nullptr && basicClient->canCastToApiClient(API_2)) {
                 if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
                             basicClient->getPackageName(), basicClient->getCameraOrientation(),
                             basicClient->getCameraFacing())) {
@@ -2396,7 +2401,8 @@
         Mutex::Autolock lock(mCameraStatesLock);
         for (auto& i : mCameraStates) {
             cameraStatuses->emplace_back(i.first,
-                    mapToInterface(i.second->getStatus()), i.second->getUnavailablePhysicalIds());
+                    mapToInterface(i.second->getStatus()), i.second->getUnavailablePhysicalIds(),
+                    openCloseCallbackAllowed ? i.second->getClientPackage() : String8::empty());
         }
     }
     // Remove the camera statuses that should be hidden from the client, we do
@@ -3766,6 +3772,16 @@
     return count > 0;
 }
 
+void CameraService::CameraState::setClientPackage(const String8& clientPackage) {
+    Mutex::Autolock lock(mStatusLock);
+    mClientPackage = clientPackage;
+}
+
+String8 CameraService::CameraState::getClientPackage() const {
+    Mutex::Autolock lock(mStatusLock);
+    return mClientPackage;
+}
+
 // ----------------------------------------------------------------------------
 //                  ClientEventListener
 // ----------------------------------------------------------------------------
@@ -4329,6 +4345,18 @@
 
 void CameraService::updateOpenCloseStatus(const String8& cameraId, bool open,
         const String16& clientPackageName) {
+    auto state = getCameraState(cameraId);
+    if (state == nullptr) {
+        ALOGW("%s: Could not update the status for %s, no such device exists", __FUNCTION__,
+                cameraId.string());
+        return;
+    }
+    if (open) {
+        state->setClientPackage(String8(clientPackageName));
+    } else {
+        state->setClientPackage(String8::empty());
+    }
+
     Mutex::Autolock lock(mStatusListenerLock);
 
     for (const auto& it : mListenerList) {
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 224287e..7a69123 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -635,6 +635,12 @@
         bool removeUnavailablePhysicalId(const String8& physicalId);
 
         /**
+         * Set and get client package name.
+         */
+        void setClientPackage(const String8& clientPackage);
+        String8 getClientPackage() const;
+
+        /**
          * Return the unavailable physical ids for this device.
          *
          * This method acquires mStatusLock.
@@ -646,6 +652,7 @@
         const int mCost;
         std::set<String8> mConflicting;
         std::set<String8> mUnavailablePhysicalIds;
+        String8 mClientPackage;
         mutable Mutex mStatusLock;
         CameraParameters mShimParams;
         const SystemCameraKind mSystemCameraKind;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 80508e4..a406e62 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -864,7 +864,6 @@
 
     if (fabs(maxDigitalZoom.data.f[0] - 1.f) > 0.00001f) {
         params.set(CameraParameters::KEY_ZOOM, zoom);
-        params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
 
         {
             String8 zoomRatios;
@@ -872,18 +871,34 @@
             float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
                     (NUM_ZOOM_STEPS-1);
             bool addComma = false;
-            for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
+            int previousZoom = -1;
+            size_t zoomSteps = 0;
+            for (size_t i = 0; i < NUM_ZOOM_STEPS; i++) {
+                int currentZoom = static_cast<int>(zoom * 100);
+                if (previousZoom == currentZoom) {
+                    zoom += zoomIncrement;
+                    continue;
+                }
                 if (addComma) zoomRatios += ",";
                 addComma = true;
-                zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
+                zoomRatios += String8::format("%d", currentZoom);
                 zoom += zoomIncrement;
+                previousZoom = currentZoom;
+                zoomSteps++;
             }
-            params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+
+            if (zoomSteps > 0) {
+                params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+                params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                        CameraParameters::TRUE);
+                params.set(CameraParameters::KEY_MAX_ZOOM, zoomSteps - 1);
+                zoomAvailable = true;
+            } else {
+                params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                        CameraParameters::FALSE);
+            }
         }
 
-        params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
-                CameraParameters::TRUE);
-        zoomAvailable = true;
     } else {
         params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
                 CameraParameters::FALSE);
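
The de-duplication in the zoom-ratio loop above exists because, for a small maxDigitalZoom, consecutive steps can round to the same integer ratio (zoom * 100). A standalone sketch of the same loop; the step count and zoom range here are illustrative, not the real NUM_ZOOM_STEPS:

    #include <cstdio>
    #include <vector>

    int main() {
        constexpr size_t kNumZoomSteps = 100;  // illustrative; the real NUM_ZOOM_STEPS may differ
        const float maxDigitalZoom = 1.2f;     // small digital zoom range
        float zoom = 1.0f;
        const float zoomIncrement = (maxDigitalZoom - zoom) / (kNumZoomSteps - 1);
        std::vector<int> ratios;
        int previousZoom = -1;
        for (size_t i = 0; i < kNumZoomSteps; i++) {
            const int currentZoom = static_cast<int>(zoom * 100);
            zoom += zoomIncrement;
            if (currentZoom == previousZoom) continue;  // duplicate ratio, skipped as above
            ratios.push_back(currentZoom);
            previousZoom = currentZoom;
        }
        // Only the distinct ratios (roughly 100..120 here) are advertised, and KEY_MAX_ZOOM is
        // set to ratios.size() - 1 rather than NUM_ZOOM_STEPS - 1.
        std::printf("distinct zoom steps: %zu of %zu\n", ratios.size(), kNumZoomSteps);
        return 0;
    }
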
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index e2f8d011..1b2ceda 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -205,7 +205,7 @@
     static const int MAX_INITIAL_PREVIEW_WIDTH = 1920;
     static const int MAX_INITIAL_PREVIEW_HEIGHT = 1080;
     // Aspect ratio tolerance
-    static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.001;
+    static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.01;
     // Threshold for slow jpeg mode
     static const int64_t kSlowJpegModeThreshold = 33400000LL; // 33.4 ms
     // Margin for checking FPS
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 4f2b878..9abb972 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -359,7 +359,13 @@
     for (auto& provider : mProviders) {
         ALOGV("%s: Notifying %s for new state 0x%" PRIx64,
                 __FUNCTION__, provider->mProviderName.c_str(), newState);
+        // b/199240726 Camera providers can, for example, try to add/remove
+        // camera devices as part of the state change notification. Holding
+        // 'mInterfaceMutex' while calling 'notifyDeviceStateChange' can
+        // result in a recursive deadlock.
+        mInterfaceMutex.unlock();
         status_t singleRes = provider->notifyDeviceStateChange(mDeviceState);
+        mInterfaceMutex.lock();
         if (singleRes != OK) {
             ALOGE("%s: Unable to notify provider %s about device state change",
                     __FUNCTION__,
@@ -367,6 +373,7 @@
             res = singleRes;
             // continue to do the rest of the providers instead of returning now
         }
+        provider->notifyDeviceInfoStateChangeLocked(mDeviceState);
     }
     return res;
 }
@@ -1185,10 +1192,12 @@
 }
 
 bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
     return isHiddenPhysicalCameraInternal(cameraId).first;
 }
 
 status_t CameraProviderManager::filterSmallJpegSizes(const std::string& cameraId) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
     for (auto& provider : mProviders) {
         for (auto& deviceInfo : provider->mDevices) {
             if (deviceInfo->mId == cameraId) {
@@ -2031,6 +2040,14 @@
     return OK;
 }
 
+void CameraProviderManager::ProviderInfo::notifyDeviceInfoStateChangeLocked(
+        hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
+    std::lock_guard<std::mutex> lock(mLock);
+    for (auto it = mDevices.begin(); it != mDevices.end(); it++) {
+        (*it)->notifyDeviceStateChange(newDeviceState);
+    }
+}
+
 status_t CameraProviderManager::ProviderInfo::notifyDeviceStateChange(
         hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
     mDeviceState = newDeviceState;
@@ -2285,6 +2302,18 @@
         return;
     }
 
+    if (mCameraCharacteristics.exists(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS)) {
+        const auto &stateMap = mCameraCharacteristics.find(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS);
+        if ((stateMap.count > 0) && ((stateMap.count % 2) == 0)) {
+            for (size_t i = 0; i < stateMap.count; i += 2) {
+                mDeviceStateOrientationMap.emplace(stateMap.data.i64[i], stateMap.data.i64[i+1]);
+            }
+        } else {
+            ALOGW("%s: Invalid ANDROID_INFO_DEVICE_STATE_ORIENTATIONS map size: %zu", __FUNCTION__,
+                    stateMap.count);
+        }
+    }
+
     mSystemCameraKind = getSystemCameraKind();
 
     status_t res = fixupMonochromeTags();
@@ -2413,6 +2442,16 @@
 
 CameraProviderManager::ProviderInfo::DeviceInfo3::~DeviceInfo3() {}
 
+void CameraProviderManager::ProviderInfo::DeviceInfo3::notifyDeviceStateChange(
+        hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> newState) {
+
+    if (!mDeviceStateOrientationMap.empty() &&
+            (mDeviceStateOrientationMap.find(newState) != mDeviceStateOrientationMap.end())) {
+        mCameraCharacteristics.update(ANDROID_SENSOR_ORIENTATION,
+                &mDeviceStateOrientationMap[newState], 1);
+    }
+}
+
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::setTorchMode(bool enabled) {
     return setTorchModeForDevice<InterfaceT>(enabled);
 }
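
The unlock/relock around notifyDeviceStateChange above is the classic unlock-around-callback pattern: the callee may re-enter the manager and try to take mInterfaceMutex again, so the lock is dropped for the duration of the call. A minimal stand-alone sketch of the pattern, using hypothetical Manager/Provider types rather than the real camera classes:

#include <functional>
#include <mutex>
#include <vector>

struct Provider {
    // A callback that may re-enter the manager and try to take its lock again.
    std::function<void()> onStateChange;
};

class Manager {
  public:
    void notifyAll() {
        std::unique_lock<std::mutex> lock(mInterfaceMutex);
        // Iterate over a snapshot so the callback cannot invalidate this loop.
        std::vector<Provider> providers = mProviders;
        for (auto& provider : providers) {
            lock.unlock();   // drop the lock across the callback to avoid recursive deadlock
            provider.onStateChange();
            lock.lock();     // re-acquire before touching manager state again
        }
    }

  private:
    std::mutex mInterfaceMutex;
    std::vector<Provider> mProviders;
};
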
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 1bdbb44..e3763a1 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -40,7 +40,6 @@
 #include <camera/VendorTagDescriptor.h>
 
 namespace android {
-
 /**
  * The vendor tag descriptor class that takes HIDL vendor tag information as
  * input. Not part of VendorTagDescriptor class because that class is used
@@ -284,12 +283,6 @@
             sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session);
 
     /**
-     * Save the ICameraProvider while it is being used by a camera or torch client
-     */
-    void saveRef(DeviceMode usageType, const std::string &cameraId,
-            sp<hardware::camera::provider::V2_4::ICameraProvider> provider);
-
-    /**
      * Notify that the camera or torch is no longer being used by a camera client
      */
     void removeRef(DeviceMode usageType, const std::string &cameraId);
@@ -434,6 +427,10 @@
 
         /**
          * Notify provider about top-level device physical state changes
+         *
+         * Note that 'mInterfaceMutex' should not be held when calling this method.
+         * It is possible for camera providers to add/remove devices and try to
+         * acquire it.
          */
         status_t notifyDeviceStateChange(
                 hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
@@ -442,6 +439,15 @@
         std::vector<std::unordered_set<std::string>> getConcurrentCameraIdCombinations();
 
         /**
+         * Notify 'DeviceInfo' instances about top-level device physical state changes
+         *
+         * Note that 'mInterfaceMutex' should be held when calling this method.
+         */
+        void notifyDeviceInfoStateChangeLocked(
+               hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+                   newDeviceState);
+
+        /**
          * Query the camera provider for concurrent stream configuration support
          */
         status_t isConcurrentSessionConfigurationSupported(
@@ -493,6 +499,9 @@
                 return INVALID_OPERATION;
             }
             virtual status_t filterSmallJpegSizes() = 0;
+            virtual void notifyDeviceStateChange(
+                    hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+                        /*newState*/) {}
 
             template<class InterfaceT>
             sp<InterfaceT> startDeviceInterface();
@@ -553,6 +562,9 @@
                     bool *status /*out*/)
                     override;
             virtual status_t filterSmallJpegSizes() override;
+            virtual void notifyDeviceStateChange(
+                    hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+                        newState) override;
 
             DeviceInfo3(const std::string& name, const metadata_vendor_id_t tagId,
                     const std::string &id, uint16_t minorVersion,
@@ -562,6 +574,8 @@
             virtual ~DeviceInfo3();
         private:
             CameraMetadata mCameraCharacteristics;
+            // Map device states to sensor orientations
+            std::unordered_map<int64_t, int32_t> mDeviceStateOrientationMap;
             // A copy of mCameraCharacteristics without performance class
             // override
             std::unique_ptr<CameraMetadata> mCameraCharNoPCOverride;
@@ -662,6 +676,12 @@
                 sp<hardware::camera::provider::V2_6::ICameraProvider> &interface2_6);
     };
 
+    /**
+     * Save the ICameraProvider while it is being used by a camera or torch client
+     */
+    void saveRef(DeviceMode usageType, const std::string &cameraId,
+            sp<hardware::camera::provider::V2_4::ICameraProvider> provider);
+
     // Utility to find a DeviceInfo by ID; pointer is only valid while mInterfaceMutex is held
     // and the calling code doesn't mutate the list of providers or their lists of devices.
     // Finds the first device of the given ID that falls within the requested version range
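
ANDROID_INFO_DEVICE_STATE_ORIENTATIONS is consumed above as a flat int64 array of (device state, orientation) pairs; the resulting mDeviceStateOrientationMap is then used to rewrite ANDROID_SENSOR_ORIENTATION whenever the device state changes. A small sketch of that pairing and lookup, with a plain array standing in for the camera_metadata entry and purely illustrative state values:

#include <cstdint>
#include <cstdio>
#include <unordered_map>

int main() {
    // Flattened payload: {state0, orientation0, state1, orientation1, ...}
    const int64_t data[] = {0 /*e.g. NORMAL*/, 90, 1 /*e.g. FOLDED*/, 270};
    const size_t count = sizeof(data) / sizeof(data[0]);

    std::unordered_map<int64_t, int32_t> stateToOrientation;
    if (count > 0 && (count % 2) == 0) {  // must contain whole pairs
        for (size_t i = 0; i < count; i += 2) {
            stateToOrientation.emplace(data[i], static_cast<int32_t>(data[i + 1]));
        }
    }

    const int64_t newState = 1;
    auto it = stateToOrientation.find(newState);
    if (it != stateToOrientation.end()) {
        // The real code writes this value back into ANDROID_SENSOR_ORIENTATION.
        std::printf("orientation for state %lld: %d\n",
                    static_cast<long long>(newState), it->second);
    }
    return 0;
}
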
diff --git a/services/camera/libcameraservice/device3/BufferUtils.h b/services/camera/libcameraservice/device3/BufferUtils.h
index 1e1cd60..03112ec 100644
--- a/services/camera/libcameraservice/device3/BufferUtils.h
+++ b/services/camera/libcameraservice/device3/BufferUtils.h
@@ -104,7 +104,7 @@
 
         // Return the removed buffer ID if input cache is found.
         // Otherwise return BUFFER_ID_NO_BUFFER
-        uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle);
+        uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) override;
 
         // Clear all caches for input stream, but do not remove the stream
         // Removed buffers' ID are returned
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 7c3c6b7..67f99bf 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2677,6 +2677,7 @@
     }
 
     mGroupIdPhysicalCameraMap.clear();
+    bool composerSurfacePresent = false;
     for (size_t i = 0; i < mOutputStreams.size(); i++) {
 
         // Don't configure bidi streams twice, nor add them twice to the list
@@ -2716,6 +2717,10 @@
             const String8& physicalCameraId = mOutputStreams[i]->getPhysicalCameraId();
             mGroupIdPhysicalCameraMap[streamGroupId].insert(physicalCameraId);
         }
+
+        if (outputStream->usage & GraphicBuffer::USAGE_HW_COMPOSER) {
+            composerSurfacePresent = true;
+        }
     }
 
     config.streams = streams.editArray();
@@ -2783,6 +2788,8 @@
         }
     }
 
+    mRequestThread->setComposerSurface(composerSurfacePresent);
+
     // Request thread needs to know to avoid using repeat-last-settings protocol
     // across configure_streams() calls
     if (notifyRequestThread) {
@@ -3084,10 +3091,11 @@
 void Camera3Device::monitorMetadata(TagMonitor::eventSource source,
         int64_t frameNumber, nsecs_t timestamp, const CameraMetadata& metadata,
         const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
-        const std::set<int32_t> &outputStreamIds, int32_t inputStreamId) {
+        const camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+        int32_t inputStreamId) {
 
     mTagMonitor.monitorMetadata(source, frameNumber, timestamp, metadata,
-            physicalMetadata, outputStreamIds, inputStreamId);
+            physicalMetadata, outputBuffers, numOutputBuffers, inputStreamId);
 }
 
 /**
@@ -4144,6 +4152,11 @@
     return mBufferRecords.getBufferId(buf, streamId);
 }
 
+uint64_t Camera3Device::HalInterface::removeOneBufferCache(int streamId,
+        const native_handle_t* handle) {
+    return mBufferRecords.removeOneBufferCache(streamId, handle);
+}
+
 void Camera3Device::HalInterface::onBufferFreed(
         int streamId, const native_handle_t* handle) {
     uint32_t bufferId = mBufferRecords.removeOneBufferCache(streamId, handle);
@@ -4186,6 +4199,7 @@
         mCurrentAfTriggerId(0),
         mCurrentPreCaptureTriggerId(0),
         mRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE),
+        mComposerOutput(false),
         mCameraMute(ANDROID_SENSOR_TEST_PATTERN_MODE_OFF),
         mCameraMuteChanged(false),
         mRepeatingLastFrameNumber(
@@ -4590,12 +4604,6 @@
 
         sp<Camera3Device> parent = mParent.promote();
         if (parent != NULL) {
-            std::set<int32_t> outputStreamIds;
-            for (size_t i = 0; i < halRequest.num_output_buffers; i++) {
-                const camera_stream_buffer_t *src = halRequest.output_buffers + i;
-                int32_t streamId = Camera3Stream::cast(src->stream)->getId();
-                outputStreamIds.emplace(streamId);
-            }
             int32_t inputStreamId = -1;
             if (halRequest.input_buffer != nullptr) {
               inputStreamId = Camera3Stream::cast(halRequest.input_buffer->stream)->getId();
@@ -4603,7 +4611,8 @@
 
             parent->monitorMetadata(TagMonitor::REQUEST,
                     halRequest.frame_number,
-                    0, mLatestRequest, mLatestPhysicalRequest, outputStreamIds, inputStreamId);
+                    0, mLatestRequest, mLatestPhysicalRequest, halRequest.output_buffers,
+                    halRequest.num_output_buffers, inputStreamId);
         }
     }
 
@@ -4801,6 +4810,26 @@
     return submitRequestSuccess;
 }
 
+status_t Camera3Device::removeFwkOnlyRegionKeys(CameraMetadata *request) {
+    static const std::array<uint32_t, 4> kFwkOnlyRegionKeys = {ANDROID_CONTROL_AF_REGIONS_SET,
+        ANDROID_CONTROL_AE_REGIONS_SET, ANDROID_CONTROL_AWB_REGIONS_SET,
+        ANDROID_SCALER_CROP_REGION_SET};
+    if (request == nullptr) {
+        ALOGE("%s request metadata nullptr", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    status_t res = OK;
+    for (const auto &key : kFwkOnlyRegionKeys) {
+        if (request->exists(key)) {
+            res = request->erase(key);
+            if (res != OK) {
+                return res;
+            }
+        }
+    }
+    return OK;
+}
+
 status_t Camera3Device::RequestThread::prepareHalRequests() {
     ATRACE_CALL();
 
@@ -4827,7 +4856,11 @@
         bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
         mPrevTriggers = triggerCount;
 
-        bool rotateAndCropChanged = overrideAutoRotateAndCrop(captureRequest);
+        // Do not override rotate&crop for stream configurations that include
+        // SurfaceView (HW_COMPOSER) outputs. The display rotation there will be
+        // compensated by NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY.
+        bool rotateAndCropChanged = mComposerOutput ? false :
+            overrideAutoRotateAndCrop(captureRequest);
         bool testPatternChanged = overrideTestPattern(captureRequest);
 
         // If the request is the same as last, or we had triggers now or last time or
@@ -4860,6 +4893,12 @@
                             it != captureRequest->mSettingsList.end(); it++) {
                         if (parent->mUHRCropAndMeteringRegionMappers.find(it->cameraId) ==
                                 parent->mUHRCropAndMeteringRegionMappers.end()) {
+                            if ((res = removeFwkOnlyRegionKeys(&(it->metadata))) != OK) {
+                                SET_ERR("RequestThread: Unable to remove fwk-only keys from request "
+                                        "%d: %s (%d)", halRequest->frame_number, strerror(-res),
+                                        res);
+                                return INVALID_OPERATION;
+                            }
                             continue;
                         }
 
@@ -4874,6 +4913,12 @@
                                 return INVALID_OPERATION;
                             }
                             captureRequest->mUHRCropAndMeteringRegionsUpdated = true;
+                            if ((res = removeFwkOnlyRegionKeys(&(it->metadata))) != OK) {
+                                SET_ERR("RequestThread: Unable to remove fwk-only keys from request "
+                                        "%d: %s (%d)", halRequest->frame_number, strerror(-res),
+                                        res);
+                                return INVALID_OPERATION;
+                            }
                         }
                     }
 
@@ -5321,6 +5366,13 @@
     return OK;
 }
 
+status_t Camera3Device::RequestThread::setComposerSurface(bool composerSurfacePresent) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mTriggerMutex);
+    mComposerOutput = composerSurfacePresent;
+    return OK;
+}
+
 status_t Camera3Device::RequestThread::setCameraMute(int32_t muteMode) {
     ATRACE_CALL();
     Mutex::Autolock l(mTriggerMutex);
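
composerSurfacePresent, computed during stream configuration and latched into the request thread via setComposerSurface(), only records whether any output stream carries the HW_COMPOSER usage bit; requests then skip the automatic rotate&crop override for such configurations because the display transform already compensates. A small sketch of the usage check, with a stand-in stream struct and usage constant rather than the real GraphicBuffer types:

#include <cstdint>
#include <vector>

constexpr uint64_t kUsageHwComposer = 0x800;  // stand-in for GraphicBuffer::USAGE_HW_COMPOSER

struct OutputStream {
    uint64_t usage;
};

bool hasComposerOutput(const std::vector<OutputStream>& streams) {
    for (const auto& stream : streams) {
        if (stream.usage & kUsageHwComposer) {
            return true;
        }
    }
    return false;
}

// Per capture request, mirroring the mComposerOutput gating in the patch:
//   bool rotateAndCropChanged = composerOutput ? false : overrideAutoRotateAndCrop(request);
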
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index c0dc6f8..4ffdc5f 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -297,6 +297,7 @@
 
   private:
     status_t disconnectImpl();
+    static status_t removeFwkOnlyRegionKeys(CameraMetadata *request);
 
     // internal typedefs
     using RequestMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
@@ -412,6 +413,8 @@
         std::pair<bool, uint64_t> getBufferId(
                 const buffer_handle_t& buf, int streamId) override;
 
+        uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) override;
+
         status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
                 /*out*/ buffer_handle_t **buffer) override;
 
@@ -916,6 +919,7 @@
 
         status_t setRotateAndCropAutoBehavior(
                 camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue);
+        status_t setComposerSurface(bool composerSurfacePresent);
 
         status_t setCameraMute(int32_t muteMode);
 
@@ -1068,6 +1072,7 @@
         uint32_t           mCurrentAfTriggerId;
         uint32_t           mCurrentPreCaptureTriggerId;
         camera_metadata_enum_android_scaler_rotate_and_crop_t mRotateAndCropOverride;
+        bool               mComposerOutput;
         int32_t            mCameraMute; // 0 = no mute, otherwise the TEST_PATTERN_MODE to use
         bool               mCameraMuteChanged;
 
@@ -1250,7 +1255,8 @@
     void monitorMetadata(TagMonitor::eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const CameraMetadata& metadata,
             const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
-            const std::set<int32_t> &outputStreamIds, int32_t inputStreamId);
+            const camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+            int32_t inputStreamId);
 
     metadata_vendor_id_t mVendorTagId;
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputInterface.h b/services/camera/libcameraservice/device3/Camera3OutputInterface.h
index 8817833..40eef1d 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputInterface.h
@@ -50,6 +50,10 @@
         // return pair of (newlySeenBuffer?, bufferId)
         virtual std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId) = 0;
 
+        // Return the removed buffer ID if input cache is found.
+        // Otherwise return BUFFER_ID_NO_BUFFER
+        virtual uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) = 0;
+
         // Find a buffer_handle_t based on frame number and stream ID
         virtual status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
                 /*out*/ buffer_handle_t **buffer) = 0;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index ef51ada..5a97f4b 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -368,7 +368,7 @@
     }
     states.tagMonitor.monitorMetadata(TagMonitor::RESULT,
             frameNumber, sensorTimestamp, captureResult.mMetadata,
-            monitoredPhysicalMetadata, std::set<int32_t>());
+            monitoredPhysicalMetadata);
 
     insertResultLocked(states, &captureResult, frameNumber);
 }
@@ -1305,6 +1305,7 @@
         hardware::hidl_vec<StreamBuffer> tmpRetBuffers(numBuffersRequested);
         bool currentReqSucceeds = true;
         std::vector<camera_stream_buffer_t> streamBuffers(numBuffersRequested);
+        std::vector<buffer_handle_t> newBuffers;
         size_t numAllocatedBuffers = 0;
         size_t numPushedInflightBuffers = 0;
         for (size_t b = 0; b < numBuffersRequested; b++) {
@@ -1344,6 +1345,9 @@
             hBuf.buffer = (isNewBuffer) ? *buffer : nullptr;
             hBuf.status = BufferStatus::OK;
             hBuf.releaseFence = nullptr;
+            if (isNewBuffer) {
+                newBuffers.push_back(*buffer);
+            }
 
             native_handle_t *acquireFence = nullptr;
             if (sb.acquire_fence != -1) {
@@ -1386,6 +1390,9 @@
             returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
                     streamBuffers.data(), numAllocatedBuffers, 0, /*requested*/false,
                     /*requestTimeNs*/0, states.sessionStatsBuilder);
+            for (auto buf : newBuffers) {
+                states.bufferRecordsIntf.removeOneBufferCache(streamId, buf);
+            }
         }
     }
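
The newBuffers vector added above remembers which handles were freshly inserted into the buffer-ID cache while serving this request, so the failure path can evict exactly those entries and leave previously cached buffers untouched. A sketch of that rollback idea, with a hypothetical cache type standing in for the BufferRecords interface:

#include <cstdint>
#include <unordered_map>
#include <vector>

struct BufferCache {
    std::unordered_map<uint64_t, int> entries;  // bufferId -> streamId
    bool insert(uint64_t id, int streamId) { return entries.emplace(id, streamId).second; }
    void remove(uint64_t id) { entries.erase(id); }
};

bool requestBuffers(BufferCache& cache, int streamId,
                    const std::vector<uint64_t>& bufferIds, bool simulateFailure) {
    std::vector<uint64_t> newEntries;
    for (uint64_t id : bufferIds) {
        if (cache.insert(id, streamId)) {
            newEntries.push_back(id);  // only entries created by this call are rolled back
        }
    }
    if (simulateFailure) {
        for (uint64_t id : newEntries) {
            cache.remove(id);
        }
        return false;
    }
    return true;
}
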
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 142889a..3f54ac5 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -42,66 +42,6 @@
 
 namespace camera3 {
 
-    typedef struct camera_stream_configuration {
-        uint32_t num_streams;
-        camera_stream_t **streams;
-        uint32_t operation_mode;
-        bool input_is_multi_resolution;
-    } camera_stream_configuration_t;
-
-    typedef struct camera_capture_request {
-        uint32_t frame_number;
-        const camera_metadata_t *settings;
-        camera_stream_buffer_t *input_buffer;
-        uint32_t num_output_buffers;
-        const camera_stream_buffer_t *output_buffers;
-        uint32_t num_physcam_settings;
-        const char **physcam_id;
-        const camera_metadata_t **physcam_settings;
-        int32_t input_width;
-        int32_t input_height;
-    } camera_capture_request_t;
-
-    typedef struct camera_capture_result {
-        uint32_t frame_number;
-        const camera_metadata_t *result;
-        uint32_t num_output_buffers;
-        const camera_stream_buffer_t *output_buffers;
-        const camera_stream_buffer_t *input_buffer;
-        uint32_t partial_result;
-        uint32_t num_physcam_metadata;
-        const char **physcam_ids;
-        const camera_metadata_t **physcam_metadata;
-    } camera_capture_result_t;
-
-    typedef struct camera_shutter_msg {
-        uint32_t frame_number;
-        uint64_t timestamp;
-    } camera_shutter_msg_t;
-
-    typedef struct camera_error_msg {
-        uint32_t frame_number;
-        camera_stream_t *error_stream;
-        int error_code;
-    } camera_error_msg_t;
-
-    typedef enum camera_error_msg_code {
-        CAMERA_MSG_ERROR_DEVICE = 1,
-        CAMERA_MSG_ERROR_REQUEST = 2,
-        CAMERA_MSG_ERROR_RESULT = 3,
-        CAMERA_MSG_ERROR_BUFFER = 4,
-        CAMERA_MSG_NUM_ERRORS
-    } camera_error_msg_code_t;
-
-    typedef struct camera_notify_msg {
-        int type;
-
-        union {
-            camera_error_msg_t error;
-            camera_shutter_msg_t shutter;
-        } message;
-    } camera_notify_msg_t;
-
     /**
      * Helper methods shared between Camera3Device/Camera3OfflineSession for HAL callbacks
      */
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 523a2c7..85e809c 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -30,6 +30,66 @@
 
 namespace camera3 {
 
+typedef struct camera_stream_configuration {
+    uint32_t num_streams;
+    camera_stream_t **streams;
+    uint32_t operation_mode;
+    bool input_is_multi_resolution;
+} camera_stream_configuration_t;
+
+typedef struct camera_capture_request {
+    uint32_t frame_number;
+    const camera_metadata_t *settings;
+    camera_stream_buffer_t *input_buffer;
+    uint32_t num_output_buffers;
+    const camera_stream_buffer_t *output_buffers;
+    uint32_t num_physcam_settings;
+    const char **physcam_id;
+    const camera_metadata_t **physcam_settings;
+    int32_t input_width;
+    int32_t input_height;
+} camera_capture_request_t;
+
+typedef struct camera_capture_result {
+    uint32_t frame_number;
+    const camera_metadata_t *result;
+    uint32_t num_output_buffers;
+    const camera_stream_buffer_t *output_buffers;
+    const camera_stream_buffer_t *input_buffer;
+    uint32_t partial_result;
+    uint32_t num_physcam_metadata;
+    const char **physcam_ids;
+    const camera_metadata_t **physcam_metadata;
+} camera_capture_result_t;
+
+typedef struct camera_shutter_msg {
+    uint32_t frame_number;
+    uint64_t timestamp;
+} camera_shutter_msg_t;
+
+typedef struct camera_error_msg {
+    uint32_t frame_number;
+    camera_stream_t *error_stream;
+    int error_code;
+} camera_error_msg_t;
+
+typedef enum camera_error_msg_code {
+    CAMERA_MSG_ERROR_DEVICE = 1,
+    CAMERA_MSG_ERROR_REQUEST = 2,
+    CAMERA_MSG_ERROR_RESULT = 3,
+    CAMERA_MSG_ERROR_BUFFER = 4,
+    CAMERA_MSG_NUM_ERRORS
+} camera_error_msg_code_t;
+
+typedef struct camera_notify_msg {
+    int type;
+
+    union {
+        camera_error_msg_t error;
+        camera_shutter_msg_t shutter;
+    } message;
+} camera_notify_msg_t;
+
 typedef enum {
     // Cache the buffers with STATUS_ERROR within InFlightRequest
     ERROR_BUF_CACHE,
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 7d1b3cf..a812587 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -279,6 +279,9 @@
         size_t numSections = sectionNames->size();
         std::vector<std::vector<HVendorTag>> tagsBySection(numSections);
         int tagCount = desc->getTagCount();
+        if (tagCount <= 0) {
+            continue;
+        }
         std::vector<uint32_t> tags(tagCount);
         desc->getTagArray(tags.data());
         for (int i = 0; i < tagCount; i++) {
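
The tagCount <= 0 guard above matters because getTagCount() returns a signed int that is then used to size a std::vector: zero makes the rest of the loop body pointless, and a negative value would convert to a huge size_t. A tiny illustration of the safe pattern (the helper name is illustrative only):

#include <cstdint>
#include <vector>

std::vector<uint32_t> makeTagList(int tagCount) {
    if (tagCount <= 0) {
        return {};  // mirrors the 'continue' in the patched loop
    }
    return std::vector<uint32_t>(static_cast<size_t>(tagCount));
}
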
diff --git a/services/camera/libcameraservice/tests/ExifUtilsTest.cpp b/services/camera/libcameraservice/tests/ExifUtilsTest.cpp
new file mode 100644
index 0000000..3de4bf2
--- /dev/null
+++ b/services/camera/libcameraservice/tests/ExifUtilsTest.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ExifUtilsTest"
+
+#include <camera/CameraMetadata.h>
+#include "../utils/ExifUtils.h"
+#include <gtest/gtest.h>
+
+using android::camera3::ExifUtils;
+using android::camera3::ExifOrientation;
+using android::CameraMetadata;
+
+static const uint32_t kImageWidth = 1920;
+static const uint32_t kImageHeight = 1440;
+static const ExifOrientation kExifOrientation = ExifOrientation::ORIENTATION_0_DEGREES;
+
+// Test that setFromMetadata works correctly, without errors.
+TEST(ExifUtilsTest, SetFromMetadataTest) {
+    std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+    uint8_t invalidSensorPixelMode = 2;
+    uint8_t validSensorPixelMode = ANDROID_SENSOR_PIXEL_MODE_DEFAULT;
+    CameraMetadata metadata;
+    // Empty staticInfo
+    CameraMetadata staticInfo;
+    ASSERT_TRUE(utils->initializeEmpty());
+    ASSERT_TRUE(
+            metadata.update(ANDROID_SENSOR_PIXEL_MODE, &invalidSensorPixelMode, 1) == android::OK);
+    ASSERT_FALSE(utils->setFromMetadata(metadata, staticInfo, kImageWidth, kImageHeight));
+    ASSERT_TRUE(
+            metadata.update(ANDROID_SENSOR_PIXEL_MODE, &validSensorPixelMode, 1) == android::OK);
+    ASSERT_TRUE(utils->setFromMetadata(metadata, staticInfo, kImageWidth, kImageHeight));
+    ASSERT_TRUE(utils->setImageWidth(kImageWidth));
+    ASSERT_TRUE(utils->setImageHeight(kImageHeight));
+    ASSERT_TRUE(utils->setOrientationValue(kExifOrientation));
+    ASSERT_TRUE(utils->generateApp1());
+    const uint8_t* exifBuffer = utils->getApp1Buffer();
+    ASSERT_NE(exifBuffer, nullptr);
+    size_t exifBufferSize = utils->getApp1Length();
+    ASSERT_TRUE(exifBufferSize != 0);
+}
diff --git a/services/camera/libcameraservice/utils/ExifUtils.cpp b/services/camera/libcameraservice/utils/ExifUtils.cpp
index 485705c..21f02db 100644
--- a/services/camera/libcameraservice/utils/ExifUtils.cpp
+++ b/services/camera/libcameraservice/utils/ExifUtils.cpp
@@ -920,7 +920,7 @@
     camera_metadata_ro_entry sensorPixelModeEntry = metadata.find(ANDROID_SENSOR_PIXEL_MODE);
     if (sensorPixelModeEntry.count != 0) {
         sensorPixelMode = sensorPixelModeEntry.data.u8[0];
-        if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT ||
+        if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT &&
             sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) {
             ALOGE("%s: Request sensor pixel mode is not one of the valid values %d",
                       __FUNCTION__, sensorPixelMode);
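
The operator change above, '||' to '&&', is the De Morgan form of "reject the mode unless it is one of the two allowed values"; with '||' the check flagged every request as invalid, because any value differs from at least one of the two constants. A tiny stand-alone version of the corrected check, with placeholder values for the ANDROID_SENSOR_PIXEL_MODE_* enums:

#include <cassert>
#include <cstdint>

constexpr uint8_t kPixelModeDefault = 0;  // stand-in for ANDROID_SENSOR_PIXEL_MODE_DEFAULT
constexpr uint8_t kPixelModeMaxRes = 1;   // stand-in for ..._MAXIMUM_RESOLUTION

bool isValidPixelMode(uint8_t mode) {
    // Invalid only if the mode matches neither allowed value.
    return !(mode != kPixelModeDefault && mode != kPixelModeMaxRes);
}

int main() {
    assert(isValidPixelMode(kPixelModeDefault));
    assert(isValidPixelMode(kPixelModeMaxRes));
    assert(!isValidPixelMode(2));
    return 0;
}
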
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index 53a92e9..4488098 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -24,6 +24,7 @@
 #include <utils/Log.h>
 #include <camera/VendorTagDescriptor.h>
 #include <camera_metadata_hidden.h>
+#include <device3/Camera3Stream.h>
 
 namespace android {
 
@@ -119,7 +120,8 @@
 void TagMonitor::monitorMetadata(eventSource source, int64_t frameNumber, nsecs_t timestamp,
         const CameraMetadata& metadata,
         const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
-        const std::set<int32_t> &outputStreamIds, int32_t inputStreamId) {
+        const camera3::camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+        int32_t inputStreamId) {
     if (!mMonitoringEnabled) return;
 
     std::lock_guard<std::mutex> lock(mMonitorMutex);
@@ -127,7 +129,12 @@
     if (timestamp == 0) {
         timestamp = systemTime(SYSTEM_TIME_BOOTTIME);
     }
-
+    std::unordered_set<int32_t> outputStreamIds;
+    for (size_t i = 0; i < numOutputBuffers; i++) {
+        const camera3::camera_stream_buffer_t *src = outputBuffers + i;
+        int32_t streamId = camera3::Camera3Stream::cast(src->stream)->getId();
+        outputStreamIds.emplace(streamId);
+    }
     std::string emptyId;
     for (auto tag : mMonitoredTagList) {
         monitorSingleMetadata(source, frameNumber, timestamp, emptyId, tag, metadata,
@@ -142,7 +149,7 @@
 
 void TagMonitor::monitorSingleMetadata(eventSource source, int64_t frameNumber, nsecs_t timestamp,
         const std::string& cameraId, uint32_t tag, const CameraMetadata& metadata,
-        const std::set<int32_t> &outputStreamIds, int32_t inputStreamId) {
+        const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId) {
 
     CameraMetadata &lastValues = (source == REQUEST) ?
             (cameraId.empty() ? mLastMonitoredRequestValues :
@@ -186,7 +193,8 @@
         // Also monitor when the stream ids change, this helps visually see what
         // monitored metadata values are for capture requests with different
         // stream ids.
-        if (inputStreamId != mLastInputStreamId || outputStreamIds != mLastStreamIds) {
+        if (source == REQUEST &&
+                (inputStreamId != mLastInputStreamId || outputStreamIds != mLastStreamIds)) {
             mLastInputStreamId = inputStreamId;
             mLastStreamIds = outputStreamIds;
             isDifferent = true;
@@ -261,7 +269,7 @@
 #define CAMERA_METADATA_ENUM_STRING_MAX_SIZE 29
 
 void TagMonitor::printData(int fd, const uint8_t *data_ptr, uint32_t tag,
-        int type, int count, int indentation, const std::set<int32_t> &outputStreamIds,
+        int type, int count, int indentation, const std::unordered_set<int32_t> &outputStreamIds,
         int32_t inputStreamId) {
     static int values_per_line[NUM_TYPES] = {
         [TYPE_BYTE]     = 16,
@@ -353,7 +361,8 @@
 
 template<typename T>
 TagMonitor::MonitorEvent::MonitorEvent(eventSource src, uint32_t frameNumber, nsecs_t timestamp,
-        const T &value, const std::string& cameraId, const std::set<int32_t> &outputStreamIds,
+        const T &value, const std::string& cameraId,
+        const std::unordered_set<int32_t> &outputStreamIds,
         int32_t inputStreamId) :
         source(src),
         frameNumber(frameNumber),
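
With the new signature, TagMonitor derives the set of output stream IDs itself from the raw buffer array instead of every caller building a std::set up front; an unordered_set suffices because the IDs are only compared against the previous request's set and dumped, never iterated in order. A simplified sketch of the derivation, with stand-in Buffer/Stream structs in place of camera_stream_buffer_t and Camera3Stream:

#include <cstdint>
#include <unordered_set>

struct Stream {
    int32_t id;
};

struct Buffer {
    const Stream* stream;
};

std::unordered_set<int32_t> collectStreamIds(const Buffer* buffers, uint32_t count) {
    std::unordered_set<int32_t> ids;
    for (uint32_t i = 0; i < count; i++) {
        ids.emplace(buffers[i].stream->id);  // duplicate stream IDs collapse in the set
    }
    return ids;
}
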
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index dfc424e..f6df4b7 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -20,7 +20,6 @@
 #include <vector>
 #include <atomic>
 #include <mutex>
-#include <set>
 #include <unordered_map>
 
 #include <utils/RefBase.h>
@@ -31,6 +30,7 @@
 #include <system/camera_metadata.h>
 #include <system/camera_vendor_tags.h>
 #include <camera/CameraMetadata.h>
+#include <device3/InFlightRequest.h>
 
 namespace android {
 
@@ -68,7 +68,8 @@
     void monitorMetadata(eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const CameraMetadata& metadata,
             const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
-            const std::set<int32_t> &outputStreamIds, int32_t inputStreamId = -1);
+            const camera3::camera_stream_buffer_t *outputBuffers = nullptr,
+            uint32_t numOutputBuffers = 0, int32_t inputStreamId = -1);
 
     // Dump current event log to the provided fd
     void dumpMonitoredMetadata(int fd);
@@ -76,12 +77,12 @@
   private:
 
     static void printData(int fd, const uint8_t *data_ptr, uint32_t tag,
-            int type, int count, int indentation, const std::set<int32_t> &outputStreamIds,
-            int32_t inputStreamId);
+            int type, int count, int indentation,
+            const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId);
 
     void monitorSingleMetadata(TagMonitor::eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const std::string& cameraId, uint32_t tag,
-            const CameraMetadata& metadata, const std::set<int32_t> &outputStreamIds,
+            const CameraMetadata& metadata, const std::unordered_set<int32_t> &outputStreamIds,
             int32_t inputStreamId);
 
     std::atomic<bool> mMonitoringEnabled;
@@ -98,7 +99,7 @@
     std::unordered_map<std::string, CameraMetadata> mLastMonitoredPhysicalResultKeys;
 
     int32_t mLastInputStreamId = -1;
-    std::set<int32_t> mLastStreamIds;
+    std::unordered_set<int32_t> mLastStreamIds;
 
     /**
      * A monitoring event
@@ -109,7 +110,7 @@
         template<typename T>
         MonitorEvent(eventSource src, uint32_t frameNumber, nsecs_t timestamp,
                 const T &newValue, const std::string& cameraId,
-                const std::set<int32_t> &outputStreamIds, int32_t inputStreamId);
+                const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId);
         ~MonitorEvent();
 
         eventSource source;
@@ -119,7 +120,7 @@
         uint8_t type;
         std::vector<uint8_t> newData;
         std::string cameraId;
-        std::set<int32_t> outputStreamIds;
+        std::unordered_set<int32_t> outputStreamIds;
         int32_t inputStreamId = 1;
     };
 
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
index 9058f10..41efce0 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
@@ -84,5 +84,6 @@
 getgid32: 1
 getegid32: 1
 getgroups32: 1
+sysinfo: 1
 
 @include /apex/com.android.media.swcodec/etc/seccomp_policy/code_coverage.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
index 4c51a9c..4317ccc 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
@@ -78,5 +78,16 @@
 getgid: 1
 getegid: 1
 getgroups: 1
+sysinfo: 1
+
+# Android profiler (heapprofd, traced_perf) additions, where not already
+# covered by the rest of the file, or by builtin minijail allow-listing of
+# logging-related syscalls.
+# TODO(b/197184220): this is a targeted addition for a specific investigation,
+# and addresses just the arm64 framework av service policies. In the future, we
+# should make this more general (e.g. a central file that can be @included in
+# other policy files).
+setsockopt: 1
+sendmsg: 1
 
 @include /apex/com.android.media.swcodec/etc/seccomp_policy/code_coverage.arm64.policy
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
index 9bbd53b..e54c918 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
@@ -46,5 +46,16 @@
 # Required by Sanitizers
 sched_yield: 1
 
+# Android profiler (heapprofd, traced_perf) additions, where not already
+# covered by the rest of the file, or by builtin minijail allow-listing of
+# logging-related syscalls.
+# TODO(b/197184220): this is a targeted addition for a specific investigation,
+# and addresses just the arm64 framework av service policies. In the future, we
+# should make this more general (e.g. a central file that can be @included in
+# other policy files).
+setsockopt: 1
+sendmsg: 1
+set_tid_address: 1
+
 @include /apex/com.android.media/etc/seccomp_policy/crash_dump.arm64.policy
 @include /apex/com.android.media/etc/seccomp_policy/code_coverage.arm64.policy
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 0351d2d..04fc430 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -172,11 +172,13 @@
         "libstatspull",
         "libstatssocket",
         "libutils",
+        "packagemanager_aidl-cpp",
     ],
 
     export_shared_lib_headers: [
         "libstatspull",
         "libstatssocket",
+        "packagemanager_aidl-cpp",
     ],
 
     static_libs: [
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 340076e..390cd5c 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -193,7 +193,9 @@
     if (direction == AAUDIO_DIRECTION_OUTPUT) {
         flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
                 | AAudioConvert_allowCapturePolicyToAudioFlagsMask(
-                        params->getAllowedCapturePolicy()));
+                        params->getAllowedCapturePolicy(),
+                        params->getSpatializationBehavior(),
+                        params->isContentSpatialized()));
     } else {
         flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
                 | AAudioConvert_privacySensitiveToAudioFlagsMask(params->isPrivacySensitive()));
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 4e46033..f590fc8 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -21,6 +21,7 @@
 #include <assert.h>
 #include <map>
 #include <mutex>
+#include <media/AudioSystem.h>
 #include <utils/Singleton.h>
 
 #include "AAudioEndpointManager.h"
@@ -51,7 +52,7 @@
         mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
                         getStreamInternal()->getFramesPerBurst());
 
-        int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
+        int32_t burstsPerBuffer = AudioSystem::getAAudioMixerBurstCount();
         if (burstsPerBuffer == 0) {
             mLatencyTuningEnabled = true;
             burstsPerBuffer = BURSTS_PER_BUFFER_DEFAULT;
diff --git a/services/tuner/Android.bp b/services/tuner/Android.bp
index b2b1c3b..ec62d4e 100644
--- a/services/tuner/Android.bp
+++ b/services/tuner/Android.bp
@@ -52,6 +52,7 @@
         "liblog",
         "libmedia",
         "libutils",
+        "packagemanager_aidl-cpp",
         "tv_tuner_aidl_interface-ndk",
         "tv_tuner_resource_manager_aidl_interface-ndk",
     ],
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
index 2e9e4a6..de82855 100644
--- a/services/tuner/hidl/TunerHidlFilter.cpp
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -283,8 +283,6 @@
         if (res == HidlResult::SUCCESS) {
             *out_avMemory = dupToAidl(avMemory);
             *_aidl_return = static_cast<int64_t>(avMemSize);
-        } else {
-            _aidl_return = nullptr;
         }
     });