Merge "Add SharedFilter to tuner service."
diff --git a/camera/Android.bp b/camera/Android.bp
index 6878c20..4ed3269 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -43,6 +43,10 @@
     ],
 }
 
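+// Header-only library exporting the camera client headers
+// (frameworks/av/camera/include) so that other modules, such as the
+// cmds/stagefright "record" tool, can depend on them by name.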
+cc_library_headers {
+    name: "camera_headers",
+    export_include_dirs: ["include"],
+}
+
 cc_library_shared {
     name: "libcamera_client",
 
diff --git a/cmds/OWNERS b/cmds/OWNERS
index 0d32aac..a48c37a 100644
--- a/cmds/OWNERS
+++ b/cmds/OWNERS
@@ -1,3 +1,3 @@
 elaurent@google.com
+essick@google.com
 lajos@google.com
-marcone@google.com
diff --git a/cmds/stagefright/Android.bp b/cmds/stagefright/Android.bp
new file mode 100644
index 0000000..c4783d3
--- /dev/null
+++ b/cmds/stagefright/Android.bp
@@ -0,0 +1,278 @@
+package {
+    default_applicable_licenses: ["frameworks_av_cmds_stagefright_license"],
+}
+
+// Added automatically by a large-scale-change
+// See: http://go/android-license-faq
+license {
+    name: "frameworks_av_cmds_stagefright_license",
+    visibility: [":__subpackages__"],
+    license_kinds: [
+        "SPDX-license-identifier-Apache-2.0",
+    ],
+    license_text: [
+        "NOTICE",
+    ],
+}
+
+cc_binary {
+    name: "stagefright",
+
+    srcs: [
+        "AudioPlayer.cpp",
+        "stagefright.cpp",
+        "jpeg.cpp",
+        "SineSource.cpp",
+    ],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "libmedia",
+        "libmedia_codeclist",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libjpeg",
+        "libui",
+        "libgui",
+        "libcutils",
+        "liblog",
+        "libhidlbase",
+        "libdatasource",
+        "libaudioclient",
+        "android.hardware.media.omx@1.0",
+        "framework-permission-aidl-cpp",
+    ],
+
+    static_libs: ["framework-permission-aidl-cpp"],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+
+    system_ext_specific: true,
+}
+
+cc_binary {
+    name: "record",
+
+    srcs: [
+        "AudioPlayer.cpp",
+        "SineSource.cpp",
+        "record.cpp",
+    ],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+        "camera_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "libmedia",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libdatasource",
+        "libaudioclient",
+        "framework-permission-aidl-cpp",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "recordvideo",
+
+    srcs: [
+        "AudioPlayer.cpp",
+        "recordvideo.cpp",
+    ],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "libmedia",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libaudioclient",
+        "framework-permission-aidl-cpp",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "audioloop",
+
+    srcs: [
+        "AudioPlayer.cpp",
+        "SineSource.cpp",
+        "audioloop.cpp",
+    ],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "libmedia",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libaudioclient",
+        "framework-permission-aidl-cpp",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "stream",
+
+    srcs: ["stream.cpp"],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libui",
+        "libgui",
+        "libstagefright_foundation",
+        "libmedia",
+        "libcutils",
+        "libdatasource",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "codec",
+
+    srcs: [
+        "codec.cpp",
+        "SimplePlayer.cpp",
+    ],
+
+    header_libs: [
+        "libmediadrm_headers",
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libmedia",
+        "libmedia_omx",
+        "libaudioclient",
+        "libui",
+        "libgui",
+        "libcutils",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
+
+cc_binary {
+    name: "mediafilter",
+
+    srcs: [
+        "filters/argbtorgba.rscript",
+        "filters/nightvision.rscript",
+        "filters/saturation.rscript",
+        "mediafilter.cpp",
+    ],
+
+    header_libs: [
+        "libmediadrm_headers",
+        "libmediametrics_headers",
+        "libstagefright_headers",
+        "rs-headers",
+    ],
+
+    include_dirs: ["frameworks/av/media/libstagefright"],
+
+    shared_libs: [
+        "libstagefright",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libmedia_omx",
+        "libui",
+        "libgui",
+        "libRScpp",
+    ],
+
+    static_libs: ["libstagefright_mediafilter"],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+
+    sanitize: {
+        cfi: true,
+    },
+}
+
+cc_binary {
+    name: "muxer",
+
+    srcs: ["muxer.cpp"],
+
+    header_libs: [
+        "libmediametrics_headers",
+        "libstagefright_headers",
+    ],
+
+    shared_libs: [
+        "libstagefright",
+        "liblog",
+        "libutils",
+        "libbinder",
+        "libstagefright_foundation",
+        "libcutils",
+        "libc",
+    ],
+
+    cflags: [
+        "-Wno-multichar",
+    ],
+}
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
deleted file mode 100644
index 803c4a4..0000000
--- a/cmds/stagefright/Android.mk
+++ /dev/null
@@ -1,276 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=       \
-        AudioPlayer.cpp \
-        stagefright.cpp \
-        jpeg.cpp        \
-        SineSource.cpp
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright libmedia libmedia_codeclist libutils libbinder \
-        libstagefright_foundation libjpeg libui libgui libcutils liblog \
-        libhidlbase libdatasource libaudioclient \
-        android.hardware.media.omx@1.0 \
-        framework-permission-aidl-cpp
-
-LOCAL_STATIC_LIBRARIES := framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/av/media/libstagefright/include \
-        frameworks/native/include/media/openmax \
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SYSTEM_EXT_MODULE:= true
-LOCAL_MODULE:= stagefright
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=         \
-        AudioPlayer.cpp \
-        SineSource.cpp    \
-        record.cpp
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright libmedia liblog libutils libbinder \
-        libstagefright_foundation libdatasource libaudioclient \
-        framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/camera/include \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax \
-        frameworks/native/include/media/hardware
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= record
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=         \
-        AudioPlayer.cpp \
-        recordvideo.cpp
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright libmedia liblog libutils libbinder \
-        libstagefright_foundation libaudioclient
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax \
-        frameworks/native/include/media/hardware \
-        framework-permission-aidl-cpp
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= recordvideo
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=         \
-        AudioPlayer.cpp \
-        SineSource.cpp    \
-        audioloop.cpp
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright libmedia liblog libutils libbinder \
-        libstagefright_foundation libaudioclient \
-        framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= audioloop
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=         \
-        stream.cpp    \
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright liblog libutils libbinder libui libgui \
-        libstagefright_foundation libmedia libcutils libdatasource
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= stream
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=               \
-        codec.cpp               \
-        SimplePlayer.cpp        \
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediadrm_headers \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright liblog libutils libbinder libstagefright_foundation \
-        libmedia libmedia_omx libaudioclient libui libgui libcutils
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= codec
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
-        filters/argbtorgba.rscript \
-        filters/nightvision.rscript \
-        filters/saturation.rscript \
-        mediafilter.cpp \
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediadrm_headers \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright \
-        liblog \
-        libutils \
-        libbinder \
-        libstagefright_foundation \
-        libmedia_omx \
-        libui \
-        libgui \
-        libRScpp \
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax \
-        frameworks/rs/cpp \
-        frameworks/rs \
-
-intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
-LOCAL_C_INCLUDES += $(intermediates)
-
-LOCAL_STATIC_LIBRARIES:= \
-        libstagefright_mediafilter
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= mediafilter
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-LOCAL_SANITIZE := cfi
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=               \
-        muxer.cpp            \
-
-LOCAL_HEADER_LIBRARIES := \
-        libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
-        libstagefright liblog libutils libbinder libstagefright_foundation \
-        libcutils libc
-
-LOCAL_C_INCLUDES:= \
-        frameworks/av/media/libstagefright \
-        frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= muxer
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
diff --git a/include/OWNERS b/include/OWNERS
index d6bd998..88de595 100644
--- a/include/OWNERS
+++ b/include/OWNERS
@@ -1,6 +1,5 @@
 elaurent@google.com
-gkasten@google.com
 hunga@google.com
 jtinker@google.com
 lajos@google.com
-marcone@google.com
+essick@google.com
diff --git a/media/codec2/components/flac/C2SoftFlacEnc.cpp b/media/codec2/components/flac/C2SoftFlacEnc.cpp
index 6fead3a..182edfb 100644
--- a/media/codec2/components/flac/C2SoftFlacEnc.cpp
+++ b/media/codec2/components/flac/C2SoftFlacEnc.cpp
@@ -439,9 +439,6 @@
     }
     FLAC__bool ok = FLAC__stream_encoder_finish(mFlacStreamEncoder);
     if (!ok) return C2_CORRUPTED;
-    mIsFirstFrame = true;
-    mAnchorTimeStamp = 0ull;
-    mProcessedSamples = 0u;
 
     return C2_OK;
 }
diff --git a/media/janitors/codec_OWNERS b/media/janitors/codec_OWNERS
index e201399..d4ee51b 100644
--- a/media/janitors/codec_OWNERS
+++ b/media/janitors/codec_OWNERS
@@ -2,4 +2,4 @@
 # differentiated from plugins connecting those codecs to either omx or codec2 infrastructure
 essick@google.com
 lajos@google.com
-marcone@google.com
+wonsik@google.com
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index b512b48..fe84ec5 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -103,7 +103,7 @@
             : getFormat();
 
     // Setup the callback if there is one.
-    AudioTrack::callback_t callback = nullptr;
+    AudioTrack::legacy_callback_t callback = nullptr;
     void *callbackData = nullptr;
     // Note that TRANSFER_SYNC does not allow FAST track
     AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index fad861a..343bcef 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -368,6 +368,8 @@
 namespace {
 
 namespace detail {
+using AudioChannelBitPair = std::pair<audio_channel_mask_t, int>;
+using AudioChannelBitPairs = std::vector<AudioChannelBitPair>;
 using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
 using AudioChannelPairs = std::vector<AudioChannelPair>;
 using AudioDevicePair = std::pair<audio_devices_t, AudioDeviceDescription>;
@@ -376,6 +378,28 @@
 using AudioFormatPairs = std::vector<AudioFormatPair>;
 }
 
+const detail::AudioChannelBitPairs& getInAudioChannelBits() {
+    static const detail::AudioChannelBitPairs pairs = {
+        { AUDIO_CHANNEL_IN_LEFT, AudioChannelLayout::CHANNEL_FRONT_LEFT },
+        { AUDIO_CHANNEL_IN_RIGHT, AudioChannelLayout::CHANNEL_FRONT_RIGHT },
+        // AUDIO_CHANNEL_IN_FRONT is at the end
+        { AUDIO_CHANNEL_IN_BACK, AudioChannelLayout::CHANNEL_BACK_CENTER },
+        // AUDIO_CHANNEL_IN_*_PROCESSED not supported
+        // AUDIO_CHANNEL_IN_PRESSURE not supported
+        // AUDIO_CHANNEL_IN_*_AXIS not supported
+        // AUDIO_CHANNEL_IN_VOICE_* not supported
+        { AUDIO_CHANNEL_IN_BACK_LEFT, AudioChannelLayout::CHANNEL_BACK_LEFT },
+        { AUDIO_CHANNEL_IN_BACK_RIGHT, AudioChannelLayout::CHANNEL_BACK_RIGHT },
+        { AUDIO_CHANNEL_IN_CENTER, AudioChannelLayout::CHANNEL_FRONT_CENTER },
+        { AUDIO_CHANNEL_IN_LOW_FREQUENCY, AudioChannelLayout::CHANNEL_LOW_FREQUENCY },
+        { AUDIO_CHANNEL_IN_TOP_LEFT, AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT },
+        { AUDIO_CHANNEL_IN_TOP_RIGHT, AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT },
+        // When going from aidl to legacy, IN_CENTER is used
+        { AUDIO_CHANNEL_IN_FRONT, AudioChannelLayout::CHANNEL_FRONT_CENTER }
+    };
+    return pairs;
+}
+
 const detail::AudioChannelPairs& getInAudioChannelPairs() {
     static const detail::AudioChannelPairs pairs = {
 #define DEFINE_INPUT_LAYOUT(n)                                                 \
@@ -399,6 +423,44 @@
     return pairs;
 }
 
+const detail::AudioChannelBitPairs& getOutAudioChannelBits() {
+    static const detail::AudioChannelBitPairs pairs = {
+#define DEFINE_OUTPUT_BITS(n)                                                  \
+            { AUDIO_CHANNEL_OUT_##n, AudioChannelLayout::CHANNEL_##n }
+
+        DEFINE_OUTPUT_BITS(FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(LOW_FREQUENCY),
+        DEFINE_OUTPUT_BITS(BACK_LEFT),
+        DEFINE_OUTPUT_BITS(BACK_RIGHT),
+        DEFINE_OUTPUT_BITS(FRONT_LEFT_OF_CENTER),
+        DEFINE_OUTPUT_BITS(FRONT_RIGHT_OF_CENTER),
+        DEFINE_OUTPUT_BITS(BACK_CENTER),
+        DEFINE_OUTPUT_BITS(SIDE_LEFT),
+        DEFINE_OUTPUT_BITS(SIDE_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_BACK_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_BACK_CENTER),
+        DEFINE_OUTPUT_BITS(TOP_BACK_RIGHT),
+        DEFINE_OUTPUT_BITS(TOP_SIDE_LEFT),
+        DEFINE_OUTPUT_BITS(TOP_SIDE_RIGHT),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_LEFT),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_CENTER),
+        DEFINE_OUTPUT_BITS(BOTTOM_FRONT_RIGHT),
+        DEFINE_OUTPUT_BITS(LOW_FREQUENCY_2),
+        DEFINE_OUTPUT_BITS(FRONT_WIDE_LEFT),
+        DEFINE_OUTPUT_BITS(FRONT_WIDE_RIGHT),
+#undef DEFINE_OUTPUT_BITS
+        { AUDIO_CHANNEL_OUT_HAPTIC_A, AudioChannelLayout::CHANNEL_HAPTIC_A },
+        { AUDIO_CHANNEL_OUT_HAPTIC_B, AudioChannelLayout::CHANNEL_HAPTIC_B }
+    };
+    return pairs;
+}
+
 const detail::AudioChannelPairs& getOutAudioChannelPairs() {
     static const detail::AudioChannelPairs pairs = {
 #define DEFINE_OUTPUT_LAYOUT(n)                                                \
@@ -1007,6 +1069,25 @@
 
 }  // namespace
 
+audio_channel_mask_t aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+        int aidlLayout, bool isInput) {
+    auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+    const int aidlLayoutInitial = aidlLayout; // for error message
+    audio_channel_mask_t legacy = AUDIO_CHANNEL_NONE;
+    for (const auto& bitPair : bitMapping) {
+        if ((aidlLayout & bitPair.second) == bitPair.second) {
+            legacy = static_cast<audio_channel_mask_t>(legacy | bitPair.first);
+            aidlLayout &= ~bitPair.second;
+            if (aidlLayout == 0) {
+                return legacy;
+            }
+        }
+    }
+    ALOGE("%s: aidl layout 0x%x contains bits 0x%x that have no match to legacy %s bits",
+            __func__, aidlLayoutInitial, aidlLayout, isInput ? "input" : "output");
+    return AUDIO_CHANNEL_NONE;
+}
+
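+// Illustrative example of the bit-by-bit conversion above: with isInput == true,
+// an arbitrary layout of CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+// CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT (which is not a predefined
+// layout mask) maps to AUDIO_CHANNEL_IN_LEFT | AUDIO_CHANNEL_IN_RIGHT |
+// AUDIO_CHANNEL_IN_TOP_LEFT | AUDIO_CHANNEL_IN_TOP_RIGHT.
+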
 ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
         const AudioChannelLayout& aidl, bool isInput) {
     using ReverseMap = std::unordered_map<AudioChannelLayout, audio_channel_mask_t>;
@@ -1020,7 +1101,7 @@
         if (auto it = m.find(aidl); it != m.end()) {
             return it->second;
         } else {
-            ALOGE("%s: no legacy %s audio_channel_mask_t found for %s", func, type,
+            ALOGW("%s: no legacy %s audio_channel_mask_t found for %s", func, type,
                     aidl.toString().c_str());
             return unexpected(BAD_VALUE);
         }
@@ -1031,10 +1112,10 @@
             return AUDIO_CHANNEL_NONE;
         case Tag::invalid:
             return AUDIO_CHANNEL_INVALID;
-        case Tag::indexMask: {
+        case Tag::indexMask:
             // Index masks do not have pre-defined values.
-            const int bits = aidl.get<Tag::indexMask>();
-            if (__builtin_popcount(bits) != 0 &&
+            if (const int bits = aidl.get<Tag::indexMask>();
+                    __builtin_popcount(bits) != 0 &&
                     __builtin_popcount(bits) <= AUDIO_CHANNEL_COUNT_MAX) {
                 return audio_channel_mask_from_representation_and_bits(
                         AUDIO_CHANNEL_REPRESENTATION_INDEX, bits);
@@ -1043,9 +1124,21 @@
                         __func__, bits, aidl.toString().c_str());
                 return unexpected(BAD_VALUE);
             }
-        }
         case Tag::layoutMask:
-            return convert(aidl, isInput ? mIn : mOut, __func__, isInput ? "input" : "output");
+            // The fast path is to find a direct match for some known layout mask.
+            if (const auto layoutMatch = convert(aidl, isInput ? mIn : mOut, __func__,
+                    isInput ? "input" : "output");
+                    layoutMatch.ok()) {
+                return layoutMatch;
+            }
+            // If a match for a predefined layout wasn't found, make a custom one from bits.
+            if (audio_channel_mask_t bitMask =
+                    aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+                            aidl.get<Tag::layoutMask>(), isInput);
+                    bitMask != AUDIO_CHANNEL_NONE) {
+                return bitMask;
+            }
+            return unexpected(BAD_VALUE);
         case Tag::voiceMask:
             return convert(aidl, mVoice, __func__, "voice");
     }
@@ -1053,6 +1146,25 @@
     return unexpected(BAD_VALUE);
 }
 
+int legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+        audio_channel_mask_t legacy, bool isInput) {
+    auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+    const int legacyInitial = legacy; // for error message
+    int aidlLayout = 0;
+    for (const auto& bitPair : bitMapping) {
+        if ((legacy & bitPair.first) == bitPair.first) {
+            aidlLayout |= bitPair.second;
+            legacy = static_cast<audio_channel_mask_t>(legacy & ~bitPair.first);
+            if (legacy == 0) {
+                return aidlLayout;
+            }
+        }
+    }
+    ALOGE("%s: legacy %s audio_channel_mask_t 0x%x contains unrecognized bits 0x%x",
+            __func__, isInput ? "input" : "output", legacyInitial, legacy);
+    return 0;
+}
+
 ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
         audio_channel_mask_t legacy, bool isInput) {
     using DirectMap = std::unordered_map<audio_channel_mask_t, AudioChannelLayout>;
@@ -1066,7 +1178,7 @@
         if (auto it = m.find(legacy); it != m.end()) {
             return it->second;
         } else {
-            ALOGE("%s: no AudioChannelLayout found for legacy %s audio_channel_mask_t value 0x%x",
+            ALOGW("%s: no AudioChannelLayout found for legacy %s audio_channel_mask_t value 0x%x",
                     func, type, legacy);
             return unexpected(BAD_VALUE);
         }
@@ -1089,8 +1201,27 @@
             return unexpected(BAD_VALUE);
         }
     } else if (repr == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
-        return convert(legacy, isInput ? mInAndVoice : mOut, __func__,
+        // The fast path is to find a direct match for some known layout mask.
+        if (const auto layoutMatch = convert(legacy, isInput ? mInAndVoice : mOut, __func__,
                 isInput ? "input / voice" : "output");
+                layoutMatch.ok()) {
+            return layoutMatch;
+        }
+        // If a match for a predefined layout wasn't found, make a custom one from bits,
+        // rejecting those with voice channel bits.
+        if (!isInput ||
+                (legacy & (AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK)) == 0) {
+            if (int bitMaskLayout =
+                    legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+                            legacy, isInput);
+                    bitMaskLayout != 0) {
+                return AudioChannelLayout::make<Tag::layoutMask>(bitMaskLayout);
+            }
+        } else {
+            ALOGE("%s: legacy audio_channel_mask_t value 0x%x contains voice bits",
+                    __func__, legacy);
+        }
+        return unexpected(BAD_VALUE);
     }
 
     ALOGE("%s: unknown representation %d in audio_channel_mask_t value 0x%x",
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 8cf22c4..9617556 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -254,8 +254,7 @@
         audio_channel_mask_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioTrackCallback> & callback,
         int32_t notificationFrames,
         audio_session_t sessionId,
         transfer_type transferType,
@@ -275,7 +274,85 @@
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
 
     (void)set(streamType, sampleRate, format, channelMask,
-            frameCount, flags, cbf, user, notificationFrames,
+            frameCount, flags, callback, notificationFrames,
+            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
+            attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+
+namespace {
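+    // Adapts the legacy (event, user, info) function-pointer callback to the
+    // IAudioTrackCallback interface: each override forwards to the stored legacy
+    // callback with the matching AudioTrack::EVENT_* code, and the data callbacks
+    // return the size the legacy callback left in the Buffer copy.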
+    class LegacyCallbackWrapper : public AudioTrack::IAudioTrackCallback {
+      const AudioTrack::legacy_callback_t mCallback;
+      void * const mData;
+      public:
+        LegacyCallbackWrapper(AudioTrack::legacy_callback_t callback, void* user)
+            : mCallback(callback), mData(user) {}
+        size_t onMoreData(const AudioTrack::Buffer& buffer) override {
+            AudioTrack::Buffer copy = buffer;
+            mCallback(AudioTrack::EVENT_MORE_DATA, mData, static_cast<void*>(&copy));
+            return copy.size;
+        }
+        void onUnderrun() override {
+            mCallback(AudioTrack::EVENT_UNDERRUN, mData, nullptr);
+        }
+        void onLoopEnd(int32_t loopsRemaining) override {
+            mCallback(AudioTrack::EVENT_LOOP_END, mData, &loopsRemaining);
+        }
+        void onMarker(uint32_t markerPosition) override {
+            mCallback(AudioTrack::EVENT_MARKER, mData, &markerPosition);
+        }
+        void onNewPos(uint32_t newPos) override {
+            mCallback(AudioTrack::EVENT_NEW_POS, mData, &newPos);
+        }
+        void onBufferEnd() override {
+            mCallback(AudioTrack::EVENT_BUFFER_END, mData, nullptr);
+        }
+        void onNewIAudioTrack() override {
+            mCallback(AudioTrack::EVENT_NEW_IAUDIOTRACK, mData, nullptr);
+        }
+        void onStreamEnd() override {
+            mCallback(AudioTrack::EVENT_STREAM_END, mData, nullptr);
+        }
+        size_t onCanWriteMoreData(const AudioTrack::Buffer& buffer) override {
+            AudioTrack::Buffer copy = buffer;
+            mCallback(AudioTrack::EVENT_CAN_WRITE_MORE_DATA, mData, static_cast<void*>(&copy));
+            return copy.size;
+        }
+    };
+}
+
+AudioTrack::AudioTrack(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        audio_output_flags_t flags,
+        legacy_callback_t callback,
+        void* user,
+        int32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed,
+        audio_port_handle_t selectedDeviceId)
+    : mStatus(NO_INIT),
+      mState(STATE_STOPPED),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0),
+      mAudioTrackCallback(new AudioTrackCallback())
+{
+    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    if (callback != nullptr) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    (void)set(streamType, sampleRate, format, channelMask,
+            frameCount, flags, mLegacyCallbackWrapper, notificationFrames,
             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
             attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
 }
@@ -287,8 +364,7 @@
         audio_channel_mask_t channelMask,
         const sp<IMemory>& sharedBuffer,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioTrackCallback>& callback,
         int32_t notificationFrames,
         audio_session_t sessionId,
         transfer_type transferType,
@@ -308,11 +384,49 @@
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
 
     (void)set(streamType, sampleRate, format, channelMask,
-            0 /*frameCount*/, flags, cbf, user, notificationFrames,
+            0 /*frameCount*/, flags, callback, notificationFrames,
             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
             attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed);
 }
 
+AudioTrack::AudioTrack(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        const sp<IMemory>& sharedBuffer,
+        audio_output_flags_t flags,
+        legacy_callback_t callback,
+        void* user,
+        int32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed)
+    : mStatus(NO_INIT),
+      mState(STATE_STOPPED),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mPausedPosition(0),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mAudioTrackCallback(new AudioTrackCallback())
+{
+    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+
+    (void)set(streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
+              mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
+              false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, attributionSource,
+              pAttributes, doNotReconnect, maxRequiredSpeed);
+}
+
 AudioTrack::~AudioTrack()
 {
     // pull together the numbers, before we clean up our structures
@@ -374,8 +488,38 @@
         audio_channel_mask_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
-        void* user,
+        legacy_callback_t callback,
+        void* user,
+        int32_t notificationFrames,
+        const sp<IMemory>& sharedBuffer,
+        bool threadCanCallJava,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo,
+        const AttributionSourceState& attributionSource,
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect,
+        float maxRequiredSpeed,
+        audio_port_handle_t selectedDeviceId)
+{
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(streamType, sampleRate, format, channelMask, frameCount, flags,
+               mLegacyCallbackWrapper, notificationFrames, sharedBuffer, threadCanCallJava,
+               sessionId, transferType, offloadInfo, attributionSource, pAttributes,
+               doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+
+status_t AudioTrack::set(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        audio_output_flags_t flags,
+        const wp<IAudioTrackCallback>& callback,
         int32_t notificationFrames,
         const sp<IMemory>& sharedBuffer,
         bool threadCanCallJava,
@@ -394,7 +538,7 @@
     pid_t myPid;
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
     pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
-
+    sp<IAudioTrackCallback> _callback = callback.promote();
     // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
@@ -410,7 +554,7 @@
     case TRANSFER_DEFAULT:
         if (sharedBuffer != 0) {
             transferType = TRANSFER_SHARED;
-        } else if (cbf == NULL || threadCanCallJava) {
+        } else if (_callback == nullptr || threadCanCallJava) {
             transferType = TRANSFER_SYNC;
         } else {
             transferType = TRANSFER_CALLBACK;
@@ -418,8 +562,8 @@
         break;
     case TRANSFER_CALLBACK:
     case TRANSFER_SYNC_NOTIF_CALLBACK:
-        if (cbf == NULL || sharedBuffer != 0) {
-            ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
+        if (_callback == nullptr || sharedBuffer != 0) {
+            ALOGE("%s(): Transfer type %s but callback == nullptr || sharedBuffer != 0",
                     convertTransferToText(transferType), __func__);
             status = BAD_VALUE;
             goto exit;
@@ -609,10 +753,10 @@
     }
     mAuxEffectId = 0;
     mOrigFlags = mFlags = flags;
-    mCbf = cbf;
+    mCallback = callback;
 
-    if (cbf != NULL) {
-        mAudioTrackThread = new AudioTrackThread(*this);
+    if (_callback != nullptr) {
+        mAudioTrackThread = sp<AudioTrackThread>::make(*this);
         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
         // thread begins in paused state, and will not reference us until start()
     }
@@ -631,7 +775,6 @@
         goto exit;
     }
 
-    mUserData = user;
     mLoopCount = 0;
     mLoopStart = 0;
     mLoopEnd = 0;
@@ -675,7 +818,7 @@
         uint32_t channelMask,
         size_t frameCount,
         audio_output_flags_t flags,
-        callback_t cbf,
+        legacy_callback_t callback,
         void* user,
         int32_t notificationFrames,
         const sp<IMemory>& sharedBuffer,
@@ -694,11 +837,15 @@
     attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(uid));
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(pid));
     attributionSource.token = sp<BBinder>::make();
-    return set(streamType, sampleRate, format,
-            static_cast<audio_channel_mask_t>(channelMask),
-            frameCount, flags, cbf, user, notificationFrames, sharedBuffer,
-            threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
-            pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+    if (callback) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(streamType, sampleRate, format, static_cast<audio_channel_mask_t>(channelMask),
+               frameCount, flags, mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
+               threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
+               pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
 }
 
 // -------------------------------------------------------------------------
@@ -1407,7 +1554,7 @@
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
     // The only purpose of setting marker position is to get a callback
-    if (mCbf == NULL || isOffloadedOrDirect()) {
+    if (!mCallback.promote() || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -1440,7 +1587,7 @@
 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
     // The only purpose of setting position update period is to get a callback
-    if (mCbf == NULL || isOffloadedOrDirect()) {
+    if (!mCallback.promote() || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -2209,10 +2356,14 @@
 {
     // Currently the AudioTrack thread is not created if there are no callbacks.
     // Would it ever make sense to run the thread, even without callbacks?
-    // If so, then replace this by checks at each use for mCbf != NULL.
+    // If so, then replace this with a check at each use that mCallback promotes to a non-null callback.
     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
-
     mLock.lock();
+    sp<IAudioTrackCallback> callback = mCallback.promote();
+    if (!callback) {
+        mCallback = nullptr;
+        // Don't hold mLock across the early return; all other exit paths unlock it.
+        mLock.unlock();
+        return NS_NEVER;
+    }
     if (mAwaitBoost) {
         mAwaitBoost = false;
         mLock.unlock();
@@ -2310,7 +2461,7 @@
     sp<AudioTrackClientProxy> proxy = mProxy;
 
     // Determine the number of new loop callback(s) that will be needed, while locked.
-    int loopCountNotifications = 0;
+    uint32_t loopCountNotifications = 0;
     uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
 
     if (mLoopCount > 0) {
@@ -2332,7 +2483,7 @@
     }
 
     // These fields don't need to be cached, because they are assigned only by set():
-    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
+    //     mTransfer, mCallback, mFormat, mFrameSize, mFlags
     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
 
     mLock.unlock();
@@ -2357,7 +2508,7 @@
             if (status != DEAD_OBJECT) {
                 // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
                 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
-                mCbf(EVENT_STREAM_END, mUserData, NULL);
+                callback->onStreamEnd();
             }
             {
                 AutoMutex lock(mLock);
@@ -2380,28 +2531,27 @@
 
     // perform callbacks while unlocked
     if (newUnderrun) {
-        mCbf(EVENT_UNDERRUN, mUserData, NULL);
+        callback->onUnderrun();
     }
     while (loopCountNotifications > 0) {
-        mCbf(EVENT_LOOP_END, mUserData, NULL);
         --loopCountNotifications;
+        callback->onLoopEnd(mLoopCount > 0 ? loopCountNotifications + mLoopCountNotified : -1);
     }
     if (flags & CBLK_BUFFER_END) {
-        mCbf(EVENT_BUFFER_END, mUserData, NULL);
+        callback->onBufferEnd();
     }
     if (markerReached) {
-        mCbf(EVENT_MARKER, mUserData, &markerPosition);
+        callback->onMarker(markerPosition.value());
     }
     while (newPosCount > 0) {
-        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
-        mCbf(EVENT_NEW_POS, mUserData, &temp);
+        callback->onNewPos(newPosition.value());
         newPosition += updatePeriod;
         newPosCount--;
     }
 
     if (mObservedSequence != sequence) {
         mObservedSequence = sequence;
-        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
+        callback->onNewIAudioTrack();
         // for offloaded tracks, just wait for the upper layers to recreate the track
         if (isOffloadedOrDirect()) {
             return NS_INACTIVE;
@@ -2539,10 +2689,9 @@
             // written in the next write() call, since it's not passed through the callback
             audioBuffer.size += nonContig;
         }
-        mCbf(mTransfer == TRANSFER_CALLBACK ? EVENT_MORE_DATA : EVENT_CAN_WRITE_MORE_DATA,
-                mUserData, &audioBuffer);
-        size_t writtenSize = audioBuffer.size;
-
+        const size_t writtenSize = (mTransfer == TRANSFER_CALLBACK)
+                                      ? callback->onMoreData(audioBuffer)
+                                      : callback->onCanWriteMoreData(audioBuffer);
         // Validate on returned size
         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
             ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
@@ -2602,6 +2751,9 @@
             return ns;
         }
 
+        // releaseBuffer reads from audioBuffer.size
+        audioBuffer.size = writtenSize;
+
         size_t releasedFrames = writtenSize / mFrameSize;
         audioBuffer.frameCount = releasedFrames;
         mRemainingFrames -= releasedFrames;
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 5ba9f91..1bc2c32 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -146,7 +146,51 @@
      *          - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
      */
 
-    typedef void (*callback_t)(int event, void* user, void *info);
+    typedef void (*legacy_callback_t)(int event, void* user, void* info);
+    class IAudioTrackCallback : public virtual RefBase {
+      friend AudioTrack;
+      protected:
+        // Request to write more data to buffer.
+        // This event only occurs for TRANSFER_CALLBACK.
+        // If this event is delivered but the callback handler does not want to write more data,
+        // the handler must ignore the event by returning zero.
+        // This might occur, for example, if the application is waiting for source data or is at
+        // the end of stream.
+        // For data filling, it is preferred that the callback does not block and instead returns
+        // a short count of the amount of data actually delivered.
+        // buffer: Buffer to fill
+        virtual size_t onMoreData(const AudioTrack::Buffer& buffer) { return buffer.size; }
+        // Buffer underrun occurred. This will not occur for static tracks.
+        virtual void onUnderrun() {}
+        // Sample loop end was reached; playback restarted from loop start if loop count was not 0
+        // for a static track.
+        // loopsRemaining: Number of loops remaining to be played. -1 if infinite looping.
+        virtual void onLoopEnd([[maybe_unused]] int32_t loopsRemaining) {}
+        // Playback head is at the specified marker (See setMarkerPosition()).
+        // markerPosition: Marker position in frames
+        virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
+        // Playback head is at a new position (See setPositionUpdatePeriod()).
+        // newPos: New position in frames
+        virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+        // Playback has completed for a static track.
+        virtual void onBufferEnd() {}
+        // IAudioTrack was re-created, either due to re-routing and voluntary invalidation
+        // by mediaserver, or mediaserver crash.
+        virtual void onNewIAudioTrack() {}
+        // Sent after all the buffers queued in AF and HW are played back (after stop is called)
+        // for an offloaded track.
+        virtual void onStreamEnd() {}
+        // Delivered periodically and when there's a significant change
+        // in the mapping from frame position to presentation time.
+        // See AudioTimestamp for the information included with event.
+        // TODO not yet implemented.
+        virtual void onNewTimestamp([[maybe_unused]] AudioTimestamp timestamp) {}
+        // Notification that more data can be given by write()
+        // This event only occurs for TRANSFER_SYNC_NOTIF_CALLBACK.
+        // Similar to onMoreData(), return the number of frames actually written
+        // buffer: Buffer to fill
+        virtual size_t onCanWriteMoreData(const AudioTrack::Buffer& buffer) { return buffer.size; }
+    };
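+    // Illustrative sketch only (not part of this header): a client may subclass the
+    // interface and pass a strong reference to the constructor/set() overloads that
+    // accept a callback. Names such as MyCallback and fillPcm() are hypothetical.
+    //
+    //   struct MyCallback : public AudioTrack::IAudioTrackCallback {
+    //       size_t onMoreData(const AudioTrack::Buffer& buffer) override {
+    //           return fillPcm(buffer);  // bytes actually written into the buffer
+    //       }
+    //       void onUnderrun() override { /* e.g. count underruns */ }
+    //   };
+    //   sp<MyCallback> cb = sp<MyCallback>::make();
+    //   sp<AudioTrack> track = sp<AudioTrack>::make(AUDIO_STREAM_MUSIC, 48000,
+    //           AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/,
+    //           AUDIO_OUTPUT_FLAG_NONE, cb);  // TRANSFER_DEFAULT resolves to TRANSFER_CALLBACK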
 
     /* Returns the minimum frame count required for the successful creation of
      * an AudioTrack object.
@@ -257,15 +301,34 @@
                                     audio_channel_mask_t channelMask,
                                     size_t frameCount    = 0,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                    callback_t cbf       = NULL,
-                                    void* user           = NULL,
+                                    const wp<IAudioTrackCallback>& callback = nullptr,
                                     int32_t notificationFrames = 0,
                                     audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
-                                    const audio_offload_info_t *offloadInfo = NULL,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
                                     const AttributionSourceState& attributionSource =
                                         AttributionSourceState(),
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    bool doNotReconnect = false,
+                                    float maxRequiredSpeed = 1.0f,
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
+
+                        AudioTrack( audio_stream_type_t streamType,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    size_t frameCount,
+                                    audio_output_flags_t flags,
+                                    legacy_callback_t cbf,
+                                    void* user = nullptr,
+                                    int32_t notificationFrames = 0,
+                                    audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
+                                    const AttributionSourceState& attributionSource =
+                                        AttributionSourceState(),
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     bool doNotReconnect = false,
                                     float maxRequiredSpeed = 1.0f,
                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
@@ -281,22 +344,39 @@
      * It is recommended to pass a callback function to be notified of playback end by an
      * EVENT_UNDERRUN event.
      */
-
                         AudioTrack( audio_stream_type_t streamType,
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
                                     const sp<IMemory>& sharedBuffer,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                    callback_t cbf      = NULL,
-                                    void* user          = NULL,
+                                    const wp<IAudioTrackCallback>& callback = nullptr,
                                     int32_t notificationFrames = 0,
                                     audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
-                                    const audio_offload_info_t *offloadInfo = NULL,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
                                     const AttributionSourceState& attributionSource =
                                         AttributionSourceState(),
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    bool doNotReconnect = false,
+                                    float maxRequiredSpeed = 1.0f);
+
+
+                        AudioTrack( audio_stream_type_t streamType,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    const sp<IMemory>& sharedBuffer,
+                                    audio_output_flags_t flags,
+                                    legacy_callback_t cbf,
+                                    void* user          = nullptr,
+                                    int32_t notificationFrames = 0,
+                                    audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = nullptr,
+                                    const AttributionSourceState& attributionSource =
+                                        AttributionSourceState(),
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     bool doNotReconnect = false,
                                     float maxRequiredSpeed = 1.0f);
 
@@ -334,20 +414,41 @@
                             audio_channel_mask_t channelMask,
                             size_t frameCount   = 0,
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                            callback_t cbf      = NULL,
-                            void* user          = NULL,
+                            const wp<IAudioTrackCallback>& callback = nullptr,
                             int32_t notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
-                            const audio_offload_info_t *offloadInfo = NULL,
+                            const audio_offload_info_t *offloadInfo = nullptr,
                             const AttributionSourceState& attributionSource =
                                 AttributionSourceState(),
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
                             bool doNotReconnect = false,
                             float maxRequiredSpeed = 1.0f,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
+            status_t    set(audio_stream_type_t streamType,
+                            uint32_t sampleRate,
+                            audio_format_t format,
+                            audio_channel_mask_t channelMask,
+                            size_t frameCount,
+                            audio_output_flags_t flags,
+                            legacy_callback_t callback,
+                            void* user = nullptr,
+                            int32_t notificationFrames = 0,
+                            const sp<IMemory>& sharedBuffer = 0,
+                            bool threadCanCallJava = false,
+                            audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            const audio_offload_info_t *offloadInfo = nullptr,
+                            const AttributionSourceState& attributionSource =
+                                AttributionSourceState(),
+                            const audio_attributes_t* pAttributes = nullptr,
+                            bool doNotReconnect = false,
+                            float maxRequiredSpeed = 1.0f,
+                            audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
     // FIXME(b/169889714): Vendor code depends on the old method signature at link time
             status_t    set(audio_stream_type_t streamType,
                             uint32_t sampleRate,
@@ -355,17 +456,17 @@
                             uint32_t channelMask,
                             size_t frameCount   = 0,
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                            callback_t cbf      = NULL,
-                            void* user          = NULL,
+                            legacy_callback_t cbf = nullptr,
+                            void* user          = nullptr,
                             int32_t notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
-                            const audio_offload_info_t *offloadInfo = NULL,
+                            const audio_offload_info_t *offloadInfo = nullptr,
                             uid_t uid = AUDIO_UID_INVALID,
                             pid_t pid = -1,
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
                             bool doNotReconnect = false,
                             float maxRequiredSpeed = 1.0f,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
@@ -1215,9 +1316,8 @@
     }
 
     // for client callback handler
-    callback_t              mCbf;                   // callback handler for events, or NULL
-    void*                   mUserData;
-
+    wp<IAudioTrackCallback> mCallback;                   // callback handler for events, or NULL
+    sp<IAudioTrackCallback> mLegacyCallbackWrapper;      // wrapper for legacy callback interface
     // for notification APIs
 
     // next 2 fields are const after constructor or set()
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
index f963e68..997f62a 100644
--- a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -48,6 +48,16 @@
             AudioChannelLayout::LAYOUT_STEREO);
 }
 
+AudioChannelLayout make_ACL_LayoutArbitrary() {
+    return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+            // Use channels that exist for both input and output,
+            // but do not form a known layout mask.
+            AudioChannelLayout::CHANNEL_FRONT_LEFT |
+            AudioChannelLayout::CHANNEL_FRONT_RIGHT |
+            AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT |
+            AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT);
+}
+
 AudioChannelLayout make_ACL_ChannelIndex2() {
     return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(
             AudioChannelLayout::INDEX_MASK_2);
@@ -168,7 +178,8 @@
 
 TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
     verifyHashIdentity<AudioChannelLayout>({
-            make_ACL_None, make_ACL_Invalid, make_ACL_Stereo, make_ACL_ChannelIndex2,
+            make_ACL_None, make_ACL_Invalid, make_ACL_Stereo,
+            make_ACL_LayoutArbitrary, make_ACL_ChannelIndex2,
             make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
 }
 
@@ -200,13 +211,52 @@
         AudioChannelLayoutRoundTripTest,
         testing::Combine(
                 testing::Values(AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
-                        make_ACL_ChannelIndex2(), make_ACL_ChannelIndexArbitrary()),
+                        make_ACL_LayoutArbitrary(), make_ACL_ChannelIndex2(),
+                        make_ACL_ChannelIndexArbitrary()),
                 testing::Values(false, true)));
 INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
         AudioChannelLayoutRoundTripTest,
         // In legacy constants the voice call is only defined for input.
         testing::Combine(testing::Values(make_ACL_VoiceCall()), testing::Values(true)));
 
+using ChannelLayoutEdgeCaseParam = std::tuple<int /*legacy*/, bool /*isInput*/, bool /*isValid*/>;
+class AudioChannelLayoutEdgeCaseTest :
+        public testing::TestWithParam<ChannelLayoutEdgeCaseParam> {};
+TEST_P(AudioChannelLayoutEdgeCaseTest, Legacy2Aidl) {
+    const audio_channel_mask_t legacy = static_cast<audio_channel_mask_t>(std::get<0>(GetParam()));
+    const bool isInput = std::get<1>(GetParam());
+    const bool isValid = std::get<2>(GetParam());
+    auto conv = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy, isInput);
+    EXPECT_EQ(isValid, conv.ok());
+}
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutEdgeCase,
+        AudioChannelLayoutEdgeCaseTest,
+        testing::Values(
+                // Valid legacy input masks.
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO, true, true),
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO, true, true),
+                std::make_tuple(AUDIO_CHANNEL_IN_VOICE_CALL_MONO, true, true),
+                // Valid legacy output masks.
+                std::make_tuple(
+                        // This has the same numerical representation as Mask 'A' below
+                        AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+                        AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT, false, true),
+                std::make_tuple(
+                        // This has the same numerical representation as Mask 'B' below
+                        AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+                        AUDIO_CHANNEL_OUT_TOP_BACK_LEFT, false, true),
+                // Invalid legacy input masks.
+                std::make_tuple(AUDIO_CHANNEL_IN_6, true, false),
+                std::make_tuple(
+                        AUDIO_CHANNEL_IN_6 | AUDIO_CHANNEL_IN_FRONT_PROCESSED, true, false),
+                std::make_tuple(
+                        AUDIO_CHANNEL_IN_PRESSURE | AUDIO_CHANNEL_IN_X_AXIS |
+                        AUDIO_CHANNEL_IN_Y_AXIS | AUDIO_CHANNEL_IN_Z_AXIS, true, false),
+                std::make_tuple(  // Mask 'A'
+                        AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_UPLINK, true, false),
+                std::make_tuple(  // Mask 'B'
+                        AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_DNLINK, true, false)));
+
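
The Mask 'A' / Mask 'B' comments above hinge on the fact that some legacy input and output channel masks share the same numeric value, which is why legacy2aidl_audio_channel_mask_t_AudioChannelLayout needs the isInput flag to tell them apart. A minimal illustration of that aliasing, assuming the standard <system/audio.h> channel constants used by the test (not part of the change):

    #include <system/audio.h>

    // Sketch only: the pairs below are the ones the test labels Mask 'A' and Mask 'B'.
    // Each input mask has the same bit pattern as the corresponding output mask, so
    // only the isInput flag can disambiguate them.
    static_assert((AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_UPLINK) ==
                  (AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
                   AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT),
                  "Mask 'A' aliases an output mask");
    static_assert((AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_DNLINK) ==
                  (AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
                   AUDIO_CHANNEL_OUT_TOP_BACK_LEFT),
                  "Mask 'B' aliases an output mask");
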
 class AudioDeviceDescriptionRoundTripTest :
         public testing::TestWithParam<AudioDeviceDescription> {};
 TEST_P(AudioDeviceDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index e68c002..e6fdb1d 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -108,15 +108,11 @@
 
     if (track->mHapticChannelCount > 0) {
         track->mAdjustInChannelCount = track->channelCount + track->mHapticChannelCount;
-        track->mAdjustOutChannelCount = track->channelCount + track->mMixerHapticChannelCount;
-        track->mAdjustNonDestructiveInChannelCount = track->mAdjustOutChannelCount;
-        track->mAdjustNonDestructiveOutChannelCount = track->channelCount;
+        track->mAdjustOutChannelCount = track->channelCount;
         track->mKeepContractedChannels = track->mHapticPlaybackEnabled;
     } else {
         track->mAdjustInChannelCount = 0;
         track->mAdjustOutChannelCount = 0;
-        track->mAdjustNonDestructiveInChannelCount = 0;
-        track->mAdjustNonDestructiveOutChannelCount = 0;
         track->mKeepContractedChannels = false;
     }
 
@@ -131,8 +127,7 @@
     // do it after downmix since track format may change!
     track->prepareForReformat();
 
-    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
-    track->prepareForAdjustChannels();
+    track->prepareForAdjustChannels(mFrameCount);
 
     // Resampler channels may have changed.
     track->recreateResampler(mSampleRate);
@@ -193,6 +188,24 @@
         // mDownmixerBufferProvider reset below.
     }
 
+    // See if we should use our built-in non-effect downmixer.
+    if (mMixerInFormat == AUDIO_FORMAT_PCM_FLOAT
+            && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO
+            && audio_channel_mask_get_representation(channelMask)
+                    == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+        mDownmixerBufferProvider.reset(new ChannelMixBufferProvider(channelMask,
+                mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
+        if (static_cast<ChannelMixBufferProvider *>(mDownmixerBufferProvider.get())
+                ->isValid()) {
+            mDownmixRequiresFormat = mMixerInFormat;
+            reconfigureBufferProviders();
+            ALOGD("%s: Fallback using ChannelMix", __func__);
+            return NO_ERROR;
+        } else {
+            ALOGD("%s: ChannelMix not supported for channel mask %#x", __func__, channelMask);
+        }
+    }
+
     // Effect downmixer does not accept the channel conversion.  Let's use our remixer.
     mDownmixerBufferProvider.reset(new RemixBufferProvider(channelMask,
             mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
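
Taken together with the RemixBufferProvider fallback just above, prepareForDownmix now tries the effect downmixer first (earlier in the function), then the new built-in ChannelMix path, and finally the remixer. A hypothetical helper (not in the change) naming the eligibility condition for the ChannelMix path:

    #include <system/audio.h>

    // Sketch only: the ChannelMix path is taken when mixing float into a stereo sink
    // and the track uses a positional channel mask.
    static bool canUseBuiltInChannelMix(audio_format_t mixerInFormat,
                                        audio_channel_mask_t mixerChannelMask,
                                        audio_channel_mask_t trackChannelMask) {
        return mixerInFormat == AUDIO_FORMAT_PCM_FLOAT
                && mixerChannelMask == AUDIO_CHANNEL_OUT_STEREO
                && audio_channel_mask_get_representation(trackChannelMask)
                        == AUDIO_CHANNEL_REPRESENTATION_POSITION;
    }
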
@@ -265,48 +278,20 @@
     }
 }
 
-status_t AudioMixer::Track::prepareForAdjustChannels()
+status_t AudioMixer::Track::prepareForAdjustChannels(size_t frames)
 {
     ALOGV("AudioMixer::prepareForAdjustChannels(%p) with inChannelCount: %u, outChannelCount: %u",
             this, mAdjustInChannelCount, mAdjustOutChannelCount);
     unprepareForAdjustChannels();
     if (mAdjustInChannelCount != mAdjustOutChannelCount) {
-        mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
-                mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, kCopyBufferFrameCount));
-        reconfigureBufferProviders();
-    }
-    return NO_ERROR;
-}
-
-void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
-{
-    ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
-    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
-        reconfigureBufferProviders();
-    }
-}
-
-status_t AudioMixer::Track::prepareForAdjustChannelsNonDestructive(size_t frames)
-{
-    ALOGV("AudioMixer::prepareForAdjustChannelsNonDestructive(%p) with inChannelCount: %u, "
-          "outChannelCount: %u, keepContractedChannels: %d",
-            this, mAdjustNonDestructiveInChannelCount, mAdjustNonDestructiveOutChannelCount,
-            mKeepContractedChannels);
-    unprepareForAdjustChannelsNonDestructive();
-    if (mAdjustNonDestructiveInChannelCount != mAdjustNonDestructiveOutChannelCount) {
         uint8_t* buffer = mKeepContractedChannels
                 ? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
                         mMixerChannelCount, mMixerFormat)
-                : NULL;
-        mContractChannelsNonDestructiveBufferProvider.reset(
-                new AdjustChannelsBufferProvider(
-                        mFormat,
-                        mAdjustNonDestructiveInChannelCount,
-                        mAdjustNonDestructiveOutChannelCount,
-                        frames,
-                        mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
-                        buffer));
+                : nullptr;
+        mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
+                mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, frames,
+                mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
+                buffer, mMixerHapticChannelCount));
         reconfigureBufferProviders();
     }
     return NO_ERROR;
@@ -314,9 +299,9 @@
 
 void AudioMixer::Track::clearContractedBuffer()
 {
-    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+    if (mAdjustChannelsBufferProvider.get() != nullptr) {
         static_cast<AdjustChannelsBufferProvider*>(
-                mContractChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+                mAdjustChannelsBufferProvider.get())->clearContractedFrames();
     }
 }
 
@@ -328,10 +313,6 @@
         mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
         bufferProvider = mAdjustChannelsBufferProvider.get();
     }
-    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        mContractChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
-        bufferProvider = mContractChannelsNonDestructiveBufferProvider.get();
-    }
     if (mReformatBufferProvider.get() != nullptr) {
         mReformatBufferProvider->setBufferProvider(bufferProvider);
         bufferProvider = mReformatBufferProvider.get();
@@ -377,7 +358,7 @@
                 track->mainBuffer = valueBuf;
                 ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
                 if (track->mKeepContractedChannels) {
-                    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+                    track->prepareForAdjustChannels(mFrameCount);
                 }
                 invalidate();
             }
@@ -405,7 +386,7 @@
                 track->mMixerFormat = format;
                 ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
                 if (track->mKeepContractedChannels) {
-                    track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+                    track->prepareForAdjustChannels(mFrameCount);
                 }
             }
             } break;
@@ -424,8 +405,7 @@
             if (track->mHapticPlaybackEnabled != hapticPlaybackEnabled) {
                 track->mHapticPlaybackEnabled = hapticPlaybackEnabled;
                 track->mKeepContractedChannels = hapticPlaybackEnabled;
-                track->prepareForAdjustChannelsNonDestructive(mFrameCount);
-                track->prepareForAdjustChannels();
+                track->prepareForAdjustChannels(mFrameCount);
             }
             } break;
         case HAPTIC_INTENSITY: {
@@ -518,8 +498,6 @@
         track->mDownmixerBufferProvider->reset();
     } else if (track->mReformatBufferProvider.get() != nullptr) {
         track->mReformatBufferProvider->reset();
-    } else if (track->mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        track->mContractChannelsNonDestructiveBufferProvider->reset();
     } else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
         track->mAdjustChannelsBufferProvider->reset();
     }
@@ -563,9 +541,7 @@
     t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
     t->mMixerHapticChannelCount = 0;
     t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
-    t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
-    t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
-    t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+    t->mAdjustOutChannelCount = t->channelCount;
     t->mKeepContractedChannels = false;
     // Check the downmixing (or upmixing) requirements.
     status_t status = t->prepareForDownmix();
@@ -576,8 +552,7 @@
     // prepareForDownmix() may change mDownmixRequiresFormat
     ALOGVV("mMixerFormat:%#x  mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
     t->prepareForReformat();
-    t->prepareForAdjustChannelsNonDestructive(mFrameCount);
-    t->prepareForAdjustChannels();
+    t->prepareForAdjustChannels(mFrameCount);
     return OK;
 }
 
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index 6d31c12..4658db8 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -364,6 +364,29 @@
             src, mInputChannels, mIdxAry, mSampleSize, frames);
 }
 
+ChannelMixBufferProvider::ChannelMixBufferProvider(audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(inputChannelMask),
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(outputChannelMask),
+                bufferFrameCount)
+{
+    ALOGV("ChannelMixBufferProvider(%p)(%#x, %#x, %#x)",
+            this, format, inputChannelMask, outputChannelMask);
+    if (outputChannelMask == AUDIO_CHANNEL_OUT_STEREO && format == AUDIO_FORMAT_PCM_FLOAT) {
+        mIsValid = mChannelMix.setInputChannelMask(inputChannelMask);
+    }
+}
+
+void ChannelMixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    mChannelMix.process(static_cast<const float *>(src), static_cast<float *>(dst),
+            frames, false /* accumulate */);
+}
+
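
For context, the new provider wraps audio_utils::channels::ChannelMix behind the CopyBufferProvider interface and is only marked valid for float stereo output with a supported input mask. A minimal construction sketch, assuming only the constructor and isValid() declared in BufferProviders.h (the 512-frame copy size is an arbitrary value for illustration):

    #include <media/BufferProviders.h>
    #include <system/audio.h>

    // Sketch only: build a 5.1 -> stereo float downmix provider and check that the
    // input mask is supported before wiring it into a provider chain.
    static bool makeStereoFloatDownmix() {
        android::ChannelMixBufferProvider provider(
                AUDIO_CHANNEL_OUT_5POINT1,  /* input (track) channel mask */
                AUDIO_CHANNEL_OUT_STEREO,   /* output (mixer) channel mask */
                AUDIO_FORMAT_PCM_FLOAT,     /* only float is accepted */
                512 /* bufferFrameCount, arbitrary for illustration */);
        return provider.isValid();  // false if ChannelMix cannot handle the input mask
    }
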
 ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
         audio_format_t inputFormat, audio_format_t outputFormat,
         size_t bufferFrameCount) :
@@ -630,7 +653,8 @@
 
 AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(
         audio_format_t format, size_t inChannelCount, size_t outChannelCount,
-        size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer) :
+        size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer,
+        size_t contractedOutChannelCount) :
         CopyBufferProvider(
                 audio_bytes_per_frame(inChannelCount, format),
                 audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
@@ -640,15 +664,22 @@
         mOutChannelCount(outChannelCount),
         mSampleSizeInBytes(audio_bytes_per_sample(format)),
         mFrameCount(frameCount),
-        mContractedChannelCount(inChannelCount - outChannelCount),
-        mContractedFormat(contractedFormat),
+        mContractedFormat(inChannelCount > outChannelCount
+                ? contractedFormat : AUDIO_FORMAT_INVALID),
+        mContractedInChannelCount(inChannelCount > outChannelCount
+                ? inChannelCount - outChannelCount : 0),
+        mContractedOutChannelCount(contractedOutChannelCount),
+        mContractedSampleSizeInBytes(audio_bytes_per_sample(contractedFormat)),
+        mContractedInputFrameSize(mContractedInChannelCount * mContractedSampleSizeInBytes),
         mContractedBuffer(contractedBuffer),
         mContractedWrittenFrames(0)
 {
-    ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p)", this, format,
-            inChannelCount, outChannelCount, frameCount, contractedFormat, contractedBuffer);
+    ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p, %zu)",
+          this, format, inChannelCount, outChannelCount, frameCount, contractedFormat,
+          contractedBuffer, contractedOutChannelCount);
     if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
-        mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
+        mContractedOutputFrameSize =
+                audio_bytes_per_frame(mContractedOutChannelCount, mContractedFormat);
     }
 }
 
@@ -667,25 +698,39 @@
 
 void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
 {
-    if (mInChannelCount > mOutChannelCount) {
-        // For case multi to mono, adjust_channels has special logic that will mix first two input
-        // channels into a single output channel. In that case, use adjust_channels_non_destructive
-        // to keep only one channel data even when contracting to mono.
-        adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
-                mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
-        if (mContractedFormat != AUDIO_FORMAT_INVALID
-            && mContractedBuffer != nullptr) {
-            const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+    // For the multi-to-mono case, adjust_channels has special logic that mixes the first two
+    // input channels into a single output channel. In that case, use
+    // adjust_channels_non_destructive to keep only one channel's data even when contracting
+    // to mono.
+    adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
+            mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+    if (mContractedFormat != AUDIO_FORMAT_INVALID
+        && mContractedBuffer != nullptr) {
+        const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+        uint8_t* oriBuf = (uint8_t*) dst + contractedIdx;
+        uint8_t* buf = (uint8_t*) mContractedBuffer
+                + mContractedWrittenFrames * mContractedOutputFrameSize;
+        if (mContractedInChannelCount > mContractedOutChannelCount) {
+            // Adjust the channels first as the contracted buffer may not have enough
+            // space for the data.
+            // Use adjust_channels_non_destructive to avoid mixing the first two channels into a
+            // single output channel in the multi-to-mono case.
+            adjust_channels_non_destructive(
+                    oriBuf, mContractedInChannelCount, oriBuf, mContractedOutChannelCount,
+                    mSampleSizeInBytes, frames * mContractedInChannelCount * mSampleSizeInBytes);
             memcpy_by_audio_format(
-                    (uint8_t*) mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
-                    mContractedFormat, (uint8_t*) dst + contractedIdx, mFormat,
-                    mContractedChannelCount * frames);
-            mContractedWrittenFrames += frames;
+                    buf, mContractedFormat, oriBuf, mFormat, mContractedOutChannelCount * frames);
+        } else {
+            // Copy the data first as the dst buffer may not have enough space for the extra channels.
+            memcpy_by_audio_format(
+                buf, mContractedFormat, oriBuf, mFormat, mContractedInChannelCount * frames);
+            // Note that if the contracted data goes from mono to multichannel, the single input
+            // channel is duplicated into the first 2 output channels and all the other channels
+            // are 0-filled.
+            adjust_channels(
+                    buf, mContractedInChannelCount, buf, mContractedOutChannelCount,
+                    mContractedSampleSizeInBytes, mContractedInputFrameSize * frames);
         }
-    } else {
-        // Prefer expanding data from the end of each audio frame.
-        adjust_channels(src, mInChannelCount, dst, mOutChannelCount,
-                mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+        mContractedWrittenFrames += frames;
     }
 }
 
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
index 5a9fa07..2993a60 100644
--- a/media/libaudioprocessing/include/media/AudioMixer.h
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -80,7 +80,6 @@
             mPostDownmixReformatBufferProvider.reset(nullptr);
             mDownmixerBufferProvider.reset(nullptr);
             mReformatBufferProvider.reset(nullptr);
-            mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
             mAdjustChannelsBufferProvider.reset(nullptr);
         }
 
@@ -95,10 +94,8 @@
         void        unprepareForDownmix();
         status_t    prepareForReformat();
         void        unprepareForReformat();
-        status_t    prepareForAdjustChannels();
+        status_t    prepareForAdjustChannels(size_t frames);
         void        unprepareForAdjustChannels();
-        status_t    prepareForAdjustChannelsNonDestructive(size_t frames);
-        void        unprepareForAdjustChannelsNonDestructive();
         void        clearContractedBuffer();
         bool        setPlaybackRate(const AudioPlaybackRate &playbackRate);
         void        reconfigureBufferProviders();
@@ -114,24 +111,18 @@
          * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
          *    channel format to another. Expanded channels are filled with zeros and put at the end
          *    of each audio frame. Contracted channels are copied to the end of the buffer.
-         * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
-         *    This is currently using at audio-haptic coupled playback to separate audio and haptic
-         *    data. Contracted channels could be written to given buffer.
-         * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
+         * 3) mReformatBufferProvider: If not NULL, performs the audio reformat to
          *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
          *    requires reformat. For example, it may convert floating point input to
          *    PCM_16_bit if that's required by the downmixer.
-         * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+         * 4) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
          *    the number of channels required by the mixer sink.
-         * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+         * 5) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
          *    the downmixer requirements to the mixer engine input requirements.
-         * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
+         * 6) mTimestretchBufferProvider: Adds timestretching for playback rate
          */
         AudioBufferProvider* mInputBufferProvider;    // externally provided buffer provider.
-        // TODO: combine mAdjustChannelsBufferProvider and
-        // mContractChannelsNonDestructiveBufferProvider
         std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
@@ -153,8 +144,6 @@
         uint32_t             mMixerHapticChannelCount;
         uint32_t             mAdjustInChannelCount;
         uint32_t             mAdjustOutChannelCount;
-        uint32_t             mAdjustNonDestructiveInChannelCount;
-        uint32_t             mAdjustNonDestructiveOutChannelCount;
         bool                 mKeepContractedChannels;
     };
 
diff --git a/media/libaudioprocessing/include/media/BufferProviders.h b/media/libaudioprocessing/include/media/BufferProviders.h
index b038854..b3ab8a5 100644
--- a/media/libaudioprocessing/include/media/BufferProviders.h
+++ b/media/libaudioprocessing/include/media/BufferProviders.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <sys/types.h>
 
+#include <audio_utils/ChannelMix.h>
 #include <media/AudioBufferProvider.h>
 #include <media/AudioResamplerPublic.h>
 #include <system/audio.h>
@@ -129,6 +130,23 @@
     static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
 };
 
+// ChannelMixBufferProvider derives from CopyBufferProvider to perform an
+// downmix to the proper channel count and mask.
+class ChannelMixBufferProvider : public CopyBufferProvider {
+public:
+    ChannelMixBufferProvider(audio_channel_mask_t inputChannelMask,
+            audio_channel_mask_t outputChannelMask, audio_format_t format,
+            size_t bufferFrameCount);
+
+    void copyFrames(void *dst, const void *src, size_t frames) override;
+
+    bool isValid() const { return mIsValid; }
+
+protected:
+    audio_utils::channels::ChannelMix mChannelMix;
+    bool mIsValid = false;
+};
+
 // RemixBufferProvider derives from CopyBufferProvider to perform an
 // upmix or downmix to the proper channel count and mask.
 class RemixBufferProvider : public CopyBufferProvider {
@@ -223,17 +241,22 @@
 // Extra expanded channels are filled with zeros and put at the end of each audio frame.
 // Contracted channels are copied to the end of the output buffer(storage should be
 // allocated appropriately).
-// Contracted channels could be written to output buffer.
+// Contracted channels can be written to the output buffer and adjusted. When the contracted
+// channels are adjusted in the contracted buffer, the input channel count is calculated as
+// `inChannelCount - outChannelCount`, and the output channel count is provided by the caller
+// as `contractedOutChannelCount`. Currently, adjusting contracted channels is used for audio
+// coupled haptic playback. If the device supports two haptic channels while the app provides
+// only one, the second haptic channel is filled with a copy of the first haptic channel's
+// data. If the device supports a single haptic channel while the app provides two, the second
+// channel is contracted.
 class AdjustChannelsBufferProvider : public CopyBufferProvider {
 public:
-    AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
-            size_t outChannelCount, size_t frameCount) : AdjustChannelsBufferProvider(
-                    format, inChannelCount, outChannelCount,
-                    frameCount, AUDIO_FORMAT_INVALID, nullptr) { }
     // Contracted data is converted to contractedFormat and put into contractedBuffer.
     AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
-            size_t outChannelCount, size_t frameCount, audio_format_t contractedFormat,
-            void* contractedBuffer);
+            size_t outChannelCount, size_t frameCount,
+            audio_format_t contractedFormat = AUDIO_FORMAT_INVALID,
+            void* contractedBuffer = nullptr,
+            size_t contractedOutChannelCount = 0);
+    // Overrides
     status_t getNextBuffer(Buffer* pBuffer) override;
     void copyFrames(void *dst, const void *src, size_t frames) override;
@@ -247,11 +270,14 @@
     const size_t         mOutChannelCount;
     const size_t         mSampleSizeInBytes;
     const size_t         mFrameCount;
-    const size_t         mContractedChannelCount;
     const audio_format_t mContractedFormat;
+    const size_t         mContractedInChannelCount;
+    const size_t         mContractedOutChannelCount;
+    const size_t         mContractedSampleSizeInBytes;
+    const size_t         mContractedInputFrameSize; // contracted input frame size
     void                *mContractedBuffer;
     size_t               mContractedWrittenFrames;
-    size_t               mContractedFrameSize;
+    size_t               mContractedOutputFrameSize; // contracted output frame size
 };
 // ----------------------------------------------------------------------------
 } // namespace android
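
As a concrete reading of the comment above: for audio-coupled haptic playback of a stereo track carrying one haptic channel on a device that exposes two haptic channels, AudioMixer would construct the provider roughly with inChannelCount = 3 (2 audio + 1 haptic), outChannelCount = 2, and contractedOutChannelCount = 2, so the single contracted haptic channel is duplicated into the second device haptic channel. A minimal sketch under those assumptions (the separate hapticBuffer and frame count are illustrative only; AudioMixer actually points into the tail of its main buffer):

    #include <media/BufferProviders.h>
    #include <system/audio.h>
    #include <vector>

    // Sketch only: stereo audio + 1 haptic channel from the app, 2 haptic channels on
    // the device. The provider strips the haptic channel from the mixed audio path and
    // writes it, duplicated to 2 channels, into hapticBuffer.
    constexpr size_t kFrames = 256;                // illustrative frame count
    std::vector<float> hapticBuffer(kFrames * 2);  // room for 2 device haptic channels
    android::AdjustChannelsBufferProvider provider(
            AUDIO_FORMAT_PCM_FLOAT,
            3 /* inChannelCount: 2 audio + 1 haptic */,
            2 /* outChannelCount: audio only */,
            kFrames,
            AUDIO_FORMAT_PCM_FLOAT /* contractedFormat */,
            hapticBuffer.data()    /* contractedBuffer */,
            2 /* contractedOutChannelCount: device haptic channels */);
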
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index 771dfea..55aa86b 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -318,18 +318,23 @@
                 }
                 size_t cpLen = min(in_buf->range_length(), in_buffer->capacity());
                 memcpy(in_buffer->base(), (uint8_t *)in_buf->data() + in_buf->range_offset(),
-                        cpLen );
+                        cpLen);
 
                 if (mIsVorbis) {
                     int32_t numPageSamples;
                     if (!in_buf->meta_data().findInt32(kKeyValidSamples, &numPageSamples)) {
                         numPageSamples = -1;
                     }
-                    memcpy(in_buffer->base() + cpLen, &numPageSamples, sizeof(numPageSamples));
+                    if (cpLen + sizeof(numPageSamples) <= in_buffer->capacity()) {
+                        memcpy(in_buffer->base() + cpLen, &numPageSamples, sizeof(numPageSamples));
+                        cpLen += sizeof(numPageSamples);
+                    } else {
+                        ALOGW("Didn't have enough space to copy kKeyValidSamples");
+                    }
                 }
 
                 res = mCodec->queueInputBuffer(
-                        in_ix, 0 /* offset */, in_buf->range_length() + (mIsVorbis ? 4 : 0),
+                        in_ix, 0 /* offset */, cpLen,
                         timestampUs, 0 /* flags */);
                 if (res != OK) {
                     ALOGI("[%s] failed to queue input buffer #%zu", mComponentName.c_str(), in_ix);
diff --git a/media/libstagefright/tests/fuzzers/Android.bp b/media/libstagefright/tests/fuzzers/Android.bp
index ea17a4d..250ffb9 100644
--- a/media/libstagefright/tests/fuzzers/Android.bp
+++ b/media/libstagefright/tests/fuzzers/Android.bp
@@ -74,6 +74,7 @@
     srcs: [
         "FrameDecoderFuzzer.cpp",
     ],
+    corpus: ["corpus/*"],
     defaults: ["libstagefright_fuzzer_defaults"],
 }
 
diff --git a/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp b/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
index c251479..b346718 100644
--- a/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
+++ b/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
@@ -46,12 +46,15 @@
     }
 
     while (fdp.remaining_bytes()) {
-        switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 3)) {
-            case 0:
-                decoder->init(/*frameTimeUs*/ fdp.ConsumeIntegral<int64_t>(),
-                              /*option*/ fdp.ConsumeIntegral<int>(),
-                              /*colorFormat*/ fdp.ConsumeIntegral<int>());
+        uint8_t switchCase = fdp.ConsumeIntegralInRange<uint8_t>(0, 3);
+        switch (switchCase) {
+            case 0: {
+                int64_t frameTimeUs = fdp.ConsumeIntegral<int64_t>();
+                int option = fdp.ConsumeIntegral<int>();
+                int colorFormat = fdp.ConsumeIntegral<int>();
+                decoder->init(frameTimeUs, option, colorFormat);
                 break;
+            }
             case 1:
                 decoder->extractFrame();
                 break;
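
The FrameDecoderFuzzer change above pulls each ConsumeIntegral call into a named local before invoking init(). This is most likely about determinism: C++ does not specify the evaluation order of function arguments, so consuming fuzzer bytes inside an argument list can read them in a compiler-dependent order and make corpus inputs non-reproducible. A minimal illustration of the pattern, assuming LLVM's FuzzedDataProvider header and a hypothetical consume() stand-in for FrameDecoder::init:

    #include <cstdint>
    #include <fuzzer/FuzzedDataProvider.h>

    void consume(int64_t, int, int);  // hypothetical stand-in for decoder->init()

    void deterministicConsumption(FuzzedDataProvider& fdp) {
        // Unordered: the three Consume* calls below could run in any order.
        //   consume(fdp.ConsumeIntegral<int64_t>(), fdp.ConsumeIntegral<int>(),
        //           fdp.ConsumeIntegral<int>());
        // Ordered: name the values first so bytes are always consumed left to right.
        const int64_t frameTimeUs = fdp.ConsumeIntegral<int64_t>();
        const int option = fdp.ConsumeIntegral<int>();
        const int colorFormat = fdp.ConsumeIntegral<int>();
        consume(frameTimeUs, option, colorFormat);
    }
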
diff --git a/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat b/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat
new file mode 100644
index 0000000..698e21d
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat
Binary files differ
diff --git a/media/mtp/OWNERS b/media/mtp/OWNERS
index 1928ba8..54d3d4a 100644
--- a/media/mtp/OWNERS
+++ b/media/mtp/OWNERS
@@ -1,6 +1,5 @@
 set noparent
 
-marcone@google.com
 jsharkey@android.com
 jameswei@google.com
 rmojumder@google.com
diff --git a/media/ndk/OWNERS b/media/ndk/OWNERS
index 9dc441e..83644f0 100644
--- a/media/ndk/OWNERS
+++ b/media/ndk/OWNERS
@@ -1,3 +1,4 @@
-marcone@google.com
+essick@google.com
+lajos@google.com
 # For AImage/AImageReader
 include platform/frameworks/av:/camera/OWNERS
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 9c7b863..9620d24 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -71,6 +71,7 @@
   switch (source) {
     case AUDIO_SOURCE_HOTWORD:
       return AppOpsManager::OP_RECORD_AUDIO_HOTWORD;
+    case AUDIO_SOURCE_ECHO_REFERENCE: // fallthrough
     case AUDIO_SOURCE_REMOTE_SUBMIX:
       return AppOpsManager::OP_RECORD_AUDIO_OUTPUT;
     case AUDIO_SOURCE_VOICE_DOWNLINK:
diff --git a/services/OWNERS b/services/OWNERS
index f0b5e2f..17e605d 100644
--- a/services/OWNERS
+++ b/services/OWNERS
@@ -1,9 +1,6 @@
-chz@google.com
 elaurent@google.com
 essick@google.com
 etalvala@google.com
-gkasten@google.com
 hunga@google.com
-marcone@google.com
 nchalko@google.com
 quxiangfang@google.com
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index fc34d95..26bd92d 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -89,6 +89,7 @@
     // TODO: Add channel mask to NBAIO_Format.
     // We assume that the channel mask must be a valid positional channel mask.
     mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
+    mBalance.setChannelMask(mSinkChannelMask);
 
     unsigned i;
     for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 49d5cd8..425c50d 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -137,6 +137,7 @@
 static constexpr int32_t kVendorClientState = ActivityManager::PROCESS_STATE_PERSISTENT_UI;
 
 const String8 CameraService::kOfflineDevice("offline-");
+const String16 CameraService::kWatchAllClientsFlag("all");
 
 // Set to keep track of logged service error events.
 static std::set<String8> sServiceErrorEventSet;
@@ -560,6 +561,13 @@
     onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
 }
 
+
+void CameraService::onTorchStatusChanged(const String8& cameraId,
+        TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
+    Mutex::Autolock al(mTorchStatusMutex);
+    onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
+}
+
 void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
         TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
     ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
@@ -1788,7 +1796,8 @@
         LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
                 __FUNCTION__);
 
-        err = client->initialize(mCameraProviderManager, mMonitorTags);
+        String8 monitorTags = isClientWatched(client.get()) ? mMonitorTags : String8("");
+        err = client->initialize(mCameraProviderManager, monitorTags);
         if (err != OK) {
             ALOGE("%s: Could not initialize client from HAL.", __FUNCTION__);
             // Errors could be from the HAL module open call or from AppOpsManager
@@ -1954,7 +1963,8 @@
             return BAD_VALUE;
         }
 
-        auto err = offlineClient->initialize(mCameraProviderManager, mMonitorTags);
+        String8 monitorTags = isClientWatched(offlineClient.get()) ? mMonitorTags : String8("");
+        auto err = offlineClient->initialize(mCameraProviderManager, monitorTags);
         if (err != OK) {
             ALOGE("%s: Could not initialize offline client.", __FUNCTION__);
             return err;
@@ -2620,6 +2630,7 @@
     for (auto& i : mActiveClientManager.getAll()) {
         auto clientSp = i->getValue();
         if (clientSp.get() == client) {
+            cacheClientTagDumpIfNeeded(client->mCameraIdStr, clientSp.get());
             mActiveClientManager.remove(i);
         }
     }
@@ -2696,7 +2707,11 @@
         return sp<BasicClient>{nullptr};
     }
 
-    return clientDescriptorPtr->getValue();
+    sp<BasicClient> client = clientDescriptorPtr->getValue();
+    if (client.get() != nullptr) {
+        cacheClientTagDumpIfNeeded(clientDescriptorPtr->getKey(), client.get());
+    }
+    return client;
 }
 
 void CameraService::doUserSwitch(const std::vector<int32_t>& newUserIds) {
@@ -3115,6 +3130,21 @@
     return OK;
 }
 
+status_t CameraService::BasicClient::startWatchingTags(const String8&, int) {
+    // Can't watch tags directly, must go through CameraService::startWatchingTags
+    return OK;
+}
+
+status_t CameraService::BasicClient::stopWatchingTags(int) {
+    // Can't watch tags directly, must go through CameraService::stopWatchingTags
+    return OK;
+}
+
+status_t CameraService::BasicClient::dumpWatchedEventsToVector(std::vector<std::string> &) {
+    // Can't watch tags directly, must go through CameraService::dumpWatchedEventsToVector
+    return OK;
+}
+
 String16 CameraService::BasicClient::getPackageName() const {
     return mClientPackageName;
 }
@@ -4136,7 +4166,7 @@
 
     // Dump camera traces if there were any
     dprintf(fd, "\n");
-    camera3::CameraTraces::dump(fd, args);
+    camera3::CameraTraces::dump(fd);
 
     // Process dump arguments, if any
     int n = args.size();
@@ -4230,6 +4260,30 @@
     dprintf(fd, "\n");
 }
 
+void CameraService::cacheClientTagDumpIfNeeded(const char *cameraId, BasicClient* client) {
+    Mutex::Autolock lock(mLogLock);
+    if (!isClientWatchedLocked(client)) { return; }
+
+    std::vector<std::string> dumpVector;
+    client->dumpWatchedEventsToVector(dumpVector);
+
+    if (dumpVector.empty()) { return; }
+
+    std::string dumpString;
+    size_t i = dumpVector.size();
+
+    // Store the string in reverse order (latest last)
+    while (i > 0) {
+         i--;
+         dumpString += cameraId;
+         dumpString += ":";
+         dumpString += dumpVector[i]; // implicitly ends with '\n'
+    }
+
+    const String16 &packageName = client->getPackageName();
+    mWatchedClientsDumpCache[packageName] = dumpString;
+}
+
 void CameraService::handleTorchClientBinderDied(const wp<IBinder> &who) {
     Mutex::Autolock al(mTorchClientMapMutex);
     for (size_t i = 0; i < mTorchClientMap.size(); i++) {
@@ -4534,9 +4588,11 @@
         return handleGetImageDumpMask(out);
     } else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
         return handleSetCameraMute(args);
+    } else if (args.size() >= 2 && args[0] == String16("watch")) {
+        return handleWatchCommand(args, out);
     } else if (args.size() == 1 && args[0] == String16("help")) {
         printHelp(out);
-        return NO_ERROR;
+        return OK;
     }
     printHelp(err);
     return BAD_VALUE;
@@ -4680,6 +4736,233 @@
     return OK;
 }
 
+status_t CameraService::handleWatchCommand(const Vector<String16>& args, int outFd) {
+    if (args.size() >= 3 && args[1] == String16("start")) {
+        return startWatchingTags(args, outFd);
+    } else if (args.size() == 2 && args[1] == String16("dump")) {
+        return camera3::CameraTraces::dump(outFd);
+    } else if (args.size() == 2 && args[1] == String16("stop")) {
+        return stopWatchingTags(outFd);
+    } else if (args.size() >= 2 && args[1] == String16("print")) {
+        return printWatchedTags(args, outFd);
+    }
+    dprintf(outFd, "Camera service watch commands:\n"
+                 "  start -m <comma_separated_tag_list> [-c <comma_separated_client_list>]\n"
+                 "        starts watching the provided tags for clients with provided package\n"
+                 "        recognizes tag shorthands like '3a'\n"
+                 "        watches all clients if no client is passed, or if 'all' is listed\n"
+                 "  dump dumps camera trace\n"
+                 "  stop stops watching all tags\n"
+                 "  print [-n <refresh_interval_ms>]\n"
+                 "        prints the monitored information in real time\n"
+                 "        Hit Ctrl+C to exit\n");
+    return BAD_VALUE;
+}
+
+status_t CameraService::startWatchingTags(const Vector<String16> &args, int outFd) {
+    Mutex::Autolock lock(mLogLock);
+    size_t tagsIdx; // index of '-m'
+    String16 tags("");
+    for (tagsIdx = 2; tagsIdx < args.size() && args[tagsIdx] != String16("-m"); tagsIdx++);
+    if (tagsIdx < args.size() - 1) {
+        tags = args[tagsIdx + 1];
+    } else {
+        dprintf(outFd, "No tags provided.\n");
+        return BAD_VALUE;
+    }
+
+    size_t clientsIdx; // index of '-c'
+    String16 clients = kWatchAllClientsFlag; // watch all clients if no clients are provided
+    for (clientsIdx = 2; clientsIdx < args.size() && args[clientsIdx] != String16("-c");
+         clientsIdx++);
+    if (clientsIdx < args.size() - 1) {
+        clients = args[clientsIdx + 1];
+    }
+    parseClientsToWatchLocked(String8(clients));
+
+    // track tags to initialize future clients with the monitoring information
+    mMonitorTags = String8(tags);
+
+    bool serviceLock = tryLock(mServiceLock);
+    int numWatchedClients = 0;
+    auto cameraClients = mActiveClientManager.getAll();
+    for (const auto &clientDescriptor: cameraClients) {
+        if (clientDescriptor == nullptr) { continue; }
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+
+        if (isClientWatchedLocked(client.get())) {
+            client->startWatchingTags(mMonitorTags, outFd);
+            numWatchedClients++;
+        }
+    }
+    dprintf(outFd, "Started watching %d active clients\n", numWatchedClients);
+
+    if (serviceLock) { mServiceLock.unlock(); }
+    return OK;
+}
+
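
startWatchingTags() above and printWatchedTags() below both scan the argument vector for a flag ('-m', '-c', '-n') and take the following argument as its value. A hypothetical helper (not part of the change) showing that pattern in isolation:

    #include <utils/String16.h>
    #include <utils/Vector.h>

    // Sketch only: returns true and writes the value that follows `flag`, e.g.
    // findFlagValue(args, String16("-m"), &tags) for the command "watch start -m 3a".
    static bool findFlagValue(const android::Vector<android::String16>& args,
                              const android::String16& flag,
                              android::String16* value) {
        // args[0] is "watch" and args[1] is the subcommand, so flags start at index 2.
        for (size_t i = 2; i + 1 < args.size(); i++) {
            if (args[i] == flag) {
                *value = args[i + 1];
                return true;
            }
        }
        return false;
    }
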
+status_t CameraService::stopWatchingTags(int outFd) {
+    // clear mMonitorTags to prevent new clients from monitoring tags at initialization
+    Mutex::Autolock lock(mLogLock);
+    mMonitorTags = String8::empty();
+
+    mWatchedClientPackages.clear();
+    mWatchedClientsDumpCache.clear();
+
+    bool serviceLock = tryLock(mServiceLock);
+    auto cameraClients = mActiveClientManager.getAll();
+    for (const auto &clientDescriptor : cameraClients) {
+        if (clientDescriptor == nullptr) { continue; }
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+        client->stopWatchingTags(outFd);
+    }
+    dprintf(outFd, "Stopped watching all clients.\n");
+    if (serviceLock) { mServiceLock.unlock(); }
+    return OK;
+}
+
+status_t CameraService::printWatchedTags(const Vector<String16> &args, int outFd) {
+    // Figure out the refresh interval, if present in args
+    useconds_t refreshTimeoutMs = 1000; // refresh every 1s by default
+    if (args.size() > 2) {
+        size_t intervalIdx; // index of '-n'
+        for (intervalIdx = 2; intervalIdx < args.size() && String16("-n") != args[intervalIdx];
+             intervalIdx++);
+
+        size_t intervalValIdx = intervalIdx + 1;
+        if (intervalValIdx < args.size()) {
+            errno = 0; // reset so the check below only reflects this strtol call
+            refreshTimeoutMs = strtol(String8(args[intervalValIdx].string()), nullptr, 10);
+            if (errno) { return BAD_VALUE; }
+        }
+    }
+
+    mLogLock.lock();
+    bool serviceLock = tryLock(mServiceLock);
+    // get all watched clients that are currently connected
+    std::set<String16> connectedMonitoredClients;
+    for (const auto &clientDescriptor: mActiveClientManager.getAll()) {
+        if (clientDescriptor == nullptr) { continue; }
+
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+        if (!isClientWatchedLocked(client.get())) { continue; }
+
+        connectedMonitoredClients.emplace(client->getPackageName());
+    }
+    if (serviceLock) { mServiceLock.unlock(); }
+
+    // Print entries in mWatchedClientsDumpCache for clients that are not connected
+    for (const auto &kv: mWatchedClientsDumpCache) {
+        const String16 &package = kv.first;
+        if (connectedMonitoredClients.find(package) != connectedMonitoredClients.end()) {
+            continue;
+        }
+
+        dprintf(outFd, "Client: %s\n", String8(package).string());
+        dprintf(outFd, "%s\n", kv.second.c_str());
+    }
+    mLogLock.unlock();
+
+    if (connectedMonitoredClients.empty()) {
+        dprintf(outFd, "No watched client active.\n");
+        return OK;
+    }
+
+    // For connected watched clients, print monitored tags live
+    return printWatchedTagsUntilInterrupt(refreshTimeoutMs * 1000 /* ms to us */, outFd);
+}
+
+status_t CameraService::printWatchedTagsUntilInterrupt(useconds_t refreshMicros, int outFd) {
+    std::unordered_map<std::string, std::string> cameraToLastEvent;
+    auto cameraClients = mActiveClientManager.getAll();
+
+    if (cameraClients.empty()) {
+        dprintf(outFd, "No clients connected.\n");
+        return OK;
+    }
+
+    dprintf(outFd, "Press Ctrl + C to exit...\n\n");
+    while (true) {
+        for (const auto& clientDescriptor : cameraClients) {
+            Mutex::Autolock lock(mLogLock);
+            if (clientDescriptor == nullptr) { continue; }
+            const char* cameraId = clientDescriptor->getKey().string();
+
+            // This also initializes the map entries with an empty string
+            const std::string& lastPrintedEvent = cameraToLastEvent[cameraId];
+
+            sp<BasicClient> client = clientDescriptor->getValue();
+            if (client.get() == nullptr) { continue; }
+            if (!isClientWatchedLocked(client.get())) { continue; }
+
+            std::vector<std::string> latestEvents;
+            client->dumpWatchedEventsToVector(latestEvents);
+
+            if (!latestEvents.empty()) {
+                printNewWatchedEvents(outFd,
+                                      cameraId,
+                                      client->getPackageName(),
+                                      latestEvents,
+                                      lastPrintedEvent);
+                cameraToLastEvent[cameraId] = latestEvents[0];
+            }
+        }
+        usleep(refreshMicros);  // refreshMicros is already in microseconds
+    }
+    return OK;
+}
+
+void CameraService::printNewWatchedEvents(int outFd,
+                                          const char *cameraId,
+                                          const String16 &packageName,
+                                          const std::vector<std::string> &events,
+                                          const std::string &lastPrintedEvent) {
+    if (events.empty()) { return; }
+
+    // index of lastPrintedEvent in events.
+    // lastPrintedIdx = events.size() if lastPrintedEvent is not in events
+    size_t lastPrintedIdx;
+    for (lastPrintedIdx = 0;
+         lastPrintedIdx < events.size() && lastPrintedEvent != events[lastPrintedIdx];
+         lastPrintedIdx++);
+
+    if (lastPrintedIdx == 0) { return; } // early exit if no new event in `events`
+
+    const char *printPackageName = String8(packageName).string();
+    // print events in chronological order (latest event last)
+    size_t idxToPrint = lastPrintedIdx;
+    do {
+        idxToPrint--;
+        dprintf(outFd, "%s:%s  %s", cameraId, printPackageName, events[idxToPrint].c_str());
+    } while (idxToPrint != 0);
+}
+
+void CameraService::parseClientsToWatchLocked(String8 clients) {
+    mWatchedClientPackages.clear();
+
+    const char *allSentinel = String8(kWatchAllClientsFlag).string();
+
+    char *tokenized = clients.lockBuffer(clients.size());
+    char *savePtr;
+    char *nextClient = strtok_r(tokenized, ",", &savePtr);
+
+    while (nextClient != nullptr) {
+        if (strcmp(nextClient, allSentinel) == 0) {
+            // Don't need to track any other package if 'all' is present
+            mWatchedClientPackages.clear();
+            mWatchedClientPackages.emplace(kWatchAllClientsFlag);
+            break;
+        }
+
+        // track package names
+        mWatchedClientPackages.emplace(nextClient);
+        nextClient = strtok_r(nullptr, ",", &savePtr);
+    }
+    clients.unlockBuffer();
+}
+
 status_t CameraService::printHelp(int out) {
     return dprintf(out, "Camera service commands:\n"
         "  get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
@@ -4692,9 +4975,20 @@
         "      Valid values 0=OFF, 1=ON for JPEG\n"
         "  get-image-dump-mask returns the current image-dump-mask value\n"
         "  set-camera-mute <0/1> enable or disable camera muting\n"
+        "  watch <start|stop|dump|live> manages tag monitoring in connected clients\n"
         "  help print this message\n");
 }
 
+bool CameraService::isClientWatched(const BasicClient *client) {
+    Mutex::Autolock lock(mLogLock);
+    return isClientWatchedLocked(client);
+}
+
+bool CameraService::isClientWatchedLocked(const BasicClient *client) {
+    return mWatchedClientPackages.find(kWatchAllClientsFlag) != mWatchedClientPackages.end() ||
+           mWatchedClientPackages.find(client->getPackageName()) != mWatchedClientPackages.end();
+}
+
 int32_t CameraService::updateAudioRestriction() {
     Mutex::Autolock lock(mServiceLock);
     return updateAudioRestrictionLocked();
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 7a69123..cb26763 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -110,8 +110,16 @@
     virtual void        onDeviceStatusChanged(const String8 &cameraId,
             const String8 &physicalCameraId,
             hardware::camera::common::V1_0::CameraDeviceStatus newHalStatus) override;
+    // This method may hold CameraProviderManager::mInterfaceMutex as a part
+    // of calling getSystemCameraKind() internally. Care should be taken not to
+    // directly / indirectly call this from callers who also hold
+    // mInterfaceMutex.
     virtual void        onTorchStatusChanged(const String8& cameraId,
             hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
+    // Does not hold CameraProviderManager::mInterfaceMutex.
+    virtual void        onTorchStatusChanged(const String8& cameraId,
+            hardware::camera::common::V1_0::TorchModeStatus newStatus,
+            SystemCameraKind kind) override;
     virtual void        onNewProviderRegistered() override;
 
     /////////////////////////////////////////////////////////////////////
@@ -272,6 +280,10 @@
         // Internal dump method to be called by CameraService
         virtual status_t dumpClient(int fd, const Vector<String16>& args) = 0;
 
+        virtual status_t startWatchingTags(const String8 &tags, int outFd);
+        virtual status_t stopWatchingTags(int outFd);
+        virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out);
+
         // Return the package name for this client
         virtual String16 getPackageName() const;
 
@@ -825,6 +837,14 @@
     RingBuffer<String8> mEventLog;
     Mutex mLogLock;
 
+    // Set of client package names to watch. If this set contains 'all', then all clients will
+    // be watched. Access should be guarded by mLogLock.
+    std::set<String16> mWatchedClientPackages;
+    // Cache of the last monitored tags dump immediately before the client disconnects. If a client
+    // re-connects, its entry is not updated until it disconnects again. Access should be guarded
+    // by mLogLock.
+    std::map<String16, std::string> mWatchedClientsDumpCache;
+
     // The last monitored tags set by client
     String8 mMonitorTags;
 
@@ -957,6 +977,8 @@
      */
     void dumpEventLog(int fd);
 
+    void cacheClientTagDumpIfNeeded(const char *cameraId, BasicClient *client);
+
     /**
      * This method will acquire mServiceLock
      */
@@ -1149,9 +1171,47 @@
     // Set the camera mute state
     status_t handleSetCameraMute(const Vector<String16>& args);
 
+    // Handle 'watch' command as passed through 'cmd'
+    status_t handleWatchCommand(const Vector<String16> &args, int outFd);
+
+    // Enable tag monitoring of the given tags in provided clients
+    status_t startWatchingTags(const Vector<String16> &args, int outFd);
+
+    // Disable tag monitoring
+    status_t stopWatchingTags(int outFd);
+
+    // Print events of monitored tags in all cached and attached clients
+    status_t printWatchedTags(const Vector<String16> &args, int outFd);
+
+    // Print events of monitored tags in all attached clients as they are captured. New events are
+    // fetched every `refreshMicros` us
+    // NOTE: This function does not terminate unless interrupted.
+    status_t printWatchedTagsUntilInterrupt(useconds_t refreshMicros, int outFd);
+
+    // Print all events in vector `events' that came after lastPrintedEvent
+    static void printNewWatchedEvents(int outFd,
+                                      const char *cameraId,
+                                      const String16 &packageName,
+                                      const std::vector<std::string> &events,
+                                      const std::string &lastPrintedEvent);
+
+    // Parses comma separated clients list and adds them to mWatchedClientPackages.
+    // Does not acquire mLogLock before modifying mWatchedClientPackages. It is the caller's
+    // responsibility to acquire mLogLock before calling this function.
+    void parseClientsToWatchLocked(String8 clients);
+
     // Prints the shell command help
     status_t printHelp(int out);
 
+    // Returns true if client should monitor tags based on the contents of mWatchedClientPackages.
+    // Acquires mLogLock before querying mWatchedClientPackages.
+    bool isClientWatched(const BasicClient *client);
+
+    // Returns true if client should monitor tags based on the contents of mWatchedClientPackages.
+    // Does not acquire mLogLock before querying mWatchedClientPackages. It is the caller's
+    // responsibility to acquire mLogLock before calling this function.
+    bool isClientWatchedLocked(const BasicClient *client);
+
     /**
      * Get the current system time as a formatted string.
      */
@@ -1182,6 +1242,10 @@
     // Use separate keys for offline devices.
     static const String8 kOfflineDevice;
 
+    // Sentinel value to be stored in `mWatchedClientPackages` to indicate that all clients should
+    // be watched.
+    static const String16 kWatchAllClientsFlag;
+
     // TODO: right now each BasicClient holds one AppOpsManager instance.
     // We can refactor the code so all of clients share this instance
     AppOpsManager mAppOps;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 1f3d478..67ec150 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1797,6 +1797,35 @@
     return dumpDevice(fd, args);
 }
 
+status_t CameraDeviceClient::startWatchingTags(const String8 &tags, int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached.");
+        return OK;
+    }
+    device->startWatchingTags(tags);
+    return OK;
+}
+
+status_t CameraDeviceClient::stopWatchingTags(int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached.");
+        return OK;
+    }
+    device->stopWatchingTags();
+    return OK;
+}
+
+status_t CameraDeviceClient::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        return OK;
+    }
+    device->dumpWatchedEventsToVector(out);
+    return OK;
+}
+
 void CameraDeviceClient::notifyError(int32_t errorCode,
                                      const CaptureResultExtras& resultExtras) {
     // Thread safe. Don't bother locking.
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 76b3f53..288f2d7 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -199,6 +199,10 @@
 
     virtual status_t      dumpClient(int fd, const Vector<String16>& args);
 
+    virtual status_t      startWatchingTags(const String8 &tags, int out);
+    virtual status_t      stopWatchingTags(int out);
+    virtual status_t      dumpWatchedEventsToVector(std::vector<std::string> &out);
+
     /**
      * Device listener interface
      */
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 652842b..10fa33f 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -110,6 +110,18 @@
     return OK;
 }
 
+status_t CameraOfflineSessionClient::startWatchingTags(const String8 &tags, int outFd) {
+    return BasicClient::startWatchingTags(tags, outFd);
+}
+
+status_t CameraOfflineSessionClient::stopWatchingTags(int outFd) {
+    return BasicClient::stopWatchingTags(outFd);
+}
+
+status_t CameraOfflineSessionClient::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    return BasicClient::dumpWatchedEventsToVector(out);
+}
+
 binder::Status CameraOfflineSessionClient::disconnect() {
     Mutex::Autolock icl(mBinderSerializationLock);
 
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index b5238b8..920a176 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -71,6 +71,10 @@
 
     status_t dumpClient(int /*fd*/, const Vector<String16>& /*args*/) override;
 
+    status_t startWatchingTags(const String8 &tags, int outFd) override;
+    status_t stopWatchingTags(int outFd) override;
+    status_t dumpWatchedEventsToVector(std::vector<std::string> &out) override;
+
     status_t initialize(sp<CameraProviderManager> /*manager*/,
             const String8& /*monitorTags*/) override;
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 1147e23..0283d01 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -155,6 +155,38 @@
 }
 
 template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::startWatchingTags(const String8 &tags, int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached");
+        return OK;
+    }
+
+    return device->startWatchingTags(tags);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::stopWatchingTags(int out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        dprintf(out, "  Device is detached");
+        return OK;
+    }
+
+    return device->stopWatchingTags();
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    sp<CameraDeviceBase> device = mDevice;
+    if (!device) {
+        // Nothing to dump if the device is detached
+        return OK;
+    }
+    return device->dumpWatchedEventsToVector(out);
+}
+
+template <typename TClientBase>
 status_t Camera2ClientBase<TClientBase>::dumpDevice(
                                                 int fd,
                                                 const Vector<String16>& args) {
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index b593bfa..ba6a230 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -61,6 +61,9 @@
 
     virtual status_t      initialize(sp<CameraProviderManager> manager, const String8& monitorTags);
     virtual status_t      dumpClient(int fd, const Vector<String16>& args);
+    virtual status_t      startWatchingTags(const String8 &tags, int out);
+    virtual status_t      stopWatchingTags(int out);
+    virtual status_t      dumpWatchedEventsToVector(std::vector<std::string> &out);
 
     /**
      * NotificationListener implementation
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 3c95ed3..8168821 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -97,6 +97,9 @@
     virtual status_t disconnect() = 0;
 
     virtual status_t dump(int fd, const Vector<String16> &args) = 0;
+    virtual status_t startWatchingTags(const String8 &tags) = 0;
+    virtual status_t stopWatchingTags() = 0;
+    virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out) = 0;
 
     /**
      * The physical camera device's static characteristics metadata buffer
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index b10a4c4..75b2f80 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -1960,16 +1960,19 @@
         const hardware::hidl_string& cameraDeviceName,
         TorchModeStatus newStatus) {
     sp<StatusListener> listener;
+    SystemCameraKind systemCameraKind = SystemCameraKind::PUBLIC;
     std::string id;
+    bool known = false;
     {
-        std::lock_guard<std::mutex> lock(mManager->mStatusListenerMutex);
-        bool known = false;
+        // Hold mLock for accessing mDevices
+        std::lock_guard<std::mutex> lock(mLock);
         for (auto& deviceInfo : mDevices) {
             if (deviceInfo->mName == cameraDeviceName) {
                 ALOGI("Camera device %s torch status is now %s", cameraDeviceName.c_str(),
                         torchStatusToString(newStatus));
                 id = deviceInfo->mId;
                 known = true;
+                systemCameraKind = deviceInfo->mSystemCameraKind;
                 if (TorchModeStatus::AVAILABLE_ON != newStatus) {
                     mManager->removeRef(DeviceMode::TORCH, id);
                 }
@@ -1981,11 +1984,19 @@
                     mProviderName.c_str(), cameraDeviceName.c_str(), newStatus);
             return hardware::Void();
         }
+        // No lock needed: the listener is set up only once during
+        // CameraProviderManager initialization and is never changed until the
+        // manager is destroyed.
         listener = mManager->getStatusListener();
-    }
+    }
     // Call without lock held to allow reentrancy into provider manager
+    // The problem with holding mLock here is that we might be limiting
+    // re-entrancy: CameraService::onTorchStatusChanged calls back into
+    // CameraProviderManager, which may try to acquire mLock again (e.g.
+    // findDeviceInfo, which holds mLock while iterating through each
+    // provider's devices).
     if (listener != nullptr) {
-        listener->onTorchStatusChanged(String8(id.c_str()), newStatus);
+        listener->onTorchStatusChanged(String8(id.c_str()), newStatus, systemCameraKind);
     }
     return hardware::Void();
 }
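
An illustrative, std-only sketch of the locking pattern the change above relies on (not the actual provider code): snapshot everything needed while holding the lock, then invoke the listener with no lock held so the callback may re-enter and take the same lock safely. The struct and member names here are placeholders.

#include <functional>
#include <mutex>
#include <string>

struct TorchNotifier {
    std::mutex mLock;
    std::string mId;                                         // guarded by mLock
    std::function<void(const std::string&, int)> mListener;  // set once at init, never changed

    void onTorchStatusChanged(int newStatus) {
        std::string id;
        std::function<void(const std::string&, int)> listener;
        {
            std::lock_guard<std::mutex> lock(mLock);
            id = mId;              // snapshot state under the lock
            listener = mListener;  // safe to copy: set once and never changed after init
        }
        // Called without mLock held; the listener may call back into this object.
        if (listener) listener(id, newStatus);
    }
};
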
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index e3763a1..fdb2673 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -155,6 +155,9 @@
                 const String8 &physicalCameraId,
                 hardware::camera::common::V1_0::CameraDeviceStatus newStatus) = 0;
         virtual void onTorchStatusChanged(const String8 &cameraId,
+                hardware::camera::common::V1_0::TorchModeStatus newStatus,
+                SystemCameraKind kind) = 0;
+        virtual void onTorchStatusChanged(const String8 &cameraId,
                 hardware::camera::common::V1_0::TorchModeStatus newStatus) = 0;
         virtual void onNewProviderRegistered() = 0;
     };
@@ -329,8 +332,6 @@
     // All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
     mutable std::mutex mInterfaceMutex;
 
-    // the status listener update callbacks will lock mStatusMutex
-    mutable std::mutex mStatusListenerMutex;
     wp<StatusListener> mListener;
     ServiceInteractionProxy* mServiceProxy;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 67f99bf..8925b15 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -848,6 +848,21 @@
     return OK;
 }
 
+status_t Camera3Device::startWatchingTags(const String8 &tags) {
+    mTagMonitor.parseTagsToMonitor(tags);
+    return OK;
+}
+
+status_t Camera3Device::stopWatchingTags() {
+    mTagMonitor.disableMonitoring();
+    return OK;
+}
+
+status_t Camera3Device::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+    mTagMonitor.getLatestMonitoredTagEvents(out);
+    return OK;
+}
+
 const CameraMetadata& Camera3Device::infoPhysical(const String8& physicalId) const {
     ALOGVV("%s: E", __FUNCTION__);
     if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 4ffdc5f..f2394ad 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -106,6 +106,9 @@
     status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags) override;
     status_t disconnect() override;
     status_t dump(int fd, const Vector<String16> &args) override;
+    status_t startWatchingTags(const String8 &tags) override;
+    status_t stopWatchingTags() override;
+    status_t dumpWatchedEventsToVector(std::vector<std::string> &out) override;
     const CameraMetadata& info() const override;
     const CameraMetadata& infoPhysical(const String8& physicalId) const override;
 
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index a74fd9d..c8a6b32 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -274,6 +274,8 @@
             hardware::camera::common::V1_0::CameraDeviceStatus) override {}
     void onTorchStatusChanged(const String8 &,
             hardware::camera::common::V1_0::TorchModeStatus) override {}
+    void onTorchStatusChanged(const String8 &,
+            hardware::camera::common::V1_0::TorchModeStatus, SystemCameraKind) override {}
     void onNewProviderRegistered() override {}
 };
 
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
index 0198690..0cd4f5d 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.cpp
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -64,7 +64,7 @@
     ATRACE_END();
 }
 
-status_t CameraTraces::dump(int fd, const Vector<String16> &args __attribute__((unused))) {
+status_t CameraTraces::dump(int fd) {
     ALOGV("%s: fd = %d", __FUNCTION__, fd);
     Mutex::Autolock al(sImpl.tracesLock);
     List<ProcessCallStack>& pcsList = sImpl.pcsList;
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
index 13ca16d..71fa334 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.h
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -42,7 +42,7 @@
      *
      * <p>Each line is indented by DUMP_INDENT spaces.</p>
      */
-    static status_t dump(int fd, const Vector<String16>& args);
+    static status_t dump(int fd);
 
 private:
     enum {
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index 4488098..461f5e9 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -239,38 +239,59 @@
     } else {
         dprintf(fd, "     Tag monitoring disabled (enable with -m <name1,..,nameN>)\n");
     }
-    if (mMonitoringEvents.size() > 0) {
-        dprintf(fd, "     Monitored tag event log:\n");
-        for (const auto& event : mMonitoringEvents) {
-            int indentation = (event.source == REQUEST) ? 15 : 30;
-            dprintf(fd, "        f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
-                    event.frameNumber, event.timestamp,
-                    2, event.cameraId.c_str(),
-                    indentation,
-                    event.source == REQUEST ? "REQ:" : "RES:",
-                    get_local_camera_metadata_section_name_vendor_id(event.tag,
-                            mVendorTagId),
-                    get_local_camera_metadata_tag_name_vendor_id(event.tag,
-                            mVendorTagId));
-            if (event.newData.size() == 0) {
-                dprintf(fd, " (Removed)\n");
-            } else {
-                printData(fd, event.newData.data(), event.tag,
-                        event.type, event.newData.size() / camera_metadata_type_size[event.type],
-                        indentation + 18, event.outputStreamIds, event.inputStreamId);
-            }
-        }
-    }
 
+    if (mMonitoringEvents.size() == 0) { return; }
+
+    dprintf(fd, "     Monitored tag event log:\n");
+
+    std::vector<std::string> eventStrs;
+    dumpMonitoredTagEventsToVectorLocked(eventStrs);
+    for (const std::string &eventStr : eventStrs) {
+        dprintf(fd, "        %s", eventStr.c_str());
+    }
 }
 
-// TODO: Consolidate with printData from camera_metadata.h
+void TagMonitor::getLatestMonitoredTagEvents(std::vector<std::string> &out) {
+    std::lock_guard<std::mutex> lock(mMonitorMutex);
+    dumpMonitoredTagEventsToVectorLocked(out);
+}
+
+void TagMonitor::dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &vec) {
+    if (mMonitoringEvents.size() == 0) { return; }
+
+    for (const auto& event : mMonitoringEvents) {
+        int indentation = (event.source == REQUEST) ? 15 : 30;
+        String8 eventString = String8::format("f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
+                event.frameNumber, event.timestamp,
+                2, event.cameraId.c_str(),
+                indentation,
+                event.source == REQUEST ? "REQ:" : "RES:",
+                get_local_camera_metadata_section_name_vendor_id(event.tag, mVendorTagId),
+                get_local_camera_metadata_tag_name_vendor_id(event.tag, mVendorTagId));
+        if (event.newData.size() == 0) {
+            eventString += " (Removed)\n";
+        } else {
+            eventString += getEventDataString(event.newData.data(),
+                                    event.tag,
+                                    event.type,
+                                    event.newData.size() / camera_metadata_type_size[event.type],
+                                    indentation + 18,
+                                    event.outputStreamIds,
+                                    event.inputStreamId);
+        }
+        vec.emplace_back(eventString.string());
+    }
+}
 
 #define CAMERA_METADATA_ENUM_STRING_MAX_SIZE 29
 
-void TagMonitor::printData(int fd, const uint8_t *data_ptr, uint32_t tag,
-        int type, int count, int indentation, const std::unordered_set<int32_t> &outputStreamIds,
-        int32_t inputStreamId) {
+String8 TagMonitor::getEventDataString(const uint8_t* data_ptr,
+                                    uint32_t tag,
+                                    int type,
+                                    int count,
+                                    int indentation,
+                                    const std::unordered_set<int32_t>& outputStreamIds,
+                                    int32_t inputStreamId) {
     static int values_per_line[NUM_TYPES] = {
         [TYPE_BYTE]     = 16,
         [TYPE_INT32]    = 8,
@@ -279,6 +300,7 @@
         [TYPE_DOUBLE]   = 4,
         [TYPE_RATIONAL] = 4,
     };
+
     size_t type_size = camera_metadata_type_size[type];
     char value_string_tmp[CAMERA_METADATA_ENUM_STRING_MAX_SIZE];
     uint32_t value;
@@ -286,10 +308,11 @@
     int lines = count / values_per_line[type];
     if (count % values_per_line[type] != 0) lines++;
 
+    String8 returnStr = String8();
     int index = 0;
     int j, k;
     for (j = 0; j < lines; j++) {
-        dprintf(fd, "%*s[", (j != 0) ? indentation + 4 : 0, "");
+        returnStr.appendFormat("%*s[", (j != 0) ? indentation + 4 : 0, "");
         for (k = 0;
              k < values_per_line[type] && count > 0;
              k++, count--, index += type_size) {
@@ -302,10 +325,9 @@
                                                      value_string_tmp,
                                                      sizeof(value_string_tmp))
                         == OK) {
-                        dprintf(fd, "%s ", value_string_tmp);
+                        returnStr.appendFormat("%s ", value_string_tmp);
                     } else {
-                        dprintf(fd, "%hhu ",
-                                *(data_ptr + index));
+                        returnStr.appendFormat("%hhu", *(data_ptr + index));
                     }
                     break;
                 case TYPE_INT32:
@@ -316,47 +338,43 @@
                                                      value_string_tmp,
                                                      sizeof(value_string_tmp))
                         == OK) {
-                        dprintf(fd, "%s ", value_string_tmp);
+                        returnStr.appendFormat("%s ", value_string_tmp);
                     } else {
-                        dprintf(fd, "%" PRId32 " ",
-                                *(int32_t*)(data_ptr + index));
+                        returnStr.appendFormat("%" PRId32 " ", *(int32_t*)(data_ptr + index));
                     }
                     break;
                 case TYPE_FLOAT:
-                    dprintf(fd, "%0.8f ",
-                            *(float*)(data_ptr + index));
+                    returnStr.appendFormat("%0.8f", *(float*)(data_ptr + index));
                     break;
                 case TYPE_INT64:
-                    dprintf(fd, "%" PRId64 " ",
-                            *(int64_t*)(data_ptr + index));
+                    returnStr.appendFormat("%" PRId64 " ", *(int64_t*)(data_ptr + index));
                     break;
                 case TYPE_DOUBLE:
-                    dprintf(fd, "%0.8f ",
-                            *(double*)(data_ptr + index));
+                    returnStr.appendFormat("%0.8f ", *(double*)(data_ptr + index));
                     break;
                 case TYPE_RATIONAL: {
                     int32_t numerator = *(int32_t*)(data_ptr + index);
                     int32_t denominator = *(int32_t*)(data_ptr + index + 4);
-                    dprintf(fd, "(%d / %d) ",
-                            numerator, denominator);
+                    returnStr.appendFormat("(%d / %d) ", numerator, denominator);
                     break;
                 }
                 default:
-                    dprintf(fd, "??? ");
+                    returnStr += "??? ";
             }
         }
-        dprintf(fd, "] ");
-        if (outputStreamIds.size() > 0) {
-            dprintf(fd, "output stream ids: ");
+        returnStr += "] ";
+        if (!outputStreamIds.empty()) {
+            returnStr += "output stream ids: ";
             for (const auto &id : outputStreamIds) {
-                dprintf(fd, " %d ", id);
+                returnStr.appendFormat(" %d ", id);
             }
         }
         if (inputStreamId != -1) {
-            dprintf(fd, "input stream id: %d", inputStreamId);
+            returnStr.appendFormat("input stream id: %d", inputStreamId);
         }
-        dprintf(fd, "\n");
+        returnStr += "\n";
     }
+    return returnStr;
 }
 
 template<typename T>
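
A small, std-only sketch of the design choice behind the refactor above: format each event into a string once, so the same text can be written to a dump fd or handed back to callers in a vector. Names here are placeholders, not TagMonitor's API, and the event payload is simplified to an int.

#include <cstdio>
#include <string>
#include <vector>

static std::vector<std::string> formatEvents(const std::vector<int> &events) {
    std::vector<std::string> out;
    for (int e : events) {
        out.push_back("event " + std::to_string(e) + "\n");  // one newline-terminated line per event
    }
    return out;
}

static void dumpEvents(FILE *fd, const std::vector<int> &events) {
    for (const std::string &line : formatEvents(events)) {
        fprintf(fd, "        %s", line.c_str());  // indentation mirrors the dump code above
    }
}
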
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index f6df4b7..088d6fe 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -74,11 +74,23 @@
     // Dump current event log to the provided fd
     void dumpMonitoredMetadata(int fd);
 
-  private:
+    // Dumps the latest monitored tag events to the passed vector.
+    // NOTE: The events are appended to the vector in reverse chronological order
+    // (i.e. most recent first).
+    void getLatestMonitoredTagEvents(std::vector<std::string> &out);
 
-    static void printData(int fd, const uint8_t *data_ptr, uint32_t tag,
-            int type, int count, int indentation,
-            const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId);
+  private:
+    // Dumps monitored tag events to the passed vector without acquiring
+    // mMonitorMutex. mMonitorMutex must be acquired before calling this
+    // function.
+    void dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &out);
+
+    static String8 getEventDataString(const uint8_t *data_ptr,
+                                       uint32_t tag, int type,
+                                       int count,
+                                       int indentation,
+                                       const std::unordered_set<int32_t> &outputStreamIds,
+                                       int32_t inputStreamId);
 
     void monitorSingleMetadata(TagMonitor::eventSource source, int64_t frameNumber,
             nsecs_t timestamp, const std::string& cameraId, uint32_t tag,
diff --git a/services/mediacodec/OWNERS b/services/mediacodec/OWNERS
index c716cce..3453a76 100644
--- a/services/mediacodec/OWNERS
+++ b/services/mediacodec/OWNERS
@@ -1,2 +1,3 @@
 jeffv@google.com
-marcone@google.com
+essick@google.com
+wonsik@google.com
diff --git a/services/mediaextractor/OWNERS b/services/mediaextractor/OWNERS
index c716cce..2a779c2 100644
--- a/services/mediaextractor/OWNERS
+++ b/services/mediaextractor/OWNERS
@@ -1,2 +1,3 @@
 jeffv@google.com
-marcone@google.com
+essick@google.com
+aquilescanta@google.com
diff --git a/services/minijail/OWNERS b/services/minijail/OWNERS
index 19f4f9f..9ebf41e 100644
--- a/services/minijail/OWNERS
+++ b/services/minijail/OWNERS
@@ -1,2 +1,2 @@
 jorgelo@google.com
-marcone@google.com
+essick@google.com
diff --git a/services/oboeservice/AAudioCommandQueue.cpp b/services/oboeservice/AAudioCommandQueue.cpp
new file mode 100644
index 0000000..ddaabe8
--- /dev/null
+++ b/services/oboeservice/AAudioCommandQueue.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioCommandQueue"
+//#define LOG_NDEBUG 0
+
+#include <chrono>
+
+#include <utils/Log.h>
+
+#include "AAudioCommandQueue.h"
+
+namespace aaudio {
+
+aaudio_result_t AAudioCommandQueue::sendCommand(std::shared_ptr<AAudioCommand> command) {
+    {
+        std::scoped_lock<std::mutex> _l(mLock);
+        mCommands.push(command);
+        mWaitWorkCond.notify_one();
+    }
+
+    std::unique_lock _cl(command->lock);
+    android::base::ScopedLockAssertion lockAssertion(command->lock);
+    ALOGV("Sending command %d, wait for reply(%d) with timeout %jd",
+           command->operationCode, command->isWaitingForReply, command->timeoutNanoseconds);
+    // `isWaitingForReply` is first initialized when the command is constructed. It will be
+    // flipped when the command is completed.
+    auto timeoutExpire = std::chrono::steady_clock::now()
+            + std::chrono::nanoseconds(command->timeoutNanoseconds);
+    while (command->isWaitingForReply) {
+        if (command->conditionVariable.wait_until(_cl, timeoutExpire)
+                == std::cv_status::timeout) {
+            ALOGD("Command %d time out", command->operationCode);
+            command->result = AAUDIO_ERROR_TIMEOUT;
+            command->isWaitingForReply = false;
+        }
+    }
+    ALOGV("Command %d sent with result as %d", command->operationCode, command->result);
+    return command->result;
+}
+
+std::shared_ptr<AAudioCommand> AAudioCommandQueue::waitForCommand(int64_t timeoutNanos) {
+    std::shared_ptr<AAudioCommand> command;
+    {
+        std::unique_lock _l(mLock);
+        android::base::ScopedLockAssertion lockAssertion(mLock);
+        if (timeoutNanos >= 0) {
+            mWaitWorkCond.wait_for(_l, std::chrono::nanoseconds(timeoutNanos), [this]() {
+                android::base::ScopedLockAssertion lockAssertion(mLock);
+                return !mRunning || !mCommands.empty();
+            });
+        } else {
+            mWaitWorkCond.wait(_l, [this]() {
+                android::base::ScopedLockAssertion lockAssertion(mLock);
+                return !mRunning || !mCommands.empty();
+            });
+        }
+        if (!mCommands.empty()) {
+            command = mCommands.front();
+            mCommands.pop();
+        }
+    }
+    return command;
+}
+
+void AAudioCommandQueue::stopWaiting() {
+    std::scoped_lock<std::mutex> _l(mLock);
+    mRunning = false;
+    mWaitWorkCond.notify_one();
+}
+
+} // namespace aaudio
\ No newline at end of file
diff --git a/services/oboeservice/AAudioCommandQueue.h b/services/oboeservice/AAudioCommandQueue.h
new file mode 100644
index 0000000..5f25507
--- /dev/null
+++ b/services/oboeservice/AAudioCommandQueue.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <queue>
+
+#include <aaudio/AAudio.h>
+#include <android-base/thread_annotations.h>
+
+namespace aaudio {
+
+typedef int32_t aaudio_command_opcode;
+
+class AAudioCommandParam {
+public:
+    AAudioCommandParam() = default;
+    virtual ~AAudioCommandParam() = default;
+};
+
+class AAudioCommand {
+public:
+    explicit AAudioCommand(
+            aaudio_command_opcode opCode, std::shared_ptr<AAudioCommandParam> param = nullptr,
+            bool waitForReply = false, int64_t timeoutNanos = 0)
+            : operationCode(opCode), parameter(param), isWaitingForReply(waitForReply),
+              timeoutNanoseconds(timeoutNanos) { }
+    virtual ~AAudioCommand() = default;
+
+    std::mutex lock;
+    std::condition_variable conditionVariable;
+
+    const aaudio_command_opcode operationCode;
+    std::shared_ptr<AAudioCommandParam> parameter;
+    bool isWaitingForReply GUARDED_BY(lock);
+    const int64_t timeoutNanoseconds;
+    aaudio_result_t result GUARDED_BY(lock) = AAUDIO_OK;
+};
+
+class AAudioCommandQueue {
+public:
+    AAudioCommandQueue() = default;
+    ~AAudioCommandQueue() = default;
+
+    /**
+     * Send a command to the command queue. If the command requires a reply, this call blocks
+     * until the command completes or its timeout expires.
+     *
+     * @param command the command to send to the command queue.
+     * @return the result of sending the command, or the result of executing the command if it
+     *         needs to wait for a reply. Returns AAUDIO_ERROR_TIMEOUT if the timeout expires.
+     */
+    aaudio_result_t sendCommand(std::shared_ptr<AAudioCommand> command);
+
+    /**
+     * Wait for the next available command, or until the timeout expires.
+     *
+     * @param timeoutNanos the maximum time to wait for the next command (0 means return
+     *                     immediately in any case), negative to wait forever.
+     * @return the next available command, or nullptr if there is none.
+     */
+    std::shared_ptr<AAudioCommand> waitForCommand(int64_t timeoutNanos = -1);
+
+    /**
+     * Force the queue to stop waiting for the next command.
+     */
+    void stopWaiting();
+
+private:
+    std::mutex mLock;
+    std::condition_variable mWaitWorkCond;
+
+    std::queue<std::shared_ptr<AAudioCommand>> mCommands GUARDED_BY(mLock);
+    bool mRunning GUARDED_BY(mLock) = true;
+};
+
+} // namespace aaudio
\ No newline at end of file
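
A self-contained, std-only sketch of the producer/worker handshake the queue above implements (not the AAudio classes themselves): the sender enqueues a command and blocks on that command's own condition variable, while the worker thread pops the command, executes it, flips the reply flag, and notifies. Types and the toy "execute" step are illustrative assumptions.

#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

struct Command {
    explicit Command(int op) : opCode(op) {}
    const int opCode;
    std::mutex lock;
    std::condition_variable cv;
    bool isWaitingForReply = true;  // flipped by the worker when the command completes
    int result = 0;
};

int main() {
    std::mutex queueLock;
    std::condition_variable queueCv;
    std::queue<std::shared_ptr<Command>> commands;
    bool running = true;

    std::thread worker([&] {
        while (true) {
            std::shared_ptr<Command> cmd;
            {
                std::unique_lock<std::mutex> l(queueLock);
                queueCv.wait(l, [&] { return !running || !commands.empty(); });
                if (!running && commands.empty()) return;
                cmd = commands.front();
                commands.pop();
            }
            std::lock_guard<std::mutex> cl(cmd->lock);
            cmd->result = cmd->opCode * 2;   // "execute" the command
            cmd->isWaitingForReply = false;  // reply is ready
            cmd->cv.notify_one();
        }
    });

    auto cmd = std::make_shared<Command>(21);
    {
        std::lock_guard<std::mutex> l(queueLock);
        commands.push(cmd);        // sendCommand(): enqueue ...
        queueCv.notify_one();
    }
    {
        std::unique_lock<std::mutex> cl(cmd->lock);
        cmd->cv.wait(cl, [&] { return !cmd->isWaitingForReply; });  // ... then wait for the reply
    }
    std::cout << "result = " << cmd->result << std::endl;  // prints 42

    {
        std::lock_guard<std::mutex> l(queueLock);
        running = false;           // stopWaiting(): wake the worker so it can exit
        queueCv.notify_one();
    }
    worker.join();
    return 0;
}
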
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 35a0890..42b07ae 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -401,16 +401,17 @@
 /**
  * Get an immutable description of the data queue from the HAL.
  */
-aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(
+        AudioEndpointParcelable* parcelable)
 {
     // Gather information on the data queue based on HAL info.
     int32_t bytesPerFrame = calculateBytesPerFrame();
     int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
-    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
-    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
-    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
-    parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
+    int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+    parcelable->mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+    parcelable->mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+    parcelable->mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+    parcelable->mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
     return AAUDIO_OK;
 }
 
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 5a53885..ddfac63 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -79,7 +79,7 @@
     void onRoutingChanged(audio_port_handle_t portHandle) override;
     // ------------------------------------------------------------------------------
 
-    aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable);
+    aaudio_result_t getDownDataDescription(AudioEndpointParcelable* parcelable);
 
     int64_t getHardwareTimeOffsetNanos() const {
         return mHardwareTimeOffsetNanos;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 4ffc127..a25a791 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -34,23 +34,25 @@
 #include "AAudioService.h"
 #include "AAudioServiceEndpoint.h"
 #include "AAudioServiceStreamBase.h"
-#include "TimestampScheduler.h"
 
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
 
 using content::AttributionSourceState;
 
+static const int64_t TIMEOUT_NANOS = 3LL * 1000 * 1000 * 1000;
+
 /**
  * Base class for streams in the service.
  * @return
  */
 
 AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
-        : mTimestampThread("AATime")
+        : mCommandThread("AACommand")
         , mAtomicStreamTimestamp()
         , mAudioService(audioService) {
     mMmapClient.attributionSource = AttributionSourceState();
+    mThreadEnabled = true;
 }
 
 AAudioServiceStreamBase::~AAudioServiceStreamBase() {
@@ -70,6 +72,13 @@
                         || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED),
                         "service stream %p still open, state = %d",
                         this, getState());
+
+    // Stop the command thread before destroying.
+    if (mThreadEnabled) {
+        mThreadEnabled = false;
+        mCommandQueue.stopWaiting();
+        mCommandThread.stop();
+    }
 }
 
 std::string AAudioServiceStreamBase::dumpHeader() {
@@ -166,6 +175,16 @@
         mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
         copyFrom(*mServiceEndpoint);
     }
+
+    // Make sure this object does not get deleted before the run() method
+    // can protect it by making a strong pointer.
+    mThreadEnabled = true;
+    incStrong(nullptr); // See run() method.
+    result = mCommandThread.start(this);
+    if (result != AAUDIO_OK) {
+        decStrong(nullptr); // run() can't do it so we have to do it here.
+        goto error;
+    }
     return result;
 
 error:
@@ -174,8 +193,16 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::close() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return close_l();
+    auto command = std::make_shared<AAudioCommand>(
+            CLOSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+    aaudio_result_t result = mCommandQueue.sendCommand(command);
+
+    // Stop the command thread as the stream is closed.
+    mThreadEnabled = false;
+    mCommandQueue.stopWaiting();
+    mCommandThread.stop();
+
+    return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close_l() {
@@ -183,8 +210,7 @@
         return AAUDIO_OK;
     }
 
-    // This will call stopTimestampThread() and also stop the stream,
-    // just in case it was not already stopped.
+    // This will stop the stream, just in case it was not already stopped.
     stop_l();
 
     aaudio_result_t result = AAUDIO_OK;
@@ -224,8 +250,12 @@
  * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
  */
 aaudio_result_t AAudioServiceStreamBase::start() {
-    std::lock_guard<std::mutex> lock(mLock);
+    auto command = std::make_shared<AAudioCommand>(
+            START, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
+}
 
+aaudio_result_t AAudioServiceStreamBase::start_l() {
     const int64_t beginNs = AudioClock::getNanoseconds();
     aaudio_result_t result = AAUDIO_OK;
 
@@ -261,15 +291,6 @@
     // This should happen at the end of the start.
     sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
     setState(AAUDIO_STREAM_STATE_STARTED);
-    mThreadEnabled.store(true);
-    // Make sure this object does not get deleted before the run() method
-    // can protect it by making a strong pointer.
-    incStrong(nullptr); // See run() method.
-    result = mTimestampThread.start(this);
-    if (result != AAUDIO_OK) {
-        decStrong(nullptr); // run() can't do it so we have to do it here.
-        goto error;
-    }
 
     return result;
 
@@ -279,8 +300,9 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return pause_l();
+    auto command = std::make_shared<AAudioCommand>(
+            PAUSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause_l() {
@@ -298,12 +320,6 @@
             .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)result)
             .record(); });
 
-    result = stopTimestampThread();
-    if (result != AAUDIO_OK) {
-        disconnect_l();
-        return result;
-    }
-
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
         ALOGE("%s() has no endpoint", __func__);
@@ -322,8 +338,9 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::stop() {
-    std::lock_guard<std::mutex> lock(mLock);
-    return stop_l();
+    auto command = std::make_shared<AAudioCommand>(
+            STOP, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
 }
 
 aaudio_result_t AAudioServiceStreamBase::stop_l() {
@@ -343,12 +360,6 @@
 
     setState(AAUDIO_STREAM_STATE_STOPPING);
 
-    // Temporarily unlock because we are joining the timestamp thread and it may try
-    // to acquire mLock.
-    mLock.unlock();
-    result = stopTimestampThread();
-    mLock.lock();
-
     if (result != AAUDIO_OK) {
         disconnect_l();
         return result;
@@ -373,17 +384,13 @@
     return result;
 }
 
-aaudio_result_t AAudioServiceStreamBase::stopTimestampThread() {
-    aaudio_result_t result = AAUDIO_OK;
-    // clear flag that tells thread to loop
-    if (mThreadEnabled.exchange(false)) {
-        result = mTimestampThread.stop();
-    }
-    return result;
+aaudio_result_t AAudioServiceStreamBase::flush() {
+    auto command = std::make_shared<AAudioCommand>(
+            FLUSH, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
 }
 
-aaudio_result_t AAudioServiceStreamBase::flush() {
-    std::lock_guard<std::mutex> lock(mLock);
+aaudio_result_t AAudioServiceStreamBase::flush_l() {
     aaudio_result_t result = AAudio_isFlushAllowed(getState());
     if (result != AAUDIO_OK) {
         return result;
@@ -404,48 +411,111 @@
     return AAUDIO_OK;
 }
 
-// implement Runnable, periodically send timestamps to client
+// implement Runnable, periodically send timestamps to client and process commands from queue.
 __attribute__((no_sanitize("integer")))
 void AAudioServiceStreamBase::run() {
-    ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
+    ALOGD("%s() %s entering >>>>>>>>>>>>>> COMMANDS", __func__, getTypeText());
     // Hold onto the ref counted stream until the end.
     android::sp<AAudioServiceStreamBase> holdStream(this);
     TimestampScheduler timestampScheduler;
+    int64_t nextTime;
     // Balance the incStrong from when the thread was launched.
     holdStream->decStrong(nullptr);
 
-    timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
-    timestampScheduler.start(AudioClock::getNanoseconds());
-    int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+    // Take mLock for the lifetime of the command thread. All operations handled here
+    // must be able to run while holding the lock.
+    std::scoped_lock<std::mutex> _l(mLock);
+
     int32_t loopCount = 0;
-    aaudio_result_t result = AAUDIO_OK;
-    while(mThreadEnabled.load()) {
+    while (mThreadEnabled.load()) {
         loopCount++;
-        if (AudioClock::getNanoseconds() >= nextTime) {
-            result = sendCurrentTimestamp();
-            if (result != AAUDIO_OK) {
-                ALOGE("%s() timestamp thread got result = %d", __func__, result);
-                break;
+        int64_t timeoutNanos = -1;
+        if (isRunning()) {
+            timeoutNanos = nextTime - AudioClock::getNanoseconds();
+            timeoutNanos = std::max<int64_t>(0, timeoutNanos);
+        }
+
+        auto command = mCommandQueue.waitForCommand(timeoutNanos);
+        if (!mThreadEnabled) {
+            // Break the loop if the thread is disabled.
+            break;
+        }
+
+        if (isRunning() && AudioClock::getNanoseconds() >= nextTime) {
+            // It is time to update timestamp.
+            if (sendCurrentTimestamp_l() != AAUDIO_OK) {
+                ALOGE("Failed to send current timestamp, stop updating timestamp");
+                disconnect_l();
+            } else {
+                nextTime = timestampScheduler.nextAbsoluteTime();
             }
-            nextTime = timestampScheduler.nextAbsoluteTime();
-        } else  {
-            // Sleep until it is time to send the next timestamp.
-            // TODO Wait for a signal with a timeout so that we can stop more quickly.
-            AudioClock::sleepUntilNanoTime(nextTime);
+        }
+
+        if (command != nullptr) {
+            std::scoped_lock<std::mutex> _commandLock(command->lock);
+            switch (command->operationCode) {
+                case START:
+                    command->result = start_l();
+                    timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
+                    timestampScheduler.start(AudioClock::getNanoseconds());
+                    nextTime = timestampScheduler.nextAbsoluteTime();
+                    break;
+                case PAUSE:
+                    command->result = pause_l();
+                    break;
+                case STOP:
+                    command->result = stop_l();
+                    break;
+                case FLUSH:
+                    command->result = flush_l();
+                    break;
+                case CLOSE:
+                    command->result = close_l();
+                    break;
+                case DISCONNECT:
+                    disconnect_l();
+                    break;
+                case REGISTER_AUDIO_THREAD: {
+                    RegisterAudioThreadParam *param =
+                            (RegisterAudioThreadParam *) command->parameter.get();
+                    command->result =
+                            param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                             : registerAudioThread_l(param->mOwnerPid,
+                                                                     param->mClientThreadId,
+                                                                     param->mPriority);
+                }
+                    break;
+                case UNREGISTER_AUDIO_THREAD: {
+                    UnregisterAudioThreadParam *param =
+                            (UnregisterAudioThreadParam *) command->parameter.get();
+                    command->result =
+                            param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                             : unregisterAudioThread_l(param->mClientThreadId);
+                }
+                    break;
+                case GET_DESCRIPTION: {
+                    GetDescriptionParam *param = (GetDescriptionParam *) command->parameter.get();
+                    command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+                                                        : getDescription_l(param->mParcelable);
+                }
+                    break;
+                default:
+                    ALOGE("Invalid command op code: %d", command->operationCode);
+                    break;
+            }
+            if (command->isWaitingForReply) {
+                command->isWaitingForReply = false;
+                command->conditionVariable.notify_one();
+            }
         }
     }
-    // This was moved from the calls in stop_l() and pause_l(), which could cause a deadlock
-    // if it resulted in a call to disconnect.
-    if (result == AAUDIO_OK) {
-        (void) sendCurrentTimestamp();
-    }
-    ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< TIMESTAMPS",
+    ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< COMMANDS",
           __func__, getTypeText(), loopCount);
 }
 
 void AAudioServiceStreamBase::disconnect() {
-    std::lock_guard<std::mutex> lock(mLock);
-    disconnect_l();
+    auto command = std::make_shared<AAudioCommand>(DISCONNECT);
+    mCommandQueue.sendCommand(command);
 }
 
 void AAudioServiceStreamBase::disconnect_l() {
@@ -461,15 +531,23 @@
     }
 }
 
-aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId,
-        int priority) {
-    std::lock_guard<std::mutex> lock(mLock);
+aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId, int priority) {
+    const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+    auto command = std::make_shared<AAudioCommand>(
+            REGISTER_AUDIO_THREAD,
+            std::make_shared<RegisterAudioThreadParam>(ownerPid, clientThreadId, priority),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::registerAudioThread_l(
+        pid_t ownerPid, pid_t clientThreadId, int priority) {
     aaudio_result_t result = AAUDIO_OK;
     if (getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
         ALOGE("AAudioService::registerAudioThread(), thread already registered");
         result = AAUDIO_ERROR_INVALID_STATE;
     } else {
-        const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
         setRegisteredThread(clientThreadId);
         int err = android::requestPriority(ownerPid, clientThreadId,
                                            priority, true /* isForApp */);
@@ -483,7 +561,15 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread(pid_t clientThreadId) {
-    std::lock_guard<std::mutex> lock(mLock);
+    auto command = std::make_shared<AAudioCommand>(
+            UNREGISTER_AUDIO_THREAD,
+            std::make_shared<UnregisterAudioThreadParam>(clientThreadId),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread_l(pid_t clientThreadId) {
     aaudio_result_t result = AAUDIO_OK;
     if (getRegisteredThread() != clientThreadId) {
         ALOGE("%s(), wrong thread", __func__);
@@ -552,7 +638,7 @@
     return sendServiceEvent(AAUDIO_SERVICE_EVENT_XRUN, (int64_t) xRunCount);
 }
 
-aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
+aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp_l() {
     AAudioServiceMessage command;
     // It is not worth filling up the queue with timestamps.
     // That can cause the stream to get suspended.
@@ -562,8 +648,8 @@
     }
 
     // Send a timestamp for the clock model.
-    aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
-                                                    &command.timestamp.timestamp);
+    aaudio_result_t result = getFreeRunningPosition_l(&command.timestamp.position,
+                                                      &command.timestamp.timestamp);
     if (result == AAUDIO_OK) {
         ALOGV("%s() SERVICE  %8lld at %lld", __func__,
               (long long) command.timestamp.position,
@@ -573,8 +659,8 @@
 
         if (result == AAUDIO_OK) {
             // Send a hardware timestamp for presentation time.
-            result = getHardwareTimestamp(&command.timestamp.position,
-                                          &command.timestamp.timestamp);
+            result = getHardwareTimestamp_l(&command.timestamp.position,
+                                            &command.timestamp.timestamp);
             if (result == AAUDIO_OK) {
                 ALOGV("%s() HARDWARE %8lld at %lld", __func__,
                       (long long) command.timestamp.position,
@@ -596,7 +682,15 @@
  * used to communicate with the underlying HAL or Service.
  */
 aaudio_result_t AAudioServiceStreamBase::getDescription(AudioEndpointParcelable &parcelable) {
-    std::lock_guard<std::mutex> lock(mLock);
+    auto command = std::make_shared<AAudioCommand>(
+            GET_DESCRIPTION,
+            std::make_shared<GetDescriptionParam>(&parcelable),
+            true /*waitForReply*/,
+            TIMEOUT_NANOS);
+    return mCommandQueue.sendCommand(command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::getDescription_l(AudioEndpointParcelable* parcelable) {
     {
         std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
         if (mUpMessageQueue == nullptr) {
@@ -605,9 +699,9 @@
         }
         // Gather information on the message queue.
         mUpMessageQueue->fillParcelable(parcelable,
-                                        parcelable.mUpMessageQueueParcelable);
+                                        parcelable->mUpMessageQueueParcelable);
     }
-    return getAudioDataDescription(parcelable);
+    return getAudioDataDescription_l(parcelable);
 }
 
 void AAudioServiceStreamBase::onVolumeChanged(float volume) {
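
A short sketch of the command-loop timing used in run() above (illustrative only, not AAudio code): when the stream is running, the wait for the next command is capped at the time remaining until the next periodic timestamp; otherwise the loop waits indefinitely for a command. The helper name is hypothetical.

#include <algorithm>
#include <cstdint>

// Hypothetical helper: returns how long waitForCommand() may block, in nanoseconds.
static int64_t commandWaitTimeoutNanos(bool isRunning, int64_t nextTimestampTimeNanos,
                                       int64_t nowNanos) {
    if (!isRunning) {
        return -1;  // negative means "wait forever", matching AAudioCommandQueue's convention
    }
    // Never negative: if the timestamp deadline has already passed, poll without blocking.
    return std::max<int64_t>(0, nextTimestampTimeNanos - nowNanos);
}
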
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 976996d..aa8e8cf 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -33,8 +33,10 @@
 #include "utility/AAudioUtilities.h"
 #include "utility/AudioClock.h"
 
-#include "SharedRingBuffer.h"
+#include "AAudioCommandQueue.h"
 #include "AAudioThread.h"
+#include "SharedRingBuffer.h"
+#include "TimestampScheduler.h"
 
 namespace android {
     class AAudioService;
@@ -235,10 +237,46 @@
     aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
                          aaudio_sharing_mode_t sharingMode);
 
+    aaudio_result_t start_l() REQUIRES(mLock);
     virtual aaudio_result_t close_l() REQUIRES(mLock);
     virtual aaudio_result_t pause_l() REQUIRES(mLock);
     virtual aaudio_result_t stop_l() REQUIRES(mLock);
     void disconnect_l() REQUIRES(mLock);
+    aaudio_result_t flush_l() REQUIRES(mLock);
+
+    class RegisterAudioThreadParam : public AAudioCommandParam {
+    public:
+        RegisterAudioThreadParam(pid_t ownerPid, pid_t clientThreadId, int priority)
+                : AAudioCommandParam(), mOwnerPid(ownerPid),
+                  mClientThreadId(clientThreadId), mPriority(priority) { }
+        ~RegisterAudioThreadParam() = default;
+
+        pid_t mOwnerPid;
+        pid_t mClientThreadId;
+        int mPriority;
+    };
+    aaudio_result_t registerAudioThread_l(
+            pid_t ownerPid, pid_t clientThreadId, int priority) REQUIRES(mLock);
+
+    class UnregisterAudioThreadParam : public AAudioCommandParam {
+    public:
+        UnregisterAudioThreadParam(pid_t clientThreadId)
+                : AAudioCommandParam(), mClientThreadId(clientThreadId) { }
+        ~UnregisterAudioThreadParam() = default;
+
+        pid_t mClientThreadId;
+    };
+    aaudio_result_t unregisterAudioThread_l(pid_t clientThreadId) REQUIRES(mLock);
+
+    class GetDescriptionParam : public AAudioCommandParam {
+    public:
+        GetDescriptionParam(AudioEndpointParcelable* parcelable)
+                : AAudioCommandParam(), mParcelable(parcelable) { }
+        ~GetDescriptionParam() = default;
+
+        AudioEndpointParcelable* mParcelable;
+    };
+    aaudio_result_t getDescription_l(AudioEndpointParcelable* parcelable) REQUIRES(mLock);
 
     void setState(aaudio_stream_state_t state);
 
@@ -250,7 +288,7 @@
 
     aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
 
-    aaudio_result_t sendCurrentTimestamp() EXCLUDES(mLock);
+    aaudio_result_t sendCurrentTimestamp_l() REQUIRES(mLock);
 
     aaudio_result_t sendXRunCount(int32_t xRunCount);
 
@@ -259,11 +297,13 @@
      * @param timeNanos
      * @return AAUDIO_OK or AAUDIO_ERROR_UNAVAILABLE or other negative error
      */
-    virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
+    virtual aaudio_result_t getFreeRunningPosition_l(
+            int64_t *positionFrames, int64_t *timeNanos) = 0;
 
-    virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) = 0;
+    virtual aaudio_result_t getHardwareTimestamp_l(int64_t *positionFrames, int64_t *timeNanos) = 0;
 
-    virtual aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) = 0;
+    virtual aaudio_result_t getAudioDataDescription_l(AudioEndpointParcelable* parcelable) = 0;
+
 
     aaudio_stream_state_t   mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
@@ -279,9 +319,20 @@
     std::mutex              mUpMessageQueueLock;
     std::shared_ptr<SharedRingBuffer> mUpMessageQueue;
 
-    AAudioThread            mTimestampThread;
-    // This is used by one thread to tell another thread to exit. So it must be atomic.
+    enum : int32_t {
+        START,
+        PAUSE,
+        STOP,
+        FLUSH,
+        CLOSE,
+        DISCONNECT,
+        REGISTER_AUDIO_THREAD,
+        UNREGISTER_AUDIO_THREAD,
+        GET_DESCRIPTION,
+    };
+    AAudioThread            mCommandThread;
     std::atomic<bool>       mThreadEnabled{false};
+    AAudioCommandQueue      mCommandQueue;
 
     int32_t                 mFramesPerBurst = 0;
     android::AudioClient    mMmapClient; // set in open, used in MMAP start()
@@ -336,6 +387,8 @@
 protected:
     // Locking order is important.
     // Acquire mLock before acquiring AAudioServiceEndpoint::mLockStreams
+    // The lock will be held by the command thread. All operations needing the lock must run from
+    // the command thread.
     std::mutex              mLock; // Prevent start/stop/close etcetera from colliding
 };
 
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 57dc1ab..05b7f7d 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -141,7 +141,7 @@
 }
 
 // Get free-running DSP or DMA hardware position from the HAL.
-aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
+aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition_l(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
@@ -158,14 +158,14 @@
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
     } else if (result != AAUDIO_ERROR_UNAVAILABLE) {
-        disconnect();
+        disconnect_l();
     }
     return result;
 }
 
 // Get timestamp from presentation position.
 // If it fails, get timestamp that was written by getFreeRunningPosition()
-aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
+aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp_l(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
 
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
@@ -198,8 +198,8 @@
 }
 
 // Get an immutable description of the data queue from the HAL.
-aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription(
-        AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription_l(
+        AudioEndpointParcelable* parcelable)
 {
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
     if (endpoint == nullptr) {
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index 667465a..28da120 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -71,12 +71,14 @@
 
     aaudio_result_t stop_l() REQUIRES(mLock) override;
 
-    aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
+    aaudio_result_t getAudioDataDescription_l(
+            AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
 
-    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames,
-            int64_t *timeNanos) EXCLUDES(mLock) override;
+    aaudio_result_t getFreeRunningPosition_l(int64_t *positionFrames,
+            int64_t *timeNanos) REQUIRES(mLock) override;
 
-    aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getHardwareTimestamp_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
     /**
      * Device specific startup.
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index ad06d97..04fcd6d 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -211,8 +211,8 @@
 /**
  * Get an immutable description of the data queue created by this service.
  */
-aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription(
-        AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription_l(
+        AudioEndpointParcelable* parcelable)
 {
     std::lock_guard<std::mutex> lock(audioDataQueueLock);
     if (mAudioDataQueue == nullptr) {
@@ -221,8 +221,8 @@
     }
     // Gather information on the data queue.
     mAudioDataQueue->fillParcelable(parcelable,
-                                    parcelable.mDownDataQueueParcelable);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
+                                    parcelable->mDownDataQueueParcelable);
+    parcelable->mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
     return AAUDIO_OK;
 }
 
@@ -231,8 +231,8 @@
 }
 
 // Get timestamp that was written by mixer or distributor.
-aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
-                                                                  int64_t *timeNanos) {
+aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition_l(int64_t *positionFrames,
+                                                                    int64_t *timeNanos) {
     // TODO Get presentation timestamp from the HAL
     if (mAtomicStreamTimestamp.isValid()) {
         Timestamp timestamp = mAtomicStreamTimestamp.read();
@@ -245,8 +245,8 @@
 }
 
 // Get timestamp from lower level service.
-aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames,
-                                                                int64_t *timeNanos) {
+aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp_l(int64_t *positionFrames,
+                                                                  int64_t *timeNanos) {
 
     int64_t position = 0;
     sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index 4fae5b4..78f9787 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -88,11 +88,14 @@
 
 protected:
 
-    aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
+    aaudio_result_t getAudioDataDescription_l(
+            AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
 
-    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getFreeRunningPosition_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
-    aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+    aaudio_result_t getHardwareTimestamp_l(
+            int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
 
     /**
      * @param requestedCapacityFrames
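
The AudioEndpointParcelable& -> AudioEndpointParcelable* change in these hunks (and in SharedRingBuffer below) appears to follow the common out-parameter-by-pointer convention: the call site has to write &parcelable, and the callee can reject nullptr explicitly. A minimal sketch under that reading, with hypothetical types and values:

    #include <cstdint>
    #include <cstdio>

    struct DescriptionSketch {                 // hypothetical stand-in for the parcelable
        int32_t framesPerBurst = 0;
    };

    // The reference form, describe(DescriptionSketch &out), hides the write at the call site;
    // the pointer form makes it explicit and allows a null check.
    bool describe(DescriptionSketch* out) {
        if (out == nullptr) {
            return false;                      // explicit guard; not expressible with a reference
        }
        out->framesPerBurst = 192;             // illustrative value only
        return true;
    }

    int main() {
        DescriptionSketch description;
        if (describe(&description)) {          // the '&' signals that description is modified
            std::printf("framesPerBurst = %d\n", description.framesPerBurst);
        }
        return 0;
    }
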
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
index 68496ac..549fa59 100644
--- a/services/oboeservice/AAudioThread.cpp
+++ b/services/oboeservice/AAudioThread.cpp
@@ -16,9 +16,10 @@
 
 #define LOG_TAG "AAudioThread"
 //#define LOG_NDEBUG 0
-#include <utils/Log.h>
 
-#include <pthread.h>
+#include <system_error>
+
+#include <utils/Log.h>
 
 #include <aaudio/AAudio.h>
 #include <utility/AAudioUtilities.h>
@@ -38,7 +39,7 @@
 }
 
 AAudioThread::~AAudioThread() {
-    ALOGE_IF(pthread_equal(pthread_self(), mThread),
+    ALOGE_IF(mThread.get_id() == std::this_thread::get_id(),
             "%s() destructor running in thread", __func__);
     ALOGE_IF(mHasThread, "%s() thread never joined", __func__);
 }
@@ -60,32 +61,16 @@
     }
 }
 
-// This is the entry point for the new thread created by createThread_l().
-// It converts the 'C' function call to a C++ method call.
-static void * AAudioThread_internalThreadProc(void *arg) {
-    AAudioThread *aaudioThread = (AAudioThread *) arg;
-    aaudioThread->dispatch();
-    return nullptr;
-}
-
 aaudio_result_t AAudioThread::start(Runnable *runnable) {
     if (mHasThread) {
         ALOGE("start() - mHasThread already true");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // mRunnable will be read by the new thread when it starts.
-    // pthread_create() forces a memory synchronization so mRunnable does not need to be atomic.
+    // mRunnable is read by the new thread; std::thread construction synchronizes with its start.
     mRunnable = runnable;
-    int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
-    if (err != 0) {
-        ALOGE("start() - pthread_create() returned %d %s", err, strerror(err));
-        return AAudioConvert_androidToAAudioResult(-err);
-    } else {
-        int err = pthread_setname_np(mThread, mName);
-        ALOGW_IF((err != 0), "Could not set name of AAudioThread. err = %d", err);
-        mHasThread = true;
-        return AAUDIO_OK;
-    }
+    mHasThread = true;
+    mThread = std::thread(&AAudioThread::dispatch, this);
+    return AAUDIO_OK;
 }
 
 aaudio_result_t AAudioThread::stop() {
@@ -93,18 +78,18 @@
         ALOGE("stop() but no thread running");
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Check to see if the thread is trying to stop itself.
-    if (pthread_equal(pthread_self(), mThread)) {
-        ALOGE("%s() attempt to pthread_join() from launched thread!", __func__);
-        return AAUDIO_ERROR_INTERNAL;
-    }
 
-    int err = pthread_join(mThread, nullptr);
-    if (err != 0) {
-        ALOGE("stop() - pthread_join() returned %d %s", err, strerror(err));
-        return AAudioConvert_androidToAAudioResult(-err);
-    } else {
+    if (mThread.get_id() == std::this_thread::get_id()) {
+        // A thread must not join itself.
+        ALOGE("%s() attempt to join() from launched thread!", __func__);
+        return AAUDIO_ERROR_INTERNAL;
+    } else if (mThread.joinable()) {
+        // Double-check that the thread is joinable to avoid an exception from join().
+        mThread.join();
         mHasThread = false;
         return AAUDIO_OK;
+    } else {
+        ALOGE("%s() the thread is not joinable", __func__);
+        return AAUDIO_ERROR_INTERNAL;
     }
 }
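
The AAudioThread change above swaps the pthread_create()/pthread_join() pair for a std::thread that runs a member function directly. A minimal, self-contained sketch of the same start/stop shape, with a hypothetical class name and error handling reduced to booleans:

    #include <cstdio>
    #include <thread>

    class ThreadSketch {
    public:
        bool start() {
            if (mThread.joinable()) {
                return false;                   // already running
            }
            mThread = std::thread(&ThreadSketch::run, this);
            return true;
        }

        bool stop() {
            if (mThread.get_id() == std::this_thread::get_id()) {
                return false;                   // a thread must not join itself
            }
            if (!mThread.joinable()) {
                return false;                   // never started, or already joined
            }
            mThread.join();
            return true;
        }

    private:
        void run() {
            std::printf("worker running\n");
        }

        std::thread mThread;                    // default-constructed: represents "no thread"
    };

    int main() {
        ThreadSketch t;
        t.start();
        t.stop();
        return 0;
    }
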
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index 08a8a98..b2774e0 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -18,7 +18,7 @@
 #define AAUDIO_THREAD_H
 
 #include <atomic>
-#include <pthread.h>
+#include <thread>
 
 #include <aaudio/AAudio.h>
 
@@ -37,7 +37,6 @@
 
 /**
  * Abstraction for a host dependent thread.
- * TODO Consider using Android "Thread" class or std::thread instead.
  */
 class AAudioThread
 {
@@ -73,7 +72,7 @@
 
     Runnable    *mRunnable = nullptr;
     bool         mHasThread = false;
-    pthread_t    mThread = {};
+    std::thread  mThread;
 
     static std::atomic<uint32_t> mNextThreadIndex;
     char         mName[16]; // max length for a pthread_name
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 3563d66..20a4c34 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -27,6 +27,7 @@
 
     srcs: [
         "AAudioClientTracker.cpp",
+        "AAudioCommandQueue.cpp",
         "AAudioEndpointManager.cpp",
         "AAudioMixer.cpp",
         "AAudioService.cpp",
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index c1d4e16..fd2a454 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -85,9 +85,9 @@
     return AAUDIO_OK;
 }
 
-void SharedRingBuffer::fillParcelable(AudioEndpointParcelable &endpointParcelable,
+void SharedRingBuffer::fillParcelable(AudioEndpointParcelable* endpointParcelable,
                     RingBufferParcelable &ringBufferParcelable) {
-    int fdIndex = endpointParcelable.addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
+    int fdIndex = endpointParcelable->addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
     ringBufferParcelable.setupMemory(fdIndex,
                                      SHARED_RINGBUFFER_DATA_OFFSET,
                                      mDataMemorySizeInBytes,
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index c3a9bb7..cff1261 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -45,7 +45,7 @@
 
     aaudio_result_t allocate(android::fifo_frames_t bytesPerFrame, android::fifo_frames_t capacityInFrames);
 
-    void fillParcelable(AudioEndpointParcelable &endpointParcelable,
+    void fillParcelable(AudioEndpointParcelable* endpointParcelable,
                         RingBufferParcelable &ringBufferParcelable);
 
     /**